diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 8127a08e366d8f96b05f0dacf71cd55634f68a1d..d10bcca6c3fb8ceaa280fa0a62234cff7eeee52c 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1559,7 +1559,8 @@ What:		/sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
 KernelVersion:	4.3
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Raw (unscaled no offset etc.) percentage reading of a substance.
+		Raw (unscaled no offset etc.) reading of a substance. Units
+		after application of scale and offset are percents.
 
 What:		/sys/bus/iio/devices/iio:deviceX/in_resistance_raw
 What:		/sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw
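The reworded description relies on the usual IIO convention that the value in the
documented unit is (raw + offset) * scale. A minimal shell sketch of how userspace
could derive the percentage; the device index and the sibling scale/offset attribute
names are assumptions and vary by driver:

    # Illustrative only: iio:device0 and the attribute names are hypothetical.
    cd /sys/bus/iio/devices/iio:device0
    raw=$(cat in_concentration_voc_raw)
    scale=$(cat in_concentration_scale)
    offset=$(cat in_concentration_offset 2>/dev/null || echo 0)
    # Per the IIO ABI: value-in-unit = (raw + offset) * scale; the unit here is percent.
    echo "$raw $offset $scale" | awk '{ printf "%.6f %%\n", ($1 + $2) * $3 }'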
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3802e753e03dd1565dd12c0faa187c4688976db2..80c2f0b25047671fc22c03000a9405a7d954a2fc 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -494,10 +494,14 @@
 			cut the overhead, others just disable the usage. So
 			only cgroup_disable=memory is actually worthy}
 
-	cgroup_no_v1=	[KNL] Disable one, multiple, all cgroup controllers in v1
-			Format: { controller[,controller...] | "all" }
+	cgroup_no_v1=	[KNL] Disable cgroup controllers and named hierarchies in v1
+			Format: { { controller | "all" | "named" }
+			  [,{ controller | "all" | "named" }...] }
 			Like cgroup_disable, but only applies to cgroup v1;
 			the blacklisted controllers remain available in cgroup2.
+			"all" blacklists all controllers and "named" disables
+			named mounts. Specifying both "all" and "named" disables
+			all v1 hierarchies.
 
 	cgroup.memory=	[KNL] Pass options to the cgroup memory controller.
 			Format:
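A hedged illustration of the extended boot-parameter format (the controller name is
an example; the available set depends on the kernel configuration):

    cgroup_no_v1=memory          # keep only the memory controller out of cgroup v1
    cgroup_no_v1=memory,named    # also refuse named (mount-by-name) v1 hierarchies
    cgroup_no_v1=all,named       # disable every v1 hierarchy, controller-bound or named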
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
index 70e180e6b93dcc74d25fb6133bfaeb018166b870..9f3e5dc311840c385d04fee9007e0bbdf7afb8f6 100644
--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -250,7 +250,7 @@ High-level taskfile hooks
 
 ::
 
-	void (*qc_prep) (struct ata_queued_cmd *qc);
+	enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
 	int (*qc_issue) (struct ata_queued_cmd *qc);
diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt
index 71b63c2b98410cd9d657b9876897fcdb65189076..a8f1a58e36922e8ae17b4ffa0ea7caf918f858e5 100644
--- a/Documentation/filesystems/affs.txt
+++ b/Documentation/filesystems/affs.txt
@@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
 
   - R maps to r for user, group and others. On directories, R implies x.
 
-  - If both W and D are allowed, w will be set.
+  - W maps to w.
 
   - E maps to x.
 
-  - H and P are always retained and ignored under Linux.
+  - D is ignored.
 
-  - A is always reset when a file is written to.
+  - H, S and P are always retained and ignored under Linux.
+
+  - A is cleared when a file is written to.
 
 User id and group id will be used unless set[gu]id are given as mount
 options. Since most of the Amiga file systems are single user systems
@@ -111,11 +113,13 @@ Linux -> Amiga:
 
 The Linux rwxrwxrwx file mode is handled as follows:
 
-  - r permission will set R for user, group and others.
+  - r permission will allow R for user, group and others.
+
+  - w permission will allow W for user, group and others.
 
-  - w permission will set W and D for user, group and others.
+  - x permission of the user will allow E for plain files.
 
-  - x permission of the user will set E for plain files.
+  - D will be allowed for user, group and others.
 
   - All other flags (suid, sgid, ...) are ignored and will not
     be retained.
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c776b6eee969f35f6a64f72c514446b5ca53e805
--- /dev/null
+++ b/Documentation/kbuild/llvm.rst
@@ -0,0 +1,87 @@
+==============================
+Building Linux with Clang/LLVM
+==============================
+
+This document covers how to build the Linux kernel with Clang and LLVM
+utilities.
+
+About
+-----
+
+The Linux kernel has always traditionally been compiled with GNU toolchains
+such as GCC and binutils. Ongoing work has allowed for `Clang
+<https://clang.llvm.org/>`_ and `LLVM <https://llvm.org/>`_ utilities to be
+used as viable substitutes. Distributions such as `Android
+<https://www.android.com/>`_, `ChromeOS
+<https://www.chromium.org/chromium-os>`_, and `OpenMandriva
+<https://www.openmandriva.org/>`_ use Clang built kernels. `LLVM is a
+collection of toolchain components implemented in terms of C++ objects
+<https://www.aosabook.org/en/llvm.html>`_. Clang is a front-end to LLVM that
+supports C and the GNU C extensions required by the kernel, and is pronounced
+"klang," not "see-lang."
+
+Clang
+-----
+
+The compiler used can be swapped out via `CC=` command line argument to `make`.
+`CC=` should be set when selecting a config and during a build.
+
+	make CC=clang defconfig
+
+	make CC=clang
+
+Cross Compiling
+---------------
+
+A single Clang compiler binary will typically contain all supported backends,
+which can help simplify cross compiling.
+
+	ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang
+
+`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead
+`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For
+example:
+
+	clang --target aarch64-linux-gnu foo.c
+
+LLVM Utilities
+--------------
+
+LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1`
+to enable them.
+
+	make LLVM=1
+
+They can be enabled individually. The full list of the parameters:
+
+	make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\
+	  OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\
+	  READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\
+	  HOSTLD=ld.lld
+
+Currently, the integrated assembler is disabled by default. You can pass
+`LLVM_IAS=1` to enable it.
+
+Getting Help
+------------
+
+- `Website <https://clangbuiltlinux.github.io/>`_
+- `Mailing List <https://groups.google.com/forum/#!forum/clang-built-linux>`_: <clang-built-linux@googlegroups.com>
+- `Issue Tracker <https://github.com/ClangBuiltLinux/linux/issues>`_
+- IRC: #clangbuiltlinux on chat.freenode.net
+- `Telegram <https://t.me/ClangBuiltLinux>`_: @ClangBuiltLinux
+- `Wiki <https://github.com/ClangBuiltLinux/linux/wiki>`_
+- `Beginner Bugs <https://github.com/ClangBuiltLinux/linux/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_
+
+Getting LLVM
+-------------
+
+- http://releases.llvm.org/download.html
+- https://github.com/llvm/llvm-project
+- https://llvm.org/docs/GettingStarted.html
+- https://llvm.org/docs/CMake.html
+- https://apt.llvm.org/
+- https://www.archlinux.org/packages/extra/x86_64/llvm/
+- https://github.com/ClangBuiltLinux/tc-build
+- https://github.com/ClangBuiltLinux/linux/wiki/Building-Clang-from-source
+- https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/
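Putting the knobs documented above together, a plausible end-to-end arm64 cross build
with the full LLVM tool set and the integrated assembler could look like the sketch
below; the triple prefix and job count are illustrative, not part of the patch:

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- LLVM=1 LLVM_IAS=1 defconfig
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- LLVM=1 LLVM_IAS=1 -j"$(nproc)"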
diff --git a/MAINTAINERS b/MAINTAINERS
index 8a9c4ce954ee9fd7729f95353ae6fe7cf54b2e22..5ad444893d3ec7de80deb816e3dfaff65d5cff84 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3619,6 +3619,15 @@ M:	Miguel Ojeda
 S:	Maintained
 F:	.clang-format
 
+CLANG/LLVM BUILD SUPPORT
+L:	clang-built-linux@googlegroups.com
+W:	https://clangbuiltlinux.github.io/
+B:	https://github.com/ClangBuiltLinux/linux/issues
+C:	irc://chat.freenode.net/clangbuiltlinux
+S:	Supported
+K:	\b(?i:clang|llvm)\b
+F:	Documentation/kbuild/llvm.rst
+
 CLEANCACHE API
 M:	Konrad Rzeszutek Wilk
 L:	linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 77b37e0ba4c07aa8a193a8c711409dc3422b2d49..8201f9cc9af4a19fe64cb0c571d2c4a3362b6352 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 136
+SUBLEVEL = 152
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -358,8 +358,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
 HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
 HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
-HOSTCC       = gcc
-HOSTCXX      = g++
+ifneq ($(LLVM),)
+HOSTCC	= clang
+HOSTCXX	= clang++
+else
+HOSTCC	= gcc
+HOSTCXX	= g++
+endif
 KBUILD_HOSTCFLAGS   := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
 		-fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
 		$(HOSTCFLAGS)
@@ -368,15 +373,28 @@ KBUILD_HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
 KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
 
 # Make variables (CC, etc...)
-AS		= $(CROSS_COMPILE)as
-LD		= $(CROSS_COMPILE)ld
-REAL_CC		= $(CROSS_COMPILE)gcc
 CPP		= $(CC) -E
+ifneq ($(LLVM),)
+CC		= clang
+LD		= ld.lld
+AR		= llvm-ar
+NM		= llvm-nm
+OBJCOPY		= llvm-objcopy
+OBJDUMP		= llvm-objdump
+READELF		= llvm-readelf
+OBJSIZE		= llvm-size
+STRIP		= llvm-strip
+else
+REAL_CC		= $(CROSS_COMPILE)gcc
+LD		= $(CROSS_COMPILE)ld
 AR		= $(CROSS_COMPILE)ar
 NM		= $(CROSS_COMPILE)nm
-STRIP		= $(CROSS_COMPILE)strip
 OBJCOPY		= $(CROSS_COMPILE)objcopy
 OBJDUMP		= $(CROSS_COMPILE)objdump
+READELF		= $(CROSS_COMPILE)readelf
+OBJSIZE		= $(CROSS_COMPILE)size
+STRIP		= $(CROSS_COMPILE)strip
+endif
 LEX		= flex
 YACC		= bison
 AWK		= awk
@@ -436,8 +454,8 @@ KBUILD_LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
-export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
+export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
 export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
 export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
@@ -499,7 +517,9 @@ endif
 ifneq ($(GCC_TOOLCHAIN),)
 CLANG_FLAGS	+= --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
+ifneq ($(LLVM_IAS),1)
 CLANG_FLAGS	+= -no-integrated-as
+endif
 CLANG_FLAGS	+= $(call cc-option, -Wno-misleading-indentation)
 CLANG_FLAGS	+= $(call cc-option, -Wno-bool-operation)
 CLANG_FLAGS	+= -Werror=unknown-warning-option
@@ -761,9 +781,18 @@ KBUILD_CFLAGS	+= -fomit-frame-pointer
 endif
 endif
 
-# Initialize all stack variables with a pattern, if desired.
-ifdef CONFIG_INIT_STACK_ALL
-KBUILD_CFLAGS	+= -ftrivial-auto-var-init=pattern
+# Initialize all stack variables with a 0xAA pattern.
+ifdef CONFIG_INIT_STACK_ALL_PATTERN
+KBUILD_CFLAGS	+= -ftrivial-auto-var-init=pattern
+endif
+
+# Initialize all stack variables with a zero value.
+ifdef CONFIG_INIT_STACK_ALL_ZERO
+# Future support for zero initialization is still being debated, see
+# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being
+# renamed or dropped.
+KBUILD_CFLAGS	+= -ftrivial-auto-var-init=zero
+KBUILD_CFLAGS	+= -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
 endif
 
 KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
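For reference, the flag pair added under CONFIG_INIT_STACK_ALL_ZERO can be exercised
on a standalone file to see its effect. This is only a sketch: the file name is
hypothetical, and as the Makefile comment above notes, the second flag may be renamed
or dropped by future Clang releases:

    # foo.c contains:  int f(void) { int x; return x; }
    clang -O2 -S -o - foo.c \
        -ftrivial-auto-var-init=zero \
        -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
    # With both flags the emitted f() returns 0; without them the value of x is unspecified.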
diff --git a/android/abi_gki_aarch64 b/android/abi_gki_aarch64
index 9f00353c4e5519df4f5b93347764af1c71ca6041..7bb7e004c3de16849ca6722708ac8f6cadc80098 100644
--- a/android/abi_gki_aarch64
+++ b/android/abi_gki_aarch64
@@ -1,4 +1,4 @@
-[abi_whitelist]
+[abi_symbol_list]
 # commonly used symbols
   __cfi_slowpath
   __const_udelay
diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index 90d96578660afd74f4c33546e1c2f5119da50d4d..b81396432ec10895a5f9750d0fa963855d456b28 100644
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml
[The remainder of the patch is a very large regeneration of the ABI XML
representation in android/abi_gki_aarch64.xml. Only the hunk markers of that
portion survived extraction; the XML element content was lost and is therefore
not reproduced here.]
diff --git a/android/abi_gki_aarch64_cuttlefish b/android/abi_gki_aarch64_cuttlefish index d8dd477a8d31895cf182ce8e8597d5ff9c0aec54..01a2267410a866739fc9f8c4db5874ba3caf9d28 100644 --- a/android/abi_gki_aarch64_cuttlefish +++ b/android/abi_gki_aarch64_cuttlefish @@ -1,4 +1,4 @@ -[abi_whitelist] +[abi_symbol_list] # commonly used symbols add_wait_queue alloc_etherdev_mqs diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 6cc4d398a8f60b1fde5e5712110de2fd038d3af9..88d3768d176df80ae2c0a8e8b09d8ee66c6a917d 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1,4 +1,4 @@ -[abi_whitelist] +[abi_symbol_list] # commonly used symbols add_timer add_uevent_var @@ -1571,6 +1571,7 @@ set_page_dirty_lock sg_alloc_table_from_pages sysfs_remove_files + __tracepoint_gpu_mem_total unmapped_area_topdown unregister_shrinker vm_insert_page @@ -1910,6 +1911,7 @@ cpuidle_unregister_driver cpu_pm_enter cpu_pm_exit + get_next_event_cpu param_get_bool param_get_uint pending_ipi @@ -2691,6 +2693,7 @@ __usb_create_hcd usb_disabled usb_hcd_is_primary_hcd + usb_hcd_platform_shutdown usb_put_hcd usb_remove_hcd xhci_gen_setup diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index eb09d5aee9106c04da0635bc036c5fa98561bec8..0bba9e991189d37f93ebe759303ee3cc883c2624 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -507,10 +507,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) } #endif -#define ioread16be(p) be16_to_cpu(ioread16(p)) -#define ioread32be(p) be32_to_cpu(ioread32(p)) -#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) -#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) +#define ioread16be(p) swab16(ioread16(p)) +#define ioread32be(p) swab32(ioread32(p)) +#define iowrite16be(v,p) iowrite16(swab16(v), (p)) +#define iowrite32be(v,p) iowrite32(swab32(v), (p)) #define inb_p inb #define inw_p inw diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index d131c54acd3ec07e43499d1cc46522281b6edbf2..f6b6e3c9ca8aa78da11358e322ed274a3e1c51f0 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -83,6 +83,8 @@ arcpct: pct { compatible = "snps,archs-pct"; + interrupt-parent = <&cpu_intc>; + interrupts = <20>; }; /* TIMER0 with interrupt for clockevent */ @@ -173,7 +175,7 @@ reg = <0x8000 0x2000>; interrupts = <10>; interrupt-names = "macirq"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; snps,pbl = <32>; snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; @@ -191,7 +193,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; - phy0: ethernet-phy@0 { + phy0: ethernet-phy@0 { /* Micrel KSZ9031 */ reg = <0>; ti,rx-internal-delay = ; ti,tx-internal-delay = ; diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index 4f6a1673b3a6eaacc80473108dec4cc8c1e4236c..ddfca2c3357a0bc4b2effb6181c74a7238ad666c 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -43,7 +43,6 @@ #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define
CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 1f5a5ffe7fcf84b5da64bc74747c50384f5bfeea..c762004572eff13b1de14e8ba5aea1f35d86ef6f 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -120,7 +120,7 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. -KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ +KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \ sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) @@ -166,7 +166,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S # The .data section is already discarded by the linker script so no need # to bother about it here. check_for_bad_syms = \ -bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ +bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ [ -z "$$bad_syms" ] || \ ( echo "following symbols must have non local/private scope:" >&2; \ echo "$$bad_syms" >&2; rm -f $@; false ) diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index e35398cc60a0602b3b6b175ae37134ae2641adc9..dd71ab08136bebdb7afcb656ab2e7ea132bfe23a 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -217,7 +217,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 273a3160457988d4396d2e5bfb8e7204d21ab795..b395cb195db21babdca16df5cd5ea996c1adbcc3 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -274,7 +274,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index a678fb7c9e3b28a2635de242968de79f72c246d4..c91716d5980c38de8938983db3459bb2a9f4e7eb 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -445,7 +445,7 @@ }; spi@18029200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18029200 0x184>, <0x18029000 0x124>, <0x1811b408 0x004>, diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi index 3e39b9a1f35d02b9d87aa9d0b9840a65b75157bc..0093548d50ff8075b513f2c6a5871b583afc6951 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi @@ -55,6 +55,8 @@ &mcbsp2 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; }; &charger { diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 86c5644f558cb248f8083103305c9c71308ed334..032e8dde138178abf73821e33c52fbd5966b8e51 100644 --- 
a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -84,6 +84,8 @@ }; &mcbsp2 { + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; status = "okay"; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 074b4ec520c638a8f5b1e5c63adf5825b150b247..b66b2bd1aa856d64075b14f940d5f3275310a1fb 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -168,7 +168,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0x0 0x1550000 0x0 0x10000>, - <0x0 0x40000000 0x0 0x40000000>; + <0x0 0x40000000 0x0 0x20000000>; reg-names = "QuadSPI", "QuadSPI-memory"; interrupts = ; clock-names = "qspi_en", "qspi"; @@ -609,7 +609,7 @@ fsl,tmr-prsc = <2>; fsl,tmr-add = <0xaaaaaaab>; fsl,tmr-fiper1 = <999999995>; - fsl,tmr-fiper2 = <99990>; + fsl,tmr-fiper2 = <999999995>; fsl,max-adj = <499999999>; }; diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts index 6b2f3a4fd13d646c35e48fc46a7efa31ab42035e..7802ce842a7360a354726a57250bccd9877b858d 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts @@ -339,7 +339,7 @@ reg = <0x20>; remote = <&vin1>; - port { + ports { #address-cells = <1>; #size-cells = <0>; @@ -399,7 +399,7 @@ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; default-input = <0>; - port { + ports { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index ba5657574d9bb43dadecb26dcbee350f2a6501c1..4b1c8bec2de35af290ae8ade518eecfa4d9f05c4 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -791,7 +791,7 @@ timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; - reg = <0xffd01000 0x100>; + reg = <0xffd00100 0x100>; clocks = <&l4_sys_free_clk>; clock-names = "timer"; }; diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index d392794d9c139f139fecfa6938c3ac5cdb69bed5..de81e8b4afde9179707ed1e65bb8889166ec586a 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi @@ -532,7 +532,7 @@ }; ocotp: ocotp@400a5000 { - compatible = "fsl,vf610-ocotp"; + compatible = "fsl,vf610-ocotp", "syscon"; reg = <0x400a5000 0x1000>; clocks = <&clks VF610_CLK_OCOTP>; }; diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 7d2ca035d6c8fe58235e2b0ad3e3330c8efcf0c1..11d4ff9f3e4dff3e0a343fbae0d132da19537177 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -216,7 +216,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } @@ -248,16 +248,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_IL; } -static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) +static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; } -static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) { return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + 
return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_FSC; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c9128bb187f9ad051dfe448b7d6f0d35b0d7c1e3..471859cbfe0bbe5462db401f621f4c7f71288877 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -234,7 +234,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool blockable); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index a89b4076cde4769db52d0ec3fbc83aed9346361e..72821b4721addd78e917b607e6b52b7377909816 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 1d5fbf1d1c675770dde85c75912b692827375e1c..8a8470d36c6596487ee93f48122a81d1a6f2a3a1 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -688,6 +688,12 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -747,16 +753,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. */ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!is_default_overflow_handler(wp)) + goto unlock; +step: + enable_single_step(wp, instruction_pointer(regs)); unlock: rcu_read_unlock(); } diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index a56e7c856ab5648995888ae5f47c5d1ab23b08bf..d23ab9ec130a34afb9b4e8941dd9f47df760772b 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -21,6 +21,19 @@ * A simple function epilogue looks like this: * ldm sp, {fp, sp, pc} * + * When compiled with clang, pc and sp are not pushed. 
A simple function + * prologue looks like this when built with clang: + * + * stmdb {..., fp, lr} + * add fp, sp, #x + * sub sp, sp, #y + * + * A simple function epilogue looks like this when built with clang: + * + * sub sp, fp, #x + * ldm {..., fp, pc} + * + * * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. */ @@ -33,6 +46,16 @@ int notrace unwind_frame(struct stackframe *frame) low = frame->sp; high = ALIGN(low, THREAD_SIZE); +#ifdef CONFIG_CC_IS_CLANG + /* check current frame pointer is within bounds */ + if (fp < low + 4 || fp > high - 4) + return -EINVAL; + + frame->sp = frame->fp; + frame->fp = *(unsigned long *)(fp); + frame->pc = frame->lr; + frame->lr = *(unsigned long *)(fp + 4); +#else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) return -EINVAL; @@ -41,6 +64,7 @@ int notrace unwind_frame(struct stackframe *frame) frame->fp = *(unsigned long *)(fp - 12); frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); +#endif return 0; } @@ -91,6 +115,8 @@ static int save_trace(struct stackframe *frame, void *d) return 0; regs = (struct pt_regs *)frame->sp; + if ((unsigned long)®s[1] > ALIGN(frame->sp, THREAD_SIZE)) + return 0; trace->entries[trace->nr_entries++] = regs->ARM_pc; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 027bbd5060756620d8f9048ba4d97ab8a5d3037e..8e01443becc296ac609566feb6de18c7e94bf4e7 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -68,14 +68,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { + unsigned long end = frame + 4 + sizeof(struct pt_regs); + #ifdef CONFIG_KALLSYMS printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif - if (in_entry_text(from)) - dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) + dump_mem("", "Exception stack", frame + 4, end); } void dump_backtrace_stm(u32 *stack, u32 instruction) diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index e2e4df3d11e53dfc38d8456340efc6d109c417a6..21bfe9b6e16a1ef93eda6377b595f8c71bd4c3c9 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -542,13 +542,13 @@ static void __init at91_pm_sram_init(void) sram_pool = gen_pool_get(&pdev->dev, NULL); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); - return; + goto out_put_device; } sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz); if (!sram_base) { pr_warn("%s: unable to alloc sram!\n", __func__); - return; + goto out_put_device; } sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); @@ -556,12 +556,17 @@ static void __init at91_pm_sram_init(void) at91_pm_suspend_in_sram_sz, false); if (!at91_suspend_sram_fn) { pr_warn("SRAM: Could not map\n"); - return; + goto out_put_device; } /* Copy the pm suspend handler to SRAM */ at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn, &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); + return; + +out_put_device: + put_device(&pdev->dev); + return; } static bool __init at91_is_pm_mode_active(int pm_mode) diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c index 
d4866788702cb87a98ccf144a073b2bd2d03265d..b782294ee30bc2ecba40c9e475c822e3da1cee2b 100644 --- a/arch/arm/mach-socfpga/pm.c +++ b/arch/arm/mach-socfpga/pm.c @@ -60,14 +60,14 @@ static int socfpga_setup_ocram_self_refresh(void) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -78,7 +78,7 @@ static int socfpga_setup_ocram_self_refresh(void) if (!suspend_ocram_base) { pr_warn("%s: __arm_ioremap_exec failed!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } /* Copy the code that puts DDR in self refresh to ocram */ @@ -92,6 +92,8 @@ static int socfpga_setup_ocram_self_refresh(void) if (!socfpga_sdram_self_refresh_in_ocram) ret = -EFAULT; +put_device: + put_device(&pdev->dev); put_node: of_node_put(np); diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index f4efff9d3afbb68e6ae9d09f0b4cd3538bb66f63..1f5ec9741e6d4af568e93f1193a34844fa882ad8 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -10,12 +10,13 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector ccflags-y += -DDISABLE_BRANCH_PROFILING -VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 -VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 -VDSO_LDFLAGS += -nostdlib -shared -VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) -VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id) -VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd) +ldflags-$(CONFIG_CPU_ENDIAN_BE8) := --be8 +ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ + -z max-page-size=4096 -z common-page-size=4096 \ + -nostdlib -shared $(ldflags-y) \ + $(call ld-option, --hash-style=sysv) \ + $(call ld-option, --build-id) \ + -T obj-$(CONFIG_VDSO) += vdso.o extra-$(CONFIG_VDSO) += vdso.lds @@ -37,8 +38,8 @@ KCOV_INSTRUMENT := n $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file -$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) +$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,ld) $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE $(call if_changed,vdsomunge) @@ -48,11 +49,6 @@ $(obj)/%.so: OBJCOPYFLAGS := -S $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) -# Actual build commands -quiet_cmd_vdsold = VDSO $@ - cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \ - -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ - quiet_cmd_vdsomunge = MUNGE $@ cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@ diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index faa017d4cd56b3960b33b0469590e9876c71c613..636bab51de38d4e5b9348db84be5e11ec4187657 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -151,6 +151,7 @@ }; &qspi { + status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index ea854f689fda89fe8c19526ce99c7724e41c0bd5..6bfb7bbd264afd132626db3810ca94a687743e18 100644 --- 
a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -745,7 +745,7 @@ }; qspi: spi@66470200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; reg = <0x66470200 0x184>, <0x66470000 0x124>, <0x67017408 0x004>, diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index 00dd89b92b427382c497a3ea58ea616193ee174c..d991eae5202f287174a7a4dbabf3d7bc8f60a280 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -152,6 +152,7 @@ regulator-min-microvolt = <700000>; regulator-max-microvolt = <1150000>; regulator-enable-ramp-delay = <125>; + regulator-always-on; }; ldo8_reg: LDO8 { diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index c98bcbc8dfba33386f5cf7d66fcc8a6c9480d817..53848e0e5e0c6fcbccbbfe74867369205c0869f6 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -530,6 +530,17 @@ status = "ok"; compatible = "adi,adv7533"; reg = <0x39>; + adi,dsi-lanes = <4>; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + }; + port@1 { + reg = <1>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index e80a792827edbfb6f4681f6726489d7eebcb90be..60568392d21ebf138effd170e310d69ed485ca66 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -515,7 +515,7 @@ reg = <0x39>; interrupt-parent = <&gpio1>; interrupts = <1 2>; - pd-gpio = <&gpio0 4 0>; + pd-gpios = <&gpio0 4 0>; adi,dsi-lanes = <4>; #sound-dai-cells = <0>; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts index 3ab25ad402b90c8166883b2fb0d2dbbb727be27a..6cbdd66921aab44f5b839d0c1098124c1664a874 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts @@ -19,6 +19,12 @@ model = "Globalscale Marvell ESPRESSOBin Board"; compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710"; + aliases { + ethernet0 = ð0; + serial0 = &uart0; + serial1 = &uart1; + }; + chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi index 390a2fa285145ff78f097e342e83a85e4862aa26..6754817658fa4f383779732ac4d6cdec7d1db5d6 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi @@ -516,7 +516,7 @@ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; cdc_pdm_lines_sus: pdm_lines_off { @@ -529,7 +529,7 @@ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; drive-strength = <2>; - bias-disable; + bias-pull-down; }; }; }; @@ -545,7 +545,7 @@ pins = "gpio113", "gpio114", "gpio115", "gpio116"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; @@ -573,7 +573,7 @@ pinconf { pins = "gpio110"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; @@ -599,7 +599,7 @@ pinconf { pins = "gpio116"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; ext_mclk_tlmm_lines_sus: mclk_lines_off { @@ -627,7 +627,7 @@ pins = "gpio112", "gpio117", 
"gpio118", "gpio119"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; ext_sec_tlmm_lines_sus: tlmm_lines_off { diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi index 1315972412df39e5268a91299b0f4226a380b431..23098c13ad83b33dc4af2deeb7c174b6258aa3b9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi @@ -159,7 +159,7 @@ pinctrl-0 = <&rgmii_pins>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>; + snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>; tx_delay = <0x10>; rx_delay = <0x10>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 0130b9f98c9deefca654794f941e7dec85e655c0..b155f657292bdbd06f19a59fa61cfa9ff553c810 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -101,7 +101,7 @@ vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; @@ -156,7 +156,7 @@ phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>; + snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; tx_delay = <0x10>; diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index a40178d482e958813c05a16d1f00a791f676cdc2..d79a0e2287aef3ffb320a541f6089ad93ea947f6 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -492,7 +492,7 @@ CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y -CONFIG_INIT_STACK_ALL=y +CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_CRYPTO_ADIANTUM=y CONFIG_CRYPTO_LZ4=y diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index d4ce5a0aa9fa16ab3b3861586708ce0d1f2e0107..8cb6bfe1ef507a8c4767b8212a96f92fd10befbb 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -17,11 +17,11 @@ CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y @@ -41,6 +41,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_FHANDLE is not set CONFIG_KALLSYMS_ALL=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set @@ -67,7 +68,6 @@ CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set @@ -95,7 +95,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y @@ -119,6 +118,7 @@ CONFIG_CMA_AREAS=8 CONFIG_ZSMALLOC=y CONFIG_BALANCE_ANON_FILE_RECLAIM=y CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y CONFIG_NET=y 
CONFIG_PACKET=y CONFIG_UNIX=y @@ -264,6 +264,7 @@ CONFIG_NET_ACT_SKBEDIT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y CONFIG_SOCKEV_NLMCAST=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y @@ -276,6 +277,7 @@ CONFIG_RFKILL=y CONFIG_NFC_NQ=y CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set CONFIG_REGMAP_WCD_IRQ=y CONFIG_DMA_CMA=y CONFIG_MTD=m @@ -317,6 +319,7 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_SKY2=y CONFIG_RMNET=y CONFIG_SMSC911X=y @@ -342,6 +345,7 @@ CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set # CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set @@ -456,9 +460,9 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y -CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y @@ -524,7 +528,10 @@ CONFIG_ION_POOL_AUTO_REFILL=y CONFIG_QPNP_REVID=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y CONFIG_MSM_11AD=m CONFIG_USB_BAM=y CONFIG_MDSS_PLL=y @@ -628,11 +635,13 @@ CONFIG_QCOM_L2_COUNTERS=y CONFIG_RAS=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y CONFIG_QCOM_QFPROM=y CONFIG_NVMEM_SPMI_SDAM=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_SENSORS_SSC=y CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -669,7 +678,6 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCE=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index b4ea97330263c7e69c365ac38d03aa3903e362c3..52a871b84b8e424d85a7a6294d366c576b63e5b1 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -16,12 +16,12 @@ CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y CONFIG_DEBUG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y @@ -42,6 +42,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_FHANDLE is not set CONFIG_KALLSYMS_ALL=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB_FREELIST_RANDOM=y @@ -67,7 +68,6 @@ CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set @@ -97,7 +97,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y @@ -125,6 +124,7 @@ CONFIG_CMA_AREAS=8 CONFIG_ZSMALLOC=y CONFIG_BALANCE_ANON_FILE_RECLAIM=y CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -271,6 +271,7 @@ CONFIG_NET_ACT_SKBEDIT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y CONFIG_SOCKEV_NLMCAST=y 
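Both sdm660 defconfigs above turn on CONFIG_BPF_JIT and pin it with CONFIG_BPF_JIT_ALWAYS_ON, so the BPF interpreter can no longer be selected at runtime. As a rough illustration only (this helper is not part of the patch series), the resulting JIT state can be checked from user space through the net.core.bpf_jit_enable sysctl:

    /* bpf_jit_check.c - hypothetical helper, not part of this series.
     * Reads /proc/sys/net/core/bpf_jit_enable; with BPF_JIT_ALWAYS_ON
     * the value is expected to stay non-zero.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/core/bpf_jit_enable", "r");
            int val;

            if (!f) {
                    perror("bpf_jit_enable");
                    return 1;
            }
            if (fscanf(f, "%d", &val) == 1)
                    printf("bpf_jit_enable = %d\n", val);
            fclose(f);
            return 0;
    }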
CONFIG_BT=y CONFIG_MSM_BT_POWER=y @@ -284,6 +285,7 @@ CONFIG_RFKILL=y CONFIG_NFC_NQ=y CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set CONFIG_REGMAP_WCD_IRQ=y CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y @@ -327,6 +329,7 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_RMNET=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y @@ -350,6 +353,7 @@ CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set # CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set @@ -485,9 +489,9 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y -CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y @@ -558,7 +562,10 @@ CONFIG_ION_POOL_AUTO_REFILL=y CONFIG_QPNP_REVID=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y CONFIG_MSM_11AD=m CONFIG_USB_BAM=y CONFIG_MDSS_PLL=y @@ -667,11 +674,13 @@ CONFIG_QCOM_L2_COUNTERS=y CONFIG_RAS=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y CONFIG_QCOM_QFPROM=y CONFIG_NVMEM_SPMI_SDAM=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_SENSORS_SSC=y CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -710,7 +719,6 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCE=y diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index 0b6f5a7d4027c56fcd868e3aecc0f93ef1f10432..fd11e0d70e446d5fbbc26e6dc464ac95b6ac5d3a 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -30,16 +30,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 8b284cbf8162ff14e305dfbff89b86a41683d3f5..a3b6f58d188c949d5627abc516c09163ad43e91e 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -83,11 +83,12 @@ * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate + * PTW: Take a stage2 fault if a stage1 walk steps in device memory */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ - HCR_FMO | HCR_IMO) + HCR_FMO | HCR_IMO | HCR_PTW ) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 102b5a5c47b6cb4a00040e7efb295d357b20c8a3..e3c0dba5bddea7f772336dc2c19976741bad82f6 100644 --- 
a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -87,6 +87,34 @@ extern u32 __init_stage2_translation(void); *__hyp_this_cpu_ptr(sym); \ }) +#define __KVM_EXTABLE(from, to) \ + " .pushsection __kvm_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" + + +#define __kvm_at(at_op, addr) \ +( { \ + int __kvm_at_err = 0; \ + u64 spsr, elr; \ + asm volatile( \ + " mrs %1, spsr_el2\n" \ + " mrs %2, elr_el2\n" \ + "1: at "at_op", %3\n" \ + " isb\n" \ + " b 9f\n" \ + "2: msr spsr_el2, %1\n" \ + " msr elr_el2, %2\n" \ + " mov %w0, %4\n" \ + "9:\n" \ + __KVM_EXTABLE(1b, 2b) \ + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ + : "r" (addr), "i" (-EFAULT)); \ + __kvm_at_err; \ +} ) + + #else /* __ASSEMBLY__ */ .macro hyp_adr_this_cpu reg, sym, tmp @@ -111,6 +139,21 @@ extern u32 __init_stage2_translation(void); kern_hyp_va \vcpu .endm +/* + * KVM extable for unexpected exceptions. + * In the same format _asm_extable, but output to a different section so that + * it can be mapped to EL2. The KVM version is not sorted. The caller must + * ensure: + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. + */ +.macro _kvm_extable, from, to + .pushsection __kvm_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) + .popsection +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 778cb4f868d9b6e839ae374ce2b09335371bdd43..669c960dd069ca2f2592f5d3ccdcde88cd380801 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -303,7 +303,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } @@ -311,7 +311,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || - kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ + kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */ } static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) @@ -340,6 +340,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e9afdfcb8403cf5d5f45011e5b5cc7f228111d59..5e720742d647959fb1bf47e160bd08607b5af5ff 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -370,7 +370,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool blockable); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); 
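The arm64 kvm_host.h hunk just above, like its arm and mips counterparts elsewhere in this series, adds a blockable argument to kvm_unmap_hva_range() so the MMU-notifier caller can tell the architecture code whether it is allowed to sleep. The sketch below is only a schematic, stand-alone illustration of that calling convention with made-up names; it is not the KVM implementation:

    /* Illustration only: hypothetical names, not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend this tears down mappings for [start, end); doing so may
     * require a sleeping lock, which is only legal when 'blockable' is
     * true, so a non-blocking caller gets an early bail-out instead. */
    static int toy_unmap_range(unsigned long start, unsigned long end,
                               bool blockable)
    {
            if (!blockable)
                    return -11; /* -EAGAIN-like: ask the caller to retry */

            printf("unmapping [%#lx, %#lx)\n", start, end);
            return 0;
    }

    int main(void)
    {
            /* Non-blocking contexts must tolerate the deferral. */
            if (toy_unmap_range(0x1000, 0x2000, false))
                    printf("deferred: caller could not block\n");

            return toy_unmap_range(0x1000, 0x2000, true);
    }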
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index eb16aaba095d73ed4452d73832a28ad6279a7da6..cb6635fa994d1dae32592acc2d5fa8e7713826cb 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -155,11 +155,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), - /* Linux doesn't care about the EL3 */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), ARM64_FTR_END, }; @@ -301,7 +300,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = { }; static const struct arm64_ftr_bits ftr_id_dfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + /* [31:28] TraceFilt */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), @@ -671,9 +670,6 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); - /* - * EL3 is not our concern. 
- */ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 9c5c5d6ccb07a779b9664ca3f4993103da599fa4..8fddb291c0e2cd5693ba6900d5e7eb3614559245 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -22,16 +22,21 @@ endif CC_COMPAT ?= $(CC) CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) + +ifeq ($(LLVM),1) +LD_COMPAT ?= $(LD) +else +LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld +endif else CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc +LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld endif cc32-option = $(call try-run,\ $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-disable-warning = $(call try-run,\ $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) -cc32-ldoption = $(call try-run,\ - $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-as-instr = $(call try-run,\ printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) @@ -114,14 +119,10 @@ dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1) VDSO_CFLAGS += $(dmbinstr) VDSO_AFLAGS += $(dmbinstr) -VDSO_LDFLAGS := $(VDSO_CPPFLAGS) # From arm vDSO Makefile -VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 -VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 -VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft -VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--hash-style=sysv) -VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--build-id) -VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd) +VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1 +VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096 +VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id # Borrow vdsomunge.c from the arm vDSO @@ -182,8 +183,8 @@ quiet_cmd_vdsold_and_vdso_check = LD32 $@ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) quiet_cmd_vdsold = LD32 $@ - cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ - -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ + cmd_vdsold = $(LD_COMPAT) $(VDSO_LDFLAGS) \ + -T $(filter %.lds,$^) $(filter %.o,$^) -o $@ quiet_cmd_vdsocc = CC32 $@ cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< quiet_cmd_vdsocc_gettimeofday = CC32 $@ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index fd1c65b901dbc879164538425037c4a614a91fab..86a57e2fa9af8b093cf211173c942e500295891b 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -24,6 +24,13 @@ ENTRY(_text) jiffies = jiffies_64; + +#define HYPERVISOR_EXTABLE \ + . 
= ALIGN(SZ_8); \ + __start___kvm_ex_table = .; \ + *(__kvm_ex_table) \ + __stop___kvm_ex_table = .; + #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ @@ -39,6 +46,7 @@ jiffies = jiffies_64; __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ + HYPERVISOR_EXTABLE \ __hyp_text_end = .; #define IDMAP_TEXT \ diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 2cee0c3f61eb65b288d9020a250b45469e2e3124..4343d708ce5da3a976cc2455059e6a5dbb9f61d1 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -17,6 +17,7 @@ #include +#include #include #include #include @@ -69,6 +70,20 @@ ENTRY(__guest_enter) // Store the host regs save_callee_saved_regs x1 + // Now the host state is stored if we have a pending RAS SError it must + // affect the host. If any asynchronous exception is pending we defer + // the guest entry. The DSB isn't necessary before v8.2 as any SError + // would be fatal. +alternative_if ARM64_HAS_RAS_EXTN + dsb nshst + isb +alternative_else_nop_endif + mrs x1, isr_el1 + cbz x1, 1f + mov x0, #ARM_EXCEPTION_IRQ + ret + +1: add x29, x0, #VCPU_CONTEXT // Restore guest regs x0-x17 @@ -151,18 +166,22 @@ alternative_endif // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. - .global abort_guest_exit_start abort_guest_exit_start: isb - .global abort_guest_exit_end abort_guest_exit_end: + msr daifset, #4 // Mask aborts + ret + + _kvm_extable abort_guest_exit_start, 9997f + _kvm_extable abort_guest_exit_end, 9997f +9997: + msr daifset, #4 // Mask aborts + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) - // If the exception took place, restore the EL1 exception - // context so that we can report some information. - // Merge the exception code with the SError pending bit. - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f + // restore the EL1 exception context so that we can report some + // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 24b4fbafe3e4ac9f9c30aaa2da04c16a798bf9ff..ea063312bca18d1c7d3373844f5618fb83a62337 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -26,6 +26,30 @@ #include #include +.macro save_caller_saved_regs_vect + /* x0 and x1 were saved in the vector entry */ + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! +.endm + +.macro restore_caller_saved_regs_vect + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + .text .pushsection .hyp.text, "ax" @@ -162,28 +186,24 @@ el1_error: mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit +el2_sync: + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + bl kvm_unexpected_el2_exception + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + + eret + el2_error: - ldp x0, x1, [sp], #16 + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! 
+ + bl kvm_unexpected_el2_exception + + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect - /* - * Only two possibilities: - * 1) Either we come from the exit path, having just unmasked - * PSTATE.A: change the return code to an EL2 fault, and - * carry on, as we're already in a sane state to handle it. - * 2) Or we come from anywhere else, and that's a bug: we panic. - * - * For (1), x0 contains the original return code and x1 doesn't - * contain anything meaningful at that stage. We can reuse them - * as temp registers. - * For (2), who cares? - */ - mrs x0, elr_el2 - adr x1, abort_guest_exit_start - cmp x0, x1 - adr x1, abort_guest_exit_end - ccmp x0, x1, #4, ne - b.ne __hyp_panic - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) eret ENTRY(__hyp_do_panic) @@ -212,7 +232,6 @@ ENDPROC(\label) invalid_vector el2t_irq_invalid invalid_vector el2t_fiq_invalid invalid_vector el2t_error_invalid - invalid_vector el2h_sync_invalid invalid_vector el2h_irq_invalid invalid_vector el2h_fiq_invalid invalid_vector el1_fiq_invalid @@ -240,7 +259,7 @@ ENTRY(__kvm_hyp_vector) invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t - invalid_vect el2h_sync_invalid // Synchronous EL2h + valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f3978931aaf4076028a6411cf286479e70c08570..15312e429b7d1b4d63b1286b4ab699152a2ce6c7 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -34,6 +35,9 @@ #include #include +extern struct exception_table_entry __start___kvm_ex_table; +extern struct exception_table_entry __stop___kvm_ex_table; + /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { @@ -264,10 +268,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... */ par = read_sysreg(par_el1); - asm volatile("at s1e1r, %0" : : "r" (far)); - isb(); - - tmp = read_sysreg(par_el1); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg(par_el1); + else + tmp = 1; /* back to the guest */ write_sysreg(par, par_el1); if (unlikely(tmp & 1)) @@ -426,7 +430,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_dabt_isextabt(vcpu) && - !kvm_vcpu_dabt_iss1tw(vcpu); + !kvm_vcpu_abt_iss1tw(vcpu); if (valid) { int ret = __vgic_v2_perform_cpuif_access(vcpu); @@ -626,7 +630,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, * making sure it is a kernel address and not a PC-relative * reference. 
*/ - asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va)); + asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string)); __hyp_do_panic(str_va, spsr, elr, @@ -663,3 +667,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) unreachable(); } + +asmlinkage void __hyp_text kvm_unexpected_el2_exception(void) +{ + unsigned long addr, fixup; + struct kvm_cpu_context *host_ctxt; + struct exception_table_entry *entry, *end; + unsigned long elr_el2 = read_sysreg(elr_el2); + + entry = hyp_symbol_addr(__start___kvm_ex_table); + end = hyp_symbol_addr(__stop___kvm_ex_table); + host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state); + + while (entry < end) { + addr = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; + + if (addr != elr_el2) { + entry++; + continue; + } + + write_sysreg(fixup, elr_el2); + return; + } + + hyp_panic(host_ctxt); +} diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 79e5cc70f1fddbda61199f449b3b2c32732330e4..561e2573bd34c53fef4ace83ebb3eb82063d262b 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -499,7 +499,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMMAP_EARLY, NULL); + MEMINIT_EARLY, NULL); return 0; } @@ -508,8 +508,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { if (!vmem_map) { - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, - NULL); + memmap_init_zone(size, nid, zone, start_pfn, + MEMINIT_EARLY, NULL); } else { struct page *start; struct memmap_init_callback_data args; diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h index 9138a624c5c81339d8bf59246e8703134dca247b..692f90e7fecc13186cebf6f94c884f11f8500c08 100644 --- a/arch/m68k/include/asm/m53xxacr.h +++ b/arch/m68k/include/asm/m53xxacr.h @@ -89,9 +89,9 @@ * coherency though in all cases. And for copyback caches we will need * to push cached data as well. 
*/ -#define CACHE_INIT CACR_CINVA -#define CACHE_INVALIDATE CACR_CINVA -#define CACHE_INVALIDATED CACR_CINVA +#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC) +#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA) +#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA) #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \ (0x000f0000) + \ diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 9bfa170157688f1b16859e04d2dd0d3dec10cda8..c432bfafe63e2bbdf6adee42b69e4d6b7c64174a 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 static __inline__ void iop_stop(volatile struct mac_iop *iop) { - iop->status_ctrl &= ~IOP_RUN; + iop->status_ctrl = IOP_AUTOINC; } static __inline__ void iop_start(volatile struct mac_iop *iop) @@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop) iop->status_ctrl = IOP_RUN | IOP_AUTOINC; } -static __inline__ void iop_bypass(volatile struct mac_iop *iop) -{ - iop->status_ctrl |= IOP_BYPASS; -} - static __inline__ void iop_interrupt(volatile struct mac_iop *iop) { - iop->status_ctrl |= IOP_IRQ; + iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC; } static int iop_alive(volatile struct mac_iop *iop) @@ -244,7 +239,6 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_SCC]->status_ctrl = 0x87; iop_scc_present = 1; } else { iop_base[IOP_NUM_SCC] = NULL; @@ -256,7 +250,7 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_ISM]->status_ctrl = 0; + iop_stop(iop_base[IOP_NUM_ISM]); iop_ism_present = 1; } else { iop_base[IOP_NUM_ISM] = NULL; @@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan) msg->status = IOP_MSGSTATUS_UNUSED; msg = msg->next; iop_send_queue[iop_num][chan] = msg; - if (msg) iop_do_send(msg); + if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) + iop_do_send(msg); } /* @@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata, if (!(q = iop_send_queue[iop_num][chan])) { iop_send_queue[iop_num][chan] = msg; + iop_do_send(msg); } else { while (q->next) q = q->next; q->next = msg; } - if (iop_readb(iop_base[iop_num], - IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) { - iop_do_send(msg); - } - return 0; } diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 96810d91da2bd9cf2318e0dda4b40c092c3ea7d7..4a25ce6a1823d99eeb8d879ac53ee28a2fd492c7 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -273,6 +273,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll) { int tmp = Q40_RTC_CTRL; + pll->pll_ctrl = 0; pll->pll_value = tmp & Q40_RTC_PLL_MASK; if (tmp & Q40_RTC_PLL_SIGN) pll->pll_value = -pll->pll_value; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f020bec96265b012bc08ec48e5d211584768600f..08ff7f5cfac972c53f3a07d98ff2edda6bd9c354 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -854,6 +854,7 @@ config SNI_RM select I8253 select I8259 select ISA + select MIPS_L1_CACHE_SHIFT_6 select SWAP_IO_SPACE if CPU_BIG_ENDIAN select SYS_HAS_CPU_R4X00 select SYS_HAS_CPU_R5000 diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index bfdfaf32d2c49742066329e800a6b535f363c3e1..75189ff2f3c78315f6c550174188a03d7563ab00 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -517,6 +517,7 @@ static int 
__init dwc3_octeon_device_init(void) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { + put_device(&pdev->dev); dev_err(&pdev->dev, "No memory resources\n"); return -ENXIO; } @@ -528,8 +529,10 @@ static int __init dwc3_octeon_device_init(void) * know the difference. */ base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) + if (IS_ERR(base)) { + put_device(&pdev->dev); return PTR_ERR(base); + } mutex_lock(&dwc3_octeon_clocks_mutex); dwc3_octeon_clocks_start(&pdev->dev, (u64)base); diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index a45af3de075d900cf819071c390820c7f749f47b..d43e4ab20b2382a85c800e8b3f65e9b35a28eeae 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_34K: case CPU_1004K: case CPU_74K: + case CPU_1074K: case CPU_M14KC: case CPU_M14KEC: case CPU_INTERAPTIV: diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2b3fdfc9e0e77dffdc1503063e34035bde857579..c254761cb8ad91bb1d9861c48cb23d88ea5e613e 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -936,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool blockable); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 5ec546b5eed1c0c6a46b6bfbc5dadf0188667a42..d16e6654a65553b669cb93139c09d9c98ee2e0af 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { + bmips_cpu_setup(); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c index cd3e1f82e1a5df356e41990652712466d6e147d0..08ad6371fbe087b2c174613c1a8f0eb19824dff7 100644 --- a/arch/mips/kernel/topology.c +++ b/arch/mips/kernel/topology.c @@ -20,7 +20,7 @@ static int __init topology_init(void) for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); - c->hotpluggable = 1; + c->hotpluggable = !!i; ret = register_cpu(c, i); if (ret) printk(KERN_WARNING "topology_init: register_cpu %d " diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index e7f5ef6bed0fe8a86e39497f8c1791690c3b0bc6..79485790f7b56bef66b0e8de1b7a3bc649c09fd8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -131,6 +131,8 @@ void kvm_arch_check_processor_compat(void *rtn) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { switch (type) { + case KVM_VM_MIPS_AUTO: + break; #ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: #else diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index d8dcdb350405900928b83e7afa2112ecf3122518..098a7afd4d384cc4cac9cba8f67bedb6fecc9700 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return 1; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, 
unsigned long end, + bool blockable) { handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 05a539d3a5970f1d21e49ddb7142eba18b86fe61..7650edd5cf7ff3351c53eeba9accbb608a26855f 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1789,7 +1789,11 @@ static void setup_scache(void) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); + + if (current_cpu_type() == CPU_BMIPS5000) + c->options |= MIPS_CPU_INCLUSIVE_CACHES; } + #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index f9407e17047624e046c696235ae05d8e53e8780f..c6af7047eb0d2c3ab30cd514bd338a0457d23ba3 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c @@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = { }, }; -static u32 a20r_ack_hwint(void) +/* + * Trigger chipset to update CPU's CAUSE IP field + */ +static u32 a20r_update_cause_ip(void) { u32 status = read_c0_status(); @@ -205,12 +208,14 @@ static void a20r_hwint(void) int irq; clear_c0_status(IE_IRQ0); - status = a20r_ack_hwint(); + status = a20r_update_cause_ip(); cause = read_c0_cause(); irq = ffs(((cause & status) >> 8) & 0xf8); if (likely(irq > 0)) do_IRQ(SNI_A20R_IRQ_BASE + irq - 1); + + a20r_update_cause_ip(); set_c0_status(IE_IRQ0); } diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c index 530a36f465ced43318fd2da77db94ae7cecdcc78..afcc86726448e36d7ec6f8670c429bed67cb4ba1 100644 --- a/arch/mips/vdso/genvdso.c +++ b/arch/mips/vdso/genvdso.c @@ -126,6 +126,7 @@ static void *map_vdso(const char *path, size_t *_size) if (fstat(fd, &stat) != 0) { fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -134,6 +135,7 @@ static void *map_vdso(const char *path, size_t *_size) if (addr == MAP_FAILED) { fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -143,6 +145,7 @@ static void *map_vdso(const char *path, size_t *_size) if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) { fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name, path); + close(fd); return NULL; } @@ -154,6 +157,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF class\n", program_name, path); + close(fd); return NULL; } @@ -165,6 +169,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF data order\n", program_name, path); + close(fd); return NULL; } @@ -172,15 +177,18 @@ static void *map_vdso(const char *path, size_t *_size) fprintf(stderr, "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n", program_name, path); + close(fd); return NULL; } else if (swap_uint16(ehdr->e_type) != ET_DYN) { fprintf(stderr, "%s: '%s' has invalid ELF type (expected ET_DYN)\n", program_name, path); + close(fd); return NULL; } *_size = stat.st_size; + close(fd); return addr; } @@ -284,10 +292,12 @@ int main(int argc, char **argv) /* Calculate and write symbol offsets to */ if (!get_symbols(dbg_vdso_path, dbg_vdso)) { unlink(out_path); + fclose(out_file); return EXIT_FAILURE; } fprintf(out_file, "};\n"); + fclose(out_file); return EXIT_SUCCESS; } diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c index 
43f140a28bc7257ddbc20a713fe10e900dd371e0..54d38809e22cbd621202ebe779f33fc0db289477 100644 --- a/arch/openrisc/kernel/stacktrace.c +++ b/arch/openrisc/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = NULL; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = (unsigned long *) &sp; - else - sp = (unsigned long *) KSTK_ESP(tsk); + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index b747bf1fc1b637e18c3a8beeaeab3417f9ace37b..4272d9123f9ed3b859e57932a6034be158b4f406 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -20,7 +20,7 @@ #include #include -static void cache_loop(struct page *page, const unsigned int reg) +static __always_inline void cache_loop(struct page *page, const unsigned int reg) { unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index dbaaca84f27f342ef1c1b8c743e66fa4d6a6f8eb..640d46edf32e71cd9a117f2e4d0ea1611de1698c 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -26,6 +26,67 @@ #define __smp_rmb() mb() #define __smp_wmb() mb() +#define __smp_store_release(p, v) \ +do { \ + typeof(p) __p = (p); \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("stb,ma %0,0(%1)" \ + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("sth,ma %0,0(%1)" \ + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("stw,ma %0,0(%1)" \ + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("std,ma %0,0(%1)" \ + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + } \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + union { typeof(*p) __val; char __c[1]; } __u; \ + typeof(p) __p = (p); \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("ldb,ma 0(%1),%0" \ + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("ldh,ma 0(%1),%0" \ + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("ldw,ma 0(%1),%0" \ + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("ldd,ma 0(%1),%0" \ + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + } \ + __u.__val; \ +}) #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c3d81a5ef840fe46ffe740174c7ec..068958575871781355486cafe95bdba8c76b8658 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long 
__cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8e34f6e4195e98d2c494f190693a9..2e4d1f05a92646b36b7d4eaabffaad6637bac430 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return prev; +} diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 7d5ddf53750ce7f0f6974f240604cbb9322d5188..7a83b5e136e0dfc70055eacbf6f3b16afcf67dc7 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -122,7 +122,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ elf_util.c $(zlib-y) devtree.c stdlib.c \ oflib.c ofconsole.c cuboot.c -src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c +src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 48e3743faedfdecd0cec33322877ee7490662aab..83c78427c20be5a0e02bde0de2c4c732b0f303ae 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -127,7 +127,7 @@ int serial_console_init(void) dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); #endif -#ifdef CONFIG_PPC_MPC52XX +#ifdef CONFIG_PPC_MPC52xx else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 4504380c7a9221e9955691b27237e7fb6ce42492..60839eeada8b7acb6611777f5b275456720f1619 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -110,7 +110,6 @@ CONFIG_FB_NVIDIA=y CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 7032d4244ec5f5e7f96b8029481164e23e25c709..e30af76f47537ee2572bca18b616b1dfd7644989 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -779,7 +779,6 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index a790d5cf6ea37da3bbb99e879cc59753757f1921..684e8ae00d1603c7788d2d0b5e7c0d4521f342cf 100644 --- a/arch/powerpc/include/asm/kvm_asm.h 
+++ b/arch/powerpc/include/asm/kvm_asm.h @@ -163,4 +163,7 @@ #define KVM_INST_FETCH_FAILED -1 +/* Extract PO and XOP opcode fields */ +#define PO_XOP_OPCODE_MASK 0xfc0007fe + #endif /* __POWERPC_KVM_ASM_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 2f95e38f05491a6dcb60a1adda6ff850ce39c1b9..7b54d8412367e36b62dc6f381e72362ac0dd8d3a 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -68,7 +68,8 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, + bool blockable); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index dce863a7635cd8d0fe5bb7e9f305017eda0a2adb..8e5b7d0b851c61246b00d2f3e8d53e322777ffbc 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -10,8 +10,6 @@ #ifdef CONFIG_SMP -#include - #define __my_cpu_offset local_paca->data_offset #endif /* CONFIG_SMP */ @@ -19,4 +17,6 @@ #include +#include + #endif /* _ASM_POWERPC_PERCPU_H_ */ diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 458b928dbd8447008a7f6c83ff9db27d06d7508d..5bef78e2b4c1430fc4daea5db0230b81751f7e13 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -183,7 +183,7 @@ __init_LPCR_ISA300: __init_FSCR: mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB + ori r3,r3,FSCR_TAR|FSCR_EBB mtspr SPRN_FSCR,r3 blr diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index f9fe2080ceb90bac3dfed1ed583a6edc8875cca7..eed3543aeca46d064acb24e0097e7e92b5823233 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -100,7 +100,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev) if (!tbl) return 0; - mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + + tbl->it_page_shift - 1); mask += mask - 1; return mask; diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index fe3c6f3bd3b6226727fffc12b0ec0e3bef1fb4b4..d123cba0992d079609ea1352ea8c1918e437e481 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -502,7 +502,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) rc = 1; if (pe->state & EEH_PE_ISOLATED) { pe->check_count++; - if (pe->check_count % EEH_MAX_FAILS == 0) { + if (pe->check_count == EEH_MAX_FAILS) { dn = pci_device_to_OF_node(dev); if (dn) location = of_get_property(dn, "ibm,loc-code", diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d5f351f02c1534847925bdf50db3b9489456d67a..7781f0168ce8c75c59168a4467b882e8a0dd1b9e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -430,11 +430,11 @@ void system_reset_exception(struct pt_regs *regs) #ifdef CONFIG_PPC_BOOK3S_64 BUG_ON(get_paca()->in_nmi == 0); if (get_paca()->in_nmi > 1) - nmi_panic(regs, "Unrecoverable nested System Reset"); + die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable System Reset"); + die("Unrecoverable System Reset", regs, 
SIGABRT); if (!nested) nmi_exit(); @@ -775,7 +775,7 @@ void machine_check_exception(struct pt_regs *regs) /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable Machine check"); + die("Unrecoverable Machine check", regs, SIGBUS); return; diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 65b3bdb99f0bae809a92fc76ce66335025a8f1ae..31ab6eb61e26e4d3ca130f1d3d0a6a3a9a225d48 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -705,7 +705,7 @@ int vdso_getcpu_init(void) node = cpu_to_node(cpu); WARN_ON_ONCE(node > 0xffff); - val = (cpu & 0xfff) | ((node & 0xffff) << 16); + val = (cpu & 0xffff) | ((node & 0xffff) << 16); mtspr(SPRN_SPRG_VDSO_WRITE, val); get_paca()->sprg_vdso = val; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index cc05f346e04219d0c737948c463d890f55bf6deb..bc9d1321dc730770c4c1093b0e115f323cfb6c61 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -812,7 +812,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); } diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 31cd0f327c8a2d5af48401001be86a1747b8ae51..e7fd60cf97804011f8a8d3e987cdb1b46b911023 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -6,6 +6,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include @@ -47,7 +49,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) u64 newmsr, bescr; int ra, rs; - switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For treclaim., tsr., and trechkpt. instructions if bit + * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section + * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit + * 31 is an acceptable way to handle these invalid forms that have + * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/ + * bit 31 set) can generate a softpatch interrupt. Hence both forms + * are handled below for these instructions so they behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? 
*/ newmsr = vcpu->arch.shregs.srr1; @@ -108,7 +121,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return RESUME_GUEST; - case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* check for PR=1 and arch 2.06 bit set in PCR */ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { /* generate an illegal instruction interrupt */ @@ -143,7 +157,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = msr; return RESUME_GUEST; - case PPC_INST_TRECLAIM: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK): /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { /* generate an illegal instruction interrupt */ @@ -179,7 +194,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST; - case PPC_INST_TRECHKPT: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK): /* XXX do we need to check for PR=0 here? */ /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { @@ -211,6 +227,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) } /* What should we do here? We didn't recognize the instruction */ - WARN_ON_ONCE(1); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr); + return RESUME_GUEST; } diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index 3cf5863bc06e8513d5cb7d359f401846ba07aab5..3c7ca2fa19597c59eb71a424e09a975a337f69c7 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -26,7 +26,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) u64 newmsr, msr, bescr; int rs; - switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For the tsr. instruction if bit 31 = 0 then it is per + * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid + * Forms, informs specifically that ignoring bit 31 is an acceptable way + * to handle TM-related invalid forms that have bit 31 = 0. Moreover, + * for emulation purposes both forms (w/ and wo/ bit 31 set) can + * generate a softpatch interrupt. Hence both forms are handled below + * for tsr. to make them behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? 
*/ newmsr = vcpu->arch.shregs.srr1; @@ -76,7 +87,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return 1; - case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* we know the MSR has the TS field = S (0b01) here */ msr = vcpu->arch.shregs.msr; /* check for PR=1 and arch 2.06 bit set in PCR */ diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 8f2985e46f6f193d975e2d26ca92c2df60742649..bbb02195dc53076717d19ed9cc633687e8ca8014 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -737,7 +737,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 6e0ff8b600ced85f19dff12e8204ad03b2096a0b..eb5252177b6621b7912fe994774436862c21a517 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -233,6 +233,9 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code, return is_exec || (address >= TASK_SIZE); } +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE +#define SIGFRAME_MAX_SIZE (4096 + 128) + static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma, unsigned int flags, bool *must_retry) @@ -240,7 +243,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, /* * N.B. The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB + * The kernel signal delivery code writes a bit over 4KB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. Thus we allow the stack to @@ -265,7 +268,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ - if (address + 2048 >= uregs->gpr[1]) + if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1]) return false; if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index 7124af17da722fb1bab714edd47129442f3187f3..a587f901398865195326fa666a62c20795035eb8 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -81,13 +81,17 @@ int pkey_initialize(void) scan_pkey_feature(); /* - * Let's assume 32 pkeys on P8 bare metal, if its not defined by device - * tree. We make this exception since skiboot forgot to expose this - * property on power8. + * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device + * tree. We make this exception since some version of skiboot forgot to + * expose this property on power8/9. 
*/ - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) && - cpu_has_feature(CPU_FTRS_POWER8)) - pkeys_total = 32; + if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) { + unsigned long pvr = mfspr(SPRN_PVR); + + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) + pkeys_total = 32; + } /* * Adjust the upper limit, based on the number of bits supported by diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 680458064bb5dc3a17de91a040786c9b57b2404f..f227e7b7e6fcb434a8e5df2e6f4a7425ab7816d4 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2085,6 +2085,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); + } else if (period) { + /* Account for interrupt in case of invalid SIAR */ + if (perf_event_account_interrupt(event)) + power_pmu_stop(event, 0); } } diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 9f5958f1692349c5d1db79a1abb4c26c8606822d..741a8fa8a3e6be151113696cc8a8ef70a3167fac 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -46,6 +46,7 @@ config SPU_FS tristate "SPU file system" default m depends on PPC_CELL + depends on COREDUMP select SPU_BASE help The SPU file system is used to access Synergistic Processing diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index b168c3742b431c97e27423fddd0cb646c6f1c9c6..afabe691861957d63adcd2e5e0d62e390b7f4344 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -31,7 +31,7 @@ static bool rtas_hp_event; unsigned long pseries_memory_block_size(void) { struct device_node *np; - unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; + u64 memblock_size = MIN_MEMORY_BLOCK_SIZE; struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index e81a285f3a6ce7f8abe5ee22e7b49e5559543e07..e827108680f21a85bd354a72bdb7ad8921cd6b2e 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -118,7 +118,6 @@ static void handle_system_shutdown(char event_modifier) case EPOW_SHUTDOWN_ON_UPS: pr_emerg("Loss of system power detected. System is running on" " UPS/battery. 
Check RTAS error log for details\n"); - orderly_poweroff(true); break; case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index cb1f51ad48e40a3b62dc673aa5a35d17622d2f32..411f785cdfb5126d0f2ec7addba44b1f9df68833 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -627,6 +628,7 @@ static bool xive_native_provision_pages(void) pr_err("Failed to allocate provisioning page\n"); return false; } + kmemleak_ignore(p); opal_xive_donate_page(chip, __pa(p)); } return true; } diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index c6dcc5291f972ab74fd534436be003ed8e2b308d..02fbc175142e29847ff673d8a37cd0f13663b124 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -63,4 +63,11 @@ do { \ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here. */ #define MCOUNT_INSN_SIZE 8 + +#ifndef __ASSEMBLY__ +struct dyn_ftrace; +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop +#endif + #endif diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 6d39f64e4dce4eefbe060dce93e1c47f563c113c..fa8530f05ed4f7a25c34365bee02995d1960e8dc 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return __ftrace_modify_call(rec->ip, addr, false); } + +/* + * This is called early on, and isn't wrapped by + * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold + * text_mutex, which triggers a lockdep failure. SMP isn't running so we could + * just directly poke the text, but it's simpler to just take the lock + * ourselves.
+ */ +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + int out; + + ftrace_arch_code_modify_prepare(); + out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); + ftrace_arch_code_modify_post_process(); + + return out; +} + int ftrace_update_ftrace_func(ftrace_func_t func) { int ret = __ftrace_modify_call((unsigned long)&ftrace_call, diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 0095ddb58ff6979f4883e022368a4e0e21f36085..50f6661ba5664882a84d276bcd957de17b4cb9d5 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -29,7 +29,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ prev__ = *ptr__; \ do { \ @@ -37,7 +37,7 @@ new__ = old__ op (val); \ prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ new__; \ }) @@ -68,7 +68,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ @@ -84,7 +84,7 @@ : [val__] "d" (val__) \ : "cc"); \ } \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) @@ -95,14 +95,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ old__ + val__; \ }) @@ -114,14 +114,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") @@ -136,10 +136,10 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ ret__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = cmpxchg(ptr__, oval, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -152,10 +152,10 @@ ({ \ typeof(pcp) *ptr__; \ typeof(pcp) ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = xchg(ptr__, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -171,11 +171,11 @@ typeof(pcp1) *p1__; \ typeof(pcp2) *p2__; \ int ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ p1__ = raw_cpu_ptr(&(pcp1)); \ p2__ = raw_cpu_ptr(&(pcp2)); \ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 74a296cea21cc8062b9642bb9c81759df6ed23e8..0e6d01225a670c66a15f56591aa31ba37bbc7083 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1377,8 +1377,8 @@ static int aux_output_begin(struct perf_output_handle *handle, idx = aux->empty_mark + 1; for (i = 0; 
i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); - te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; - te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK; + te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | + SDB_TE_ALERT_REQ_MASK); te->overflow = 0; } /* Save the position of empty SDBs */ @@ -1425,8 +1425,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, te = aux_sdb_trailer(aux, alert_index); do { orig_flags = te->flags; - orig_overflow = te->overflow; - *overflow = orig_overflow; + *overflow = orig_overflow = te->overflow; if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { /* * SDB is already set by hardware. @@ -1660,7 +1659,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages, } /* Allocate aux_buffer struct for the event */ - aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL); + aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL); if (!aux) goto no_aux; sfb = &aux->sfb; diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 65fefbf61e1ca94321077482ed0ad470a2a0ad9a..3ffa2847c110bcbfac536e73113d92869a8e02f4 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1286,7 +1286,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb) cb->pc == 1 && cb->qc == 0 && cb->reserved2 == 0 && - cb->key == PAGE_DEFAULT_KEY && cb->reserved3 == 0 && cb->reserved4 == 0 && cb->reserved5 == 0 && @@ -1350,7 +1349,11 @@ static int s390_runtime_instr_set(struct task_struct *target, kfree(data); return -EINVAL; } - + /* + * Override access key in any case, since user space should + * not be able to set it, nor should it care about it. + */ + ri_cb.key = PAGE_DEFAULT_KEY >> 4; preempt_disable(); if (!target->thread.ri_cb) target->thread.ri_cb = data; diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 125c7f6e87150d526a60c9e23b99c47bad1aae2b..1788a5454b6fc7a3a64b621b9b1880e35d6b705d 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->k = 1; cb->ps = 1; cb->pc = 1; - cb->key = PAGE_DEFAULT_KEY; + cb->key = PAGE_DEFAULT_KEY >> 4; cb->v = 1; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 5f85e0dfa66d1d1661bb250baad4861f42fbd371..4bda9055daefae72349152b96d70ba34d72212a3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -537,7 +537,7 @@ static struct notifier_block kdump_mem_nb = { /* * Make sure that the area behind memory_end is protected */ -static void reserve_memory_end(void) +static void __init reserve_memory_end(void) { #ifdef CONFIG_CRASH_DUMP if (ipl_info.type == IPL_TYPE_FCP_DUMP && @@ -555,7 +555,7 @@ static void reserve_memory_end(void) /* * Make sure that oldmem, where the dump is stored, is protected */ -static void reserve_oldmem(void) +static void __init reserve_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) @@ -567,7 +567,7 @@ static void reserve_oldmem(void) /* * Make sure that oldmem, where the dump is stored, is protected */ -static void remove_oldmem(void) +static void __init remove_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index f1147caebacf020e6933b1069b962f1bc2a0cfca..af69fb7fef7c7373dc86da09039291f1a4e2545a 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -85,6 +85,9 @@ device_initcall(landisk_devices_setup); static void __init landisk_setup(char 
**cmdline_p) { + /* I/O port identity mapping */ + __set_io_port_base(0); + /* LED ON */ __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED); diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 28cc61216b649773045d2eb16bdd997602ac9087..ed5b758c650d78dbf95ac3b8fae62a32aced2b10 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -203,7 +203,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -357,7 +357,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! @@ -396,7 +396,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 466f66c8a7f8d141d584514e7892f14d55479318..5642f025b397cd7bd3f2ae5b40acc2dac0bb4d5d 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -38,6 +38,8 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign +# Disable relocation relaxation in case the link is not PIE. +KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n @@ -100,7 +102,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o quiet_cmd_check_data_rel = DATAREL $@ define cmd_check_data_rel for obj in $(filter %.o,$^); do \ - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ + $(READELF) -S $$obj | grep -qF .rel.local && { \ echo "error: $$obj has data relocations!" 
>&2; \ exit 1; \ } || true; \ diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 4e2a689badda38eeaca17dc7ee80b65d938da1c8..22ac03d6b83f26dfefb9967f1bbbadd260b8ce65 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -424,7 +424,7 @@ CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y -CONFIG_INIT_STACK_ALL=y +CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_CRYPTO_ADIANTUM=y CONFIG_CRYPTO_SHA256_SSSE3=y diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index ce75be940567e9372bdb53388dd0d415d5424be9..9218cb128661142dba1e132a8016606a056d7eb9 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -216,7 +216,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -246,6 +245,7 @@ CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_OHCI_HCD=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 45b0f4d84d83b08fa0c49cf73adca97b03321650..146a12293396d1239f39cdbd14cde896d3ed0016 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -212,7 +212,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -242,6 +241,7 @@ CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_OHCI_HCD=y diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 5f6a5af9c489b7eafb2588cea8e61ba7ae84778a..77043a82da510c5c608da94d75016edc60add92c 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -127,10 +127,6 @@ ddq_add_8: /* generate a unique variable for ddq_add_x */ -.macro setddq n - var_ddq_add = ddq_add_\n -.endm - /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n @@ -140,9 +136,7 @@ ddq_add_8: .macro club name, id .altmacro - .if \name == DDQ_DATA - setddq %\id - .elseif \name == XDATA + .if \name == XDATA setxdata %\id .endif .noaltmacro @@ -165,9 +159,8 @@ ddq_add_8: .set i, 1 .rept (by - 1) - club DDQ_DATA, i club XDATA, i - vpaddq var_ddq_add(%rip), xcounter, var_xdata + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata @@ -180,8 +173,7 @@ ddq_add_8: vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - club DDQ_DATA, by - vpaddq var_ddq_add(%rip), xcounter, xcounter + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index cb2deb61c5d96dc53aabdeb7bd880ed436f75db7..29b27f9a6e1ee9cfa0ca5467e40598a2f337d90a 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -270,7 +270,7 @@ ALL_F: .octa 
0xffffffffffffffffffffffffffffffff PSHUFB_XMM %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ @@ -982,7 +982,7 @@ _initial_blocks_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1190,7 +1190,7 @@ aes_loop_par_enc_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ce7b3b22ae86bc959f8a3f2bc5acb9b4645ccef3..4876411a072a7d1da0dc65b3f96e918575404610 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1465,7 +1465,8 @@ asmlinkage void __noreturn kvm_spurious_fault(void); ____kvm_handle_fault_on_reboot(insn, "") #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e3f70c60e8ccd0dd6b60f9e666730bf5e34f3094..62f9903544b594ff7f62ebecb244d4c0213a4076 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -330,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); * combination with microcode which triggers a CPU buffer flush when the * instruction is executed. */ -static inline void mds_clear_cpu_buffers(void) +static __always_inline void mds_clear_cpu_buffers(void) { static const u16 ds = __KERNEL_DS; @@ -351,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void) * * Clear CPU buffers if the corresponding static key is enabled */ -static inline void mds_user_clear_cpu_buffers(void) +static __always_inline void mds_user_clear_cpu_buffers(void) { if (static_branch_likely(&mds_user_clear)) mds_clear_cpu_buffers(); diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 19b137f1b3beb9a2eefd9d769933dea444e237ef..2ff9b98812b7637de7fedadc25aa60d20634e19a 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -4,6 +4,11 @@ #define ARCH_DEFAULT_PKEY 0 +/* + * If more than 16 keys are ever supported, a thorough audit + * will be necessary to ensure that the types that store key + * numbers and masks have sufficient capacity. + */ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 
16 : 1) extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 08e2f3a5f12428a371e8e3a6f77d6d042b45a6c5..15234885e60bc779b1b697e43aa59555c1ef55df 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2250,6 +2250,7 @@ static inline void __init check_timer(void) legacy_pic->init(0); legacy_pic->make_irq(0); apic_write(APIC_LVT0, APIC_DM_EXTINT); + legacy_pic->unmask(0); unlock_ExtINT_logic(); @@ -2342,8 +2343,13 @@ static int mp_irqdomain_create(int ioapic) static void ioapic_destroy_irqdomain(int idx) { + struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg; + struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode; + if (ioapics[idx].irqdomain) { irq_domain_remove(ioapics[idx].irqdomain); + if (!cfg->dev) + irq_domain_free_fwnode(fn); ioapics[idx].irqdomain = NULL; } } diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 99c28c02b7a546cad3879dfbfc7058db24fbe4e3..8b7e0b46e86eaa6b826c2ced222dd693a03955c4 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -556,6 +556,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irqd->chip_data = apicd; irqd->hwirq = virq + i; irqd_set_single_target(irqd); + + /* Don't invoke affinity setter on deactivated interrupts */ + irqd_set_affinity_on_activate(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 1ceccc4a5472ce90d6245e781f9dd1b9af2e73b7..9cc524be3c949d43a651da75d7d116e75a3913e4 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -518,7 +518,7 @@ static void do_inject(void) */ if (inj_type == DFR_INT_INJ) { i_mce.status |= MCI_STATUS_DEFERRED; - i_mce.status |= (i_mce.status & ~MCI_STATUS_UC); + i_mce.status &= ~MCI_STATUS_UC; } /* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 4b900035f2202320e2c26891d130d16eb6eb5414..601a5da1d196a91363817c3b0d12ace5894718eb 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state) #ifdef CONFIG_ARCH_HAS_PKEYS -#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) -#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) /* * This will go out and modify PKRU register to set the access * rights for @pkey to @init_val. @@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return -EINVAL; + /* + * This code should only be called with valid 'pkey' + * values originating from in-kernel users. Complain + * if a bad value is observed. + */ + WARN_ON_ONCE(pkey >= arch_max_pkey()); + /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) new_pkru_bits |= PKRU_AD_BIT; diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 519649ddf1001d423c5b0a7e02a42f7ca92e2a06..fe522691ac717ff77347a68162c912d7b50edafb 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -207,7 +207,7 @@ static void mask_and_ack_8259A(struct irq_data *data) * lets ACK and report it. 
[once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { - printk(KERN_DEBUG + printk_deferred(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 8d4d5064531060e85161cc39e9fe6c83b63a25a6..1401f86e4007073d10f353088798241f5943fbb9 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -374,7 +374,7 @@ static unsigned long task_seg_base(struct task_struct *task, */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; - if (unlikely(idx >= ldt->nr_entries)) + if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 2701b370e58fef1973ccb7cdfdabb489a537eed1..1d264ba1e56d125a77f51cb6cb4843b1630dd907 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -420,8 +420,11 @@ bool unwind_next_frame(struct unwind_state *state) /* * Find the orc_entry associated with the text address. * - * Decrement call return addresses by one so they work for sibling - * calls and calls to noreturn functions. + * For a call frame (as opposed to a signal frame), state->ip points to + * the instruction after the call. That instruction's stack layout + * could be different from the call instruction's layout, for example + * if the call was to a noreturn function. So get the ORC data for the + * call instruction itself. */ orc = orc_find(state->signal ? state->ip : state->ip - 1); if (!orc) @@ -634,6 +637,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, state->sp = task->thread.sp; state->bp = READ_ONCE_NOCHECK(frame->bp); state->ip = READ_ONCE_NOCHECK(frame->ret_addr); + state->signal = (void *)state->ip == ret_from_fork; } if (get_stack_info((unsigned long *)state->sp, state->task, diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 8c6392534d145fd1e70ca221547854e2615e6ee7..bba2f76c356dda14d704b864f631abee0a4f1eda 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2034,7 +2034,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 92ff656e18101f6dd029aeacc5294917af97e295..a2ff5c214738a0a08c9b4823e243086d17396acc 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1956,7 +1956,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); } diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index cb41b036eb2646c98c806672b71b15a834fca5be..7e0dc8c7da2c096871118e5ff42453017e8e8451 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -339,7 +339,7 @@ TRACE_EVENT( /* These depend on page entry type, so compute them now. 
*/ __field(bool, r) __field(bool, x) - __field(u8, u) + __field(signed char, u) ), TP_fast_assign( diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 2aafb6c791345f1fc49bb3d4a599a7c64a670574..cb09a0ec875000ebbb3f6348548dafa335f0e12d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3942,6 +3942,12 @@ static int iret_interception(struct vcpu_svm *svm) return 1; } +static int invd_interception(struct vcpu_svm *svm) +{ + /* Treat an INVD instruction as a NOP and just skip it. */ + return kvm_skip_emulated_instruction(&svm->vcpu); +} + static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) @@ -4831,7 +4837,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_RDPMC] = rdpmc_interception, [SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_IRET] = iret_interception, - [SVM_EXIT_INVD] = emulate_on_interception, + [SVM_EXIT_INVD] = invd_interception, [SVM_EXIT_PAUSE] = pause_interception, [SVM_EXIT_HLT] = halt_interception, [SVM_EXIT_INVLPG] = invlpg_interception, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2f823f35dee508f99e3027e95cce839e38d6c498..d6bcbce6c15cbb143694f77b64c538c2b899171f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10128,6 +10128,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && + exit_reason != EXIT_REASON_APIC_ACCESS && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5b2440e591fc125e73180394db7860c7cd49d4ef..dd182228be714d730d2d42252fcacd65a85bd7a2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -857,7 +857,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; + X86_CR4_SMEP; + unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE; if (kvm_valid_cr4(vcpu, cr4)) return 1; @@ -885,7 +886,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; - if (((cr4 ^ old_cr4) & pdptr_bits) || + if (((cr4 ^ old_cr4) & mmu_role_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); @@ -4668,10 +4669,13 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit_out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); +set_pit_out: + mutex_unlock(&kvm->lock); break; } case KVM_GET_PIT2: { @@ -4691,10 +4695,13 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit2_out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); +set_pit2_out: + mutex_unlock(&kvm->lock); break; } case KVM_REINJECT_CONTROL: { diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 7077b3e282414d123b5b58a3ebfed58fe7e5bbcc..40dbbd8f1fe41e8d8585fbd6c45d00f7caa45cc1 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -139,7 +139,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size) */ if (size < 8) 
{ if (!IS_ALIGNED(dest, 4) || size != 4) - clean_cache_range(dst, 1); + clean_cache_range(dst, size); } else { if (!IS_ALIGNED(dest, 8)) { dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index d71d72cf6c66685eb9f8eee9f7bb29c4a277baa2..4686757a74d75816e95bc3b78c5397e1e724d7f4 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -322,7 +322,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, u64 addr, u64 max_addr, u64 size) { return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size, - 0, NULL, NUMA_NO_NODE); + 0, NULL, 0); } int __init setup_emu2phys_nid(int *dfl_phys_nid) diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 9112d1cb397bb56faa637216f1e39815c50edd9d..22da9bfd8a4584d79c13adf1e3aa1a19121b3296 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -25,6 +25,7 @@ #include #include #include +#include #include static int xen_pcifront_enable_irq(struct pci_dev *dev) diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index ff1d81385ed7a2658e3733ea864f0f4434ec0e65..768e1f7ab8715b3bd433fb86a08ecda7a1abebc3 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = { .read = xtensa_pmu_read, }; -static int xtensa_pmu_setup(int cpu) +static int xtensa_pmu_setup(unsigned int cpu) { unsigned i; diff --git a/block/blk-core.c b/block/blk-core.c index f61a9f139cf8d5d7476ac920a927c59f1bb04fb3..a33775cd97be1df1a7427ba7bb809f0a93ba65cc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1038,6 +1038,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, q->backing_dev_info->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; + q->backing_dev_info->io_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->backing_dev_info->name = "block"; q->node = node_id; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index da1de190a3b13ce2b62b3c1c1ec76df9fcf3e39d..d89a757cbde0f9cbbb4c449f5f515f6c6bb34c65 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -69,6 +69,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + /* + * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) + * in blk_mq_run_hw_queue(). Its pair is the barrier in + * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART, + * meantime new request added to hctx->dispatch is missed to check in + * blk_mq_run_hw_queue(). + */ + smp_mb(); + blk_mq_run_hw_queue(hctx, true); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 684acaa96db7e11893b8ddd56b05de75e05ffaef..db2db0b70d34f946c6339ae278c7cf8cd211e1a3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1221,6 +1221,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, list_splice_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * Order adding requests to hctx->dispatch and checking + * SCHED_RESTART flag. The pair of this smp_mb() is the one + * in blk_mq_sched_restart(). Avoid restart code path to + * miss the new added requests to hctx->dispatch, meantime + * SCHED_RESTART is observed here. 
+ */ + smp_mb(); + /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another diff --git a/build.config.aarch64 b/build.config.aarch64 index 7eabc9652bf507bd1d4938868134e35d9e9ecbe3..ce1709ac1812f2f6759355a7613d1de5bbdac9d5 100644 --- a/build.config.aarch64 +++ b/build.config.aarch64 @@ -2,7 +2,7 @@ ARCH=arm64 CLANG_TRIPLE=aarch64-linux-gnu- CROSS_COMPILE=aarch64-linux-androidkernel- -CROSS_COMPILE_COMPAT=arm-linux-androideabi- +CROSS_COMPILE_COMPAT=arm-linux-androidkernel- LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin LINUX_GCC_CROSS_COMPILE_COMPAT_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/ diff --git a/build.config.allmodconfig.aarch64 b/build.config.allmodconfig.aarch64 index 863ab1caddab04423b4e3649119927c07e3a0ef9..2fbe380e030a213ab79fa7db7bb6ce6d5caab0ea 100644 --- a/build.config.allmodconfig.aarch64 +++ b/build.config.allmodconfig.aarch64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.aarch64 -. ${ROOT_DIR}/common/build.config.allmodconfig +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig diff --git a/build.config.allmodconfig.arm b/build.config.allmodconfig.arm index 5dd94819c87151c4cbaaad7702633b484ae82e73..e92744a9b51827baf504ca14734052c4372e1b41 100644 --- a/build.config.allmodconfig.arm +++ b/build.config.allmodconfig.arm @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.arm -. ${ROOT_DIR}/common/build.config.allmodconfig +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.arm +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig diff --git a/build.config.allmodconfig.x86_64 b/build.config.allmodconfig.x86_64 index bedb3869d99b28cf2e5dc6befc048a1c2309b5b4..f06b30c8426fa2f6a41ac9e0539174bf19bf3214 100644 --- a/build.config.allmodconfig.x86_64 +++ b/build.config.allmodconfig.x86_64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.x86_64 -. ${ROOT_DIR}/common/build.config.allmodconfig +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig diff --git a/build.config.common b/build.config.common index 0c20768c23010cb934de79895afa843e67e81de0..1b0034f314528b4cdd9b987e194ab21cf57324a2 100644 --- a/build.config.common +++ b/build.config.common @@ -1,11 +1,7 @@ BRANCH=android-4.19-stable KMI_GENERATION=0 -KERNEL_DIR=common -CC=clang -LD=ld.lld -NM=llvm-nm -OBJCOPY=llvm-objcopy +LLVM=1 DEPMOD=depmod CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r383902/bin BUILDTOOLS_PREBUILT_BIN=build/build-tools/path/linux-x86 diff --git a/build.config.gki-debug.aarch64 b/build.config.gki-debug.aarch64 index 58cee7a6362b4daa9d07a85d42e08aab47834255..c1fe2f03a279656e39634e195b531a5d911e1d0f 100644 --- a/build.config.gki-debug.aarch64 +++ b/build.config.gki-debug.aarch64 @@ -1,3 +1,3 @@ -. ${ROOT_DIR}/common/build.config.gki.aarch64 +. 
${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64 TRIM_NONLISTED_KMI="" KMI_SYMBOL_LIST_STRICT_MODE="" diff --git a/build.config.gki-debug.x86_64 b/build.config.gki-debug.x86_64 index 9ee51e7de18c899aa1def3cb7ad472dc90fa6832..d89b7ad4e80447a7bfe9e9dea42055c506b85339 100644 --- a/build.config.gki-debug.x86_64 +++ b/build.config.gki-debug.x86_64 @@ -1,3 +1,3 @@ -. ${ROOT_DIR}/common/build.config.gki.x86_64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.x86_64 TRIM_NONLISTED_KMI="" KMI_SYMBOL_LIST_STRICT_MODE="" diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64 index 6ebaf34cb036b77b02544c8a5b9d662b3d12c0e4..74a6941f1aa79950fd14e9f05909ef5c91715407 100644 --- a/build.config.gki.aarch64 +++ b/build.config.gki.aarch64 @@ -1,6 +1,6 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.aarch64 -. ${ROOT_DIR}/common/build.config.gki +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki ABI_DEFINITION=android/abi_gki_aarch64.xml KMI_SYMBOL_LIST=android/abi_gki_aarch64 @@ -9,4 +9,6 @@ android/abi_gki_aarch64_cuttlefish android/abi_gki_aarch64_qcom " TRIM_NONLISTED_KMI=1 +KMI_SYMBOL_LIST_ADD_ONLY=1 KMI_SYMBOL_LIST_STRICT_MODE=1 +KMI_ENFORCED=1 diff --git a/build.config.gki.x86_64 b/build.config.gki.x86_64 index 627d1e1c27ab0a2ea54bd835b270184dc8a1a9e5..0e04fc692df29c8a02e9ee09bc92ee68a3598aac 100644 --- a/build.config.gki.x86_64 +++ b/build.config.gki.x86_64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.x86_64 -. ${ROOT_DIR}/common/build.config.gki +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki diff --git a/build.config.gki_kasan b/build.config.gki_kasan index e682b0d490bdc2751cea36ce6da6f5b5c7c37625..b3273b10a01c28bd29d7902e15192d44574ba42a 100644 --- a/build.config.gki_kasan +++ b/build.config.gki_kasan @@ -1,6 +1,5 @@ DEFCONFIG=gki_defconfig POST_DEFCONFIG_CMDS="check_defconfig && update_kasan_config" -KERNEL_DIR=common function update_kasan_config() { ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \ -e CONFIG_KASAN \ diff --git a/build.config.gki_kasan.aarch64 b/build.config.gki_kasan.aarch64 index 6277fd662863f2608d14969ccdd431357dd02654..9fd2560c45e87a4025d32bf9ac18ccdce8ac655c 100644 --- a/build.config.gki_kasan.aarch64 +++ b/build.config.gki_kasan.aarch64 @@ -1,4 +1,3 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.aarch64 -. ${ROOT_DIR}/common/build.config.gki_kasan - +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan diff --git a/build.config.gki_kasan.x86_64 b/build.config.gki_kasan.x86_64 index 6a379eceeb3199914703a8603053fc08f34a84ec..eec645805f3905ac1e1fc93c7180355fd1c0cc67 100644 --- a/build.config.gki_kasan.x86_64 +++ b/build.config.gki_kasan.x86_64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.x86_64 -. ${ROOT_DIR}/common/build.config.gki_kasan +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64 +. 
${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 738f3c732363a06cd6d246b57eca2855906621e9..228feeea555f1164884e15abf2cdbc6a90820849 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) (u8)access_byte_width; } } - /* An additional reference for the container */ - - acpi_ut_add_reference(obj_desc->field.region_obj); - ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 8cc4392c61f33aea636251426e263b7017613498..0dc8dea8158237d43bfe95893d32d064ff5fb4e5 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -563,11 +563,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) next_object = object->buffer_field.buffer_obj; break; - case ACPI_TYPE_LOCAL_REGION_FIELD: - - next_object = object->field.region_obj; - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: next_object = object->bank_field.bank_obj; @@ -608,6 +603,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } break; + case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_REGION: default: diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 49e16f009095707f9134295e677648604c2f1a69..9415a0041aaf7fc846a2a892cca391e2589163bc 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1080,29 +1080,21 @@ void acpi_ec_dispatch_gpe(void) /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ -static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index ec20557942d0b1ceecbd41bf5038df859a2a2f71..8eb783309e0ea4979e9b3782fc1765e2ace1be40 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3250,7 +3250,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY)); + !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { /* * -ESRCH indicates VMA cleared. The target is dying. 
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index e68f9910c3b3f39c8f144fcb7c5f36a36b0bd044..f0ff5fc9d7ea6be07b8b97b41501ac46c2933424 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -347,12 +347,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma( return vma; } +static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid) +{ + /* + * Find the amount and size of buffers allocated by the current caller; + * The idea is that once we cross the threshold, whoever is responsible + * for the low async space is likely to try to send another async txn, + * and at some point we'll catch them in the act. This is more efficient + * than keeping a map per pid. + */ + struct rb_node *n = alloc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t total_alloc_size = 0; + size_t num_buffers = 0; + + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + if (buffer->pid != pid) + continue; + if (!buffer->async_transaction) + continue; + total_alloc_size += binder_alloc_buffer_size(alloc, buffer) + + sizeof(struct binder_buffer); + num_buffers++; + } + + /* + * Warn if this pid has more than 50 transactions, or more than 50% of + * async space (which is 25% of total buffer size). + */ + if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n", + alloc->pid, pid, num_buffers, total_alloc_size); + } +} + static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -495,11 +533,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked( buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; buffer->extra_buffers_size = extra_buffers_size; + buffer->pid = pid; if (is_async) { alloc->free_async_space -= size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); + if (alloc->free_async_space < alloc->buffer_size / 10) { + /* + * Start detecting spammers once we have less than 20% + * of async space left (which is less than 10% of total + * buffer size). + */ + debug_low_async_space_locked(alloc, pid); + } } return buffer; @@ -517,6 +564,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (eg, security context) * @is_async: buffer for async transaction + * @pid: pid to attribute allocation to (used for debugging) * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. 
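binder_alloc_new_buf() now records the caller's pid on every buffer so that debug_low_async_space_locked(), added above, can name a likely spammer once free async space runs low. The cutoffs are simple fractions of the mapped area; a stand-alone sketch of that arithmetic for a hypothetical 1 MiB binder mapping (async space being half of the mapping is an assumption carried over from the driver's mmap setup, which is not part of this hunk):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t buffer_size  = 1 << 20;           /* hypothetical 1 MiB mapping            */
        size_t async_space  = buffer_size / 2;   /* async transactions get half of it     */
        size_t detect_below = buffer_size / 10;  /* run the check once free async < this  */
        size_t per_pid_cap  = buffer_size / 4;   /* warn if one pid holds more than this  */

        printf("async space               : %zu bytes\n", async_space);
        printf("spam check kicks in below : %zu bytes free (20%% of async space)\n", detect_below);
        printf("warn above                : 50 buffers, or %zu bytes (50%% of async space)\n", per_pid_cap);
        return 0;
    }

The 50-buffer and quarter-of-the-mapping limits are the ones hard-coded in debug_low_async_space_locked(); the tenth-of-the-mapping trigger is the check added to the is_async path.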
The size allocated @@ -529,13 +577,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { struct binder_buffer *buffer; mutex_lock(&alloc->mutex); buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, - extra_buffers_size, is_async); + extra_buffers_size, is_async, pid); mutex_unlock(&alloc->mutex); return buffer; } diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index b60d161b7a7ae98c412ca9c075af530da4d67ee9..3daa3e21126778cbdb6f119096cc4a6e1b15234f 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -41,6 +41,7 @@ struct binder_transaction; * @offsets_size: size of array of offsets * @extra_buffers_size: size of space for other objects (like sg lists) * @user_data: user pointer to base of buffer space + * @pid: pid to attribute the buffer to (caller) * * Bookkeeping structure for binder transaction buffers */ @@ -60,6 +61,7 @@ struct binder_buffer { size_t offsets_size; size_t extra_buffers_size; void __user *user_data; + int pid; }; /** @@ -126,7 +128,8 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async); + int is_async, + int pid); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index b72708918b06b182ca1faa9c161a136172bfd3f1..c839c490fde35b304165beb941ec01f08bc26ca0 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -128,7 +128,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc, int i; for (i = 0; i < BUFFER_NUM; i++) { - buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); + buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0); if (IS_ERR(buffers[i]) || !check_buffer_pages_allocated(alloc, buffers[i], sizes[i])) { diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 583e366be7e23f05f11e8c8a9e24af12a95425c3..505f8c3168188459bcee138e1b1ec7a86aee51cc 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -72,7 +72,7 @@ struct acard_sg { __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ }; -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int acard_ahci_port_start(struct ata_port *ap); static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) return si; } -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; @@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->hw_tag, opts); + + return AC_ERR_OK; } static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 
2bdb250a2142c031a720ff327040db2b45ee28f2..f1153e7ba3b3a2abc3769bf38fe5304ee25b7c0b 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int ahci_port_start(struct ata_port *ap); static void ahci_port_stop(struct ata_port *ap); -static void ahci_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); static void ahci_freeze(struct ata_port *ap); static void ahci_thaw(struct ata_port *ap); @@ -1640,7 +1640,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) return sata_pmp_qc_defer_cmd_switch(qc); } -static void ahci_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; @@ -1676,6 +1676,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->hw_tag, opts); + + return AC_ERR_OK; } static void ahci_fbs_dec_intr(struct ata_port *ap) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 6b372fa583822f8265759162850610c0a5b08688..db1d86af21b4db2322f9f98765c9309db0e45fdf 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4492,9 +4492,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, @@ -4997,7 +4996,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) return ATA_DEFER_LINK; } -void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } +enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) +{ + return AC_ERR_OK; +} /** * ata_sg_init - Associate command with scatter-gather table. @@ -5484,7 +5486,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) return; } - ap->ops->qc_prep(qc); + qc->err_mask |= ap->ops->qc_prep(qc); + if (unlikely(qc->err_mask)) + goto err; trace_ata_qc_issue(qc); qc->err_mask |= ap->ops->qc_issue(qc); if (unlikely(qc->err_mask)) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6c2c2b07f029ea9abcc9043c123a26b7a3644c32..e7af41d95490d76904f7087da574d0414f9609c2 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2391,6 +2391,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2416,7 +2417,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. 
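For drives matching the new SanDisk blacklist entry above, ATA_HORKAGE_MAX_TRIM_128M makes the Block Limits VPD data in the lines that follow advertise at most 128 MiB worth of 512-byte sectors per TRIM instead of 65535 * ATA_MAX_TRIM_RNUM blocks. A stand-alone check of the shift arithmetic (not kernel code):

    #include <stdio.h>

    #define SECTOR_SHIFT 9    /* 512-byte logical sectors */

    int main(void)
    {
        unsigned long long max_blocks = 128ULL << (20 - SECTOR_SHIFT);  /* 128 MiB in sectors */

        printf("%llu sectors, i.e. %llu MiB per TRIM\n",
               max_blocks, (max_blocks << SECTOR_SHIFT) >> 20);
        return 0;
    }

This prints 262144 sectors, the value placed in the MAXIMUM UNMAP LBA COUNT field when the horkage bit is set.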
*/ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 873cc0906055129eff641428db31c118cbdbeb21..7484ffdabd543c33a4809b5e286e04cf5d462181 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2695,12 +2695,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) +enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; ata_bmdma_fill_sg(qc); + + return AC_ERR_OK; } EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); @@ -2713,12 +2715,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) +enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; ata_bmdma_fill_sg_dumb(qc); + + return AC_ERR_OK; } EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 9588e685d994cf267b1fb4f491749b717274aa0f..765b99319d3cd297d7b6e2daf0363aafd947e578 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap) return ATA_CBL_PATA40; } -static void pata_macio_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) { unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); struct ata_port *ap = qc->ap; @@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) __func__, qc, qc->flags, write, qc->dev->devno); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; table = (struct dbdma_cmd *) priv->dma_table_cpu; @@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) table->command = cpu_to_le16(DBDMA_STOP); dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); + + return AC_ERR_OK; } diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c index e8b6a2e464c988e5eb5f586fa38fc426419b1678..5b1458ca986b65975bdf13075405457696e323ba 100644 --- a/drivers/ata/pata_pxa.c +++ b/drivers/ata/pata_pxa.c @@ -58,25 +58,27 @@ static void pxa_ata_dma_irq(void *d) /* * Prepare taskfile for submission. */ -static void pxa_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; struct dma_async_tx_descriptor *tx; enum dma_transfer_direction dir; if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; dir = (qc->dma_dir == DMA_TO_DEVICE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, DMA_PREP_INTERRUPT); if (!tx) { ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); - return; + return AC_ERR_OK; } tx->callback = pxa_ata_dma_irq; tx->callback_param = pd; pd->dma_cookie = dmaengine_submit(tx); + + return AC_ERR_OK; } /* diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index f1e873a37465e4488a878c8555e097024b8e5392..096b4771b19da52392c0837d466419c7f6182af4 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int adma_port_start(struct ata_port *ap); static void adma_port_stop(struct ata_port *ap); -static void adma_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); static int adma_check_atapi_dma(struct ata_queued_cmd *qc); static void adma_freeze(struct ata_port *ap); @@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) return i; } -static void adma_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) { struct adma_port_priv *pp = qc->ap->private_data; u8 *buf = pp->pkt; @@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) adma_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) - return; + return AC_ERR_OK; buf[i++] = 0; /* Response flags */ buf[i++] = 0; /* reserved */ @@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) printk("%s\n", obuf); } #endif + return AC_ERR_OK; } static inline void adma_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index ae52a45fab5f7eff61792b23f65634708ac35360..8b3be0ff91cb47d009fa04d7958dee3b31b7d99e 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -507,7 +507,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, return num_prde; } -static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sata_fsl_port_priv *pp = ap->private_data; @@ -553,6 +553,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", desc_info, ttl_dwords, num_prde); + + return AC_ERR_OK; } static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 9b6d7930d1c7940025877657e9062723b40921d4..6c7ddc037fce954d8e9ea3f31cb50c1bba01d86e 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) prd[-1].flags |= PRD_END; } -static void inic_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) { struct inic_port_priv *pp = qc->ap->private_data; struct inic_pkt *pkt = pp->pkt; @@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc) inic_fill_sg(prd, qc); pp->cpb_tbl[0] = pp->pkt_dma; + + return AC_ERR_OK; } static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index ab2e9f62ddc1a693b0da58cb01d5f0f7632eea4b..2910b22fac117c6378a99508f4d8633782ad6dd0 100644 --- a/drivers/ata/sata_mv.c +++ 
b/drivers/ata/sata_mv.c @@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); static int mv_qc_defer(struct ata_queued_cmd *qc); -static void mv_qc_prep(struct ata_queued_cmd *qc); -static void mv_qc_prep_iie(struct ata_queued_cmd *qc); +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); static int mv_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); @@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. */ -static void mv_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; @@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) switch (tf->protocol) { case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) - return; + return AC_ERR_OK; /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: mv_rw_multi_errata_sata24(qc); - return; + return AC_ERR_OK; default: - return; + return AC_ERR_OK; } /* Fill in command request block @@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work. - * - * FIXME: modify libata to give qc_prep a return value and - * return error here. */ - BUG_ON(tf->command); - break; + ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, + tf->command); + return AC_ERR_INVALID; } mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); @@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; mv_fill_sg(qc); + + return AC_ERR_OK; } /** @@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. 
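Throughout this series every ->qc_prep() hook is converted from void to enum ata_completion_errors, which is what lets sata_mv, above and in the mv_qc_prep_iie() hunk that follows, fail an unsupported command with AC_ERR_INVALID instead of the old BUG_ON(): ata_qc_issue() ORs the returned value into qc->err_mask and bails out before issuing. A stand-alone model of that caller-side contract; the enum values here are illustrative, not copied from libata:

    #include <stdio.h>

    enum ata_completion_errors { AC_ERR_OK = 0, AC_ERR_INVALID = 1 << 7 };  /* illustrative values */

    static enum ata_completion_errors qc_prep(unsigned char command)
    {
        if (command == 0x42) {                  /* pretend 0x42 is unsupported */
            fprintf(stderr, "unsupported command: %.2x\n", command);
            return AC_ERR_INVALID;              /* previously a BUG_ON()       */
        }
        return AC_ERR_OK;
    }

    int main(void)
    {
        unsigned int err_mask = 0;

        err_mask |= qc_prep(0x42);
        if (err_mask) {                         /* ata_qc_issue() takes its error path here */
            printf("command aborted, err_mask=0x%x\n", err_mask);
            return 1;
        }
        printf("command issued\n");
        return 0;
    }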
*/ -static void mv_qc_prep_iie(struct ata_queued_cmd *qc) +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; @@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) if ((tf->protocol != ATA_PROT_DMA) && (tf->protocol != ATA_PROT_NCQ)) - return; + return AC_ERR_OK; if (tf->command == ATA_CMD_DSM) - return; /* use bmdma for this */ + return AC_ERR_OK; /* use bmdma for this */ /* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) @@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) ); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; mv_fill_sg(qc); + + return AC_ERR_OK; } /** diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 761577d57ff3720bc129c1ca16315447a4e2d5bf..798d549435cc14c65ea961bc10d9d9b524102c49 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); static int nv_adma_slave_config(struct scsi_device *sdev); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); -static void nv_adma_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); static void nv_adma_irq_clear(struct ata_port *ap); @@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap); static void nv_swncq_error_handler(struct ata_port *ap); static int nv_swncq_slave_config(struct scsi_device *sdev); static int nv_swncq_port_start(struct ata_port *ap); -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); @@ -1365,7 +1365,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) return 1; } -static void nv_adma_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; @@ -1377,7 +1377,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); ata_bmdma_qc_prep(qc); - return; + return AC_ERR_OK; } cpb->resp_flags = NV_CPB_RESP_DONE; @@ -1409,6 +1409,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) cpb->ctl_flags = ctl_flags; wmb(); cpb->resp_flags = 0; + + return AC_ERR_OK; } static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) @@ -1972,17 +1974,19 @@ static int nv_swncq_port_start(struct ata_port *ap) return 0; } -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) { if (qc->tf.protocol != ATA_PROT_NCQ) { ata_bmdma_qc_prep(qc); - return; + return AC_ERR_OK; } if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; nv_swncq_fill_sg(qc); + + return AC_ERR_OK; } static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index d032bf657f709a6b17f792c8b0cf4ba166f3e3c3..29d2bb465f60d81366f89c8ed8741f09f806a487 100644 --- 
a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int pdc_common_port_start(struct ata_port *ap); static int pdc_sata_port_start(struct ata_port *ap); -static void pdc_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); @@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); } -static void pdc_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) { struct pdc_port_priv *pp = qc->ap->private_data; unsigned int i; @@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) default: break; } + + return AC_ERR_OK; } static int pdc_is_sataii_tx4(unsigned long flags) diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 1fe941688e95d6dcce1fa0edaee68bbb0b998503..a66d10628c1838fb0c395fb75bd37f6ea23edd7d 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int qs_port_start(struct ata_port *ap); static void qs_host_stop(struct ata_host *host); -static void qs_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct ata_queued_cmd *qc); static void qs_freeze(struct ata_port *ap); @@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) return si; } -static void qs_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) { struct qs_port_priv *pp = qc->ap->private_data; u8 dflags = QS_DF_PORD, *buf = pp->pkt; @@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) qs_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) - return; + return AC_ERR_OK; nelem = qs_fill_sg(qc); @@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) /* frame information structure (FIS) */ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); + + return AC_ERR_OK; } static inline void qs_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 50ebd779d975fe3f1a298da50a94b93c934a6f62..8323f88d17a535d1f0dae5422d733633ef0cec8e 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -554,12 +554,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); } -static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; sata_rcar_bmdma_fill_sg(qc); + + return AC_ERR_OK; } static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index ed76f070d21e4e9c6ec9f68eec8befa953a46956..82adaf02887fb51c79b85686f297405637e3d7a5 100644 --- 
a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev); static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); -static void sil_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); static void sil_bmdma_setup(struct ata_queued_cmd *qc); static void sil_bmdma_start(struct ata_queued_cmd *qc); static void sil_bmdma_stop(struct ata_queued_cmd *qc); @@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); } -static void sil_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; sil_fill_sg(qc); + + return AC_ERR_OK; } static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 319f517137cd57c886482719ff6e01d28642eb7f..7a8ca81e52bfceef3ad3c047c846d4c905ab237f 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev); static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); static int sil24_qc_defer(struct ata_queued_cmd *qc); -static void sil24_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); static void sil24_pmp_attach(struct ata_port *ap); @@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc) return ata_std_qc_defer(qc); } -static void sil24_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sil24_port_priv *pp = ap->private_data; @@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) if (qc->flags & ATA_QCFLAG_DMAMAP) sil24_fill_sg(qc, sge); + + return AC_ERR_OK; } static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 405e606a234d1e818822c58f6ee6ce71b39bcdaf..0d742457925ecff2449c39a471e1b496776309a8 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap); static void pdc_freeze(struct ata_port *ap); static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); -static void pdc20621_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static unsigned int pdc20621_dimm_init(struct ata_host *host); @@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); } -static void pdc20621_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { case ATA_PROT_DMA: @@ -558,6 +558,8 @@ 
static void pdc20621_qc_prep(struct ata_queued_cmd *qc) default: break; } + + return AC_ERR_OK; } static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index afebeb1c3e1e9fea29741fe61d83b980bc1e31de..723bad1201cc5bfbbe8fc241af99134cd17ab88f 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -432,9 +432,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 7323e9210f4b17b9a24e1ef7ab348afb7d03534c..38fec976e62d485524d70ed68b3b38691634c451 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev, rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); if (rc < 0) - goto out; + goto err_disable; rc = -ENOMEM; eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 112b1001c26943e33698a157404c169d1fcdc1f4..ef395b238816c6ca6dc1901c5c65658cb81eb2aa 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1013,6 +1013,7 @@ static int fs_open(struct atm_vcc *atm_vcc) error = make_rate (pcr, r, &tmc0, NULL); if (error) { kfree(tc); + kfree(vcc); return error; } } diff --git a/drivers/base/core.c b/drivers/base/core.c index 62fe457e9cf7a15297f7d183b2e1915a5fd77767..e9f67a7f41b9892c0327abba733320c5dc71dd9a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -3802,9 +3802,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { - if (fwnode) { - struct fwnode_handle *fn = dev->fwnode; + struct fwnode_handle *fn = dev->fwnode; + if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; @@ -3814,8 +3814,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } dev->fwnode = fwnode; } else { - dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
- dev->fwnode->secondary : NULL; + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 86e267d13978b77cc26e47c3a528791cb5cc2202..cbfab9774f1726263a2f7b92931af61c86381ae5 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -493,7 +493,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) drv->bus->name, __func__, drv->name, dev_name(dev)); if (!list_empty(&dev->devres_head)) { dev_crit(dev, "Resources present before probing\n"); - return -EBUSY; + ret = -EBUSY; + goto done; } re_probe: @@ -600,7 +601,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) ret = 0; done: atomic_dec(&probe_count); - wake_up(&probe_waitqueue); + wake_up_all(&probe_waitqueue); return ret; } @@ -813,7 +814,9 @@ static int __device_attach(struct device *dev, bool allow_async) int ret = 0; device_lock(dev); - if (dev->driver) { + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { if (device_is_bound(dev)) { ret = 1; goto out_unlock; diff --git a/drivers/base/node.c b/drivers/base/node.c index 9fe33d3e2d3b856966104387dc366c8ef146b1b4..d2f133feaa9b24a483444b356fd980607e808134 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -414,10 +414,32 @@ static int __ref get_nid_for_pfn(unsigned long pfn) return pfn_to_nid(pfn); } +static int do_register_memory_block_under_node(int nid, + struct memory_block *mem_blk) +{ + int ret; + + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. + */ + mem_blk->nid = nid; + + ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + if (ret) + return ret; + + return sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); +} + /* register memory section under specified node if it spans that node */ -int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg) +int register_mem_block_under_node_early(struct memory_block *mem_blk, void *arg) { - int ret, nid = *(int *)arg; + int nid = *(int *)arg; unsigned long pfn, sect_start_pfn, sect_end_pfn; sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); @@ -437,38 +459,33 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg) } /* - * We need to check if page belongs to nid only for the boot - * case, during hotplug we know that all pages in the memory - * block belong to the same node. - */ - if (system_state == SYSTEM_BOOTING) { - page_nid = get_nid_for_pfn(pfn); - if (page_nid < 0) - continue; - if (page_nid != nid) - continue; - } - - /* - * If this memory block spans multiple nodes, we only indicate - * the last processed node. + * We need to check if page belongs to nid only at the boot + * case because node's ranges can be interleaved. 
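The old register_mem_sect_under_node() is split in this hunk into an early variant, which still resolves every pfn to a node because boot-time ranges can interleave, and a hotplug variant, which links the block unconditionally since a hotplugged block is known to sit on a single node; link_mem_sections() then just picks the callback by context. A trimmed stand-alone model of that dispatch (the names mirror the patch, but none of this is kernel code):

    #include <stdio.h>

    enum meminit_context { MEMINIT_EARLY, MEMINIT_HOTPLUG };

    typedef int (*walk_func_t)(int nid);

    static int register_early(int nid)
    {
        printf("early: verify each pfn really belongs to node %d before linking\n", nid);
        return 0;
    }

    static int register_hotplug(int nid)
    {
        printf("hotplug: the whole block is on node %d, link it directly\n", nid);
        return 0;
    }

    static int link_mem_sections_model(int nid, enum meminit_context ctx)
    {
        walk_func_t func = (ctx == MEMINIT_HOTPLUG) ? register_hotplug
                                                    : register_early;
        return func(nid);
    }

    int main(void)
    {
        link_mem_sections_model(0, MEMINIT_EARLY);
        link_mem_sections_model(1, MEMINIT_HOTPLUG);
        return 0;
    }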
*/ - mem_blk->nid = nid; - - ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, - &mem_blk->dev.kobj, - kobject_name(&mem_blk->dev.kobj)); - if (ret) - return ret; + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; - return sysfs_create_link_nowarn(&mem_blk->dev.kobj, - &node_devices[nid]->dev.kobj, - kobject_name(&node_devices[nid]->dev.kobj)); + return do_register_memory_block_under_node(nid, mem_blk); } /* mem section does not span the specified node */ return 0; } +/* + * During hotplug we know that all pages in the memory block belong to the same + * node. + */ +static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, + void *arg) +{ + int nid = *(int *)arg; + + return do_register_memory_block_under_node(nid, mem_blk); +} + /* * Unregister a memory block device under the node it spans. Memory blocks * with multiple nodes cannot be offlined and therefore also never be removed. @@ -484,10 +501,17 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn) +int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, + enum meminit_context context) { - return walk_memory_range(start_pfn, end_pfn, (void *)&nid, - register_mem_sect_under_node); + walk_memory_blocks_func_t func; + + if (context == MEMINIT_HOTPLUG) + func = register_mem_block_under_node_hotplug; + else + func = register_mem_block_under_node_early; + + return walk_memory_range(start_pfn, end_pfn, (void *)&nid, func); } #ifdef CONFIG_HUGETLBFS diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 32058a4382012b200f25ebf549fd6f294fe257e0..aa4908c2560f4eee26000505e6b3cbf9094e4fe0 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1763,13 +1763,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } /* - * If a device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. + * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. 
*/ - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); + pm_runtime_barrier(dev); if (pm_wakeup_pending()) { dev->power.direct_complete = false; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index d26b485ccc7d087ccfa42bc0c8a30b28b4683dce..e8b3353c18eb850572efa3d233d1b67038926e88 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2367,7 +2367,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, EXPORT_SYMBOL_GPL(regmap_raw_write_async); static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, - unsigned int val_len) + unsigned int val_len, bool noinc) { struct regmap_range_node *range; int ret; @@ -2380,7 +2380,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, range = _regmap_range_lookup(map, reg); if (range) { ret = _regmap_select_page(map, ®, range, - val_len / map->format.val_bytes); + noinc ? 1 : val_len / map->format.val_bytes); if (ret != 0) return ret; } @@ -2418,7 +2418,7 @@ static int _regmap_bus_read(void *context, unsigned int reg, if (!map->format.parse_val) return -EINVAL; - ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes); + ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); if (ret == 0) *val = map->format.parse_val(work_val); @@ -2536,7 +2536,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read bytes that fit into whole chunks */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_read(map, reg, val, chunk_bytes); + ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); if (ret != 0) goto out; @@ -2547,7 +2547,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read remaining bytes */ if (val_len) { - ret = _regmap_raw_read(map, reg, val, val_len); + ret = _regmap_raw_read(map, reg, val, val_len, false); if (ret != 0) goto out; } @@ -2622,7 +2622,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, read_len = map->max_raw_read; else read_len = val_len; - ret = _regmap_raw_read(map, reg, val, read_len); + ret = _regmap_raw_read(map, reg, val, read_len, true); if (ret) goto out_unlock; val = ((u8 *)val) + read_len; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f7b9b23d4c76ced0b8558d87bc058a658243b222..5094ef15afd93f7402d56f21187fbe2f3d182605 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -227,24 +227,35 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_unfreeze_queue(lo->lo_queue); } +/** + * loop_validate_block_size() - validates the passed in block size + * @bsize: size to validate + */ static int -figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) +loop_validate_block_size(unsigned short bsize) +{ + if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) + return -EINVAL; + + return 0; +} + +/** + * loop_set_size() - sets device size and notifies userspace + * @lo: struct loop_device to set the size for + * @size: new size of the loop device + * + * Callers must validate that the size passed into this function fits into + * a sector_t, eg using loop_validate_size() + */ +static void loop_set_size(struct loop_device *lo, loff_t size) { - loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); - sector_t x = (sector_t)size; struct block_device *bdev = lo->lo_device; - if (unlikely((loff_t)x != size)) - return -EFBIG; - if (lo->lo_offset != offset) - lo->lo_offset = offset; - if (lo->lo_sizelimit != sizelimit) - 
lo->lo_sizelimit = sizelimit; - set_capacity(lo->lo_disk, x); - bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); + set_capacity(lo->lo_disk, size); + bd_set_size(bdev, size << SECTOR_SHIFT); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); - return 0; } static inline int @@ -866,6 +877,7 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + u32 granularity, max_discard_sectors; /* * If the backing device is a block device, mirror its zeroing @@ -878,11 +890,10 @@ static void loop_config_discard(struct loop_device *lo) struct request_queue *backingq; backingq = bdev_get_queue(inode->i_bdev); - blk_queue_max_discard_sectors(q, - backingq->limits.max_write_zeroes_sectors); - blk_queue_max_write_zeroes_sectors(q, - backingq->limits.max_write_zeroes_sectors); + max_discard_sectors = backingq->limits.max_write_zeroes_sectors; + granularity = backingq->limits.discard_granularity ?: + queue_physical_block_size(backingq); /* * We use punch hole to reclaim the free space used by the @@ -891,23 +902,26 @@ static void loop_config_discard(struct loop_device *lo) * useful information. */ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { - q->limits.discard_granularity = 0; - q->limits.discard_alignment = 0; - blk_queue_max_discard_sectors(q, 0); - blk_queue_max_write_zeroes_sectors(q, 0); + max_discard_sectors = 0; + granularity = 0; } else { - q->limits.discard_granularity = inode->i_sb->s_blocksize; - q->limits.discard_alignment = 0; - - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + max_discard_sectors = UINT_MAX >> 9; + granularity = inode->i_sb->s_blocksize; } - if (q->limits.max_write_zeroes_sectors) + if (max_discard_sectors) { + q->limits.discard_granularity = granularity; + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - else + } else { + q->limits.discard_granularity = 0; + blk_queue_max_discard_sectors(q, 0); + blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); + } + q->limits.discard_alignment = 0; } static void loop_unprepare_queue(struct loop_device *lo) @@ -933,22 +947,124 @@ static int loop_prepare_queue(struct loop_device *lo) return 0; } -static int loop_set_fd(struct loop_device *lo, fmode_t mode, - struct block_device *bdev, unsigned int arg) +static int +loop_release_xfer(struct loop_device *lo) +{ + int err = 0; + struct loop_func_table *xfer = lo->lo_encryption; + + if (xfer) { + if (xfer->release) + err = xfer->release(lo); + lo->transfer = NULL; + lo->lo_encryption = NULL; + module_put(xfer->owner); + } + return err; +} + +static int +loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, + const struct loop_info64 *i) +{ + int err = 0; + + if (xfer) { + struct module *owner = xfer->owner; + + if (!try_module_get(owner)) + return -EINVAL; + if (xfer->init) + err = xfer->init(lo, i); + if (err) + module_put(owner); + else + lo->lo_encryption = xfer; + } + return err; +} + +/** + * loop_set_status_from_info - configure device from loop_info + * @lo: struct loop_device to configure + * @info: struct loop_info64 to configure the device with + * + * Configures the loop device parameters according to the passed + * in loop_info64 configuration. 
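loop_validate_block_size() above and loop_set_status_from_info(), whose body follows this comment, give the legacy ioctls and the new configure path a single set of checks; the block size in particular must be a power of two between 512 bytes and PAGE_SIZE. A stand-alone check applying the same rule, with PAGE_SIZE assumed to be 4096:

    #include <stdbool.h>
    #include <stdio.h>

    static bool valid_loop_bsize(unsigned long bsize, unsigned long page_size)
    {
        return bsize >= 512 && bsize <= page_size && (bsize & (bsize - 1)) == 0;
    }

    int main(void)
    {
        unsigned long sizes[] = { 256, 512, 1024, 3072, 4096, 8192 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%lu: %s\n", sizes[i],
                   valid_loop_bsize(sizes[i], 4096) ? "ok" : "rejected");
        return 0;
    }

Only 512, 1024 and 4096 pass; 3072 fails the power-of-two test and the others fall outside the 512..PAGE_SIZE window, matching the -EINVAL cases in loop_validate_block_size().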
+ */ +static int +loop_set_status_from_info(struct loop_device *lo, + const struct loop_info64 *info) +{ + int err; + struct loop_func_table *xfer; + kuid_t uid = current_uid(); + + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + + err = loop_release_xfer(lo); + if (err) + return err; + + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; + + if (type >= MAX_LO_CRYPT) + return -EINVAL; + xfer = xfer_funcs[type]; + if (xfer == NULL) + return -EINVAL; + } else + xfer = NULL; + + err = loop_init_xfer(lo, xfer, info); + if (err) + return err; + + lo->lo_offset = info->lo_offset; + lo->lo_sizelimit = info->lo_sizelimit; + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); + memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); + lo->lo_file_name[LO_NAME_SIZE-1] = 0; + lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; + + if (!xfer) + xfer = &none_funcs; + lo->transfer = xfer->transfer; + lo->ioctl = xfer->ioctl; + + lo->lo_flags = info->lo_flags; + + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; + if (info->lo_encrypt_key_size) { + memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, + info->lo_encrypt_key_size); + lo->lo_key_owner = uid; + } + + return 0; +} + +static int loop_configure(struct loop_device *lo, fmode_t mode, + struct block_device *bdev, + const struct loop_config *config) { struct file *file; struct inode *inode; struct address_space *mapping; - int lo_flags = 0; int error; loff_t size; bool partscan; + unsigned short bsize; /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); error = -EBADF; - file = fget(arg); + file = fget(config->fd); if (!file) goto out; @@ -967,51 +1083,59 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, mapping = file->f_mapping; inode = mapping->host; + size = get_loop_size(lo, file); + + if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { + error = -EINVAL; + goto out_unlock; + } + + if (config->block_size) { + error = loop_validate_block_size(config->block_size); + if (error) + goto out_unlock; + } + + error = loop_set_status_from_info(lo, &config->info); + if (error) + goto out_unlock; + if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || !file->f_op->write_iter) - lo_flags |= LO_FLAGS_READ_ONLY; + lo->lo_flags |= LO_FLAGS_READ_ONLY; - error = -EFBIG; - size = get_loop_size(lo, file); - if ((loff_t)(sector_t)size != size) - goto out_unlock; error = loop_prepare_queue(lo); if (error) goto out_unlock; error = 0; - set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); + set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->use_dio = false; + lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO; lo->lo_device = bdev; - lo->lo_flags = lo_flags; lo->lo_backing_file = file; - lo->transfer = NULL; - lo->ioctl = NULL; - lo->lo_sizelimit = 0; lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); - if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) + if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) blk_queue_write_cache(lo->lo_queue, true, false); - if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) { + if (config->block_size) + bsize = config->block_size; + else if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) /* In case of direct I/O, match underlying block size */ - unsigned short bsize = bdev_logical_block_size( - 
inode->i_sb->s_bdev); + bsize = bdev_logical_block_size(inode->i_sb->s_bdev); + else + bsize = 512; - blk_queue_logical_block_size(lo->lo_queue, bsize); - blk_queue_physical_block_size(lo->lo_queue, bsize); - blk_queue_io_min(lo->lo_queue, bsize); - } + blk_queue_logical_block_size(lo->lo_queue, bsize); + blk_queue_physical_block_size(lo->lo_queue, bsize); + blk_queue_io_min(lo->lo_queue, bsize); loop_update_dio(lo); - set_capacity(lo->lo_disk, size); - bd_set_size(bdev, size << 9); loop_sysfs_init(lo); - /* let user-space know about the new size */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + loop_set_size(lo, size); set_blocksize(bdev, S_ISBLK(inode->i_mode) ? block_size(inode->i_bdev) : PAGE_SIZE); @@ -1040,43 +1164,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, return error; } -static int -loop_release_xfer(struct loop_device *lo) -{ - int err = 0; - struct loop_func_table *xfer = lo->lo_encryption; - - if (xfer) { - if (xfer->release) - err = xfer->release(lo); - lo->transfer = NULL; - lo->lo_encryption = NULL; - module_put(xfer->owner); - } - return err; -} - -static int -loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, - const struct loop_info64 *i) -{ - int err = 0; - - if (xfer) { - struct module *owner = xfer->owner; - - if (!try_module_get(owner)) - return -EINVAL; - if (xfer->init) - err = xfer->init(lo, i); - if (err) - module_put(owner); - else - lo->lo_encryption = xfer; - } - return err; -} - static int __loop_clr_fd(struct loop_device *lo, bool release) { struct file *filp = NULL; @@ -1224,10 +1311,11 @@ static int loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { int err; - struct loop_func_table *xfer; - kuid_t uid = current_uid(); struct block_device *bdev; + kuid_t uid = current_uid(); + int prev_lo_flags; bool partscan = false; + bool size_changed = false; err = mutex_lock_killable(&loop_ctl_mutex); if (err) @@ -1242,13 +1330,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) err = -ENXIO; goto out_unlock; } - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) { - err = -EINVAL; - goto out_unlock; - } if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { + size_changed = true; sync_blockdev(lo->lo_device); invalidate_bdev(lo->lo_device); } @@ -1256,79 +1341,44 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); - err = loop_release_xfer(lo); - if (err) + if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { + /* If any pages were dirtied after kill_bdev(), try again */ + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); goto out_unfreeze; + } - if (info->lo_encrypt_type) { - unsigned int type = info->lo_encrypt_type; + prev_lo_flags = lo->lo_flags; - if (type >= MAX_LO_CRYPT) { - err = -EINVAL; - goto out_unfreeze; - } - xfer = xfer_funcs[type]; - if (xfer == NULL) { - err = -EINVAL; - goto out_unfreeze; - } - } else - xfer = NULL; - - err = loop_init_xfer(lo, xfer, info); + err = loop_set_status_from_info(lo, info); if (err) goto out_unfreeze; - if (lo->lo_offset != info->lo_offset || - lo->lo_sizelimit != info->lo_sizelimit) { - /* kill_bdev should have truncated all the pages */ - if (lo->lo_device->bd_inode->i_mapping->nrpages) { - err = -EAGAIN; - pr_warn("%s: loop%d 
(%s) has still dirty pages (nrpages=%lu)\n", - __func__, lo->lo_number, lo->lo_file_name, - lo->lo_device->bd_inode->i_mapping->nrpages); - goto out_unfreeze; - } - if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { - err = -EFBIG; - goto out_unfreeze; - } + /* Mask out flags that can't be set using LOOP_SET_STATUS. */ + lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS; + /* For those flags, use the previous values instead */ + lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; + /* For flags that can't be cleared, use previous values too */ + lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS; + + if (size_changed) { + loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, + lo->lo_backing_file); + loop_set_size(lo, new_size); } loop_config_discard(lo); - memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); - memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); - lo->lo_file_name[LO_NAME_SIZE-1] = 0; - lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; - - if (!xfer) - xfer = &none_funcs; - lo->transfer = xfer->transfer; - lo->ioctl = xfer->ioctl; - - if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != - (info->lo_flags & LO_FLAGS_AUTOCLEAR)) - lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; - - lo->lo_encrypt_key_size = info->lo_encrypt_key_size; - lo->lo_init[0] = info->lo_init[0]; - lo->lo_init[1] = info->lo_init[1]; - if (info->lo_encrypt_key_size) { - memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, - info->lo_encrypt_key_size); - lo->lo_key_owner = uid; - } - /* update dio if lo_offset or transfer is changed */ __loop_update_dio(lo, lo->use_dio); out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); - if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && - !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { - lo->lo_flags |= LO_FLAGS_PARTSCAN; + if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) && + !(prev_lo_flags & LO_FLAGS_PARTSCAN)) { lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; bdev = lo->lo_device; partscan = true; @@ -1492,10 +1542,15 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { static int loop_set_capacity(struct loop_device *lo) { + loff_t size; + if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; - return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); + size = get_loop_size(lo, lo->lo_backing_file); + loop_set_size(lo, size); + + return 0; } static int loop_set_dio(struct loop_device *lo, unsigned long arg) @@ -1519,8 +1574,9 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (lo->lo_state != Lo_bound) return -ENXIO; - if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) - return -EINVAL; + err = loop_validate_block_size(arg); + if (err) + return err; if (lo->lo_queue->limits.logical_block_size == arg) return 0; @@ -1578,11 +1634,31 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; + void __user *argp = (void __user *) arg; int err; switch (cmd) { - case LOOP_SET_FD: - return loop_set_fd(lo, mode, bdev, arg); + case LOOP_SET_FD: { + /* + * Legacy case - pass in a zeroed out struct loop_config with + * only the file descriptor set, which corresponds to the + * default parameters we'd have used otherwise.
+ */ + struct loop_config config; + + memset(&config, 0, sizeof(config)); + config.fd = arg; + + return loop_configure(lo, mode, bdev, &config); + } + case LOOP_CONFIGURE: { + struct loop_config config; + + if (copy_from_user(&config, argp, sizeof(config))) + return -EFAULT; + + return loop_configure(lo, mode, bdev, &config); + } case LOOP_CHANGE_FD: return loop_change_fd(lo, bdev, arg); case LOOP_CLR_FD: @@ -1590,21 +1666,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_STATUS: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { - err = loop_set_status_old(lo, - (struct loop_info __user *)arg); + err = loop_set_status_old(lo, argp); } break; case LOOP_GET_STATUS: - return loop_get_status_old(lo, (struct loop_info __user *) arg); + return loop_get_status_old(lo, argp); case LOOP_SET_STATUS64: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { - err = loop_set_status64(lo, - (struct loop_info64 __user *) arg); + err = loop_set_status64(lo, argp); } break; case LOOP_GET_STATUS64: - return loop_get_status64(lo, (struct loop_info64 __user *) arg); + return loop_get_status64(lo, argp); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: case LOOP_SET_BLOCK_SIZE: @@ -1756,6 +1830,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_CLR_FD: case LOOP_GET_STATUS64: case LOOP_SET_STATUS64: + case LOOP_CONFIGURE: arg = (unsigned long) compat_ptr(arg); /* fall through */ case LOOP_SET_FD: @@ -2291,6 +2366,8 @@ static void __exit loop_exit(void) range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; + mutex_lock(&loop_ctl_mutex); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); @@ -2298,6 +2375,8 @@ static void __exit loop_exit(void) unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); + + mutex_unlock(&loop_ctl_mutex); } module_init(loop_init); diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index d2d7dc9cd58d21fcfb897af6f97a50843fcffc63..4fef1fb918ece9ea9501bc3b9105baf99a1b802f 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1086,7 +1086,7 @@ static int null_handle_rq(struct nullb_cmd *cmd) len = bvec.bv_len; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, - req_op(rq) & REQ_FUA); + rq->cmd_flags & REQ_FUA); if (err) { spin_unlock_irq(&nullb->lock); return err; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1101290971699fb77059f29c5b8a3c69960aecc5..9f1265ce2e36507641c8a20daeb553733673d2d4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4124,6 +4124,9 @@ static ssize_t rbd_config_info_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return sprintf(buf, "%s\n", rbd_dev->config_info); } @@ -4235,6 +4238,9 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = rbd_dev_refresh(rbd_dev); if (ret) return ret; @@ -5846,6 +5852,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_client *rbdc; int rc; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!try_module_get(THIS_MODULE)) return -ENODEV; @@ -5995,6 +6004,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, bool force = false; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + dev_id = -1; opt_buf[0] = '\0'; sscanf(buf, "%d %5s", &dev_id, opt_buf); diff 
--git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index 4568608ed6f59f334113c07284495d02499b7453..cee4fe2b2bb55b54b979f98e7037481f3fa77a50 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -288,6 +288,67 @@ static int bt_clk_disable(struct bt_power_clk_data *clk) return rc; } +static int bt_enable_bt_reset_gpios_safely(void) +{ + int rc = 0; + int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst; + int wl_reset_gpio = bt_power_pdata->wl_gpio_sys_rst; + + if (wl_reset_gpio >= 0) { + BT_PWR_INFO("%s: BTON:Turn Bt On", __func__); + BT_PWR_INFO("%s: wl-reset-gpio(%d) value(%d)", + __func__, wl_reset_gpio, + gpio_get_value(wl_reset_gpio)); + } + + if ((wl_reset_gpio < 0) || + ((wl_reset_gpio >= 0) && + gpio_get_value(wl_reset_gpio))) { + BT_PWR_INFO("%s: BTON: Asserting BT_EN", + __func__); + rc = gpio_direction_output(bt_reset_gpio, 1); + if (rc) { + BT_PWR_ERR("%s: Unable to set direction", + __func__); + return rc; + } + bt_power_src_status[BT_RESET_GPIO] = + gpio_get_value(bt_reset_gpio); + } + + if ((wl_reset_gpio >= 0) && + (gpio_get_value(wl_reset_gpio) == 0)) { + if (gpio_get_value(bt_reset_gpio)) { + BT_PWR_INFO("%s: Wlan Off and BT On too close", + __func__); + BT_PWR_INFO("%s: Reset BT_EN", __func__); + BT_PWR_INFO("%s: Enable it after delay", + __func__); + rc = gpio_direction_output(bt_reset_gpio, 0); + if (rc) { + BT_PWR_ERR("%s:Unable to set direction", + __func__); + return rc; + } + bt_power_src_status[BT_RESET_GPIO] = + gpio_get_value(bt_reset_gpio); + } + BT_PWR_INFO("%s: 100ms delay added", __func__); + BT_PWR_INFO("%s: for AON output to fully discharge", + __func__); + msleep(100); + rc = gpio_direction_output(bt_reset_gpio, 1); + if (rc) { + BT_PWR_ERR("%s: Unable to set direction", + __func__); + return rc; + } + bt_power_src_status[BT_RESET_GPIO] = + gpio_get_value(bt_reset_gpio); + } + return rc; +} + static int bt_configure_gpios(int on) { int rc = 0; @@ -312,7 +373,7 @@ static int bt_configure_gpios(int on) bt_power_src_status[BT_RESET_GPIO] = gpio_get_value(bt_reset_gpio); msleep(50); - BT_PWR_INFO("BTON:Turn Bt Off bt-reset-gpio(%d) value(%d)\n", + BT_PWR_INFO("BTON:Turn Bt Off bt-reset-gpio(%d) value(%d)", bt_reset_gpio, gpio_get_value(bt_reset_gpio)); if (bt_sw_ctrl_gpio >= 0) { BT_PWR_INFO("BTON:Turn Bt Off"); @@ -323,14 +384,12 @@ static int bt_configure_gpios(int on) bt_power_src_status[BT_SW_CTRL_GPIO]); } - rc = gpio_direction_output(bt_reset_gpio, 1); - + rc = bt_enable_bt_reset_gpios_safely(); if (rc) { - BT_PWR_ERR("Unable to set direction\n"); - return rc; + BT_PWR_ERR("%s:bt_enable_bt_reset_gpios_safely failed", + __func__); } - bt_power_src_status[BT_RESET_GPIO] = - gpio_get_value(bt_reset_gpio); + msleep(50); /* Check if SW_CTRL is asserted */ if (bt_sw_ctrl_gpio >= 0) { @@ -384,6 +443,18 @@ static int bt_configure_gpios(int on) return rc; } +static void bt_free_gpios(void) +{ + if (bt_power_pdata->bt_gpio_sys_rst > 0) + gpio_free(bt_power_pdata->bt_gpio_sys_rst); + if (bt_power_pdata->wl_gpio_sys_rst > 0) + gpio_free(bt_power_pdata->wl_gpio_sys_rst); + if (bt_power_pdata->bt_gpio_sw_ctrl > 0) + gpio_free(bt_power_pdata->bt_gpio_sw_ctrl); + if (bt_power_pdata->bt_gpio_debug > 0) + gpio_free(bt_power_pdata->bt_gpio_debug); +} + static int bluetooth_power(int on) { int rc = 0; @@ -547,12 +618,9 @@ static int bluetooth_power(int on) if (bt_power_pdata->bt_gpio_sys_rst > 0) bt_configure_gpios(on); gpio_fail: - if (bt_power_pdata->bt_gpio_sys_rst > 0) - 
gpio_free(bt_power_pdata->bt_gpio_sys_rst); - if (bt_power_pdata->bt_gpio_sw_ctrl > 0) - gpio_free(bt_power_pdata->bt_gpio_sw_ctrl); - if (bt_power_pdata->bt_gpio_debug > 0) - gpio_free(bt_power_pdata->bt_gpio_debug); + //Free Gpios + bt_free_gpios(); + if (bt_power_pdata->bt_chip_clk) bt_clk_disable(bt_power_pdata->bt_chip_clk); clk_fail: @@ -821,6 +889,12 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev) if (bt_power_pdata->bt_gpio_sys_rst < 0) BT_PWR_INFO("bt-reset-gpio not provided in devicetree"); + bt_power_pdata->wl_gpio_sys_rst = + of_get_named_gpio(pdev->dev.of_node, + "qca,wl-reset-gpio", 0); + if (bt_power_pdata->wl_gpio_sys_rst < 0) + BT_PWR_INFO("wl-reset-gpio not provided in devicetree"); + bt_power_pdata->bt_gpio_sw_ctrl = of_get_named_gpio(pdev->dev.of_node, "qca,bt-sw-ctrl-gpio", 0); diff --git a/drivers/bluetooth/btfm_slim_slave.h b/drivers/bluetooth/btfm_slim_slave.h index 67e08a6ce667f29d928145f12b3683b21d30e452..48b9e84c9f535261b227a56a87d496564807f50d 100644 --- a/drivers/bluetooth/btfm_slim_slave.h +++ b/drivers/bluetooth/btfm_slim_slave.h @@ -71,8 +71,8 @@ #define SLAVE_SB_PGD_PORT_TX_SCO 0 #define SLAVE_SB_PGD_PORT_TX1_FM 1 #define SLAVE_SB_PGD_PORT_TX2_FM 2 -#define CHRKVER3_SB_PGD_PORT_TX1_FM 4 -#define CHRKVER3_SB_PGD_PORT_TX2_FM 5 +#define CHRKVER3_SB_PGD_PORT_TX1_FM 5 +#define CHRKVER3_SB_PGD_PORT_TX2_FM 4 #define SLAVE_SB_PGD_PORT_RX_SCO 16 #define SLAVE_SB_PGD_PORT_RX_A2P 17 diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 8d1cd2479e36f5dd5c0d057b3cdf4dd92d7ef019..cc51395d8b0e566b8366e0bb5d8c20c33c0d76e5 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -343,11 +343,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, * the end. */ len = patch_length; - buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length, - GFP_KERNEL); + buf = kvmalloc(patch_length, GFP_KERNEL); if (!buf) return -ENOMEM; + memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4); memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); *_buf = buf; @@ -415,8 +415,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) if (ret < 0) return ret; ret = fw->size; - *buff = kmemdup(fw->data, ret, GFP_KERNEL); - if (!*buff) + *buff = kvmalloc(fw->size, GFP_KERNEL); + if (*buff) + memcpy(*buff, fw->data, ret); + else ret = -ENOMEM; release_firmware(fw); @@ -454,14 +456,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, goto out; if (btrtl_dev->cfg_len > 0) { - tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); + tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); if (!tbuff) { ret = -ENOMEM; goto out; } memcpy(tbuff, fw_data, ret); - kfree(fw_data); + kvfree(fw_data); memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); ret += btrtl_dev->cfg_len; @@ -474,7 +476,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, ret = rtl_download_firmware(hdev, fw_data, ret); out: - kfree(fw_data); + kvfree(fw_data); return ret; } @@ -501,8 +503,8 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) void btrtl_free(struct btrtl_device_info *btrtl_dev) { - kfree(btrtl_dev->fw_data); - kfree(btrtl_dev->cfg_data); + kvfree(btrtl_dev->fw_data); + kvfree(btrtl_dev->cfg_data); kfree(btrtl_dev); } EXPORT_SYMBOL_GPL(btrtl_free); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 8eede1197cd2ee7f6e32042d29fff7a660a14e92..5a68cd4dd71cb3bc7c5c27b6b50954b2cd449c69 100644 --- a/drivers/bluetooth/hci_h5.c +++ 
b/drivers/bluetooth/hci_h5.c @@ -803,7 +803,7 @@ static int h5_serdev_probe(struct serdev_device *serdev) if (!h5) return -ENOMEM; - set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags); h5->hu = &h5->serdev_hu; h5->serdev_hu.serdev = serdev; diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 46e20444ba19bdbe85163eddac4c6d93b4cbab3c..d3fb0d657fa5280bf6f8bf56c9edffec8efba2aa 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -369,7 +369,8 @@ void hci_uart_unregister_device(struct hci_uart *hu) struct hci_dev *hdev = hu->hdev; clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hci_unregister_dev(hdev); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); hci_free_dev(hdev); cancel_work_sync(&hu->write_work); diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index e31c02dc777098ca99901e033a34a5b66f63dbb0..cbd970fb02f18f7b5e0f7cff23c248baa6a1f926 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, return 0; } +/* + * Released firmware describes the IO port max address as 0x3fff, which is + * the max host bus address. Fixup to a proper range. This will probably + * never be fixed in firmware. + */ +static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, + struct resource *r) +{ + if (r->end != 0x3fff) + return; + + if (r->start == 0xe4) + r->end = 0xe4 + 0x04 - 1; + else if (r->start == 0x2f8) + r->end = 0x2f8 + 0x08 - 1; + else + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", + r); +} + /* * hisi_lpc_acpi_set_io_res - set the resources for a child * @child: the device node to be updated the I/O resource @@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, return -ENOMEM; } count = 0; - list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + list_for_each_entry(rentry, &resource_list, node) { + resources[count] = *rentry->res; + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); + count++; + } acpi_dev_free_resource_list(&resource_list); diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 5a0b4c8712ee0a76115ab4b14731df1180dc2211..7218b5b8890d15dec4ff2113c9e1dd18f7d45d0f 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -476,6 +476,8 @@ struct fastrpc_file { /* Flag to enable PM wake/relax voting for every remote invoke */ int wake_enable; uint32_t ws_timeout; + /* To indicate attempt has been made to allocate memory for debug_buf */ + int debug_buf_alloced_attempted; }; static struct fastrpc_apps gfa; @@ -4071,6 +4073,14 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl) if (debugfs_root) { buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1; + + spin_lock(&fl->hlock); + if (fl->debug_buf_alloced_attempted) { + spin_unlock(&fl->hlock); + return err; + } + fl->debug_buf_alloced_attempted = 1; + spin_unlock(&fl->hlock); fl->debug_buf = kzalloc(buf_size, GFP_KERNEL); if (!fl->debug_buf) { err = -ENOMEM; diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index b161bdf600004395ce56e71ba79171a8c769f512..0941d38b2d32f08e0520a3f782fd989f75455727 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void) if (intel_private.needs_dmar) { dma_addr = pci_map_page(intel_private.pcidev, 
page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) { + __free_page(page); return -EINVAL; + } intel_private.scratch_page_dma = dma_addr; } else diff --git a/drivers/char/random.c b/drivers/char/random.c index e529698cb53fc3345c2480842245ec4c43e53283..bef8c75f0429c6f081a10b6c713f20582c8812c6 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1142,14 +1142,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta = sample.jiffies - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, sample.jiffies); - delta2 = delta - state->last_delta; - state->last_delta = delta; + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); - delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); if (delta < 0) delta = -delta; @@ -1249,6 +1249,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 8eeb4190207d1ac7ac024433f043ae59f2b0cd71..dce22b7fc54493265a489f796b5362cc69937081 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -776,17 +776,21 @@ static int __init tlclk_init(void) { int ret; + telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); + + alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); + if (!alarm_events) { + ret = -ENOMEM; + goto out1; + } + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); if (ret < 0) { printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); + kfree(alarm_events); return ret; } tlclk_major = ret; - alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); - if (!alarm_events) { - ret = -ENOMEM; - goto out1; - } /* Read telecom clock IRQ number (Set by BIOS) */ if (!request_region(TLCLK_BASE, 8, "telco_clock")) { @@ -795,7 +799,6 @@ static int __init tlclk_init(void) ret = -EBUSY; goto out2; } - telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? 
*/ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", @@ -836,8 +839,8 @@ static int __init tlclk_init(void) release_region(TLCLK_BASE, 8); out2: kfree(alarm_events); -out1: unregister_chrdev(tlclk_major, "telco_clock"); +out1: return ret; } diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 4946c5b37d04df37a5afc841959b8f5cc465bc99..f79f877942733b32b0ed119f9a4bcfe97f54d93e 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -276,13 +276,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->cdev.owner = THIS_MODULE; chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 289221d653cb2fa40a279966aa024d03af5ddf7a..b9a30f0b882574305898fbe2755a82bd126f69bf 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -188,6 +188,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; enum tpm_chip_flags { @@ -278,6 +279,9 @@ struct tpm_output_header { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -595,7 +599,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, u8 *cmd); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index d2e101b32482f83ca85c2ca5c0f5cac85f46efe5..9f4e22dcde2704732c98a34a8f2adc0b10cd3ec8 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -43,18 +43,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -276,8 +279,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -456,7 +461,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -474,9 +479,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -522,8 +526,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; } diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 763fc7e6c0058825761b3730917c1d44338f3a0f..20f27100708bda2d61c4a59940fabdef20cc9ab3 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -26,6 +26,7 @@ #include "tpm.h" #define ACPI_SIG_TPM2 "TPM2" +#define TPM_CRB_MAX_RESOURCES 3 static const guid_t crb_acpi_start_guid = GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714, @@ -95,7 +96,6 @@ enum crb_status { struct crb_priv { u32 sm; const char *hid; - void __iomem *iobase; struct crb_regs_head __iomem *regs_h; struct crb_regs_tail __iomem *regs_t; u8 __iomem *cmd; @@ -438,21 +438,27 @@ static const struct tpm_class_ops tpm_crb = { static int crb_check_resource(struct acpi_resource *ares, void *data) { - struct resource *io_res = data; + struct resource *iores_array = data; struct resource_win win; struct resource *res = &(win.res); + int i; if (acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win)) { - *io_res = *res; - io_res->name = NULL; + for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) { + if (resource_type(iores_array + i) != IORESOURCE_MEM) { + iores_array[i] = *res; + iores_array[i].name = NULL; + break; + } + } } return 1; } -static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, - struct resource *io_res, u64 start, u32 size) +static void __iomem *crb_map_res(struct device *dev, struct resource *iores, + void __iomem **iobase_ptr, u64 start, u32 size) { struct resource new_res = { .start = start, @@ -464,10 +470,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, if (start != new_res.start) return (void __iomem *) ERR_PTR(-EINVAL); - if (!resource_contains(io_res, &new_res)) + if (!iores) return devm_ioremap_resource(dev, &new_res); - return priv->iobase + 
(new_res.start - io_res->start); + if (!*iobase_ptr) { + *iobase_ptr = devm_ioremap_resource(dev, iores); + if (IS_ERR(*iobase_ptr)) + return *iobase_ptr; + } + + return *iobase_ptr + (new_res.start - iores->start); } /* @@ -494,9 +506,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res, static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, struct acpi_table_tpm2 *buf) { - struct list_head resources; - struct resource io_res; + struct list_head acpi_resource_list; + struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} }; + void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL}; struct device *dev = &device->dev; + struct resource *iores; + void __iomem **iobase_ptr; + int i; u32 pa_high, pa_low; u64 cmd_pa; u32 cmd_size; @@ -505,21 +521,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, u32 rsp_size; int ret; - INIT_LIST_HEAD(&resources); - ret = acpi_dev_get_resources(device, &resources, crb_check_resource, - &io_res); + INIT_LIST_HEAD(&acpi_resource_list); + ret = acpi_dev_get_resources(device, &acpi_resource_list, + crb_check_resource, iores_array); if (ret < 0) return ret; - acpi_dev_free_resource_list(&resources); + acpi_dev_free_resource_list(&acpi_resource_list); - if (resource_type(&io_res) != IORESOURCE_MEM) { + if (resource_type(iores_array) != IORESOURCE_MEM) { dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n"); return -EINVAL; + } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) == + IORESOURCE_MEM) { + dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n"); + memset(iores_array + TPM_CRB_MAX_RESOURCES, + 0, sizeof(*iores_array)); + iores_array[TPM_CRB_MAX_RESOURCES].flags = 0; } - priv->iobase = devm_ioremap_resource(dev, &io_res); - if (IS_ERR(priv->iobase)) - return PTR_ERR(priv->iobase); + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (buf->control_address >= iores_array[i].start && + buf->control_address + sizeof(struct crb_regs_tail) - 1 <= + iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address, + sizeof(struct crb_regs_tail)); + + if (IS_ERR(priv->regs_t)) + return PTR_ERR(priv->regs_t); /* The ACPI IO region starts at the head area and continues to include * the control area, as one nice sane region except for some older @@ -527,9 +563,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, */ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { - if (buf->control_address == io_res.start + + if (iores && + buf->control_address == iores->start + sizeof(*priv->regs_h)) - priv->regs_h = priv->iobase; + priv->regs_h = *iobase_ptr; else dev_warn(dev, FW_BUG "Bad ACPI memory layout"); } @@ -538,13 +575,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, if (ret) return ret; - priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address, - sizeof(struct crb_regs_tail)); - if (IS_ERR(priv->regs_t)) { - ret = PTR_ERR(priv->regs_t); - goto out_relinquish_locality; - } - /* * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
@@ -556,13 +586,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high); pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low); cmd_pa = ((u64)pa_high << 32) | pa_low; - cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa, - ioread32(&priv->regs_t->ctrl_cmd_size)); + cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; iores_array[i].end; ++i) { + if (cmd_pa >= iores_array[i].start && + cmd_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size); dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n", pa_high, pa_low, cmd_size); - priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); + priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size); if (IS_ERR(priv->cmd)) { ret = PTR_ERR(priv->cmd); goto out; @@ -570,11 +613,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8); rsp_pa = le64_to_cpu(__rsp_pa); - rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa, - ioread32(&priv->regs_t->ctrl_rsp_size)); + rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (rsp_pa >= iores_array[i].start && + rsp_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size); if (cmd_pa != rsp_pa) { - priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); + priv->rsp = crb_map_res(dev, iores, iobase_ptr, + rsp_pa, rsp_size); ret = PTR_ERR_OR_ZERO(priv->rsp); goto out; } diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 569e93e1f06ccdc978d30ee6035aa850dc2f5e72..3ba67bc6baba01c545e13ad518ea5e58846213b3 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -588,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) */ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { ibmvtpm_crq_process(crq, ibmvtpm); + wake_up_interruptible(&ibmvtpm->crq_queue.wq); crq->valid = 0; smp_wmb(); } @@ -635,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, } crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); + init_waitqueue_head(&crq_q->wq); ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); @@ -687,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (rc) goto init_irq_cleanup; + if (!wait_event_timeout(ibmvtpm->crq_queue.wq, + ibmvtpm->rtce_buf != NULL, + HZ)) { + dev_err(dev, "CRQ response timed out\n"); + goto init_irq_cleanup; + } + return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index 91dfe766d08007e9dfa5fa91e7d5a132a8b85830..4f6a124601db414dcbbf5a32fb83569db4a34ffe 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue { struct ibmvtpm_crq *crq_addr; u32 index; u32 num_entry; + wait_queue_head_t wq; }; struct ibmvtpm_dev { diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 1a0e97a5da5a452d8aa7b486392ebcad4e1654bc..162fb16243d030655dc53100ff2ddef72711f395 100644 --- 
a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -22,7 +22,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index a985bf5e1ac61e6df1a56e153ed75640e0161072..c65d30bba700540611066123a86f74d4564ea05a 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = { static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) { int ret; + unsigned long min_rate, max_rate; + struct clk_init_data init = { .flags = CLK_GET_RATE_NOCACHE, .num_parents = 0, @@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) sclk->hw.init = &init; ret = devm_clk_hw_register(dev, &sclk->hw); - if (!ret) - clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate, - sclk->info->range.max_rate); + if (ret) + return ret; + + if (sclk->info->rate_discrete) { + int num_rates = sclk->info->list.num_rates; + + if (num_rates <= 0) + return -EINVAL; + + min_rate = sclk->info->list.rates[0]; + max_rate = sclk->info->list.rates[num_rates - 1]; + } else { + min_rate = sclk->info->range.min_rate; + max_rate = sclk->info->range.max_rate; + } + + clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate); return ret; } diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index f77d4329183e3468ec675632c1968b021fa67552..4477e0c4650fd66273c62cfa50a4bcae7e2272f4 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev, parent_name = postdiv_name; } - pllen = kzalloc(sizeof(*pllout), GFP_KERNEL); + pllen = kzalloc(sizeof(*pllen), GFP_KERNEL); if (!pllen) { ret = -ENOMEM; goto err_unregister_postdiv; diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index 04f4f3739e3bed0f0e90d77b77279a50837e03ee..8d11d76e1db7c62219791e5af6e0b19dbd655590 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -144,7 +144,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" }; PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" }; PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" }; -PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; +PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" }; PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" }; PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" }; PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" }; diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 442309b5692033f6527248cab302ee42af327817..8086756e7f076f5af6daa02a4f4d2bda0c2aeb95 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -1072,7 +1072,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), 
GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, @@ -1113,7 +1113,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { 0), GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index 0cd11e6893afa282c0d9c32bb386fcae5e21e065..25ed60776560eeee48eef64669b69eb8a05d445c 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -136,7 +136,7 @@ static void __init atlas6_clk_init(struct device_node *np) for (i = pll1; i < maxclk; i++) { atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]); - BUG_ON(!atlas6_clks[i]); + BUG_ON(IS_ERR(atlas6_clks[i])); } clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu"); clk_register_clkdev(atlas6_clks[io], NULL, "io"); diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index dc68c80c26aba31090775246fde45134655174a9..e124b13b396be2a7ca11ec6d0b3f1f3831311c70 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -38,7 +38,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* read VCO1 reg for numerator and denominator */ reg = readl(socfpgaclk->hw.reg); refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate / refdiv; + + vco_freq = parent_rate; + do_div(vco_freq, refdiv); /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5bed36e129516a91a46d7edbb8950da25507a3bd..7327e90735c895cd3f081fcfae98ab89815c4054 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), 0, 0, 2, 0xB0, 1}, { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, - ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, + ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2}, { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3}, { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux, diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index 4d59fda9af809b723e1742bb76c480f75e2e6b0a..878ca05b3f41881f13f213eda522a77ca2add48a 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d, if (err) return NULL; } else { - const char *base_name = "adpll"; - char *buf; - - buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 + - strlen(postfix), GFP_KERNEL); - if (!buf) - return NULL; - sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix); - name = buf; + name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s", + d->pa, postfix); } return name; diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index 
1d740a8c42ab32ed9d36dd6d38635d6032a48215..47114c2a7cb5448739c108990cf476403b3f1d4a 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node) return PTR_ERR(clk); } - ret = ENXIO; + ret = -ENXIO; base = of_iomap(node, 0); if (!base) { pr_err("failed to map registers for clockevent\n"); diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 0df16eb1eb3cebcac2953fa52f70bf13c1a6d0b7..c5f98cafc25c9333d3d5ec614a8a62a57c622c90 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -458,6 +458,7 @@ static int __init armada37xx_cpufreq_driver_init(void) /* Now that everything is setup, enable the DVFS at hardware level */ armada37xx_cpufreq_enable_dvfs(nb_pm_base); + memset(&pdata, 0, sizeof(pdata)); pdata.suspend = armada37xx_cpufreq_suspend; pdata.resume = armada37xx_cpufreq_resume; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e7b3d4ed8eff44a367be371237e4de1c456c306f..864a7e8ebdfc342dfe43fe8d05c0813fe2e84261 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -712,7 +712,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); - if (global.no_turbo) + if (global.no_turbo || global.turbo_disabled) *current_max = HWP_GUARANTEED_PERF(cap); else *current_max = HWP_HIGHEST_PERF(cap); @@ -1431,6 +1431,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + cpu->pstate.turbo_pstate = phy_max; } else { cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; } @@ -2324,9 +2325,15 @@ static int intel_pstate_update_status(const char *buf, size_t size) { int ret; - if (size == 3 && !strncmp(buf, "off", size)) - return intel_pstate_driver ? 
- intel_pstate_unregister_driver() : -EINVAL; + if (size == 3 && !strncmp(buf, "off", size)) { + if (!intel_pstate_driver) + return -EINVAL; + + if (hwp_active) + return -EBUSY; + + return intel_pstate_unregister_driver(); + } if (size == 6 && !strncmp(buf, "active", size)) { if (intel_pstate_driver) { diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 687c92ef76440bb96b6ae5c8662f03f72dbdc3b3..79942f70575760adc1836997c2dbedd93b4410fe 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -903,6 +903,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { void powernv_cpufreq_work_fn(struct work_struct *work) { struct chip *chip = container_of(work, struct chip, throttle); + struct cpufreq_policy *policy; unsigned int cpu; cpumask_t mask; @@ -917,12 +918,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) chip->restore = false; for_each_cpu(cpu, &mask) { int index; - struct cpufreq_policy policy; - cpufreq_get_policy(&policy, cpu); - index = cpufreq_table_find_index_c(&policy, policy.cur); - powernv_cpufreq_target_index(&policy, index); - cpumask_andnot(&mask, &mask, policy.cpus); + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + index = cpufreq_table_find_index_c(policy, policy->cur); + powernv_cpufreq_target_index(policy, index); + cpumask_andnot(&mask, &mask, policy->cpus); + cpufreq_cpu_put(policy); } out: put_online_cpus(); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index cb241f232b112ebb38588c9daecb5ccb2f0c25ce..219704cd87ebf1b36f133fa67dd91cbcf8856e62 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -150,7 +150,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, */ stop_critical_timings(); drv->states[index].enter_s2idle(dev, drv, index); - WARN_ON(!irqs_disabled()); + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); /* * timekeeping_resume() that will be called by tick_unfreeze() for the * first CPU executing it calls functions containing RCU read-side diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 0b1fc5664b1d8a23ba04a8b2056a11709a036aba..c2736274ad63499787ec2d8bbcf9f65e79ff1404 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2980,7 +2980,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, ctx->enckeylen = keylen; ctx->authkeylen = 0; - memcpy(ctx->enckey, key, ctx->enckeylen); switch (ctx->enckeylen) { case AES_KEYSIZE_128: @@ -2996,6 +2995,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, goto badkey; } + memcpy(ctx->enckey, key, ctx->enckeylen); + flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, ctx->authkeylen); flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); @@ -3056,6 +3057,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < GCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); @@ -3084,6 +3089,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < GCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); @@ -3113,6 +3122,10 @@ static int 
aead_ccm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < CCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = CCM_ESP_SALT_SIZE; ctx->salt_offset = CCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 600336d169a9ee5298a90e2d9eb967021bd955a4..cd4d60d318ba91a41750a373dc447b1549dc3371 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -205,6 +205,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index b0ba4331944b52b9093636e13b88b3e071ae182b..43fe69d0981acfea3c4efba583113aaff06a5245 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -136,7 +136,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -153,7 +153,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL); + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -170,7 +170,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -198,7 +198,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -434,7 +434,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; @@ -456,7 +456,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) * Get buffer for union cpt_res_s response * structure and its physical address */ - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 80ee074c6e0cbec33762c59bd79ef57449351afb..09930d95ad24be22a13a9abd78a0e76ca3d6a325 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -65,6 +65,8 @@ struct cpt_request_info { union ctrl_info ctrl; /* User control information */ struct cptvf_request req; /* Request Information (Core specific) */ + bool may_sleep; + struct buf_ptr in[MAX_BUF_CNT]; struct buf_ptr out[MAX_BUF_CNT]; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 7442b0422f8ac032f3c0c7ee87f352b522671927..bd43b5c1450c18c2ecb131d3165c181a3b90d617 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -471,6 +471,7 @@ struct ccp_sg_workarea { unsigned int sg_used; struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; struct device *dma_dev; unsigned int dma_count; enum dma_data_direction dma_dir; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 330853a2702f027ac82b5bab4a098ab4f2e8d0d7..626b643d610ebc6b925b283daf1e6735cc16c256 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -67,7 +67,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -96,6 +96,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -108,14 +109,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -304,7 +319,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -336,11 +351,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. 
*/ - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { @@ -370,7 +385,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough data in the sg element, but we need to * adjust for any previously copied data */ - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); @@ -391,7 +406,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough room in the sg element, but we need to * adjust for any previously used area */ - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } @@ -1783,8 +1798,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) LSB_ITEM_SIZE); break; default: + kfree(hmac_buf); ret = -EINVAL; - goto e_ctx; + goto e_data; } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); @@ -2033,7 +2049,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } @@ -2059,8 +2075,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - dst.sg_wa.sg_used += src.sg_wa.sg->length; - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 28a5b8b38fa2f57c2e680984c70d13b6639fa06f..1bcb6f0157b077b88e62200591320302d22491d4 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -137,7 +137,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; - int rc = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); @@ -149,10 +148,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + /* Alloc hash tfm for essiv */ + ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); + if (IS_ERR(ctx_p->shash_tfm)) { + dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); + return PTR_ERR(ctx_p->shash_tfm); + } + } + /* Allocate key buffer, cache line aligned */ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - return -ENOMEM; + goto free_shash; dev_dbg(dev, "Allocated key buffer in context. 
key=@%p\n", ctx_p->user.key); @@ -164,21 +172,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_shash: + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index ce81aa96afc3aa96d0be00efdddebfac51166dd4..1bc4cb3bd8cdcaa86e485db3278612b298dec63a 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2419,8 +2419,9 @@ int chcr_aead_dma_map(struct device *dev, else reqctx->b0_dma = 0; if (req->src == req->dst) { - error = dma_map_sg(dev, req->src, sg_nents(req->src), - DMA_BIDIRECTIONAL); + error = dma_map_sg(dev, req->src, + sg_nents_for_len(req->src, dst_size), + DMA_BIDIRECTIONAL); if (!error) goto err; } else { diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 1e0cc96306dd7bda5a538639c7f2588451629fb0..2c1f3ddb0cc79e70dfc7643c0a984e99763d3542 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1449,7 +1449,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1482,7 +1482,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); @@ -1627,7 +1627,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, break; } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { /* Do not sleep, just process backlog. 
*/ release_sock(sk); lock_sock(sk); @@ -1759,7 +1759,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1790,7 +1790,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index bf9658800bda5b0b55525b89e87fb1656cc5b401..3e3cc28d5cfe3552440000815108ea146ee00cb5 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -553,14 +554,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -576,7 +577,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -584,12 +585,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -597,7 +598,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -638,13 +639,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -676,7 +677,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ 
-687,7 +688,7 @@ static struct sec_request_el el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -728,6 +729,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -736,13 +738,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -750,7 +752,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -783,7 +785,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 1138e41d680599f34f1ffb86c2c1b8ee5cde208c..883342a45be7fc5a71db7736d178cb2f574d89d4 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -828,6 +828,11 @@ static int qat_alg_aead_dec(struct aead_request *areq) struct icp_qat_fw_la_bulk_req *msg; int digst_size = crypto_aead_authsize(aead_tfm); int ret, ctr = 0; + u32 cipher_len; + + cipher_len = areq->cryptlen - digst_size; + if (cipher_len % AES_BLOCK_SIZE != 0) + return -EINVAL; ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) @@ -842,7 +847,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; - cipher_param->cipher_length = areq->cryptlen - digst_size; + cipher_param->cipher_length = cipher_len; cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); @@ -871,6 +876,9 @@ static int qat_alg_aead_enc(struct aead_request *areq) uint8_t *iv = areq->iv; int ret, ctr = 0; + if (areq->cryptlen % AES_BLOCK_SIZE != 0) + return -EINVAL; + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) return ret; diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24fa390acf98f5815fe4e5d86e073a5..aeb03081415cbd9dba9a48a9a0da65aa6fdc8fe5 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle } return 0; out_err: + /* Do not free the list head unless we allocated 
it. */ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 06768074d2d822cb0e837e953edb68ddcb02917d..479d9575e1245892312facdf1bc4c4f5803e5120 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -80,6 +80,8 @@ #define KHZ 1000 +#define KHZ_MAX (ULONG_MAX / KHZ) + /* Assume that the bus is saturated if the utilization is 25% */ #define BUS_SATURATION_RATIO 25 @@ -180,7 +182,7 @@ struct tegra_actmon_emc_ratio { }; static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { - { 1400000, ULONG_MAX }, + { 1400000, KHZ_MAX }, { 1200000, 750000 }, { 1100000, 600000 }, { 1000000, 500000 }, diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 1551ca7df394113fca8fc923fb1fdb98d63b13aa..8586cc05def17aef5e2287dfd3b619870e9be00c 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -244,6 +244,30 @@ void dma_fence_free(struct dma_fence *fence) } EXPORT_SYMBOL(dma_fence_free); +static bool __dma_fence_enable_signaling(struct dma_fence *fence) +{ + bool was_set; + + lockdep_assert_held(fence->lock); + + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return false; + + if (!was_set && fence->ops->enable_signaling) { + trace_dma_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + dma_fence_signal_locked(fence); + return false; + } + } + + return true; +} + /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable @@ -256,19 +280,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; - if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && - fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - spin_lock_irqsave(fence->lock, flags); - - if (!fence->ops->enable_signaling(fence)) - dma_fence_signal_locked(fence); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return; - spin_unlock_irqrestore(fence->lock, flags); - } + spin_lock_irqsave(fence->lock, flags); + __dma_fence_enable_signaling(fence); + spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling); @@ -302,7 +319,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, { unsigned long flags; int ret = 0; - bool was_set; if (WARN_ON(!fence || !func)) return -EINVAL; @@ -314,25 +330,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, spin_lock_irqsave(fence->lock, flags); - was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - ret = -ENOENT; - else if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - ret = -ENOENT; - } - } - - if (!ret) { + if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); - } else + } else { INIT_LIST_HEAD(&cb->node); + ret = -ENOENT; + } + spin_unlock_irqrestore(fence->lock, flags); return ret; @@ -432,7 +437,6 @@ dma_fence_default_wait(struct 
dma_fence *fence, bool intr, signed long timeout) struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? timeout : 1; - bool was_set; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return ret; @@ -444,21 +448,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) goto out; } - was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!__dma_fence_enable_signaling(fence)) goto out; - if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - goto out; - } - } - if (!timeout) { ret = 0; goto out; diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 4a748c3435d7d0b5e4ddd5a5a5a70771d9b7ba3c..8d99c84361cbbd8f2dc5777ec45ed493fbdd65bb 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -131,11 +131,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) if (ret < 0) { dev_warn(&adev->dev, "error in parsing resource group\n"); - return; + break; } grp = (struct acpi_csrt_group *)((void *)grp + grp->length); } + + acpi_put_table((struct acpi_table_header *)csrt); } /** diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index dbc51154f12294b418c72f73459f1a02d25b173a..86427f6ba78cb91370b889de08bba74dec77c33c 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1677,6 +1677,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; dmac_pdev = of_find_device_by_node(dma_spec->np); + if (!dmac_pdev) + return NULL; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index b7ec56ae02a6ec9cf478599b0b0206f82b425849..fca232b1d4a643dfc4866ce2629d1f9d3202ef38 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); - goto err_unregister; + goto err_free; } platform_set_drvdata(pdev, hsdma); @@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev) return 0; +err_free: + of_dma_controller_free(pdev->dev.of_node); err_unregister: dma_async_device_unregister(dd); diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 91fd395c90c4cf99cd444fdede5d42d08ecb529d..8344a60c2131b43abe55680290354afac51ad62f 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); - if (chan) { - chan->router = ofdma->dma_router; - chan->route_data = route_data; - } else { + if (IS_ERR_OR_NULL(chan)) { ofdma->dma_router->route_free(ofdma->dma_router->dev, route_data); + } else { + chan->router = ofdma->dma_router; + chan->route_data = route_data; } /* diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index bc8050c025b7b8e323c04f298c850fab138b0a67..c564df713efc34f2544612c0f8a59920c79df02b 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2769,6 +2769,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + desc->rqcfg.brst_len = get_burst_len(desc, len); /* * If burst size is smaller than bus width then make 
sure we only * transfer one at a time to avoid a burst stradling an MFIFO entry. @@ -2776,7 +2777,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) desc->rqcfg.brst_len = 1; - desc->rqcfg.brst_len = get_burst_len(desc, len); desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 4903a408fc146eae9c5b3ac00a6e00e87046b4d6..ac7af440f8658dc123f1aab4f9e554e14b07c9cf 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -494,8 +494,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c) spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_dma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_dma_stop(chan); chan->desc = NULL; } @@ -551,6 +553,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) if (!vdesc) return; + list_del(&vdesc->node); + chan->desc = to_stm32_dma_desc(vdesc); chan->next_sg = 0; } @@ -628,7 +632,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) } else { chan->busy = false; if (chan->next_sg == chan->desc->num_sgs) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; } diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 8c3c3e5b812a85d66c1b6b04797cdd2bf5ff6e64..9c6867916e89085a0c3442a7749f7fc2db3c90a5 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1137,6 +1137,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) return; } + list_del(&vdesc->node); + chan->desc = to_stm32_mdma_desc(vdesc); hwdesc = chan->desc->node[0].hwdesc; chan->curr_hwdesc = 0; @@ -1252,8 +1254,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c) LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_mdma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_mdma_stop(chan); chan->desc = NULL; } vchan_get_all_descriptors(&chan->vchan, &head); @@ -1341,7 +1345,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; chan->busy = false; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 15481aeaeecd17b56646b2222db84e53296fa296..5ccd24a46e38107a89a3c65f1a86c62a130eff93 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1225,8 +1225,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); - if (tdc->busy) - tegra_dma_terminate_all(dc); + tegra_dma_terminate_all(dc); spin_lock_irqsave(&tdc->lock, flags); list_splice_init(&tdc->pending_sg_req, &sg_req_list); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 73de6a6179fcd106f7fbaff8de4cede47304f665..e002ff8413e2a382c7624ebd5873c88834a78eac 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -127,10 +127,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define 
ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index eb53260813c2e681b55a4ec88e2b1649130e661b..e869be0c79a22b16970429a1319133d10863504e 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -301,6 +301,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) /* Error exit stack */ err_kobj_reg: + kobject_put(&edac_dev->kobj); module_put(edac_dev->owner); err_out: diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 72c9eb9fdffbe497142d542b777997cddfb1c1b7..53042af7262e2a6aca5843c85dcd1cacbe0a658c 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void) /* Error unwind statck */ kobject_init_and_add_fail: - kfree(edac_pci_top_main_kobj); + kobject_put(edac_pci_top_main_kobj); kzalloc_fail: module_put(THIS_MODULE); diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index aac9b9b360b805e6af9e6ffca55b02e73052fe70..9e4781a807cfac9a67b01350a6f1cf18cda55da2 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -147,6 +147,8 @@ (n << (28 + (2 * skl) - PAGE_SHIFT)) static int nr_channels; +static struct pci_dev *mci_pdev; +static int ie31200_registered = 1; struct ie31200_priv { void __iomem *window; @@ -518,12 +520,16 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) static int ie31200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - edac_dbg(0, "MC:\n"); + int rc; + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; + rc = ie31200_probe1(pdev, ent->driver_data); + if (rc == 0 && !mci_pdev) + mci_pdev = pci_dev_get(pdev); - return 
ie31200_probe1(pdev, ent->driver_data); + return rc; } static void ie31200_remove_one(struct pci_dev *pdev) @@ -532,6 +538,8 @@ static void ie31200_remove_one(struct pci_dev *pdev) struct ie31200_priv *priv; edac_dbg(0, "\n"); + pci_dev_put(mci_pdev); + mci_pdev = NULL; mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; @@ -583,17 +591,53 @@ static struct pci_driver ie31200_driver = { static int __init ie31200_init(void) { + int pci_rc, i; + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); - return pci_register_driver(&ie31200_driver); + pci_rc = pci_register_driver(&ie31200_driver); + if (pci_rc < 0) + goto fail0; + + if (!mci_pdev) { + ie31200_registered = 0; + for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) { + mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor, + ie31200_pci_tbl[i].device, + NULL); + if (mci_pdev) + break; + } + if (!mci_pdev) { + edac_dbg(0, "ie31200 pci_get_device fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); + if (pci_rc < 0) { + edac_dbg(0, "ie31200 init fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + } + return 0; + +fail1: + pci_unregister_driver(&ie31200_driver); +fail0: + pci_dev_put(mci_pdev); + + return pci_rc; } static void __exit ie31200_exit(void) { edac_dbg(3, "MC:\n"); pci_unregister_driver(&ie31200_driver); + if (!ie31200_registered) + ie31200_remove_one(mci_pdev); } module_init(ie31200_init); diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 87f737e01473c357212def7950a2868a7bbac299..041f8152272bf51242fe683ea71a7ceddc4ddd9e 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) for (i = 0; i < num_domains; i++, scmi_pd++) { u32 state; - domains[i] = &scmi_pd->genpd; + if (handle->power_ops->state_get(handle, i, &state)) { + dev_warn(dev, "failed to get state for domain %d\n", i); + continue; + } scmi_pd->domain = i; scmi_pd->handle = handle; @@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd->genpd.power_off = scmi_pd_power_off; scmi_pd->genpd.power_on = scmi_pd_power_on; - if (handle->power_ops->state_get(handle, i, &state)) { - dev_warn(dev, "failed to get state for domain %d\n", i); - continue; - } - pm_genpd_init(&scmi_pd->genpd, NULL, state == SCMI_POWER_STATE_GENERIC_OFF); + + domains[i] = &scmi_pd->genpd; } scmi_pd_data->domains = domains; diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 05b528c7ed8fdabecc46fcf15a10c201c40c8450..e809f4d9a9e93433712947d6a221ef9a50d82a90 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -410,14 +410,19 @@ int sdei_event_enable(u32 event_num) return -ENOENT; } - spin_lock(&sdei_list_lock); - event->reenable = true; - spin_unlock(&sdei_list_lock); + cpus_read_lock(); if (event->type == SDEI_EVENT_TYPE_SHARED) err = sdei_api_event_enable(event->event_num); else err = sdei_do_cross_call(_local_event_enable, event); + + if (!err) { + spin_lock(&sdei_list_lock); + event->reenable = true; + spin_unlock(&sdei_list_lock); + } + cpus_read_unlock(); mutex_unlock(&sdei_events_lock); return err; @@ -619,21 +624,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - + cpus_read_lock(); err = 
_sdei_event_register(event); if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); + } else { + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); } + cpus_read_unlock(); } while (0); mutex_unlock(&sdei_events_lock); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index de1bc38ab39fbf02f2277943b89a7ace319f0462..a8180f9090fae3d441369e474fef89aa9a284726 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -359,6 +359,7 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); + destroy_workqueue(efi_rts_wq); return -ENOMEM; } @@ -395,6 +396,7 @@ static int __init efisubsys_init(void) generic_ops_unregister(); err_put: kobject_put(efi_kobj); + destroy_workqueue(efi_rts_wq); return error; } diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8f5229da95bbaf6fcc9fc55349d27..6945c3c966375a2a7c96991d3094fad5dfd194c7 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 945bd13e5e7918c014a1d44792c0d67393f50629..cab324eb7df24daff30c52ab6e2fe3a2c59d4f24 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -367,6 +367,7 @@ static int __init gpio_mockup_init(void) err = platform_driver_register(&gpio_mockup_driver); if (err) { gpio_mockup_err("error registering platform driver\n"); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return err; } @@ -386,6 +387,7 @@ static int __init gpio_mockup_init(void) gpio_mockup_err("error registering device"); platform_driver_unregister(&gpio_mockup_driver); gpio_mockup_unregister_pdevs(); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return PTR_ERR(pdev); } diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index 55072d2b367fad89ffb099dc20abfa32381f5804..4d53347adcafa225818963c4269a6ecc0e41dd7d 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data, sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); 
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 91a8ef8e7f3fd18272d9ce5d071eeb640472236d..1436098b161495ca9550963eaed9c59a0d80dbb0 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) continue; tc3589x_gpio->oldregs[i][j] = new; - tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new); + tc3589x_reg_write(tc3589x, regmap[i] + j, new); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 71efcf38f11beb2c628cee39ff4bae7fa50bedbd..94cd8a2610912d0eb813909de7e9ce3e02bdc1b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -276,7 +276,7 @@ static int acp_hw_init(void *handle) u32 val = 0; u32 count = 0; struct device *dev; - struct i2s_platform_data *i2s_pdata; + struct i2s_platform_data *i2s_pdata = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -317,20 +317,21 @@ static int acp_hw_init(void *handle) adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); - if (adev->acp.acp_cell == NULL) - return -ENOMEM; + if (adev->acp.acp_cell == NULL) { + r = -ENOMEM; + goto failure; + } adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { - kfree(adev->acp.acp_cell); - return -ENOMEM; + r = -ENOMEM; + goto failure; } i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { - kfree(adev->acp.acp_res); - kfree(adev->acp.acp_cell); - return -ENOMEM; + r = -ENOMEM; + goto failure; } switch (adev->asic_type) { @@ -427,7 +428,7 @@ static int acp_hw_init(void *handle) r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) - return r; + goto failure; if (adev->asic_type != CHIP_STONEY) { for (i = 0; i < ACP_DEVS ; i++) { @@ -435,7 +436,7 @@ static int acp_hw_init(void *handle) r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); if (r) { dev_err(dev, "Failed to add dev to genpd\n"); - return r; + goto failure; } } } @@ -454,7 +455,8 @@ static int acp_hw_init(void *handle) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto failure; } udelay(100); } @@ -471,7 +473,8 @@ static int acp_hw_init(void *handle) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto failure; } udelay(100); } @@ -480,6 +483,13 @@ static int acp_hw_init(void *handle) val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); return 0; + +failure: + kfree(i2s_pdata); + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_cell); + kfree(adev->acp.acp_genpd); + return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index a5df80d50d447a5dc0dd3a9a29c8ad21677e485c..6cf3dd5edffda3c4e05ae1b9dfa63a688be117ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -191,30 +191,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + 
size_t romlen = adev->pdev->romlen; + void __iomem *bios; adev->bios = NULL; - bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false; - memcpy_fromio(adev->bios, bios, size); + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; - if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); - return false; - } + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios); - adev->bios_size = size; + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen; return true; +free_bios: + kfree(adev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index c770d73352a793fc7c91d871b100a503870ca701..c15286858f0bf1662879f5911f794519a5ac5f51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -718,8 +718,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -856,8 +858,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = amdgpu_connector_best_single_encoder(connector); @@ -979,8 +983,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { @@ -1329,8 +1335,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 686a26de50f91e816471548bf3c1a0fc3f86db86..5f85c9586cba1a31cebd570d07af978657ca2563 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -275,7 +275,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_crtc_helper_set_config(set, ctx); @@ -299,6 +299,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev->have_disp_power_ref = false; } +out: /* drop the power reference we got coming in here */ pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 5e29f14f4b301bea74c28153724668c9c6c0bef3..63b1e325b45c57444c8a42ccb631131b9ac6094a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1085,11 +1085,12 @@ 
long amdgpu_drm_ioctl(struct file *filp, dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_ioctl(filp, cmd, arg); pm_runtime_mark_last_busy(dev->dev); +out: pm_runtime_put_autosuspend(dev->dev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 869ff624b108c5c6241470427bd5ff5bb19d6ca1..e5e51e4d4f3d895a4c084280521d925b75f09332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -396,7 +396,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); - amdgpu_irq_get(adev, irq_src, irq_type); + + if (irq_src) + amdgpu_irq_get(adev, irq_src, irq_type); ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; @@ -508,8 +510,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) /* no need to trigger GPU reset as we are unloading */ amdgpu_fence_driver_force_completion(ring); } - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); drm_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) @@ -545,8 +548,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) } /* disable the interrupt */ - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } @@ -572,8 +576,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) continue; /* enable the interrupt */ - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index ba10577569f856d42e8393a8d7e6010430c3a803..dd9b8feb3a666666b11cdcc0be48e5c8973e3cb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -524,8 +524,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; @@ -549,9 +553,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? 
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; @@ -834,7 +839,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) r = pm_runtime_get_sync(dev->dev); if (r < 0) - return r; + goto pm_put; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { @@ -882,6 +887,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) out_suspend: pm_runtime_mark_last_busy(dev->dev); +pm_put: pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7bea8ba89e88353c4e26f81c89c9c6490069558f..e63a253eb42550e548df03c6d819d27bfe191d03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -529,7 +529,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, while (isspace(*++tmp_str)); - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); if (ret) return -EINVAL; @@ -629,7 +630,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) memcpy(buf_cpy, buf, bytes); buf_cpy[bytes] = '\0'; tmp = buf_cpy; - while ((sub_str = strsep(&tmp, delimiter)) != NULL) { + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); if (strlen(sub_str)) { ret = kstrtol(sub_str, 0, &level); if (ret) @@ -880,7 +882,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, i++; memcpy(buf_cpy, buf, count-i); tmp_str = buf_cpy; - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); if (ret) { count = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fcf421263fd9689226de77738da98f09a6d4b280..abad7460084f2d7a42696f0f9b1a2aafa3726852 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -954,6 +954,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) release_sg: kfree(ttm->sg); + ttm->sg = NULL; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index e9934de1b9cf8127eb2b770e4301d6ee90c98223..0222bb7ea49b41a469f3514b58771f8badd42c25 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 189212cb3547585b4c5f6f6e64309953a4982a5e..bff39f561264e2ef481c949f71e6e20888948af8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1101,6 +1101,8 @@ static int 
stop_cpsch(struct device_queue_manager *dqm) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); dqm_unlock(dqm); + pm_release_ib(&dqm->packets); + kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); pm_uninit(&dqm->packets); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 0805c423a5ce08d032c432dfa8133afb0177f02a..5cf499a07806a3c36c8dc408e7cd34186d0f0752 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -592,8 +592,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, ret = kobject_init_and_add(dev->kobj_node, &node_type, sys_props.kobj_nodes, "%d", id); - if (ret < 0) + if (ret < 0) { + kobject_put(dev->kobj_node); return ret; + } dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); if (!dev->kobj_mem) @@ -640,8 +642,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(mem->kobj, &mem_type, dev->kobj_mem, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(mem->kobj); return ret; + } mem->attr.name = "properties"; mem->attr.mode = KFD_SYSFS_FILE_MODE; @@ -659,8 +663,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(cache->kobj, &cache_type, dev->kobj_cache, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(cache->kobj); return ret; + } cache->attr.name = "properties"; cache->attr.mode = KFD_SYSFS_FILE_MODE; @@ -678,8 +684,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(iolink->kobj, &iolink_type, dev->kobj_iolink, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(iolink->kobj); return ret; + } iolink->attr.name = "properties"; iolink->attr.mode = KFD_SYSFS_FILE_MODE; @@ -759,8 +767,10 @@ static int kfd_topology_update_sysfs(void) ret = kobject_init_and_add(sys_props.kobj_topology, &sysprops_type, &kfd_device->kobj, "topology"); - if (ret < 0) + if (ret < 0) { + kobject_put(sys_props.kobj_topology); return ret; + } sys_props.kobj_nodes = kobject_create_and_add("nodes", sys_props.kobj_topology); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 3abc0294c05f5fe6fa0aa6c2b6ed8b5752fd0d96..2fb2c683ad54bd279c316a44a48078dfe6fba546 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1576,8 +1576,7 @@ static void write_i2c_retimer_setting( buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1595,8 +1594,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->sink->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1605,8 +1603,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1623,8 +1620,7 @@ static void write_i2c_retimer_setting( buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. 
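For reference, the retimer hunks above and below all make the same change: the per-write ASSERT(i2c_success) calls are replaced by a jump to a single error label that logs once. A minimal sketch of that goto-based pattern follows; it is illustrative only and not part of the patch — the client handle, register offsets and values are placeholders, not the DP159 programming sequence.

/*
 * Illustrative sketch (not from the driver): bail out to one label on
 * the first failed I2C write instead of asserting after each one.
 */
#include <linux/i2c.h>
#include <linux/printk.h>

static void example_program_retimer(struct i2c_client *client)
{
	u8 buf[2];

	buf[0] = 0x0A;		/* placeholder register offset */
	buf[1] = 0x17;		/* placeholder value */
	if (i2c_master_send(client, buf, sizeof(buf)) != sizeof(buf))
		goto write_fail;

	buf[0] = 0x0B;		/* placeholder register offset */
	buf[1] = 0xDA;		/* placeholder value */
	if (i2c_master_send(client, buf, sizeof(buf)) != sizeof(buf))
		goto write_fail;

	return;

write_fail:
	pr_debug("example: retimer programming failed\n");
}

The design choice mirrored here is that a failed write makes the remaining writes pointless, so aborting early with one debug message is both quieter and cheaper than asserting after every transfer.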
@@ -1642,8 +1638,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->sink->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1652,8 +1647,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1668,8 +1662,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1677,8 +1670,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1686,10 +1678,14 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); } static void write_i2c_default_retimer_setting( @@ -1710,8 +1706,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1719,8 +1714,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1728,8 +1722,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1737,8 +1730,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ -1746,8 +1738,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1755,8 +1746,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; if (is_vga_mode) { @@ -1768,8 +1758,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1777,8 +1766,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ 
buffer[0] = 0xff; @@ -1786,9 +1774,13 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); } static void write_i2c_redriver_setting( @@ -1811,8 +1803,7 @@ static void write_i2c_redriver_setting( buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); } static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 46c9cb47a96e58b156a9162a984c1eccbd50a2fe..145af3bb2dfcb7c7d4b35f9879ea2e6e5950cb8f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -127,22 +127,16 @@ struct aux_payloads { struct vector payloads; }; -static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count) +static bool dal_ddc_i2c_payloads_create( + struct dc_context *ctx, + struct i2c_payloads *payloads, + uint32_t count) { - struct i2c_payloads *payloads; - - payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL); - - if (!payloads) - return NULL; - if (dal_vector_construct( &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) - return payloads; - - kfree(payloads); - return NULL; + return true; + return false; } static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p) @@ -155,14 +149,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) return p->payloads.count; } -static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) +static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p) { - if (!p || !*p) + if (!p) return; - dal_vector_destruct(&(*p)->payloads); - kfree(*p); - *p = NULL; + dal_vector_destruct(&p->payloads); } static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count) @@ -580,9 +572,13 @@ bool dal_ddc_service_query_ddc_data( uint32_t payloads_num = write_payloads + read_payloads; + if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE) return false; + if (!payloads_num) + return false; + /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { @@ -613,23 +609,25 @@ bool dal_ddc_service_query_ddc_data( dal_ddc_aux_payloads_destroy(&payloads); } else { - struct i2c_payloads *payloads = - dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); + struct i2c_command command = {0}; + struct i2c_payloads payloads; - struct i2c_command command = { - .payloads = dal_ddc_i2c_payloads_get(payloads), - .number_of_payloads = 0, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) + return false; + + command.payloads = dal_ddc_i2c_payloads_get(&payloads); + command.number_of_payloads = 0; + command.engine = DDC_I2C_COMMAND_ENGINE; + command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; dal_ddc_i2c_payloads_add( - payloads, address, write_size, write_buf, true); + &payloads, address, write_size, write_buf, true); dal_ddc_i2c_payloads_add( - payloads, address, read_size, read_buf, false); + &payloads, address, read_size, read_buf, false); command.number_of_payloads = - 
dal_ddc_i2c_payloads_get_count(payloads); + dal_ddc_i2c_payloads_get_count(&payloads); ret = dm_helpers_submit_i2c( ddc->ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3f76e6019546f029739bfd182036b37fac3b671d..5a2f29bd350823141ed2b0077315d2a487eca227 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -1001,6 +1001,7 @@ struct resource_pool *dce100_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e5e9e92521e91fab5afd921d245a575802a8b330..17d936c260d97d2b6baf8cadd662c232bafe667e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -1344,6 +1344,7 @@ struct resource_pool *dce110_create_resource_pool( if (construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 288129343c77893f7d3db3011e3bfab835a75edc..71adab8bf31b1a246aae6c53ff9856b5b5cdf50c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -1287,6 +1287,7 @@ struct resource_pool *dce112_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index d43f37d99c7d9d2ed7bb0e37bfb58b91981914e6..f0f2ce6da82782b186a00da9c40930e0c00dff3a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -1076,6 +1076,7 @@ struct resource_pool *dce120_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 6b44ed3697a4f4d5fafdc2bd9febede20e4cbae7..e6d55688114001550d5a5a21ef542c018e3f143d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1361,6 +1361,7 @@ struct resource_pool *dcn10_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 52a73332befb9e4a27d1d2b4e1cdf4bbf480a21b..343f869c5277d08741834f3833c4ef8a8c1a80e7 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); */ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) { + if (arg1.value == 0) + return arg2.value == 0 ? 
dc_fixpt_one : dc_fixpt_zero; + return dc_fixpt_exp( dc_fixpt_mul( dc_fixpt_log(arg1), diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 219440bebd05236439a3fae831cc2f361048e0b5..058898b321b8a2225272e4d5afc5a0155930e08c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3566,7 +3566,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_POWER: return smu7_get_gpu_power(hwmgr, (uint32_t *)value); case AMDGPU_PP_SENSOR_VDDGFX: - if ((data->vr_config & 0xff) == 0x2) + if ((data->vr_config & VRCONF_VDDGFX_MASK) == + (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); else @@ -3969,6 +3970,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + tmp_result = smu7_update_avfs(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update avfs voltages!", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ce459ea4ec3ad1858ba8a6bc9e7f209a9c509a64..da9e6923fa659b5525ae7473aa880ed6c9f8d1ff 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3591,6 +3591,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. 
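The dc_fixpt_pow() change above guards the pow-via-exp/log identity against a zero base, since log(0) is undefined in the fixed-point helpers. A minimal sketch of the same guard, written in plain C with doubles purely for illustration (the driver itself operates on struct fixed31_32 values):

	#include <math.h>

	/* Illustrative only: x^y computed as exp(y * ln(x)) needs a zero-base guard. */
	static double pow_via_exp_log(double x, double y)
	{
		if (x == 0.0)
			return (y == 0.0) ? 1.0 : 0.0;	/* mirror the guard above: 0^0 -> 1, 0^y -> 0 */
		return exp(log(x) * y);
	}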
+ */ + if(hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr); data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index aa044c1955fe0e76a9a9901681d0ca5f3e4ea9cc..a2b2a6c67cda15b1c144713cb9f3f28aa442a88e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -362,6 +362,9 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; struct amdgpu_device *adev = hwmgr->adev; int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; @@ -371,8 +374,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low < range->min) low = range->min; - if (high > range->max) - high = range->max; + if (high > tdp_table->usSoftwareShutdownTemp) + high = tdp_table->usSoftwareShutdownTemp; if (low > high) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index 904eb2c9155b4b45ff21daee35eed31146cdd79f..601a596e94f029cdb22efd9466fa096335426b96 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -170,6 +170,8 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; @@ -179,8 +181,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low < range->min) low = range->min; - if (high > range->max) - high = range->max; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 083aa71487e8e508e970ac4f806799fb34aa5d3d..db87cb8930d247e4cd9ebbd376858971d943b0f3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2723,7 +2723,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) { - return ci_is_smc_ram_running(hwmgr); + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, + VOLTAGE_CONTROLLER_ON)) + ? 
true : false; } static int ci_smu_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 29409a65d864760e674f787cb5279cdbff5b91a7..a347b27405d885de990c70ec6c73bb5012c65112 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -446,7 +446,7 @@ int malidp_de_planes_init(struct drm_device *drm) const struct malidp_hw_regmap *map = &malidp->dev->hw->map; struct malidp_plane *plane = NULL; enum drm_plane_type plane_type; - unsigned long crtcs = 1 << drm->mode_config.num_crtc; + unsigned long crtcs = BIT(drm->mode_config.num_crtc); unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; u32 *formats; diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index a6e8f4591e636241c6f1e8515fea33dc9147a7f3..1ea2a1b0fe375d79ca3869d71db94243f9d9f2a6 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -180,7 +180,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len) static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) { - u8 ret; + u8 ret = 0; sii8620_read_buf(ctx, addr, &ret, 1); return ret; diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 373bd4c2b698ba1ebcb3b4572cb6c84b3ce448fe..84b7b22a9590ad3d22cd6d4738f60b5ea2135053 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -265,13 +265,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf, buf[len] = '\0'; - if (!strcmp(buf, "on")) + if (sysfs_streq(buf, "on")) connector->force = DRM_FORCE_ON; - else if (!strcmp(buf, "digital")) + else if (sysfs_streq(buf, "digital")) connector->force = DRM_FORCE_ON_DIGITAL; - else if (!strcmp(buf, "off")) + else if (sysfs_streq(buf, "off")) connector->force = DRM_FORCE_OFF; - else if (!strcmp(buf, "unspecified")) + else if (sysfs_streq(buf, "unspecified")) connector->force = DRM_FORCE_UNSPECIFIED; else return -EINVAL; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bf90625df3c5bf4f34f1a1d187064f46c3599aa1..ac545c88a6f3d68686360a4cef9d0d5d9a3e427e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -731,9 +731,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -758,14 +755,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. 
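The drm_debugfs change above swaps strcmp() for sysfs_streq() so that values written with a trailing newline (e.g. "echo on > .../force") still match. A rough sketch of the comparison semantics as a standalone helper, for illustration only; the kernel's own sysfs_streq() is the authoritative implementation:

	#include <stdbool.h>

	/* Sketch: equality like strcmp() == 0, but one trailing '\n' on either side is ignored. */
	static bool streq_ignoring_trailing_newline(const char *s1, const char *s2)
	{
		while (*s1 && *s1 == *s2) {
			s1++;
			s2++;
		}
		if (*s1 == *s2)
			return true;
		if (!*s1 && *s2 == '\n' && !s2[1])
			return true;
		if (*s1 == '\n' && !s1[1] && !*s2)
			return true;
		return false;
	}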
*/ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 3eb8b2a52d585503fe228768c5609b5b80a939ad..c3f9967b4fe1a0cf47c8bc6d124281888d5470a6 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1035,11 +1035,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format); */ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) { - u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, - scanline & 0xff }; + u8 payload[2] = { scanline >> 8, scanline & 0xff }; ssize_t err; - err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload, + sizeof(payload)); if (err < 0) return err; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index fa5c25d36d3dc8357e5e2584e1f9c4e00dcdb73d..652de972c3aea62bb351f8199ee969904fdcf024 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -107,6 +107,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Asus T103HAF */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 6a859e077ea0290d115031da247d4dc69033d1eb..37ae15dc4fc6d4f132705ff2ca48e0d84596425a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -694,7 +694,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) { dev_err(gpu->dev, "Failed to enable GPU power domain\n"); - return ret; + goto pm_put; } etnaviv_hw_identify(gpu); @@ -808,6 +808,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) gpu->mmu = NULL; fail: pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -848,7 +849,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) - return ret; + goto pm_put; dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); @@ -971,6 +972,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = 0; pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -985,7 +987,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) dev_err(gpu->dev, "recover hung GPU!\n"); if (pm_runtime_get_sync(gpu->dev) < 0) - return; + goto pm_put; mutex_lock(&gpu->lock); @@ -1005,6 +1007,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); } @@ -1278,8 +1281,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) if (!submit->runtime_resumed) { ret = pm_runtime_get_sync(gpu->dev); - if (ret < 0) + if (ret < 0) { + 
pm_runtime_put_noidle(gpu->dev); return NULL; + } submit->runtime_resumed = true; } @@ -1296,6 +1301,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) ret = event_alloc(gpu, nr_events, event); if (ret) { DRM_ERROR("no free events\n"); + pm_runtime_put_noidle(gpu->dev); return NULL; } @@ -1459,7 +1465,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) if (gpu->clk_bus) { ret = clk_prepare_enable(gpu->clk_bus); if (ret) - return ret; + goto disable_clk_reg; } if (gpu->clk_core) { @@ -1482,6 +1488,9 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) disable_clk_bus: if (gpu->clk_bus) clk_disable_unprepare(gpu->clk_bus); +disable_clk_reg: + if (gpu->clk_reg) + clk_disable_unprepare(gpu->clk_reg); return ret; } diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 17db4b4749d5aec46df17ff3654d4c9284e8fe8d..2e8479744ca4a21c4c8cfaa6eec7a72ef2b59c57 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct gma_clock_t clock; + memset(&clock, 0, sizeof(clock)); + switch (refclk) { case 27000: if (target < 200000) { diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 42daa5c9ff8e630f1c598c5488b5f291de0ad230..221a8cbc57f90c3b1f4a8634aeaebfa6a4eaaac8 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -311,18 +311,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; drm_panel_disable(imx_ldb_ch->panel); - if (imx_ldb_ch == &ldb->channel[0]) + if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; - else if (imx_ldb_ch == &ldb->channel[1]) + if (imx_ldb_ch == &ldb->channel[1] || dual) ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); - if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { + if (dual) { clk_disable_unprepare(ldb->clk[0]); clk_disable_unprepare(ldb->clk[1]); } diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index cffd3310240e5ddec86f13a2cce9b456d0c29d1f..c19c1dfbfcdc454b8712af8415ae7f96460f79cb 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -498,6 +498,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) return 0; } +static void imx_tve_disable_regulator(void *data) +{ + struct imx_tve *tve = data; + + regulator_disable(tve->dac_reg); +} + static bool imx_tve_readable_reg(struct device *dev, unsigned int reg) { return (reg % 4 == 0) && (reg <= 0xdc); @@ -622,6 +629,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) ret = regulator_enable(tve->dac_reg); if (ret) return ret; + ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve); + if (ret) + return ret; } tve->clk = devm_clk_get(dev, "tve"); @@ -668,18 +678,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) return 0; } -static void imx_tve_unbind(struct device *dev, struct device *master, - void *data) -{ - struct imx_tve *tve = dev_get_drvdata(dev); - - if (!IS_ERR(tve->dac_reg)) - regulator_disable(tve->dac_reg); -} - static const struct component_ops imx_tve_ops = { .bind = 
imx_tve_bind, - .unbind = imx_tve_unbind, }; static int imx_tve_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 947bc6d6230205eb141df8b6ea471ca7053dc35c..d1432176360714af5c434adc0fd0fd02e499920d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -600,8 +600,13 @@ static int mtk_drm_probe(struct platform_device *pdev) pm_runtime_disable(dev); err_node: of_node_put(private->mutex_node); - for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) + for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) { of_node_put(private->comp_node[i]); + if (private->ddp_comp[i]) { + put_device(private->ddp_comp[i]->larb_dev); + private->ddp_comp[i] = NULL; + } + } return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 62444a3a5742a5d15c0cf175235296ec28dd6bdc..331fb0c129290dfd63704a676b48667774b2a014 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1476,25 +1476,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to get system configuration registers: %d\n", ret); - return ret; + goto put_device; } hdmi->sys_regmap = regmap; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); + if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); + goto put_device; + } remote = of_graph_get_remote_node(np, 1, 0); - if (!remote) - return -EINVAL; + if (!remote) { + ret = -EINVAL; + goto put_device; + } if (!of_device_is_compatible(remote, "hdmi-connector")) { hdmi->next_bridge = of_drm_find_bridge(remote); if (!hdmi->next_bridge) { dev_err(dev, "Waiting for external bridge\n"); of_node_put(remote); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto put_device; } } @@ -1503,7 +1508,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n", remote); of_node_put(remote); - return -EINVAL; + ret = -EINVAL; + goto put_device; } of_node_put(remote); @@ -1511,10 +1517,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); - return -EINVAL; + ret = -EINVAL; + goto put_device; } return 0; +put_device: + put_device(hdmi->cec_dev); + return ret; } /* diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ba6f3c14495c0ceb605276e81edce5ddb055a1c3..d29a58bd2f7a363f65e1b76a615cf026ef9e63d6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1474,18 +1474,31 @@ static const struct adreno_gpu_funcs funcs = { static void check_speed_bin(struct device *dev) { struct nvmem_cell *cell; - u32 bin, val; + u32 val; + + /* + * If the OPP table specifies a opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80; cell = nvmem_cell_get(dev, "speed_bin"); - /* If a nvmem cell isn't defined, nothing to do */ - if (IS_ERR(cell)) - return; + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); + + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf); - bin = *((u32 *) nvmem_cell_read(cell, NULL)); - nvmem_cell_put(cell); + val = (1 
<< bin); + kfree(buf); + } - val = (1 << bin); + nvmem_cell_put(cell); + } dev_pm_opp_set_supported_hw(dev, &val, 1); } @@ -1518,7 +1531,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) check_speed_bin(&pdev->dev); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); + /* Restricting nr_rings to 1 to temporarily disable preemption */ + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 9cde79a7335c825de1f59c059ad13ac2d50eb30a..739ca9c2081a6fb8bb51834621321a81409e5e4a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -117,12 +117,22 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) { int ret; u32 val; + u32 mask, reset_val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) dev_err(gmu->dev, "GMU firmware initialization timed out\n"); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 93d70f4a2154e289be09a8dd6ae8a038e007ec7b..c9f831604558f2fa3a9439a64aae4a6157f68492 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -221,7 +221,7 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno: */ - ring->memptrs->fence = ring->seqno; + ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4752f08f0884c131c0a5949f1cfc22c5e10f37e8..3c3b7f7013e874de268996cfcbac1605e7f006bc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -659,7 +659,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); if (!fevent) { - DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event); + DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); return; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index e4afdb910a27cb9a0976e1b93a5852832d832d84..e112998696778f4f37058eb26f79065133a709ad 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -497,8 +497,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (!dev->dma_parms) { dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_msm_uninit; + } } dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); @@ -1373,6 +1375,13 @@ static int msm_pdev_remove(struct platform_device *pdev) return 0; } +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, @@ -1384,6 +1393,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct 
platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 10107e551fac35d5c18a1e92402df77c05174acc..1bb0a9f6fa7307a59d7d73b40ee917c8530622ec 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -909,8 +909,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port); @@ -1920,8 +1922,10 @@ nv50_disp_atomic_commit(struct drm_device *dev, int ret, i; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index fb0094fc55834aa7ba69090a517d38ad2b332001..b71afde8f115a944340816b1e07a0808205567aa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -551,8 +551,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) pm_runtime_get_noresume(dev->dev); } else { ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return conn_status; + } } nv_encoder = nouveau_connector_ddc_detect(connector); diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 9635704a1d8647b116bb1c42b65bf536f39f4905..4561a786fab07701d18e5f74576e31d8edf9e339 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(drm->dev); return ret; + } + ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2b7a54cc3c9ef419677cd6284b215ca9f759732d..81999bed1e4a58f362779f18e30e0714e731e4ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -899,8 +899,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) /* need to bring up power immediately if opening device */ ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); @@ -980,8 +982,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) long ret; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { case DRM_NOUVEAU_NVIF: diff --git 
a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0f64c0a1d4b30236243e6229afa19d990592f86c..d4fe52ec4c966531a4e8f4e23f8db4fdb9524f1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user) struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); int ret = pm_runtime_get_sync(drm->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put(drm->dev->dev); return ret; + } return 0; } @@ -315,7 +317,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct nouveau_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; @@ -599,6 +601,7 @@ nouveau_fbcon_init(struct drm_device *dev) drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index b56524d343c3e84eb109badc3a763471fe95eff8..a98fccb0d32f9ea9d396a9ef0e21b04d70e6f25f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -46,8 +46,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem) int ret; ret = pm_runtime_get_sync(dev); - if (WARN_ON(ret < 0 && ret != -EACCES)) + if (WARN_ON(ret < 0 && ret != -EACCES)) { + pm_runtime_put_autosuspend(dev); return; + } if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); @@ -80,8 +82,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) return ret; ret = pm_runtime_get_sync(dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev); goto out; + } ret = nouveau_vma_new(nvbo, &cli->vmm, &vma); pm_runtime_mark_last_busy(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c002f896850739b343624247e7d52d94e34bf99d..9682f30ab6f68acdcddf716d484e78265a279c36 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -176,6 +176,8 @@ void nouveau_mem_del(struct ttm_mem_reg *reg) { struct nouveau_mem *mem = nouveau_mem(reg); + if (!mem) + return; nouveau_mem_fini(mem); kfree(reg->mm_node); reg->mm_node = NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 8ebdc74cc0ad71b66347206134eb1972ac380ee3..326948b655428dd0ff56c986f7c34d3fa57e1049 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) else nvbe->ttm.ttm.func = &nv50_sgdma_backend; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) - /* - * A failing ttm_dma_tt_init() will call ttm_tt_destroy() - * and thus our nouveau_sgdma_destroy() hook, so we don't need - * to free nvbe here. 
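A pattern repeated throughout these nouveau (and the earlier msm/radeon/etnaviv) hunks: pm_runtime_get_sync() raises the device usage count even when it returns an error, so every failing path must drop that reference or runtime suspend is blocked forever. A hedged sketch of the corrected shape, using hypothetical foo_* names rather than any real driver:

	#include <linux/pm_runtime.h>

	static int foo_do_powered_work(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* The usage count was bumped even though resume failed: drop it. */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... work that requires the device to be powered ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}

Whether the error path uses pm_runtime_put_noidle() or pm_runtime_put_autosuspend(), as the hunks above do, the point is the same: every successful or failed get must be balanced by a put.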
- */ + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + kfree(nvbe); return NULL; + } return &nvbe->ttm.ttm; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09dc5f88ef431d4b374017cdc138b52453..8d9812a51ef633a81d900d8f87549ee1166ce623 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name) else return ERR_PTR(-ENODEV); + if (!pdev->rom || pdev->romlen == 0) + return ERR_PTR(-ENODEV); + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { + priv->size = pdev->romlen; if (ret = -ENODEV, - (priv->rom = pci_platform_rom(pdev, &priv->size))) + (priv->rom = ioremap(pdev->rom, pdev->romlen))) return priv; kfree(priv); } @@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); } +static void +platform_fini(void *data) +{ + struct priv *priv = data; + + iounmap(priv->rom); + kfree(priv); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 3bfb95d230e0e4456b744ac5b961b629152c5527..d8fb686c1fda94293dc1cd459b8255cfecaab265 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void) dss = of_find_matching_node(NULL, omapdss_of_match); if (dss == NULL || !of_device_is_available(dss)) - return 0; + goto put_node; omapdss_walk_device(dss, true); @@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void) kfree(n); } +put_node: + of_node_put(dss); return 0; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 654fea2b43124777b7afad6bb8d920da90d61cd5..8814aa38c5e7be1f4a2b8e4dbfea830d6190e8dc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1503,7 +1503,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = { static const struct panel_desc lg_lb070wv8 = { .modes = &lg_lb070wv8_mode, .num_modes = 1, - .bpc = 16, + .bpc = 8, .size = { .width = 151, .height = 91, diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 9e7d5e44a12fa4587aa38b2fc9c2899b89cbbd3e..90c1afe498beaf6c545e7b9d5ecca30ac21f599f 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4364,7 +4364,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev, table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; - if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; if (!pi->mem_gddr5) { diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 04c0ed41374f1ada033278e168b99b08ab643521..dd0528cf98183b79905106731d6bb78068543c5c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -104,25 +104,33 @@ static bool radeon_read_bios(struct radeon_device *rdev) static bool radeon_read_platform_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = rdev->pdev->rom; + size_t romlen = rdev->pdev->romlen; + void __iomem *bios; rdev->bios = NULL; - bios = 
pci_platform_rom(rdev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + rdev->bios = kzalloc(romlen, GFP_KERNEL); + if (!rdev->bios) return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } + + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(rdev->bios, bios, romlen); + iounmap(bios); + + if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) + goto free_bios; return true; +free_bios: + kfree(rdev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index de656f55538392fef2ec2d74365b250ff97418d0..b9927101e8450597fe69b863683b4c1cd4a764e3 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -882,8 +882,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -1028,8 +1030,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1166,8 +1170,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1250,8 +1256,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (radeon_connector->detected_hpd_without_ddc) { @@ -1665,8 +1673,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && radeon_check_hpd_status_unchanged(connector)) { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7d1e14f0140a2aef0b000afe2c42916ea5bc5ded..3f0f3a578ddf04b7f3ddd34f14317365562af261 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -625,8 +625,10 @@ radeon_crtc_set_config(struct drm_mode_set *set, dev = set->crtc->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_crtc_helper_set_config(set, ctx); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c26f09b47ecb2b1d305de20cbded4332047d69c0..0cd33289c2b63aa5ba2c54f25d64564b98b3c194 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -168,12 +168,7 @@ int radeon_no_wb; int radeon_modeset = -1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; -#ifdef __powerpc__ 
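The radeon_read_platform_bios() rewrite above drops pci_platform_rom() in favour of mapping the ROM window directly, copying it out with memcpy_fromio(), and keeping the copy only if the 0x55AA option-ROM signature checks out. A hedged sketch of that copy-and-validate shape (the helper name and exact error handling are illustrative, not the driver's actual code):

	#include <linux/io.h>
	#include <linux/slab.h>

	/* Sketch: copy a platform ROM into kernel memory and validate its signature. */
	static u8 *copy_platform_rom(phys_addr_t rom, size_t romlen)
	{
		void __iomem *bios;
		u8 *copy;

		if (!rom || romlen == 0)
			return NULL;

		copy = kzalloc(romlen, GFP_KERNEL);
		if (!copy)
			return NULL;

		bios = ioremap(rom, romlen);
		if (!bios)
			goto free_copy;

		memcpy_fromio(copy, bios, romlen);
		iounmap(bios);

		if (copy[0] != 0x55 || copy[1] != 0xaa)	/* option ROM signature */
			goto free_copy;

		return copy;

	free_copy:
		kfree(copy);
		return NULL;
	}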
-/* Default to PCI on PowerPC (fdo #95017) */ int radeon_agpmode = -1; -#else -int radeon_agpmode = 0; -#endif int radeon_vram_limit = 0; int radeon_gart_size = -1; /* auto */ int radeon_benchmarking = 0; @@ -523,8 +518,10 @@ long radeon_drm_ioctl(struct file *filp, long ret; dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3ff835767ac58fbab6526d7cb4df6ab7b53b217a..34b3cb6c146f920f95adae38f3748ccca7e4f428 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -627,8 +627,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); return r; + } /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 79eb11cd185d1eb1b3bdb60478b46fc4811c64d9..9a5584efd5e78828779bb9b0f72d182b546856d6 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -761,7 +761,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0), sun6i_dsi_dcs_build_pkt_hdr(dsi, msg)); - bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL); + bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL); if (!bounce) return -ENOMEM; @@ -772,7 +772,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc)); len += sizeof(crc); - regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len); + regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4)); regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1); kfree(bounce); diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index 880e8fbb08556871424ab785a3c260416196a791..242752b2d328cacaaa54378d23f1ca38e7ab2adf 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -14,7 +14,7 @@ struct sun8i_mixer; /* VI channel CSC units offsets */ #define CCSC00_OFFSET 0xAA050 -#define CCSC01_OFFSET 0xFA000 +#define CCSC01_OFFSET 0xFA050 #define CCSC10_OFFSET 0xA0000 #define CCSC11_OFFSET 0xF0000 diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 71a798e5d55917b95b3cd5ffeca2aa2512e13f16..649b57e5e4b780f7daf1416356fc1cadcd20ce7d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -364,7 +364,7 @@ static struct regmap_config sun8i_mixer_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 0xbfffc, /* guessed */ + .max_register = 0xffffc, /* guessed */ }; static int sun8i_mixer_of_get_id(struct device_node *node) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index a1acab39d87f49385c51edddbaeae87e9745c956..096a33f12c615239460ffba7bdc26fc7ef290aad 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -150,12 +150,16 @@ static int panel_connector_get_modes(struct drm_connector *connector) int i; for (i = 0; i < timings->num_timings; i++) { - struct drm_display_mode *mode = drm_mode_create(dev); + 
struct drm_display_mode *mode; struct videomode vm; if (videomode_from_timings(timings, &vm, i)) break; + mode = drm_mode_create(dev); + if (!mode) + break; + drm_display_mode_from_videomode(&vm, mode); mode->type = DRM_MODE_TYPE_DRIVER; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e3a0691582ffdf0d85bc7c84ab424efc310e01be..68cfa25674e509960e51f9c6c27a9d40cea1325c 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, ttm_tt_init_fields(ttm, bo, page_flags); if (ttm_tt_alloc_page_directory(ttm)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, INIT_LIST_HEAD(&ttm_dma->pages_list); if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, else ret = ttm_dma_tt_alloc_page_directory(ttm_dma); if (ret) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index e8723a2412a6f5d7a0522799b1bb60cca1458e60..c0b113ba329c2b23537216d5c665971761f1fc38 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -132,9 +133,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, struct drm_connector *connector = priv->connector; u32 format = fb->format->format; u32 ctrl1 = 0; + int retries; clk_prepare_enable(priv->clk); + /* Reset the TVE200 and wait for it to come back online */ + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); + for (retries = 0; retries < 5; retries++) { + usleep_range(30000, 50000); + if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) + continue; + else + break; + } + if (retries == 5 && + readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) { + dev_err(drm->dev, "can't get hardware out of reset\n"); + return; + } + /* Function 1 */ ctrl1 |= TVE200_CTRL_CSMODE; /* Interlace mode for CCIR656: parameterize? 
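The tve200_display_enable() change above resets the controller and then polls the self-clearing reset bit a bounded number of times instead of assuming the hardware comes back instantly. A sketch of that bounded-poll shape; the register name and offset below are placeholders (the real driver uses TVE200_CTRL_4 and TVE200_CTRL_4_RESET):

	#include <linux/bits.h>
	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	#define FOO_CTRL	0x24		/* placeholder register offset */
	#define FOO_CTRL_RESET	BIT(0)		/* placeholder self-clearing reset bit */

	/* Returns 0 once the reset bit has cleared, -ETIMEDOUT after five attempts. */
	static int foo_hw_reset(void __iomem *regs)
	{
		int retries;

		writel(FOO_CTRL_RESET, regs + FOO_CTRL);
		for (retries = 0; retries < 5; retries++) {
			usleep_range(30000, 50000);
			if (!(readl(regs + FOO_CTRL) & FOO_CTRL_RESET))
				return 0;
		}
		return -ETIMEDOUT;
	}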
*/ @@ -231,8 +248,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe) drm_crtc_vblank_off(crtc); - /* Disable and Power Down */ + /* Disable put into reset and Power Down */ writel(0, priv->regs + TVE200_CTRL); + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); clk_disable_unprepare(priv->clk); } @@ -280,6 +298,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe) struct drm_device *drm = crtc->dev; struct tve200_drm_dev_private *priv = drm->dev_private; + /* Clear any IRQs and enable */ + writel(0xFF, priv->regs + TVE200_INT_CLR); writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN); return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 86b98856756d9d15f5c3cabe7cc3fb89db9b48a9..1161662664577a9d3f6b7b74e4bbf16cf66609fa 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1134,6 +1134,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) card->num_links = 1; card->name = "vc4-hdmi"; card->dev = dev; + card->owner = THIS_MODULE; /* * Be careful, snd_soc_register_card() calls dev_set_drvdata() and diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 4709f08f39e490d2b755b4a6b5bf5dc072635734..1c1a435d354bcae83d5b6d91f2c3e3420dc542e5 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -219,32 +219,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return 0; } -static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret; - - obj = drm_gem_object_lookup(file, handle); - if (!obj) - return -ENOENT; - - if (!obj->filp) { - ret = -EINVAL; - goto unref; - } - - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto unref; - - *offset = drm_vma_node_offset_addr(&obj->vma_node); -unref: - drm_gem_object_put_unlocked(obj); - - return ret; -} - static struct drm_ioctl_desc vgem_ioctls[] = { DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), @@ -438,7 +412,6 @@ static struct drm_driver vgem_driver = { .fops = &vgem_driver_fops, .dumb_create = vgem_gem_dumb_create, - .dumb_map_offset = vgem_gem_dumb_map, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 6a712a8d59e93b68fb68c98358af37b5e20d7680..e486b6517ac556f5d0c259992f92c5f42e44e24d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2861,7 +2861,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, ++i; } - if (i != unit) { + if (&con->head == &dev_priv->dev->mode_config.connector_list) { DRM_ERROR("Could not find initial display unit.\n"); ret = -EINVAL; goto out_unlock; @@ -2885,13 +2885,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, break; } - if (mode->type & DRM_MODE_TYPE_PREFERRED) - *p_mode = mode; - else { + if (&mode->head == &con->modes) { WARN_ONCE(true, "Could not find initial preferred mode.\n"); *p_mode = list_first_entry(&con->modes, struct drm_display_mode, head); + } else { + *p_mode = mode; } out_unlock: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 
72357811719122f70f36ea420786b0333a787e0a..0743a73117000bc22cb3439e17c5d03ffb556beb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -79,7 +79,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) struct vmw_legacy_display_unit *entry; struct drm_framebuffer *fb = NULL; struct drm_crtc *crtc = NULL; - int i = 0; + int i; /* If there is no display topology the host just assumes * that the guest will set the same layout as the host. @@ -90,12 +90,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) crtc = &entry->base.crtc; w = max(w, crtc->x + crtc->mode.hdisplay); h = max(h, crtc->y + crtc->mode.vdisplay); - i++; } if (crtc == NULL) return 0; - fb = entry->base.crtc.primary->state->fb; + fb = crtc->primary->state->fb; return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->format->cpp[0] * 8, diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index 329e4a3d8ae7b3b9444e0accd135a94179ed9fd8..6c9ad4533999c19ee747365a0e3c7925aefe0c20 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -25,6 +25,8 @@ #include "debug.h" #include "channel.h" +static DEFINE_MUTEX(debug_lock); + unsigned int host1x_debug_trace_cmdbuf; static pid_t host1x_debug_force_timeout_pid; @@ -61,12 +63,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) struct output *o = data; mutex_lock(&ch->cdma.lock); + mutex_lock(&debug_lock); if (show_fifo) host1x_hw_show_channel_fifo(m, ch, o); host1x_hw_show_channel_cdma(m, ch, o); + mutex_unlock(&debug_lock); mutex_unlock(&ch->cdma.lock); return 0; diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index 91653adc41cc437fd857931ec29c597c5220f856..cdaf1d74e31a28a7f8966c0af2f5d6e7573c699c 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c @@ -998,9 +998,10 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run) return IRQ_WAKE_THREAD; } -static irqreturn_t norotate_irq(int irq, void *data) +static irqreturn_t eof_irq(int irq, void *data) { struct ipu_image_convert_chan *chan = data; + struct ipu_image_convert_priv *priv = chan->priv; struct ipu_image_convert_ctx *ctx; struct ipu_image_convert_run *run; unsigned long flags; @@ -1017,45 +1018,26 @@ static irqreturn_t norotate_irq(int irq, void *data) ctx = run->ctx; - if (ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this is a rotation operation, just ignore */ - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; - } - - ret = do_irq(run); -out: - spin_unlock_irqrestore(&chan->irqlock, flags); - return ret; -} - -static irqreturn_t rotate_irq(int irq, void *data) -{ - struct ipu_image_convert_chan *chan = data; - struct ipu_image_convert_priv *priv = chan->priv; - struct ipu_image_convert_ctx *ctx; - struct ipu_image_convert_run *run; - unsigned long flags; - irqreturn_t ret; - - spin_lock_irqsave(&chan->irqlock, flags); - - /* get current run and its context */ - run = chan->current_run; - if (!run) { + if (irq == chan->out_eof_irq) { + if (ipu_rot_mode_is_irt(ctx->rot_mode)) { + /* this is a rotation op, just ignore */ + ret = IRQ_HANDLED; + goto out; + } + } else if (irq == chan->rot_out_eof_irq) { + if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { + /* this was NOT a rotation op, shouldn't happen */ + dev_err(priv->ipu->dev, + "Unexpected rotation interrupt\n"); + ret = IRQ_HANDLED; + goto out; + } + } else { + dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq); ret = 
IRQ_NONE; goto out; } - ctx = run->ctx; - - if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this was NOT a rotation operation, shouldn't happen */ - dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; - } - ret = do_irq(run); out: spin_unlock_irqrestore(&chan->irqlock, flags); @@ -1148,7 +1130,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) chan->out_chan, IPU_IRQ_EOF); - ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh, + ret = request_threaded_irq(chan->out_eof_irq, eof_irq, do_bh, 0, "ipu-ic", chan); if (ret < 0) { dev_err(priv->ipu->dev, "could not acquire irq %d\n", @@ -1161,7 +1143,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) chan->rotation_out_chan, IPU_IRQ_EOF); - ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh, + ret = request_threaded_irq(chan->rot_out_eof_irq, eof_irq, do_bh, 0, "ipu-ic", chan); if (ret < 0) { dev_err(priv->ipu->dev, "could not acquire irq %d\n", diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 47ce35d960e3b7b7f960d4f172d2aea01bbf3c99..e93c4b59efc07abad5a7c67541b8e4fc8a2bf14e 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -2407,14 +2407,6 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid( return ret; } -static inline int _check_region(unsigned long start, unsigned long size, - uint64_t len) -{ - uint64_t end = ((uint64_t) start) + size; - - return (end > len); -} - static int check_vma_flags(struct vm_area_struct *vma, unsigned int flags) { @@ -2429,23 +2421,27 @@ static int check_vma_flags(struct vm_area_struct *vma, return -EFAULT; } -static int check_vma(struct vm_area_struct *vma, struct file *vmfile, - struct kgsl_memdesc *memdesc) +static int check_vma(unsigned long hostptr, u64 size) { - if (vma == NULL || vma->vm_file != vmfile) - return -EINVAL; + struct vm_area_struct *vma; + unsigned long cur = hostptr; - /* userspace may not know the size, in which case use the whole vma */ - if (memdesc->size == 0) - memdesc->size = vma->vm_end - vma->vm_start; - /* range checking */ - if (vma->vm_start != memdesc->useraddr || - (memdesc->useraddr + memdesc->size) != vma->vm_end) - return -EINVAL; - return check_vma_flags(vma, memdesc->flags); + while (cur < (hostptr + size)) { + vma = find_vma(current->mm, cur); + if (!vma) + return false; + + /* Don't remap memory that we already own */ + if (vma->vm_file && vma->vm_file->f_op == &kgsl_fops) + return false; + + cur = vma->vm_end; + } + + return true; } -static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile) +static int memdesc_sg_virt(struct kgsl_memdesc *memdesc) { int ret = 0; long npages = 0, i; @@ -2468,18 +2464,16 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile) } down_read(¤t->mm->mmap_sem); - /* If we have vmfile, make sure we map the correct vma and map it all */ - if (vmfile != NULL) - ret = check_vma(find_vma(current->mm, memdesc->useraddr), - vmfile, memdesc); - - if (ret == 0) { - npages = get_user_pages(memdesc->useraddr, - sglen, write, pages, NULL); - ret = (npages < 0) ? (int)npages : 0; + if (!check_vma(memdesc->useraddr, memdesc->size)) { + up_read(¤t->mm->mmap_sem); + ret = -EFAULT; + goto out; } + + npages = get_user_pages(memdesc->useraddr, sglen, write, pages, NULL); up_read(¤t->mm->mmap_sem); + ret = (npages < 0) ? 
(int)npages : 0; if (ret) goto out; @@ -2530,7 +2524,7 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable, entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr; } - return memdesc_sg_virt(&entry->memdesc, NULL); + return memdesc_sg_virt(&entry->memdesc); } #ifdef CONFIG_DMA_SHARED_BUFFER diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 2c85d075daee13186ad079eea20aba37eefb7ad0..05122167d9d850c862dba09a80c476a1788b464f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1425,6 +1425,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. + */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. @@ -1437,7 +1448,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1564,7 +1575,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE - 1; diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 07e26c3567eb979666be9c7966b67d6a5dbf56d0..6346282e0ff055aa81ce508e3f9b63fbaa6f9a52 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -192,6 +192,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); + input_free_device(input); return ret; } @@ -202,6 +203,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) if (ret) { hid_err(hdev, "Failed to register elan input device: %d\n", ret); + input_mt_destroy_slots(input); input_free_device(input); return ret; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 95c0558b073a4db54993c4cae0f8881cd59210d9..5791b01296e0a4cdf53074f206a05bde74f5aa62 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -756,6 +756,7 @@ #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a +#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 @@ -987,6 +988,8 @@ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 #define USB_DEVICE_ID_SAITEK_X52 0x075c +#define USB_DEVICE_ID_SAITEK_X52_2 0x0255 +#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index dbb0cbe65fc98a9ada4bc1bce7ed940ae5577f99..a9da1526c40ae0635f1103d7867e9265cded96a5 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -362,13 +362,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev) u8 *buf; int ret; - buf = kmalloc(2, GFP_KERNEL); + buf = kmalloc(4, 
GFP_KERNEL); if (!buf) return -ENOMEM; - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4, dev->battery_report_type, HID_REQ_GET_REPORT); - if (ret != 2) { + if (ret < 2) { kfree(buf); return -ENODATA; } @@ -1125,6 +1125,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 8baf10beb1d5d2254bfc78564b670cccd44fe525..ccda72f748ee549dff0501c7adfc2d5c9bff2560 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -841,6 +841,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 9c7317f93169dce05bdc532a76ad6f600e3ba707..8ae357bf5a09019d07e1711bcd9bd25e3eb82f9c 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -147,6 +147,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, @@ -182,6 +184,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_QVR32A, USB_DEVICE_ID_QVR32A), HID_QUIRK_HIDINPUT_FORCE | HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_NREAL, USB_DEVICE_ID_NREAL), HID_QUIRK_HIDINPUT_FORCE | HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET }, { 0 } }; diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index f17ebbe53abf0dd1d0de29e34e4c10552c930b2d..1f8d403d3db4d574ddafbf5ab3d3ed33ce5d184b 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -444,6 +444,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) dev_err(&client->dev, "failed to change power setting.\n"); set_pwr_exit: + + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests. + * According to Goodix Windows even waits 60 ms after (other?) 
+ * PWR_ON requests. Testing has confirmed that several devices + * will not work properly without a delay after a PWR_ON request. + */ + if (!ret && power_state == I2C_HID_PWR_ON) + msleep(60); + return ret; } @@ -465,15 +478,6 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; - /* - * The HID over I2C specification states that if a DEVICE needs time - * after the PWR_ON request, it should utilise CLOCK stretching. - * However, it has been observered that the Windows driver provides a - * 1ms sleep between the PWR_ON and RESET requests and that some devices - * rely on this. - */ - usleep_range(1000, 5000); - i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index c34ef95d7cef338d0dec06f976ab7de7e9b86878..2dff663847c69b49c4ce5b30990b0813a2a299a8 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -532,12 +532,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, switch (cmd) { case HIDIOCGUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; uref->value = field->value[uref->usage_index]; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; case HIDIOCSUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; field->value[uref->usage_index] = uref->value; goto goodreturn; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 3bf1f9ef8ea258648d5b27474019f2004612c584..7920b0d7e35a73acdfb14cd8be944e26783df1e9 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -779,7 +779,7 @@ static void vmbus_wait_for_unload(void) void *page_addr; struct hv_message *msg; struct vmbus_channel_message_header *hdr; - u32 message_type; + u32 message_type, i; /* * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was @@ -789,8 +789,11 @@ static void vmbus_wait_for_unload(void) * functional and vmbus_unload_response() will complete * vmbus_connection.unload_event. If not, the last thing we can do is * read message pages for all CPUs directly. + * + * Wait no more than 10 seconds so that the panic path can't get + * hung forever in case the response message isn't seen. */ - while (1) { + for (i = 0; i < 1000; i++) { if (completion_done(&vmbus_connection.unload_event)) break; @@ -1249,6 +1252,8 @@ channel_message_table[CHANNELMSG_COUNT] = { { CHANNELMSG_19, 0, NULL }, { CHANNELMSG_20, 0, NULL }, { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL }, + { CHANNELMSG_22, 0, NULL }, + { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL }, }; /* @@ -1260,25 +1265,16 @@ void vmbus_onmessage(void *context) { struct hv_message *msg = context; struct vmbus_channel_message_header *hdr; - int size; hdr = (struct vmbus_channel_message_header *)msg->u.payload; - size = msg->header.payload_size; trace_vmbus_on_message(hdr); - if (hdr->msgtype >= CHANNELMSG_COUNT) { - pr_err("Received invalid channel message type %d size %d\n", - hdr->msgtype, size); - print_hex_dump_bytes("", DUMP_PREFIX_NONE, - (unsigned char *)msg->u.payload, size); - return; - } - - if (channel_message_table[hdr->msgtype].message_handler) - channel_message_table[hdr->msgtype].message_handler(hdr); - else - pr_err("Unhandled channel message type %d\n", hdr->msgtype); + /* + * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go + * out of bound and the message_handler pointer can not be NULL. 
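The vmbus hunk above concentrates the validation of hdr->msgtype and of the handler pointer in vmbus_on_msg_dpc(), so vmbus_onmessage() can index the table directly. Below is a minimal standalone sketch of that shape, not the driver's code: the message names, the handler table and dispatch() are invented for illustration, and the checks sit once at the entry point.

    /* Sketch: bounds- and NULL-checked dispatch through a handler table. */
    #include <stdio.h>
    #include <stddef.h>

    enum { MSG_OFFER, MSG_RESCIND, MSG_COUNT };

    struct msg_header { unsigned int msgtype; };

    typedef void (*msg_handler_t)(const struct msg_header *hdr);

    static void handle_offer(const struct msg_header *hdr)
    {
        printf("offer, type %u\n", hdr->msgtype);
    }

    /* A NULL entry means the type is known but intentionally unhandled. */
    static const msg_handler_t handlers[MSG_COUNT] = {
        [MSG_OFFER]   = handle_offer,
        [MSG_RESCIND] = NULL,
    };

    /* Validate once here; later callers may index the table directly. */
    static void dispatch(const struct msg_header *hdr)
    {
        if (hdr->msgtype >= MSG_COUNT) {
            fprintf(stderr, "invalid message type %u\n", hdr->msgtype);
            return;
        }
        if (!handlers[hdr->msgtype])
            return;                 /* silently ignore unhandled types */
        handlers[hdr->msgtype](hdr);
    }

    int main(void)
    {
        struct msg_header good = { MSG_OFFER }, bad = { 42 };

        dispatch(&good);
        dispatch(&bad);
        return 0;
    }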
+ */ + channel_message_table[hdr->msgtype].message_handler(hdr); } /* diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index fb22b72fd535a6c780bf1b84268752bd8cb63823..0699c601888956afe8ac6c0367ea97f518279b2c 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -939,6 +939,10 @@ void vmbus_on_msg_dpc(unsigned long data) } entry = &channel_message_table[hdr->msgtype]; + + if (!entry->message_handler) + goto msg_handled; + if (entry->handler_type == VMHT_BLOCKING) { ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx == NULL) diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 5c677ba44014307c7651928641393da6e90f5eae..b201129a9beaeebde3e512d60bbae1bf2fd1ee01 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -760,15 +760,18 @@ static ssize_t applesmc_light_show(struct device *dev, } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; + + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: @@ -817,12 +820,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); - speed = ((buffer[0] << 8 | buffer[1]) >> 2); - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + + speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, @@ -858,12 +860,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); + + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, @@ -879,10 +880,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; + val = (buffer[0] << 8 | buffer[1]); + if (input) val = val | (0x01 << to_index(attr)); else @@ -958,13 +960,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + - ((u32)buffer[2]<<8) + buffer[3]; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + + ((u32)buffer[2]<<8) + buffer[3]; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index d1c0b954053feaaf26ff12a2068a2ef5b1a46b22..f4fc47614a6d0dec69183969dd62ed36169b5f47 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -623,18 +623,15 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->enable) { + drvdata->reading = 
false; + /* Re-enable the TMC if need be */ + if (drvdata->mode == CS_MODE_SYSFS) { /* There is no point in reading a TMC in HW FIFO mode */ mode = readl_relaxed(drvdata->base + TMC_MODE); if (mode != TMC_MODE_CIRCULAR_BUFFER) { spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EINVAL; } - } - - drvdata->reading = false; - /* Re-enable the TMC if need be */ - if (drvdata->mode == CS_MODE_SYSFS) { /* * The trace run will continue with the same allocated trace * buffer. As such zero-out the buffer so that we don't end diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 0e745f82d6a53c91abaa29b0b87b93ce564fce53..f328de980855d97345b0b0a6eef94f53b1d06596 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -50,8 +50,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap) pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); pca_outw(adap, I2C_PCA_IND, 0xA5); pca_outw(adap, I2C_PCA_IND, 0x5A); + + /* + * After a reset we need to re-apply any configuration + * (calculated in pca_init) to get the bus in a working state. + */ + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); + + pca_set_con(adap, I2C_PCA_CON_ENSIO); } else { adap->reset_chip(adap->data); + pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); } } @@ -432,13 +446,14 @@ static int pca_init(struct i2c_adapter *adap) " Use the nominal frequency.\n", adap->name); } - pca_reset(pca_data); - clock = pca_clock(pca_data); printk(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); - pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.clock_freq = clock; + + pca_reset(pca_data); } else { int clock; int mode; @@ -505,19 +520,15 @@ static int pca_init(struct i2c_adapter *adap) thi = tlow * min_thi / min_tlow; } + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.mode = mode; + pca_data->bus_settings.tlow = tlow; + pca_data->bus_settings.thi = thi; + pca_reset(pca_data); printk(KERN_INFO "%s: Clock frequency is %dHz\n", adap->name, clock * 100); - - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE); - pca_outw(pca_data, I2C_PCA_IND, mode); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL); - pca_outw(pca_data, I2C_PCA_IND, tlow); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH); - pca_outw(pca_data, I2C_PCA_IND, thi); - - pca_set_con(pca_data, I2C_PCA_CON_ENSIO); } udelay(500); /* 500 us for oscillator to stabilise */ diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index d917cefc5a19c62882234fa9f88ea382007bf2a3..b13605718291619f29e8fc6d21bb513ef513b21e 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -382,10 +382,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. 
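The i2c-algo-pca change above stores the mode and SCL timing computed in pca_init() in bus_settings so that pca_reset() can re-apply them and leave the bus usable after a chip reset. The standalone sketch below shows only that "cache at init, replay after reset" idea; the structure and register names are invented and the register writes are stubbed with printf.

    /* Sketch: cache controller settings at init, restore them on reset. */
    #include <stdio.h>

    struct bus_settings {
        unsigned int mode;
        unsigned int tlow;
        unsigned int thi;
    };

    struct ctrl {
        struct bus_settings saved;
    };

    static void reg_write(const char *reg, unsigned int val)
    {
        printf("%s <- 0x%x\n", reg, val);   /* stand-in for a register write */
    }

    /* Compute the timing once and remember it for any later reset. */
    static void ctrl_init(struct ctrl *c, unsigned int mode,
                          unsigned int tlow, unsigned int thi)
    {
        c->saved.mode = mode;
        c->saved.tlow = tlow;
        c->saved.thi = thi;
    }

    /* A reset wipes the chip, so re-apply the cached configuration. */
    static void ctrl_reset(struct ctrl *c)
    {
        reg_write("RESET", 1);
        reg_write("MODE", c->saved.mode);
        reg_write("SCLL", c->saved.tlow);
        reg_write("SCLH", c->saved.thi);
        reg_write("ENABLE", 1);
    }

    int main(void)
    {
        struct ctrl c;

        ctrl_init(&c, 0x1, 100, 80);
        ctrl_reset(&c);              /* bus comes back in a working state */
        return 0;
    }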
*/ - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -442,11 +440,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->send_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. */ diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 8a8ca945561b090822c40afaf768a1a45b3dd247..7eba874a981d3f872c1f879ca77b19e40c0b527b 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -74,6 +74,9 @@ struct i2c_ram { char res1[4]; /* Reserved */ ushort rpbase; /* Relocation pointer */ char res2[2]; /* Reserved */ + /* The following elements are only for CPM2 */ + char res3[4]; /* Reserved */ + uint sdmatmp; /* Internal */ }; #define I2COM_START 0x80 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 679c6c41f64b49babf8b0a7505d56a2c4093f6c7..3ac3b26cc9317bdab7f5c55e953f5fb40b2d875f 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1506,6 +1506,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } static inline void i801_acpi_remove(struct i801_priv *priv) { } #endif +static unsigned char i801_setup_hstcfg(struct i801_priv *priv) +{ + unsigned char hstcfg = priv->original_hstcfg; + + hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ + hstcfg |= SMBHSTCFG_HST_EN; + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg); + return hstcfg; +} + static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned char temp; @@ -1611,14 +1621,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) return err; } - pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp); - priv->original_hstcfg = temp; - temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ - if (!(temp & SMBHSTCFG_HST_EN)) { + pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg); + temp = i801_setup_hstcfg(priv); + if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN)) dev_info(&dev->dev, "Enabling SMBus device\n"); - temp |= SMBHSTCFG_HST_EN; - } - pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp); if (temp & SMBHSTCFG_SMB_SMI_EN) { dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n"); @@ -1692,6 +1698,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_drvdata(dev, priv); + dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_set_autosuspend_delay(&dev->dev, 1000); pm_runtime_use_autosuspend(&dev->dev); pm_runtime_put_autosuspend(&dev->dev); @@ -1745,6 +1752,7 @@ static int i801_resume(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct i801_priv *priv = pci_get_drvdata(pci_dev); + i801_setup_hstcfg(priv); i801_enable_host_notify(&priv->adapter); return 0; diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 90f5d0407d7323c3d183dbdf9508f8fac061c58c..f48e637f1a3e880f429a01641e49f5e19c958c44 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. 
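The i2c-i801 hunks above factor the SMBHSTCFG programming into i801_setup_hstcfg(), which derives the value to write from the saved original_hstcfg, so that probe and the resume path configure the register identically. A small sketch of that pure-helper shape, with illustrative bit names rather than the real SMBHSTCFG definitions:

    /* Sketch: derive the programmed value from the firmware-provided one,
     * so probe and resume can share a single helper. */
    #include <stdio.h>

    #define CFG_I2C_EN  (1u << 2)   /* illustrative bits, not the real layout */
    #define CFG_HST_EN  (1u << 0)

    struct priv { unsigned char original_cfg; };

    static unsigned char setup_cfg(const struct priv *p)
    {
        unsigned char cfg = p->original_cfg;

        cfg &= ~CFG_I2C_EN;   /* force SMBus timing */
        cfg |= CFG_HST_EN;    /* make sure the host controller is enabled */
        return cfg;           /* caller writes this to the config register */
    }

    int main(void)
    {
        struct priv p = { .original_cfg = CFG_I2C_EN };

        printf("programmed cfg = 0x%02x\n", (unsigned int)setup_cfg(&p));
        return 0;
    }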
*/ +#include #include #include #include @@ -35,12 +36,17 @@ #define REG_CTRL_ACK_IGNORE BIT(1) #define REG_CTRL_STATUS BIT(2) #define REG_CTRL_ERROR BIT(3) -#define REG_CTRL_CLKDIV_SHIFT 12 -#define REG_CTRL_CLKDIV_MASK GENMASK(21, 12) -#define REG_CTRL_CLKDIVEXT_SHIFT 28 -#define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, 28) +#define REG_CTRL_CLKDIV GENMASK(21, 12) +#define REG_CTRL_CLKDIVEXT GENMASK(29, 28) + +#define REG_SLV_ADDR GENMASK(7, 0) +#define REG_SLV_SDA_FILTER GENMASK(10, 8) +#define REG_SLV_SCL_FILTER GENMASK(13, 11) +#define REG_SLV_SCL_LOW GENMASK(27, 16) +#define REG_SLV_SCL_LOW_EN BIT(28) #define I2C_TIMEOUT_MS 500 +#define FILTER_DELAY 15 enum { TOKEN_END = 0, @@ -135,19 +141,24 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int div; - div = DIV_ROUND_UP(clk_rate, freq * i2c->data->div_factor); + div = DIV_ROUND_UP(clk_rate, freq); + div -= FILTER_DELAY; + div = DIV_ROUND_UP(div, i2c->data->div_factor); /* clock divider has 12 bits */ - if (div >= (1 << 12)) { + if (div > GENMASK(11, 0)) { dev_err(i2c->dev, "requested bus frequency too low\n"); - div = (1 << 12) - 1; + div = GENMASK(11, 0); } - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, - (div & GENMASK(9, 0)) << REG_CTRL_CLKDIV_SHIFT); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV, + FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0))); + + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT, + FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10)); - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, - (div >> 10) << REG_CTRL_CLKDIVEXT_SHIFT); + /* Disable HIGH/LOW mode */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0); dev_dbg(i2c->dev, "%s: clk %lu, freq %u, div %u\n", __func__, clk_rate, freq, div); @@ -276,7 +287,10 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) token = (msg->flags & I2C_M_RD) ? 
TOKEN_SLAVE_ADDR_READ : TOKEN_SLAVE_ADDR_WRITE; - writel(msg->addr << 1, i2c->regs + REG_SLAVE_ADDR); + + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR, + FIELD_PREP(REG_SLV_ADDR, msg->addr << 1)); + meson_i2c_add_token(i2c, TOKEN_START); meson_i2c_add_token(i2c, token); } @@ -435,6 +449,10 @@ static int meson_i2c_probe(struct platform_device *pdev) return ret; } + /* Disable filtering */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, + REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); + meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); return 0; diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c index 96b4572e6d9cc4465decf7993616595caa492da3..cf3fcf35fe3d88484de879d87409fa98e91067d2 100644 --- a/drivers/i2c/busses/i2c-owl.c +++ b/drivers/i2c/busses/i2c-owl.c @@ -179,6 +179,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT); if (fifostat & OWL_I2C_FIFOSTAT_RNB) { i2c_dev->err = -ENXIO; + /* Clear NACK error bit by writing "1" */ + owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOSTAT, + OWL_I2C_FIFOSTAT_RNB, true); goto stop; } @@ -186,6 +189,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) stat = readl(i2c_dev->base + OWL_I2C_REG_STAT); if (stat & OWL_I2C_STAT_BEB) { i2c_dev->err = -EIO; + /* Clear BUS error bit by writing "1" */ + owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT, + OWL_I2C_STAT_BEB, true); goto stop; } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 6e49e438ef5a58cb10519f40598ac8307ae6af41..f9029800d39962b3bf88a83f69deaf6617ce734c 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -587,13 +587,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } - rcar_i2c_write(priv, ICSSR, ~SAR & 0xff); + /* Clear SSR, too, because of old STOPs to other clients than us */ + rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); } /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } @@ -848,7 +850,7 @@ static int rcar_reg_slave(struct i2c_client *slave) priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); rcar_i2c_write(priv, ICSSR, 0); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSCR, SIE | SDBS); return 0; @@ -860,12 +862,14 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); - /* disable irqs and ensure none is running before clearing ptr */ + /* ensure no irq is running before clearing ptr */ + disable_irq(priv->irq); rcar_i2c_write(priv, ICSIER, 0); - rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSSR, 0); + enable_irq(priv->irq); + rcar_i2c_write(priv, ICSCR, SDBS); rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ - synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f225bef1e043c9e8de71318f7ccfe1d9b726773a..41dd0a08a625c9071618b7cb656fb5a6e9cfa13c 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1292,8 +1292,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) /* create pre-declared device nodes */ of_i2c_register_devices(adap); - 
i2c_acpi_register_devices(adap); i2c_acpi_install_space_handler(adap); + i2c_acpi_register_devices(adap); if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 47a9f70a24a972810b431a997eba934d5a6b520f..f2e7e373ee478bfd0b02348ec782fa35f98a8ef1 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -22,10 +22,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n", @@ -64,6 +62,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index b45dff609b3593cc3206115086bbdd65bd70d4b2..12b864485060f9d191b18612a9ee7700d41aff9f 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -151,12 +152,17 @@ #define CFG_FAIL_STALL_DIFF_EN BIT(20) #define ADDR_ASSOCIATED_W_OTHER_GPII_EN BIT(21) +/* Enable bits for GPIIn, n:[0-11] */ +#define GPIIn_IBI_EN(n) BIT(n) + /* IBI_CMD fields */ #define IBI_CMD_OPCODE BIT(0) #define I3C_SLAVE_RW BIT(15) #define STALL BIT(21) #define I3C_SLAVE_ADDR_SHIFT 8 #define I3C_SLAVE_MASK 0x7f +#define NUM_OF_MDB_SHIFT 16 +#define IBI_NUM_OF_MDB_MSK GENMASK(18, 16) /* IBI_GEN_CONFIG fields */ #define IBI_C_ENABLE BIT(0) @@ -202,6 +208,8 @@ enum geni_i3c_err_code { #define IBI_SW_RESET_MIN_SLEEP 1000 #define IBI_SW_RESET_MAX_SLEEP 2000 +#define MAX_I3C_SE 2 + enum i3c_trans_dir { WRITE_TRANSACTION = 0, READ_TRANSACTION = 1 @@ -319,6 +327,12 @@ struct geni_i3c_clk_fld { u32 i2c_t_cycle_cnt; }; +static void geni_i3c_enable_ibi_ctrl(struct geni_i3c_dev *gi3c, bool enable); +static void geni_i3c_enable_ibi_irq(struct geni_i3c_dev *gi3c, bool enable); + +static struct geni_i3c_dev *i3c_geni_dev[MAX_I3C_SE]; +static int i3c_nos; + static struct geni_i3c_dev* to_geni_i3c_master(struct i3c_master_controller *master) { @@ -449,11 +463,11 @@ static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c, static void geni_i3c_err(struct geni_i3c_dev *gi3c, int err) { if (gi3c->cur_rnw == WRITE_TRANSACTION) - GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "len:%d, write\n", - gi3c->cur_len); + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s:Error: Write, len:%d\n", __func__, gi3c->cur_len); else - GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "len:%d, read\n", - gi3c->cur_len); + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s:Error: Read, len:%d\n", __func__, gi3c->cur_len); GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "%s\n", gi3c_log[err].msg); gi3c->err = gi3c_log[err].err; @@ -580,6 +594,26 @@ static irqreturn_t geni_i3c_ibi_irq(int irq, void *dev) return IRQ_HANDLED; } +static void geni_i3c_handle_err(struct geni_i3c_dev *gi3c, u32 status) +{ + if (status & M_GP_IRQ_0_EN) + geni_i3c_err(gi3c, RD_TERM); + if (status & M_GP_IRQ_1_EN) + geni_i3c_err(gi3c, NACK); + if (status & M_GP_IRQ_2_EN) + geni_i3c_err(gi3c, CRC_ERR); + if (status & 
M_GP_IRQ_3_EN) + geni_i3c_err(gi3c, BUS_PROTO); + if (status & M_GP_IRQ_4_EN) + geni_i3c_err(gi3c, NACK_7E); + if (status & M_CMD_OVERRUN_EN) + geni_i3c_err(gi3c, GENI_OVERRUN); + if (status & M_ILLEGAL_CMD_EN) + geni_i3c_err(gi3c, GENI_ILLEGAL_CMD); + if (status & M_CMD_ABORT_EN) + geni_i3c_err(gi3c, GENI_ABORT_DONE); +} + static irqreturn_t geni_i3c_irq(int irq, void *dev) { struct geni_i3c_dev *gi3c = dev; @@ -597,24 +631,8 @@ static irqreturn_t geni_i3c_irq(int irq, void *dev) dm_rx_st = readl_relaxed(gi3c->se.base + SE_DMA_RX_IRQ_STAT); dma = readl_relaxed(gi3c->se.base + SE_GENI_DMA_MODE_EN); - if ((m_stat & SE_I3C_ERR) || - (dm_rx_st & DM_I3C_CB_ERR)) { - if (m_stat & M_GP_IRQ_0_EN) - geni_i3c_err(gi3c, RD_TERM); - if (m_stat & M_GP_IRQ_1_EN) - geni_i3c_err(gi3c, NACK); - if (m_stat & M_GP_IRQ_2_EN) - geni_i3c_err(gi3c, CRC_ERR); - if (m_stat & M_GP_IRQ_3_EN) - geni_i3c_err(gi3c, BUS_PROTO); - if (m_stat & M_GP_IRQ_4_EN) - geni_i3c_err(gi3c, NACK_7E); - if (m_stat & M_CMD_OVERRUN_EN) - geni_i3c_err(gi3c, GENI_OVERRUN); - if (m_stat & M_ILLEGAL_CMD_EN) - geni_i3c_err(gi3c, GENI_ILLEGAL_CMD); - if (m_stat & M_CMD_ABORT_EN) - geni_i3c_err(gi3c, GENI_ABORT_DONE); + if ((m_stat & SE_I3C_ERR) || (dm_rx_st & DM_I3C_CB_ERR)) { + geni_i3c_handle_err(gi3c, m_stat); /* Disable the TX Watermark interrupt to stop TX */ if (!dma) @@ -688,8 +706,11 @@ static irqreturn_t geni_i3c_irq(int irq, void *dev) complete(&gi3c->done); } else if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE) || - (dm_rx_st & RX_RESET_DONE)) + (dm_rx_st & RX_RESET_DONE) || + (dm_tx_st & TX_RESET_DONE)) { + complete(&gi3c->done); + } spin_unlock_irqrestore(&gi3c->spinlock, flags); return IRQ_HANDLED; @@ -740,6 +761,7 @@ static int _i3c_geni_execute_command enum i3c_trans_dir rnw = gi3c->cur_rnw; u32 len = gi3c->cur_len; + reinit_completion(&gi3c->done); geni_se_select_mode(gi3c->se.base, xfer->mode); gi3c->err = 0; @@ -759,6 +781,8 @@ static int _i3c_geni_execute_command gi3c->se.base, gi3c->cur_buf, len, &rx_dma); if (ret) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "DMA Err:%d, FIFO mode enabled\n", ret); xfer->mode = FIFO_MODE; geni_se_select_mode(gi3c->se.base, xfer->mode); } @@ -777,6 +801,8 @@ static int _i3c_geni_execute_command gi3c->se.base, gi3c->cur_buf, len, &tx_dma); if (ret) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "DMA Err:%d, FIFO mode enabled\n", ret); xfer->mode = FIFO_MODE; geni_se_select_mode(gi3c->se.base, xfer->mode); } @@ -785,32 +811,55 @@ static int _i3c_geni_execute_command writel_relaxed(1, gi3c->se.base + SE_GENI_TX_WATERMARK_REG); } - time_remaining = wait_for_completion_timeout(&gi3c->done, - XFER_TIMEOUT); + + time_remaining = wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT); if (!time_remaining) { unsigned long flags; GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "got wait_for_completion timeout\n"); - spin_lock_irqsave(&gi3c->spinlock, flags); geni_i3c_err(gi3c, GENI_TIMEOUT); gi3c->cur_buf = NULL; gi3c->cur_len = gi3c->cur_idx = 0; gi3c->cur_rnw = 0; - geni_abort_m_cmd(gi3c->se.base); + + reinit_completion(&gi3c->done); + + spin_lock_irqsave(&gi3c->spinlock, flags); + geni_cancel_m_cmd(gi3c->se.base); spin_unlock_irqrestore(&gi3c->spinlock, flags); - time_remaining = wait_for_completion_timeout(&gi3c->done, - XFER_TIMEOUT); + + time_remaining = wait_for_completion_timeout(&gi3c->done, HZ); + if (!time_remaining) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s:Cancel cmd failed : Aborting\n", __func__); + + reinit_completion(&gi3c->done); + 
spin_lock_irqsave(&gi3c->spinlock, flags); + geni_abort_m_cmd(gi3c->se.base); + spin_unlock_irqrestore(&gi3c->spinlock, flags); + time_remaining = + wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT); + if (!time_remaining) + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s:Abort Failed\n", __func__); + } } + if (xfer->mode == SE_DMA) { if (gi3c->err) { + reinit_completion(&gi3c->done); if (rnw == READ_TRANSACTION) writel_relaxed(1, gi3c->se.base + SE_DMA_RX_FSM_RST); else writel_relaxed(1, gi3c->se.base + SE_DMA_TX_FSM_RST); + time_remaining = wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT); + if (!time_remaining) + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "Timeout:FSM Reset, rnw:%d\n", rnw); } geni_se_rx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev, rx_dma, len); @@ -1087,6 +1136,10 @@ static int geni_i3c_master_priv_xfers << SLV_ADDR_SHFT); xfer.m_param |= (use_7e) ? USE_7E : 0; + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s: stall:%d,use_7e:%d, nxfers:%d,i:%d,m_param:0x%x,rnw:%d\n", + __func__, stall, use_7e, nxfers, i, xfer.m_param, xfers[i].rnw); + /* Update use_7e status for next loop iteration */ use_7e = !stall; @@ -1176,6 +1229,7 @@ static int geni_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev) if (!data) return -ENOMEM; + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "%s\n", __func__); i2c_dev_set_master_data(dev, data); return 0; @@ -1298,8 +1352,11 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m) /* Get an address for the master. */ ret = i3c_master_get_free_addr(m, 0); - if (ret < 0) + if (ret < 0) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s: error No free addr:%d\n", __func__, ret); goto err_cleanup; + } info.dyn_addr = ret; info.dcr = I3C_DCR_GENERIC_DEVICE; @@ -1333,6 +1390,8 @@ static bool geni_i3c_master_supports_ccc_cmd const struct i3c_ccc_cmd *cmd ) { + struct geni_i3c_dev *gi3c = to_geni_i3c_master(m); + switch (cmd->id) { case I3C_CCC_ENEC(true): /* fallthrough */ @@ -1390,6 +1449,8 @@ static bool geni_i3c_master_supports_ccc_cmd break; } + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s: Unsupported cmnd\n", __func__); return false; } @@ -1430,38 +1491,14 @@ static int geni_i3c_master_disable_ibi(struct i3c_dev_desc *dev) static void qcom_geni_i3c_ibi_conf(struct geni_i3c_dev *gi3c) { - u32 val, timeout; - gi3c->ibi.err = 0; reinit_completion(&gi3c->ibi.done); /* set the configuration for 100Khz OD speed */ geni_write_reg(0x5FD74322, gi3c->se.ibi_base, IBI_SCL_PP_TIMING_CONFIG); - /* Enable I3C IBI controller */ - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); - val |= IBI_C_ENABLE; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); - - /* enable ENABLE_CHANGE */ - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - val |= IBI_C_ENABLE; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - - /* wait for ENABLE_CHANGE */ - timeout = wait_for_completion_timeout(&gi3c->ibi.done, XFER_TIMEOUT); - if (!timeout) { - GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, - "timeout while ENABLE_CHANGE bit\n"); - return; - } - - /* enable manager interrupts */ - geni_write_reg(0x1B, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - - /* Enable GPII0 interrupts */ - geni_write_reg(0x1, gi3c->se.ibi_base, IBI_GPII_IBI_EN); - geni_write_reg(~0u, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + geni_i3c_enable_ibi_ctrl(gi3c, true); + geni_i3c_enable_ibi_irq(gi3c, true); gi3c->ibi.is_init = true; } @@ -1472,6 +1509,7 @@ static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev, struct geni_i3c_dev *gi3c = 
to_geni_i3c_master(m); struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); unsigned long i, flags; + unsigned int payload_len = req->max_payload_len; if (!gi3c->ibi.hw_support) return -EPERM; @@ -1505,6 +1543,7 @@ static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev, cmd = ((dev->info.dyn_addr & I3C_SLAVE_MASK) << I3C_SLAVE_ADDR_SHIFT) | I3C_SLAVE_RW | STALL; + cmd |= ((payload_len << NUM_OF_MDB_SHIFT) & IBI_NUM_OF_MDB_MSK); geni_write_reg(cmd, gi3c->se.ibi_base, IBI_CMD(0)); /* wait for adding slave IBI */ @@ -1565,50 +1604,110 @@ static int qcom_deallocate_ibi_table_entry(struct geni_i3c_dev *gi3c) return 0; } -static void qcom_geni_i3c_ibi_unconf(struct geni_i3c_dev *gi3c) +static void geni_i3c_enable_hotjoin_irq(struct geni_i3c_dev *gi3c, bool enable) { - u32 val, timeout; - int ret = 0; + u32 val; - val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0)); - if (val) { - ret = qcom_deallocate_ibi_table_entry(gi3c); - if (ret) - return; - } + //Disable hot-join, until next probe happens + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + if (enable) + val |= HOT_JOIN_IRQ_EN; + else + val &= ~HOT_JOIN_IRQ_EN; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - /* disable interrupts */ - geni_write_reg(0, gi3c->se.ibi_base, IBI_GPII_IBI_EN); - geni_write_reg(0, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s:%s\n", __func__, (enable) ? "Enabled" : "Disabled"); +} - /* check if any IBI is enabled, if not then reset HW */ - val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN); - if (!val) { +static void geni_i3c_enable_ibi_irq(struct geni_i3c_dev *gi3c, bool enable) +{ + u32 val; - gi3c->ibi.err = 0; + if (enable) { + /* enable manager interrupts : HPG sec 4.1 */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + val |= (val & 0x1B); + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + + /* Enable GPII0 interrupts */ + geni_write_reg(GPIIn_IBI_EN(0), gi3c->se.ibi_base, + IBI_GPII_IBI_EN); + geni_write_reg(~0u, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + } else { + geni_write_reg(0, gi3c->se.ibi_base, IBI_GPII_IBI_EN); + geni_write_reg(0, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + geni_write_reg(0, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + } +} + +static void geni_i3c_enable_ibi_ctrl(struct geni_i3c_dev *gi3c, bool enable) +{ + u32 val, timeout; + + if (enable) { reinit_completion(&gi3c->ibi.done); - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); - val &= ~IBI_C_ENABLE; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); + /* enable ENABLE_CHANGE */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + val |= IBI_C_ENABLE; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + /* Enable I3C IBI controller, if not in enabled state */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); + if (!(val & IBI_C_ENABLE)) { + val |= IBI_C_ENABLE; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); - /* wait for ENABLE change */ - timeout = wait_for_completion_timeout(&gi3c->ibi.done, - XFER_TIMEOUT); - if (!timeout) { - GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, - "timeout while disabling IBI controller\n"); + /* wait for ENABLE_CHANGE */ + timeout = wait_for_completion_timeout(&gi3c->ibi.done, + XFER_TIMEOUT); + if (!timeout) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "timeout while ENABLE_CHANGE bit\n"); return; + } + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s: IBI ctrl enabled\n", __func__); } + } else { + /* Disable IBI controller */ + + /* check if 
any IBI is enabled, if not then disable IBI ctrl */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN); + if (!val) { + gi3c->ibi.err = 0; + reinit_completion(&gi3c->ibi.done); - if (gi3c->ibi.err) { + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); + val &= ~IBI_C_ENABLE; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); + + /* wait for ENABLE change */ + timeout = wait_for_completion_timeout(&gi3c->ibi.done, + XFER_TIMEOUT); + if (!timeout) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "timeout disabling IBI: 0x%x\n", gi3c->ibi.err); + return; + } GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, - "error while disabling IBI controller 0x%x\n", - gi3c->ibi.err); - return; + "%s: IBI ctrl disabled\n", __func__); } + } +} + +static void qcom_geni_i3c_ibi_unconf(struct geni_i3c_dev *gi3c) +{ + u32 val; + int ret = 0; + + val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0)); + if (val) { + ret = qcom_deallocate_ibi_table_entry(gi3c); + if (ret) + return; } gi3c->ibi.is_init = false; @@ -1742,20 +1841,19 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c, (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH)); if (ret) { GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, - "geni_se_resources_init\n"); + "geni_se_resources_init Failed:%d\n", ret); return ret; } ret = device_property_read_u32(&pdev->dev, "se-clock-frequency", - &gi3c->clk_src_freq); + &gi3c->clk_src_freq); if (ret) { GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "SE clk freq not specified, default to 100 MHz.\n"); gi3c->clk_src_freq = 100000000; } - ret = device_property_read_u32(&pdev->dev, "dfs-index", - &gi3c->dfs_idx); + ret = device_property_read_u32(&pdev->dev, "dfs-index", &gi3c->dfs_idx); if (ret) gi3c->dfs_idx = 0xf; @@ -1848,8 +1946,7 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c, IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "Request_irq failed:%d: err:%d\n", - gi3c->ibi.mngr_irq, ret); + "Request_irq:%d: err:%d\n", gi3c->ibi.mngr_irq, ret); return ret; } @@ -1871,12 +1968,11 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c, } ret = devm_request_irq(&pdev->dev, gi3c->ibi.gpii_irq[0], - geni_i3c_ibi_irq, IRQF_TRIGGER_HIGH, - dev_name(&pdev->dev), gi3c); + geni_i3c_ibi_irq, IRQF_TRIGGER_HIGH, + dev_name(&pdev->dev), gi3c); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "Request_irq failed:%d: err:%d\n", - gi3c->ibi.gpii_irq[0], ret); + "Request_irq failed:%d: err:%d\n", gi3c->ibi.gpii_irq[0], ret); return ret; } @@ -1907,58 +2003,71 @@ static int geni_i3c_probe(struct platform_device *pdev) gi3c->se.dev = &pdev->dev; gi3c->ipcl = ipc_log_context_create(4, dev_name(gi3c->se.dev), 0); + if (!gi3c->ipcl) + dev_info(&pdev->dev, "Error creating IPC Log\n"); + + if (i3c_nos < MAX_I3C_SE) + i3c_geni_dev[i3c_nos++] = gi3c; ret = i3c_geni_rsrcs_init(gi3c, pdev); - if (ret) - return ret; + if (ret) { + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "Error:%d i3c_geni_rsrcs_init\n", ret); + goto cleanup_init; + } ret = i3c_geni_rsrcs_clk_init(gi3c); - if (ret) - return ret; + if (ret) { + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "Error:%d i3c_geni_rsrcs_clk_init\n", ret); + goto cleanup_init; + } gi3c->irq = platform_get_irq(pdev, 0); if (gi3c->irq < 0) { + ret = gi3c->irq; GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "IRQ error for i3c-master-geni\n"); - return gi3c->irq; + "IRQ error=%d for i3c-master-geni\n", ret); + goto cleanup_init; } init_completion(&gi3c->done); mutex_init(&gi3c->lock); 
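The geni_i3c_probe() rework above routes every failure through the cleanup_init and geni_resources_off labels instead of returning directly. The sketch below shows only that goto-ladder error-unwinding style with invented stub steps; it is not the driver's probe sequence.

    /* Sketch: goto-ladder error handling where each label undoes only
     * what was already set up before the failing step. */
    #include <stdio.h>

    static int get_resources(void)  { return 0; }
    static void put_resources(void) { puts("resources released"); }
    static int power_on(void)       { return 0; }
    static void power_off(void)     { puts("powered off"); }
    static int register_ctrl(void)  { return -5; }  /* simulate a failure */

    static int probe(void)
    {
        int ret;

        ret = get_resources();
        if (ret)
            goto out;            /* nothing acquired yet */

        ret = power_on();
        if (ret)
            goto put_res;        /* undo only the resource step */

        ret = register_ctrl();
        if (ret)
            goto pwr_off;        /* undo power and resources, in order */

        puts("probe ok");
        return 0;

    pwr_off:
        power_off();
    put_res:
        put_resources();
    out:
        printf("probe failed: %d\n", ret);
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }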
spin_lock_init(&gi3c->spinlock); platform_set_drvdata(pdev, gi3c); + + /* Keep interrupt disabled so the system can enter low-power mode */ + irq_set_status_flags(gi3c->irq, IRQ_NOAUTOEN); ret = devm_request_irq(&pdev->dev, gi3c->irq, geni_i3c_irq, IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "Request_irq failed:%d: err:%d\n", - gi3c->irq, ret); - return ret; + "i3c irq failed:%d: err:%d\n", gi3c->irq, ret); + goto cleanup_init; } - /* Disable the interrupt so that the system can enter low-power mode */ - disable_irq(gi3c->irq); ret = se_geni_resources_on(&gi3c->se.i3c_rsc); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "Error turning on resources %d\n", ret); - return ret; + goto cleanup_init; } proto = get_se_proto(gi3c->se.base); if (proto != I3C) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "Invalid proto %d\n", proto); - se_geni_resources_off(&gi3c->se.i3c_rsc); - return -ENXIO; + ret = -ENXIO; + goto geni_resources_off; } se_mode = geni_read_reg(gi3c->se.base, GENI_IF_FIFO_DISABLE_RO); if (se_mode) { + /* GSI mode not supported */ GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "Non supported mode %d\n", se_mode); - se_geni_resources_off(&gi3c->se.i3c_rsc); - return -ENXIO; + ret = -ENXIO; + goto geni_resources_off; } tx_depth = get_tx_fifo_depth(gi3c->se.base); @@ -1966,21 +2075,11 @@ static int geni_i3c_probe(struct platform_device *pdev) geni_se_init(gi3c->se.base, gi3c->tx_wm, tx_depth); se_config_packing(gi3c->se.base, BITS_PER_BYTE, PACKING_BYTES_PW, true); - gi3c->hj_wl = wakeup_source_register(gi3c->se.dev, - dev_name(gi3c->se.dev)); - if (!gi3c->hj_wl) { - GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "wakeup source registration failed\n"); - se_geni_resources_off(&gi3c->se.i3c_rsc); - return -ENOMEM; - } - - INIT_WORK(&gi3c->hj_wd, geni_i3c_hotjoin); - gi3c->hj_wq = alloc_workqueue("%s", 0, 0, dev_name(gi3c->se.dev)); - ret = i3c_ibi_rsrcs_init(gi3c, pdev); if (ret) { - se_geni_resources_off(&gi3c->se.i3c_rsc); - return ret; + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "Error: %d, i3c_ibi_rsrcs_init\n", ret); + goto geni_resources_off; } se_geni_resources_off(&gi3c->se.i3c_rsc); @@ -1995,32 +2094,56 @@ static int geni_i3c_probe(struct platform_device *pdev) ret = i3c_master_register(&gi3c->ctrlr, &pdev->dev, &geni_i3c_master_ops, false); - if (ret) + if (ret) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "I3C master registration failed=%d, continue\n", ret); + + /* NOTE : This may fail on 7E NACK, but should return 0 */ + ret = 0; + } + + // hot-join + gi3c->hj_wl = wakeup_source_register(gi3c->se.dev, + dev_name(gi3c->se.dev)); + if (!gi3c->hj_wl) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "i3c_master_register failed:%d\n", ret); + "wakeup source registration failed\n"); + se_geni_resources_off(&gi3c->se.i3c_rsc); + return -ENOMEM; + } - //enable hot-join IRQ also - geni_write_reg(~0u, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + INIT_WORK(&gi3c->hj_wd, geni_i3c_hotjoin); + gi3c->hj_wq = alloc_workqueue("%s", 0, 0, dev_name(gi3c->se.dev)); + geni_i3c_enable_hotjoin_irq(gi3c, true); - GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n"); + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "I3C probed:%d\n", ret); + return ret; - return 0; +geni_resources_off: + se_geni_resources_off(&gi3c->se.i3c_rsc); + +cleanup_init: + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "I3C probe failed\n"); + return ret; } static int geni_i3c_remove(struct platform_device *pdev) { struct geni_i3c_dev *gi3c = 
platform_get_drvdata(pdev); - int ret = 0, val = 0; + int ret = 0, i; //Disable hot-join, until next probe happens - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - val &= ~HOT_JOIN_IRQ_EN; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + geni_i3c_enable_hotjoin_irq(gi3c, false); + destroy_workqueue(gi3c->hj_wq); + wakeup_source_unregister(gi3c->hj_wl); if (gi3c->ibi.is_init) qcom_geni_i3c_ibi_unconf(gi3c); - destroy_workqueue(gi3c->hj_wq); - wakeup_source_unregister(gi3c->hj_wl); + geni_i3c_enable_ibi_ctrl(gi3c, false); + + /* Potentially to be done before pinctrl change */ + geni_i3c_enable_ibi_irq(gi3c, false); + /*force suspend to avoid the auto suspend caused by driver removal*/ pm_runtime_force_suspend(gi3c->se.dev); ret = pinctrl_select_state(gi3c->se.i3c_rsc.geni_pinctrl, @@ -2028,9 +2151,16 @@ static int geni_i3c_remove(struct platform_device *pdev) if (ret) GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, " i3c: pinctrl_select_state failed\n"); + ret = i3c_master_unregister(&gi3c->ctrlr); + /* TBD : If we need debug for previous session, Don't delete logs */ if (gi3c->ipcl) ipc_log_context_destroy(gi3c->ipcl); + + for (i = 0; i < i3c_nos; i++) + i3c_geni_dev[i] = NULL; + i3c_nos = 0; + return ret; } diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index cb8c98a440109d12154bc56e5fff8e03b8d07998..e029d4b0f7afd13ef7299a2cbda55dfe4d81591f 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -197,6 +197,14 @@ struct bmc150_accel_data { struct mutex mutex; u8 fifo_mode, watermark; s16 buffer[8]; + /* + * Ensure there is sufficient space and correct alignment for + * the timestamp if enabled + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; u8 bw_bits; u32 slope_dur; u32 slope_thres; @@ -915,15 +923,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, * now. */ for (i = 0; i < count; i++) { - u16 sample[8]; int j, bit; j = 0; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) - memcpy(&sample[j++], &buffer[i * 3 + bit], 2); + memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], + sizeof(data->scan.channels[0])); - iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + tstamp); tstamp += sample_period; } diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 0c0df4fce420694437f0ba14e10b61f0ed5a28e1..f74cb2e082a67f5c107d4558ae887dc8f81a5d33 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -212,14 +212,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) const struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct kxsd9_state *st = iio_priv(indio_dev); + /* + * Ensure correct positioning and alignment of timestamp. + * No need to zero initialize as all elements written. 
+ */ + struct { + __be16 chan[4]; + s64 ts __aligned(8); + } hw_values; int ret; - /* 4 * 16bit values AND timestamp */ - __be16 hw_values[8]; ret = regmap_bulk_read(st->map, KXSD9_REG_X, - &hw_values, - 8); + hw_values.chan, + sizeof(hw_values.chan)); if (ret) { dev_err(st->dev, "error reading data\n"); @@ -227,7 +233,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, - hw_values, + &hw_values, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c index da0ceaac46b53353980eabcb2375eb97f97a4ef4..a3b5d5780bc8fd2196aaa00d92e008b2b82c6aa5 100644 --- a/drivers/iio/accel/mma7455_core.c +++ b/drivers/iio/accel/mma7455_core.c @@ -55,6 +55,14 @@ struct mma7455_data { struct regmap *regmap; + /* + * Used to reorganize data. Will ensure correct alignment of + * the timestamp if present + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static int mma7455_drdy(struct mma7455_data *mma7455) @@ -85,19 +93,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma7455_data *mma7455 = iio_priv(indio_dev); - u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ int ret; ret = mma7455_drdy(mma7455); if (ret) goto done; - ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, - sizeof(__le16) * 3); + ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, + mma7455->scan.channels, + sizeof(mma7455->scan.channels)); if (ret) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index fcfec758fec633f4423db2616681669eb3eb9787..15c254b4745cc7bfa0a57ec16aa4d216cb20eca3 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -107,6 +107,12 @@ struct mma8452_data { u8 data_cfg; const struct mma_chip_info *chip_info; int sleep_val; + + /* Ensure correct alignment of time stamp when present */ + struct { + __be16 channels[3]; + s64 ts __aligned(8); + } buffer; }; /** @@ -1088,14 +1094,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma8452_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 3 16-bit channels + padding + ts */ int ret; - ret = mma8452_read(data, (__be16 *)buffer); + ret = mma8452_read(data, data->buffer.channels); if (ret < 0) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index d1239624187da1997c44c8ba7400f549568b39ff..1ab106b3d3a6bceeee393fad34017c659f898ac2 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -146,6 +146,11 @@ struct ina2xx_chip_info { int range_vbus; /* Bus voltage maximum in V */ int pga_gain_vshunt; /* Shunt voltage PGA gain */ bool allow_async_readout; + /* data buffer needs space for channel data and timestamp */ + struct { + u16 chan[4]; + u64 ts __aligned(8); + } scan; }; static const struct ina2xx_config ina2xx_config[] = { @@ -736,8 +741,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev) static int ina2xx_work_buffer(struct iio_dev *indio_dev) { struct 
ina2xx_chip_info *chip = iio_priv(indio_dev); - /* data buffer needs space for channel data and timestap */ - unsigned short data[4 + sizeof(s64)/sizeof(short)]; int bit, ret, i = 0; s64 time; @@ -756,10 +759,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) if (ret < 0) return ret; - data[i++] = val; + chip->scan.chan[i++] = val; } - iio_push_to_buffers_with_timestamp(indio_dev, data, time); + iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time); return 0; }; diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 49db9e9ae625c0fa3d75c6e14900eacb83cbdb92..b372b226ac203e1aaa3034f14b6e1ace2211020b 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -38,6 +38,11 @@ struct max1118 { struct spi_device *spi; struct mutex lock; struct regulator *reg; + /* Ensure natural alignment of buffer elements */ + struct { + u8 channels[2]; + s64 ts __aligned(8); + } scan; u8 data ____cacheline_aligned; }; @@ -162,7 +167,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max1118 *adc = iio_priv(indio_dev); - u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */ int scan_index; int i = 0; @@ -180,10 +184,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) goto out; } - data[i] = ret; + adc->scan.channels[i] = ret; i++; } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 819f26011500534f632db66b3a91a44a10e30f71..4ee4ca35c25509cfb69abbe6e760387d0df820b5 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -99,16 +99,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) { int ret; - mutex_lock(&adc->lock); - ret = i2c_master_send(adc->i2c, &newconfig, 1); if (ret > 0) { adc->config = newconfig; ret = 0; } - mutex_unlock(&adc->lock); - return ret; } @@ -141,6 +137,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, u8 config; u8 req_channel = channel->channel; + mutex_lock(&adc->lock); + if (req_channel != MCP3422_CHANNEL(adc->config)) { config = adc->config; config &= ~MCP3422_CHANNEL_MASK; @@ -148,12 +146,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc, config &= ~MCP3422_PGA_MASK; config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); ret = mcp3422_update_config(adc, config); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&adc->lock); return ret; + } msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); } - return mcp3422_read(adc, value, &config); + ret = mcp3422_read(adc, value, &config); + + mutex_unlock(&adc->lock); + + return ret; } static int mcp3422_read_raw(struct iio_dev *iio, diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 405e3779c0c564f45dde2b701bd3393ce4f759e0..ef95363ebac2aa069df26543483af8a5a98e75d0 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -36,6 +36,12 @@ struct adc081c { /* 8, 10 or 12 */ int bits; + + /* Ensure natural alignment of buffer elements */ + struct { + u16 channel; + s64 ts __aligned(8); + } scan; }; #define REG_CONV_RES 0x00 @@ -131,14 +137,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc081c *data = iio_priv(indio_dev); - u16 buf[8]; /* 2 bytes data + 6 bytes padding 
+ 8 bytes timestamp */ int ret; ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); if (ret < 0) goto out; - buf[0] = ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + data->scan.channel = ret; + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index 25504640e1261dba0ea92fd9d65b88c088ad14ba..ec490e7a5b737ab944e590f81445b341e07ea505 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -28,6 +28,11 @@ struct adc084s021 { struct spi_transfer spi_trans; struct regulator *reg; struct mutex lock; + /* Buffer used to align data */ + struct { + __be16 channels[4]; + s64 ts __aligned(8); + } scan; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache line. @@ -143,14 +148,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc) struct iio_poll_func *pf = pollfunc; struct iio_dev *indio_dev = pf->indio_dev; struct adc084s021 *adc = iio_priv(indio_dev); - __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */ mutex_lock(&adc->lock); - if (adc084s021_adc_conversion(adc, &data) < 0) + if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0) dev_err(&adc->spi->dev, "Failed to read data\n"); - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); mutex_unlock(&adc->lock); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 6a114dcb4a3a754b4d3d876c1e7af76147706f57..dc8d859e4b92cd2236c1739e825532e41f5ef3b5 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -312,6 +312,7 @@ static const struct iio_chan_spec ads1115_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +#ifdef CONFIG_PM static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; @@ -329,6 +330,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) return ret < 0 ? 
ret : 0; } +#else /* !CONFIG_PM */ + +static int ads1015_set_power_state(struct ads1015_data *data, bool on) +{ + return 0; +} + +#endif /* !CONFIG_PM */ + static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index b4a46eb45789379d97d4c5abac562738db0e8897..46d5d48b58b6c7735ae937a258376efe55f13439 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -78,6 +78,11 @@ struct ccs811_data { struct ccs811_reading buffer; struct iio_trigger *drdy_trig; bool drdy_trig_on; + /* Ensures correct alignment of timestamp if present */ + struct { + s16 channels[2]; + s64 ts __aligned(8); + } scan; }; static const struct iio_chan_spec ccs811_channels[] = { @@ -309,17 +314,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ccs811_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */ int ret; - ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4, - (u8 *)&buf); + ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, + sizeof(data->scan.channels), + (u8 *)data->scan.channels); if (ret != 4) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 095530c233e412242e83c55bb3085a643c2ba77f..7549abd544c0f16df8c67dac797521ae20dc26c8 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -417,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, val2); - ret = IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_MICRO; } else { int mult; @@ -448,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, ret = IIO_VAL_INT; break; default: - ret = -EINVAL; + return -EINVAL; } unlock: diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index e70a5339acb192006bf84e732a212d5106b26889..3fc11aec98b953089b9055eed07a1825dbac694d 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -38,8 +38,11 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); - if (!adis->buffer) + if (!adis->buffer) { + kfree(adis->xfer); + adis->xfer = NULL; return -ENOMEM; + } tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 830a2d45aa4dd4e7fb7ed7cbac5431c3011aba88..947f17588024a4452cc391ca460efd5e6e807d11 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1245,13 +1245,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ltr501_data *data = iio_priv(indio_dev); - u16 buf[8]; + struct { + u16 channels[3]; + s64 ts __aligned(8); + } scan; __le16 als_buf[2]; u8 mask = 0; int j = 0; int ret, psdata; - memset(buf, 0, sizeof(buf)); + memset(&scan, 0, sizeof(scan)); /* figure out which data needs to be ready */ if (test_bit(0, indio_dev->active_scan_mask) || @@ -1270,9 +1273,9 @@ static irqreturn_t 
ltr501_trigger_handler(int irq, void *p) if (ret < 0) return ret; if (test_bit(0, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[1]); + scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[0]); + scan.channels[j++] = le16_to_cpu(als_buf[0]); } if (mask & LTR501_STATUS_PS_RDY) { @@ -1280,10 +1283,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) &psdata, 2); if (ret < 0) goto done; - buf[j++] = psdata & LTR501_PS_DATA_MASK; + scan.channels[j++] = psdata & LTR501_PS_DATA_MASK; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index bcdb0eb9e5371f05a35ca91eb1daaa7337ac12f1..7d2b3d06572623a8ca45f1554d257299e32896f9 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c @@ -78,6 +78,11 @@ struct max44000_data { struct mutex lock; struct regmap *regmap; + /* Ensure naturally aligned timestamp */ + struct { + u16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ @@ -491,7 +496,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max44000_data *data = iio_priv(indio_dev); - u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ int index = 0; unsigned int regval; int ret; @@ -501,17 +505,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) ret = max44000_read_alsval(data); if (ret < 0) goto out_unlock; - buf[index++] = ret; + data->scan.channels[index++] = ret; } if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, ®val); if (ret < 0) goto out_unlock; - buf[index] = regval; + data->scan.channels[index] = regval; } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 42a827a66512182485678b53a534e079aee82a3c..379aa7f4a80417499b812cabce71b4545d3dd6d0 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -381,6 +381,12 @@ struct ak8975_data { struct iio_mount_matrix orientation; struct regulator *vdd; struct regulator *vid; + + /* Ensure natural alignment of timestamp */ + struct { + s16 channels[3]; + s64 ts __aligned(8); + } scan; }; /* Enable attached power regulator if any. */ @@ -815,7 +821,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) const struct i2c_client *client = data->client; const struct ak_def *def = data->def; int ret; - s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ __le16 fval[3]; mutex_lock(&data->lock); @@ -838,12 +843,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) mutex_unlock(&data->lock); /* Clamp to valid range. 
*/ - buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); - buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); - buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); + data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); + data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); - iio_push_to_buffers_with_timestamp(indio_dev, buff, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); + return; unlock: diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 64f206e11d497a28af270cf4973a422be41f581b..4ebf63360a697f5340f15325d6c43d99c0a64912 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1100,14 +1100,22 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err) break; } - spin_lock_irq(&cm.lock); + spin_lock_irq(&cm_id_priv->lock); + spin_lock(&cm.lock); + /* Required for cleanup paths related cm_req_handler() */ + if (cm_id_priv->timewait_info) { + cm_cleanup_timewait(cm_id_priv->timewait_info); + kfree(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; + } if (!list_empty(&cm_id_priv->altr_list) && (!cm_id_priv->altr_send_port_not_ready)) list_del(&cm_id_priv->altr_list); if (!list_empty(&cm_id_priv->prim_list) && (!cm_id_priv->prim_send_port_not_ready)) list_del(&cm_id_priv->prim_list); - spin_unlock_irq(&cm.lock); + spin_unlock(&cm.lock); + spin_unlock_irq(&cm_id_priv->lock); cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); @@ -1424,7 +1432,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, /* Verify that we're not in timewait. */ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_IDLE) { + if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; @@ -1442,12 +1450,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, param->ppath_sgid_attr, &cm_id_priv->av, cm_id_priv); if (ret) - goto error1; + goto out; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, cm_id_priv); if (ret) - goto error1; + goto out; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); @@ -1465,7 +1473,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) - goto error1; + goto out; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); @@ -1488,7 +1496,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, return 0; error2: cm_free_msg(cm_id_priv->msg); -error1: kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); @@ -1973,7 +1980,7 @@ static int cm_req_handler(struct cm_work *work) pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, be32_to_cpu(cm_id->local_id)); ret = -EINVAL; - goto free_timeinfo; + goto destroy; } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; @@ -2057,8 +2064,6 @@ static int cm_req_handler(struct cm_work *work) rejected: atomic_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); -free_timeinfo: - kfree(cm_id_priv->timewait_info); destroy: ib_destroy_cm_id(cm_id); return ret; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 
82f309fb3ce52d5f1d5e2db4367d41f90ae07cd8..e1ecd4682c096129e82577dca3923d60113fe0ba 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1617,7 +1617,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, if (!(rdma_protocol_ib(qp->device, attr->alt_ah_attr.port_num) && rdma_protocol_ib(qp->device, port))) { - ret = EINVAL; + ret = -EINVAL; goto out; } } @@ -1711,7 +1711,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) dev_put(netdev); - if (!rc) { + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { netdev_speed = lksettings.base.speed; } else { netdev_speed = SPEED_1000; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index c9af2d139f5cb6084fefb07c911095fa9d894bc0..957da3ffe593cde8de8d17fe7b795ac165717aa0 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3033,6 +3033,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, wc->wc_flags |= IB_WC_GRH; } +static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + /* + * Check if the vlan is configured in the host. If not configured, it + * can be a transparent VLAN. So dont report the vlan id. + */ + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + return false; + return true; +} + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { @@ -3101,9 +3114,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { - wc->vlan_id = vlan_id; - wc->sl = sl; - wc->wc_flags |= IB_WC_WITH_VLAN; + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->sl = sl; + wc->wc_flags |= IB_WC_WITH_VLAN; + } } wc->port_num = 1; wc->vendor_err = orig_cqe->status; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 589b0d4677d52dcefe03cebdc5eb60d9de145b8d..f1b666c80f368ff43868300515596d9b384a65f5 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -753,7 +753,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 09e7d3dd30553bdd038a1fe268e6faba2208ea13..336144876363a395558b84bc09c52e66a9939531 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -141,7 +141,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 9d3e8b9949458a4503d6ee942f8abe324c90954e..b6e9e0ef793911787ac25f921f338a88bff83d5d 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ 
b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -47,6 +47,7 @@ struct bnxt_qplib_dev_attr { #define FW_VER_ARR_LEN 4 u8 fw_ver[FW_VER_ARR_LEN]; +#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256 u16 max_sgid; u16 max_mrw; u32 max_qp; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 16145b0a145839349fd518d9a2bd24997d3faeaf..3fd3dfa3478b7cf382bef8bdad0855bb78339eab 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { err = pick_local_ipaddrs(dev, cm_id); if (err) - goto fail2; + goto fail3; } /* find a route */ @@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { err = pick_local_ip6addrs(dev, cm_id); if (err) - goto fail2; + goto fail3; } /* find a route */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 4321b9e3dbb4bae0289d5d57d9be4ed5f4eee72f..0273d0404e74080ed1cf8294d9efab96c118ac68 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2071,9 +2071,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); if (!dst || dst->error) { if (dst) { - dst_release(dst); i40iw_pr_err("ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return rc; } diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index a2d708dceb8d70c9bb92b71569180034d189ce90..cca12100c5833be51ff657ed832aa4ee778cebb2 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -351,10 +351,10 @@ struct qedr_srq_hwq_info { u32 wqe_prod; u32 sge_prod; u32 wr_prod_cnt; - u32 wr_cons_cnt; + atomic_t wr_cons_cnt; u32 num_elems; - u32 *virt_prod_pair_addr; + struct rdma_srq_producers *virt_prod_pair_addr; dma_addr_t phy_prod_pair_addr; }; diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 25667157736751a6f9e4b02276bc087b090b1c62..e908dfbaa137809412fcdf69c78b0720d55c16c8 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -460,10 +460,10 @@ qedr_addr6_resolve(struct qedr_dev *dev, if ((!dst) || dst->error) { if (dst) { - dst_release(dst); DP_ERR(dev, "ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return -EINVAL; } diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 38fe2f74137574cb28e96aca32fc8ed2ec604bb1..7b26afc7fef35562fd2755b17724fffd6b254c52 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3577,7 +3577,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq) * count and consumer count and subtract it from max * work request supported so that we get elements left. 
*/ - used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt; + used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt); return hw_srq->max_wr - used; } @@ -3592,7 +3592,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, unsigned long flags; int status = 0; u32 num_sge; - u32 offset; spin_lock_irqsave(&srq->lock, flags); @@ -3605,7 +3604,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, if (!qedr_srq_elem_left(hw_srq) || wr->num_sge > srq->hw_srq.max_sges) { DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n", - hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt, + hw_srq->wr_prod_cnt, + atomic_read(&hw_srq->wr_cons_cnt), wr->num_sge, srq->hw_srq.max_sges); status = -ENOMEM; *bad_wr = wr; @@ -3639,22 +3639,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, hw_srq->sge_prod++; } - /* Flush WQE and SGE information before + /* Update WQE and SGE information before * updating producer. */ - wmb(); + dma_wmb(); /* SRQ producer is 8 bytes. Need to update SGE producer index * in first 4 bytes and need to update WQE producer in * next 4 bytes. */ - *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod; - offset = offsetof(struct rdma_srq_producers, wqe_prod); - *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) = - hw_srq->wqe_prod; + srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod; + /* Make sure sge producer is updated first */ + dma_wmb(); + srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod; - /* Flush producer after updating it. */ - wmb(); wr = wr->next; } @@ -4077,7 +4075,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, } else { __process_resp_one(dev, qp, cq, wc, resp, wr_id); } - srq->hw_srq.wr_cons_cnt++; + atomic_inc(&srq->hw_srq.wr_cons_cnt); return 1; } diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 10999fa6928181f4f99e5e2cb60b6bea8b358eed..6589ff51eaf5c4cc78c29f0b97c82dc018f770a1 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -121,6 +121,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN; rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; + addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, + rxe->ndev->dev_addr); rxe->max_ucontext = RXE_MAX_UCONTEXT; } @@ -163,9 +165,6 @@ static int rxe_init_ports(struct rxe_dev *rxe) rxe_init_port_param(port); - if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len) - return -EINVAL; - port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len, sizeof(*port->pkey_tbl), GFP_KERNEL); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index dff605fdf60faf0a5c6550a360a32c18a40d5d13..2cca89ca08cd44103764bd5bfebfd884389cca90 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -203,6 +203,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, vaddr = page_address(sg_page(sg)); if (!vaddr) { pr_warn("null vaddr\n"); + ib_umem_release(umem); err = -ENOMEM; goto err1; } diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 230697fa31fe36705cc9b5c6d05e2d4d963ebc36..8a22ab8b29e9bf10fea68de1f49353266dc72365 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -583,15 +583,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, int err; if (mask & 
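
The qedr SRQ hunk above changes how the two producer indices are published: plain wmb() becomes dma_wmb(), with one barrier ordering the queue entries before the SGE producer and a second ordering the SGE producer before the WQE producer. A rough sketch of that publication idiom, with hypothetical ring and field names:

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <asm/barrier.h>

    struct ring_producers {
            u32 sge_prod;
            u32 wqe_prod;
    };

    static void ring_publish(struct ring_producers *prod, u32 sge, u32 wqe)
    {
            /* Queue entries written earlier must be visible first */
            dma_wmb();
            WRITE_ONCE(prod->sge_prod, sge);

            /* The device must see sge_prod before wqe_prod */
            dma_wmb();
            WRITE_ONCE(prod->wqe_prod, wqe);
    }
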
IB_QP_MAX_QP_RD_ATOMIC) { - int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); + int max_rd_atomic = attr->max_rd_atomic ? + roundup_pow_of_two(attr->max_rd_atomic) : 0; qp->attr.max_rd_atomic = max_rd_atomic; atomic_set(&qp->req.rd_atomic, max_rd_atomic); } if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { - int max_dest_rd_atomic = - __roundup_pow_of_two(attr->max_dest_rd_atomic); + int max_dest_rd_atomic = attr->max_dest_rd_atomic ? + roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 695a607e2d14c8b315f2782f22cf11b50b624676..b8f3e65402d1dca12dab7cce6ad0309f93fdff6c 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -332,10 +332,14 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb) { + struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); const struct ib_gid_attr *gid_attr; union ib_gid dgid; union ib_gid *pdgid; + if (pkt->mask & RXE_LOOPBACK_MASK) + return 0; + if (skb->protocol == htons(ETH_P_IP)) { ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr, (struct in6_addr *)&dgid); @@ -368,7 +372,7 @@ void rxe_rcv(struct sk_buff *skb) if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES)) goto drop; - if (unlikely(rxe_match_dgid(rxe, skb) < 0)) { + if (rxe_match_dgid(rxe, skb) < 0) { pr_warn_ratelimited("failed matching dgid\n"); goto drop; } diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index f5b1e0ad6142049fb928770acafa768f7075831e..f7f9caaec7d6bfdde9b07ca4a4befcea546afffa 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -733,6 +733,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, unsigned int mask; unsigned int length = 0; int i; + struct ib_send_wr *next; while (wr) { mask = wr_opcode_mask(wr->opcode, qp); @@ -749,6 +750,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, break; } + next = wr->next; + length = 0; for (i = 0; i < wr->num_sge; i++) length += wr->sg_list[i].length; @@ -759,7 +762,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, *bad_wr = wr; break; } - wr = wr->next; + wr = next; } rxe_run_task(&qp->req.task, 1); @@ -1143,7 +1146,7 @@ static ssize_t parent_show(struct device *device, struct rxe_dev *rxe = container_of(device, struct rxe_dev, ib_dev.dev); - return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1)); + return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); } static DEVICE_ATTR_RO(parent); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 85267bbf483603bee697fdddae4405507d5c3c31..ef122210170535dbb380beb6a3a4ad78f5aa0364 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -513,7 +513,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open_default(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev); -int ipoib_ib_dev_stop(struct net_device *dev); +void ipoib_ib_dev_stop(struct net_device *dev); void ipoib_ib_dev_up(struct net_device *dev); void ipoib_ib_dev_down(struct net_device *dev); int ipoib_ib_dev_stop_default(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 
0f2e80f54d33309c26d77d9debe5a118651ee423..82b9c5b6e3e65db8c99915fe827685c6c7e0714e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -669,14 +669,13 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, return rc; } -static void __ipoib_reap_ah(struct net_device *dev) +static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); struct ipoib_ah *ah, *tah; LIST_HEAD(remove_list); unsigned long flags; - netif_tx_lock_bh(dev); + netif_tx_lock_bh(priv->dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) @@ -687,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev) } spin_unlock_irqrestore(&priv->lock, flags); - netif_tx_unlock_bh(dev); + netif_tx_unlock_bh(priv->dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); - struct net_device *dev = priv->dev; - __ipoib_reap_ah(dev); + ipoib_reap_dead_ahs(priv); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); } -static void ipoib_flush_ah(struct net_device *dev) +static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - cancel_delayed_work(&priv->ah_reap_task); - flush_workqueue(priv->wq); - ipoib_reap_ah(&priv->ah_reap_task.work); + clear_bit(IPOIB_STOP_REAPER, &priv->flags); + queue_delayed_work(priv->wq, &priv->ah_reap_task, + round_jiffies_relative(HZ)); } -static void ipoib_stop_ah(struct net_device *dev) +static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - set_bit(IPOIB_STOP_REAPER, &priv->flags); - ipoib_flush_ah(dev); + cancel_delayed_work(&priv->ah_reap_task); + /* + * After ipoib_stop_ah_reaper() we always go through + * ipoib_reap_dead_ahs() which ensures the work is really stopped and + * does a final flush out of the dead_ah's list + */ } static int recvs_pending(struct net_device *dev) @@ -846,18 +845,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) return 0; } -int ipoib_ib_dev_stop(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - priv->rn_ops->ndo_stop(dev); - - clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_flush_ah(dev); - - return 0; -} - int ipoib_ib_dev_open_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -901,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev) return -1; } - clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(priv->wq, &priv->ah_reap_task, - round_jiffies_relative(HZ)); - + ipoib_start_ah_reaper(priv); if (priv->rn_ops->ndo_open(dev)) { pr_warn("%s: Failed to open dev\n", dev->name); goto dev_stop; @@ -915,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev) return 0; dev_stop: - set_bit(IPOIB_STOP_REAPER, &priv->flags); - cancel_delayed_work(&priv->ah_reap_task); - set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_ib_dev_stop(dev); + ipoib_stop_ah_reaper(priv); return -1; } +void ipoib_ib_dev_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + priv->rn_ops->ndo_stop(dev); + + clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + ipoib_stop_ah_reaper(priv); +} + void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -1232,7 +1223,7 @@ static void 
__ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_mcast_dev_flush(dev); if (oper_up) set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); - ipoib_flush_ah(dev); + ipoib_reap_dead_ahs(priv); } if (level >= IPOIB_FLUSH_NORMAL) @@ -1307,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. */ - ipoib_stop_ah(dev); + ipoib_reap_dead_ahs(priv); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6093e8268583d81558e137091a71083fe625d2cd..d0c35eb687aeb2b84454f50b4b4e6597e2579111 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev) /* no more works over the priv->wq */ if (priv->wq) { + /* See ipoib_mcast_carrier_on_task() */ + WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); priv->wq = NULL; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index d3ff1fc09af712700507d05ac3548703e49173a1..a9040c0fb4c3f6fd6b5a6c4d4357dcd1ee333643 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -2044,7 +2044,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index 1d6010d463e2cf7b7ee13b93ba8f6f4fb0e7127d..022a8cb58a06689557842ec744bd9bcfbb1115ca 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -454,7 +454,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data, fsp_reg_write_enable(psmouse, false); - return count; + return retval; } PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg); diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 6590d10f166fe0a8e15b0816924f3b09a0c29c7a..e46865785409434dcfbd468f9db4e5da98ceb7da 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -20,10 +20,12 @@ #include "trackpoint.h" static const char * const trackpoint_variants[] = { - [TP_VARIANT_IBM] = "IBM", - [TP_VARIANT_ALPS] = "ALPS", - [TP_VARIANT_ELAN] = "Elan", - [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_JYT_SYNAPTICS] = "JYT_Synaptics", + [TP_VARIANT_SYNAPTICS] = "Synaptics", }; /* @@ -283,6 +285,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, case TP_VARIANT_ALPS: case TP_VARIANT_ELAN: case TP_VARIANT_NXP: + case TP_VARIANT_JYT_SYNAPTICS: + case TP_VARIANT_SYNAPTICS: if (variant_id) *variant_id = param[0]; if (firmware_id) diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 538986e5ac5bcf4f5c38267db393a30fedb0dee6..4ebcdf802e9a2824b8824800c7573c502697531b 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -27,10 +27,12 @@ * 0x01 was the original IBM trackpoint, others implement very limited * subset of trackpoint features. 
*/ -#define TP_VARIANT_IBM 0x01 -#define TP_VARIANT_ALPS 0x02 -#define TP_VARIANT_ELAN 0x03 -#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_JYT_SYNAPTICS 0x05 +#define TP_VARIANT_SYNAPTICS 0x06 /* * Commands diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 8134c7f928165996a4d3096da8ec251063fd6894..51bd2ebaa342c8171899918e80ab1e3579aedd29 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -552,6 +552,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), }, }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; @@ -680,6 +688,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), }, }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; @@ -709,6 +725,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), }, }, + { + /* Acer Aspire 5 A515 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), + DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + }, + }, { } }; diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 58da65df03f5e2551352c9d99c42a794d036b8e5..7a59a8ebac1089bf40138bacc460815c7d3a9be5 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -776,6 +776,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) might_sleep(); + /* + * When memory encryption is active the device is likely not in a + * direct-mapped domain. Forbid using IOMMUv2 functionality for now. 
+ */ + if (mem_encrypt_active()) + return -ENODEV; + if (!amd_iommu_v2_supported()) return -ENODEV; diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 1bd0cd7168dfc92d2dcf7da7c78efc06c78b5f3b..4bf6049dd2c7970c15863574e1c397e24b89dc04 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1302,13 +1302,17 @@ static int exynos_iommu_of_xlate(struct device *dev, return -ENODEV; data = platform_get_drvdata(sysmmu); - if (!data) + if (!data) { + put_device(&sysmmu->dev); return -ENODEV; + } if (!owner) { owner = kzalloc(sizeof(*owner), GFP_KERNEL); - if (!owner) + if (!owner) { + put_device(&sysmmu->dev); return -ENOMEM; + } INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 852e2841395b42b00ceb3cfe2813767384dc4cdd..9d2d03545bb07f945757d471ef22b118c8550e15 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -479,12 +479,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) /* Enable interrupt-remapping */ iommu->gcmd |= DMA_GCMD_IRE; - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_IRES), sts); + /* Block compatibility-format MSIs */ + if (sts & DMA_GSTS_CFIS) { + iommu->gcmd &= ~DMA_GCMD_CFI; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_CFIS), sts); + } + /* * With CFI clear in the Global Command register, we should be * protected from dangerous (i.e. compatibility) interrupts @@ -601,13 +607,21 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) static void intel_teardown_irq_remapping(struct intel_iommu *iommu) { + struct fwnode_handle *fn; + if (iommu && iommu->ir_table) { if (iommu->ir_msi_domain) { + fn = iommu->ir_msi_domain->fwnode; + irq_domain_remove(iommu->ir_msi_domain); + irq_domain_free_fwnode(fn); iommu->ir_msi_domain = NULL; } if (iommu->ir_domain) { + fn = iommu->ir_domain->fwnode; + irq_domain_remove(iommu->ir_domain); + irq_domain_free_fwnode(fn); iommu->ir_domain = NULL; } free_pages((unsigned long)iommu->ir_table->base, diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 50217548c3b8e7afbb906a57b28705d2c1df7cb7..5ce55fabc9d809089562f41eca47d139f19d395f 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -101,8 +101,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); bytes = omap_iommu_dump_ctx(obj, p, count); + if (bytes < 0) + goto err; bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); +err: mutex_unlock(&iommu_debug_lock); kfree(buf); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index fe7d63cdfb1d78727f669e1077a48be071a1e079..d5cc32e80f5e24f87f53d6d22156c3cd6c847727 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2458,6 +2458,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, { msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; @@ -2473,7 +2474,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 
&its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 90aaf190157f774f3d5c7e6968e5520ce373f0b3..42455f31b06111f4a4c3e14e62360a07ddf1d8fa 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -23,7 +23,7 @@ #include struct mtk_sysirq_chip_data { - spinlock_t lock; + raw_spinlock_t lock; u32 nr_intpol_bases; void __iomem **intpol_bases; u32 *intpol_words; @@ -45,7 +45,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) reg_index = chip_data->which_word[hwirq]; offset = hwirq & 0x1f; - spin_lock_irqsave(&chip_data->lock, flags); + raw_spin_lock_irqsave(&chip_data->lock, flags); value = readl_relaxed(base + reg_index * 4); if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) { if (type == IRQ_TYPE_LEVEL_LOW) @@ -61,7 +61,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) data = data->parent_data; ret = data->chip->irq_set_type(data, type); - spin_unlock_irqrestore(&chip_data->lock, flags); + raw_spin_unlock_irqrestore(&chip_data->lock, flags); return ret; } @@ -220,7 +220,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node, ret = -ENOMEM; goto out_free_which_word; } - spin_lock_init(&chip_data->lock); + raw_spin_lock_init(&chip_data->lock); return 0; diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 97b27f338c305898f7b1c829214ab00ecc377912..f605470855f193e0cfc23d04b53dde86c1db4e21 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -382,6 +382,16 @@ static void stm32_irq_ack(struct irq_data *d) irq_gc_unlock(gc); } +/* directly set the target bit without reading first. 
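
The mtk-sysirq change above converts the chip data lock from spinlock_t to raw_spinlock_t because irq_set_type() is called from contexts that already hold raw locks with interrupts disabled; on PREEMPT_RT a plain spinlock_t can sleep, which is not allowed there. A minimal sketch of the resulting locking pattern, names hypothetical:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct chip_state {
            raw_spinlock_t lock;
            u32 polarity;
    };

    static void chip_set_polarity(struct chip_state *st, u32 mask, bool low)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&st->lock, flags);
            if (low)
                    st->polarity |= mask;
            else
                    st->polarity &= ~mask;
            raw_spin_unlock_irqrestore(&st->lock, flags);
    }
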
*/ +static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg) +{ + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(val, base + reg); +} + static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg) { struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); @@ -415,9 +425,9 @@ static void stm32_exti_h_eoi(struct irq_data *d) raw_spin_lock(&chip_data->rlock); - stm32_exti_set_bit(d, stm32_bank->rpr_ofst); + stm32_exti_write_bit(d, stm32_bank->rpr_ofst); if (stm32_bank->fpr_ofst != UNDEF_REG) - stm32_exti_set_bit(d, stm32_bank->fpr_ofst); + stm32_exti_write_bit(d, stm32_bank->fpr_ofst); raw_spin_unlock(&chip_data->rlock); diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index a781ba6de02ce6deb3fde5cec8c82eea77024542..112502a56e3e70f24868ebc54ef9ae3336c53573 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -173,6 +173,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev) { led_cdev->flags |= LED_SUSPENDED; led_set_brightness_nopm(led_cdev, 0); + flush_work(&led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_classdev_suspend); diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 77a104d2b1243a503d8daa0660d73ae19baa819e..13f414ff6fd007748e18f3fbe1b46356e4e3fafd 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -207,21 +207,33 @@ static int pm860x_led_probe(struct platform_device *pdev) data->cdev.brightness_set_blocking = pm860x_led_set; mutex_init(&data->lock); - ret = devm_led_classdev_register(chip->dev, &data->cdev); + ret = led_classdev_register(chip->dev, &data->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } pm860x_led_set(&data->cdev, 0); + + platform_set_drvdata(pdev, data); + return 0; } +static int pm860x_led_remove(struct platform_device *pdev) +{ + struct pm860x_led *data = platform_get_drvdata(pdev); + + led_classdev_unregister(&data->cdev); + + return 0; +} static struct platform_driver pm860x_led_driver = { .driver = { .name = "88pm860x-led", }, .probe = pm860x_led_probe, + .remove = pm860x_led_remove, }; module_platform_driver(pm860x_led_driver); diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c index 5ff7d72f73aa4ac62ceeab074fb26a6f09e69b3f..ecc265bb69a023daec01cad230fdf4ceeaf47306 100644 --- a/drivers/leds/leds-da903x.c +++ b/drivers/leds/leds-da903x.c @@ -113,12 +113,23 @@ static int da903x_led_probe(struct platform_device *pdev) led->flags = pdata->flags; led->master = pdev->dev.parent; - ret = devm_led_classdev_register(led->master, &led->cdev); + ret = led_classdev_register(led->master, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", id); return ret; } + platform_set_drvdata(pdev, led); + + return 0; +} + +static int da903x_led_remove(struct platform_device *pdev) +{ + struct da903x_led *led = platform_get_drvdata(pdev); + + led_classdev_unregister(&led->cdev); + return 0; } @@ -127,6 +138,7 @@ static struct platform_driver da903x_led_driver = { .name = "da903x-led", }, .probe = da903x_led_probe, + .remove = da903x_led_remove, }; module_platform_driver(da903x_led_driver); diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 72224b599ffce55ba13b9418e27c9fd8362173f0..c1e562a4d6adfa0179341a640373943cc308e199 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -698,7 
+698,7 @@ static int lm3533_led_probe(struct platform_device *pdev) platform_set_drvdata(pdev, led); - ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev); + ret = led_classdev_register(pdev->dev.parent, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id); return ret; @@ -708,13 +708,18 @@ static int lm3533_led_probe(struct platform_device *pdev) ret = lm3533_led_setup(led, pdata); if (ret) - return ret; + goto err_deregister; ret = lm3533_ctrlbank_enable(&led->cb); if (ret) - return ret; + goto err_deregister; return 0; + +err_deregister: + led_classdev_unregister(&led->cdev); + + return ret; } static int lm3533_led_remove(struct platform_device *pdev) @@ -724,6 +729,7 @@ static int lm3533_led_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); + led_classdev_unregister(&led->cdev); return 0; } diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c index 6cb94f9a2f3f3fb382d98f29f780d1cb58c76339..b9c60dd2b13274d9cc66db6a9c766f56ca180ece 100644 --- a/drivers/leds/leds-lm355x.c +++ b/drivers/leds/leds-lm355x.c @@ -168,18 +168,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip) /* input and output pins configuration */ switch (chip->type) { case CHIP_LM3554: - reg_val = pdata->pin_tx2 | pdata->ntc_pin; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin; ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val); if (ret < 0) goto out; - reg_val = pdata->pass_mode; + reg_val = (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val); if (ret < 0) goto out; break; case CHIP_LM3556: - reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin | + (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val); if (ret < 0) goto out; diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c index 1ee48cb21df95f8ac03a6d8ea6d4189ebf7c8ca2..022e973dc7c31f59c997e1cc193f1914632b833c 100644 --- a/drivers/leds/leds-mlxreg.c +++ b/drivers/leds/leds-mlxreg.c @@ -209,8 +209,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv) brightness = LED_OFF; led_data->base_color = MLXREG_LED_GREEN_SOLID; } - sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg", - data->label); + snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name), + "mlxreg:%s", data->label); led_cdev->name = led_data->led_cdev_name; led_cdev->brightness = brightness; led_cdev->max_brightness = LED_ON; diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index c5798b92e4d376f5f8b819b9bcc2828ca8611a0f..d926edcb04ee6ce9d99d6aaea1ae94a93754c9a0 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -273,12 +273,23 @@ static int wm831x_status_probe(struct platform_device *pdev) drvdata->cdev.blink_set = wm831x_status_blink_set; drvdata->cdev.groups = wm831x_status_groups; - ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev); + ret = led_classdev_register(wm831x->dev, &drvdata->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } + platform_set_drvdata(pdev, drvdata); + + return 0; +} + +static int wm831x_status_remove(struct platform_device *pdev) +{ + struct wm831x_status *drvdata = platform_get_drvdata(pdev); + + led_classdev_unregister(&drvdata->cdev); + return 0; } @@ -287,6 +298,7 @@ static struct platform_driver wm831x_status_driver = { .name = "wm831x-status", }, 
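
This run of LED hunks (88pm860x, da903x, lm3533 and wm831x-status) drops devm_led_classdev_register(), which was tied to the longer-lived parent device, in favour of explicit registration plus a .remove() callback, so the class device is unregistered when the platform driver itself is unbound. A sketch of that probe/remove pairing, with a hypothetical foo_led driver:

    #include <linux/leds.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_led {
            struct led_classdev cdev;
    };

    static int foo_led_probe(struct platform_device *pdev)
    {
            struct foo_led *led;
            int ret;

            led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
            if (!led)
                    return -ENOMEM;

            led->cdev.name = "foo:status";
            ret = led_classdev_register(&pdev->dev, &led->cdev);
            if (ret)
                    return ret;

            platform_set_drvdata(pdev, led);
            return 0;
    }

    static int foo_led_remove(struct platform_device *pdev)
    {
            struct foo_led *led = platform_get_drvdata(pdev);

            /* Not devm-managed, so it must be undone explicitly */
            led_classdev_unregister(&led->cdev);
            return 0;
    }
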
.probe = wm831x_status_probe, + .remove = wm831x_status_remove, }; module_platform_driver(wm831x_status_driver); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 2a2f189dd37cbd0ec5f27508d745b94716516056..6a380ed4919a09c9815519326b3010374e173640 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -264,7 +264,7 @@ struct bcache_device { #define BCACHE_DEV_UNLINK_DONE 2 #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 - unsigned int nr_stripes; + int nr_stripes; unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; @@ -585,6 +585,7 @@ struct cache_set { */ wait_queue_head_t btree_cache_wait; struct task_struct *btree_cache_alloc_lock; + spinlock_t btree_cannibalize_lock; /* * When we free a btree node, we increment the gen of the bucket the diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 268f1b6850840ad70711bca42276da11e6d5c18a..ec48cf86cab60b905b31fa4bb07c0c5c30de1d84 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, b->page_order = page_order; - t->data = (void *) __get_free_pages(gfp, b->page_order); + t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); if (!t->data) goto err; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 38a8f8d2a908de41ca24c71896339dab1c9266cb..e388e7bb7b5db8679428e1a55f924f16271cc4f3 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -830,7 +830,7 @@ int bch_btree_cache_alloc(struct cache_set *c) mutex_init(&c->verify_lock); c->verify_ondisk = (void *) - __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); + __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c))); c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); @@ -876,15 +876,17 @@ static struct btree *mca_find(struct cache_set *c, struct bkey *k) static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) { - struct task_struct *old; - - old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); - if (old && old != current) { + spin_lock(&c->btree_cannibalize_lock); + if (likely(c->btree_cache_alloc_lock == NULL)) { + c->btree_cache_alloc_lock = current; + } else if (c->btree_cache_alloc_lock != current) { if (op) prepare_to_wait(&c->btree_cache_wait, &op->wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&c->btree_cannibalize_lock); return -EINTR; } + spin_unlock(&c->btree_cannibalize_lock); return 0; } @@ -919,10 +921,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, */ static void bch_cannibalize_unlock(struct cache_set *c) { + spin_lock(&c->btree_cannibalize_lock); if (c->btree_cache_alloc_lock == current) { c->btree_cache_alloc_lock = NULL; wake_up(&c->btree_cache_wait); } + spin_unlock(&c->btree_cannibalize_lock); } static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 7bb15cddca5ecb6dbd25d3f21b2ca201e9d106f8..182c2b7bd9601606f4416c040b7a16fb62d19016 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -864,8 +864,8 @@ int bch_journal_alloc(struct cache_set *c) j->w[1].c = c; if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || - !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || - !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) + !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) || + 
!(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS))) return -ENOMEM; return 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 68ebc2759c2ef618c22c3137d65aa9fdb40f5866..7787ec42f81e1972852c75f7bea3b33396b466b8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1693,7 +1693,7 @@ void bch_cache_set_unregister(struct cache_set *c) } #define alloc_bucket_pages(gfp, c) \ - ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) + ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) { @@ -1737,6 +1737,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) sema_init(&c->sb_write_mutex, 1); mutex_init(&c->bucket_lock); init_waitqueue_head(&c->btree_cache_wait); + spin_lock_init(&c->btree_cannibalize_lock); init_waitqueue_head(&c->bucket_wait); init_waitqueue_head(&c->gc_wait); sema_init(&c->uuid_write_mutex, 1); @@ -2013,7 +2014,14 @@ static const char *register_cache_set(struct cache *ca) sysfs_create_link(&c->kobj, &ca->kobj, buf)) goto err; - if (ca->sb.seq > c->sb.seq) { + /* + * A special case is both ca->sb.seq and c->sb.seq are 0, + * such condition happens on a new created cache device whose + * super block is never flushed yet. In this case c->sb.version + * and other members should be updated too, otherwise we will + * have a mistaken super block version in cache set. + */ + if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { c->sb.version = ca->sb.version; memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); c->sb.flags = ca->sb.flags; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index b5fc3c6c7178e00212cb5d78d9f2acb3e4e8ab69..aa58833fb012f6a30778cc1c7d5d17b24c27b542 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -506,15 +506,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) { struct bcache_device *d = c->devices[inode]; - unsigned int stripe_offset, stripe, sectors_dirty; + unsigned int stripe_offset, sectors_dirty; + int stripe; if (!d) return; + stripe = offset_to_stripe(d, offset); + if (stripe < 0) + return; + if (UUID_FLASH_ONLY(&c->uuids[inode])) atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); - stripe = offset_to_stripe(d, offset); stripe_offset = offset & (d->stripe_size - 1); while (nr_sectors) { @@ -554,12 +558,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) static void refill_full_stripes(struct cached_dev *dc) { struct keybuf *buf = &dc->writeback_keys; - unsigned int start_stripe, stripe, next_stripe; + unsigned int start_stripe, next_stripe; + int stripe; bool wrapped = false; stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); - - if (stripe >= dc->disk.nr_stripes) + if (stripe < 0) stripe = 0; start_stripe = stripe; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index e75dc33339f6f64d7786183ee580f446365e5f92..b902e574c5c403f93ed0c510ec09fb68a4cda57f 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -28,10 +28,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline unsigned int offset_to_stripe(struct bcache_device *d, +static inline int offset_to_stripe(struct bcache_device *d, uint64_t offset) { do_div(offset, d->stripe_size); + + /* d->nr_stripes is in range [1, INT_MAX] */ + if (unlikely(offset >= d->nr_stripes)) { + 
pr_err("Invalid stripe %llu (>= nr_stripes %d).\n", + offset, d->nr_stripes); + return -EINVAL; + } + + /* + * Here offset is definitly smaller than INT_MAX, + * return it as int will never overflow. + */ return offset; } @@ -39,7 +51,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) { - unsigned int stripe = offset_to_stripe(&dc->disk, offset); + int stripe = offset_to_stripe(&dc->disk, offset); + + if (stripe < 0) + return false; while (1) { if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 151aa95775be2daee11c7721eb62dde28ec702b1..af6d4f898e4c1de8d9b72f053beb454d5617a1e8 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4d36373e1c0f044264af149bfb46efe7aa6c25c5..9fde174ce3961969e713de1b6cce25c4af707a17 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -95,9 +95,6 @@ static void dm_old_stop_queue(struct request_queue *q) static void dm_mq_stop_queue(struct request_queue *q) { - if (blk_mq_queue_stopped(q)) - return; - blk_mq_quiesce_queue(q); } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 6a26afcc1fd6b15f9e17f8b860cdb193f0443446..85077f4d257a704e385001b8116c5c97ac54b554 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -698,12 +698,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index cc028353f9d5594a7cf0766407671be8e561efb1..776aaf5951e4afc9f7d0d721ef1b109c7525bae7 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -226,6 +226,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) pfn_t pfn; int id; struct page **pages; + sector_t offset; wc->memory_vmapped = false; @@ -244,9 +245,16 @@ static int persistent_memory_claim(struct dm_writecache *wc) goto err1; } + offset = get_start_sect(wc->ssd_dev->bdev); + if (offset & (PAGE_SIZE / 512 - 1)) { + r = -EINVAL; + goto err1; + } + offset >>= PAGE_SHIFT - 9; + id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); if (da < 0) { wc->memory_map = NULL; r = da; @@ -268,7 +276,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, NULL, &pfn); if (daa <= 0) { r = daa ? 
daa : -EINVAL; diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 0b2af6e74fc375ed163824fa9cdaf84d1b9ffd95..4522e87d9d68d039808904cc0dd62dcd897bcf84 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1443,6 +1443,7 @@ static void unlock_all_bitmaps(struct mddev *mddev) } } kfree(cinfo->other_bitmap_lockres); + cinfo->other_bitmap_lockres = NULL; } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 01021382131bcf30f3f238ccd0f67b64972a207c..d91154d6545509a72e3521bf2c1423115dab0d48 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3596,6 +3596,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, * is missing/faulty, then we need to read everything we can. */ if (sh->raid_conf->level != 6 && + sh->raid_conf->rmw_level != PARITY_DISABLE_RMW && sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; @@ -4832,7 +4833,7 @@ static void handle_stripe(struct stripe_head *sh) * or to load a block that is being partially written. */ if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) + || (s.to_write && s.failed) || (s.syncing && (s.uptodate + s.compute < disks)) || s.replacing || s.expanding) diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 4961573850d549b224a6241cbdaf77acae0198e1..b2b3f779592fd35d3506ed9f9f7c0a8ea22974c1 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap, struct cec_log_addrs log_addrs; mutex_lock(&adap->lock); - log_addrs = adap->log_addrs; + /* + * We use memcpy here instead of assignment since there is a + * hole at the end of struct cec_log_addrs that an assignment + * might ignore. So when we do copy_to_user() we could leak + * one byte of memory. 
+ */ + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); if (!adap->is_configured) memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, sizeof(log_addrs.log_addr)); diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 097c42d3f8c2661c72e909580419c8ef4a8648a1..df0c7243eafe4211b10e650a7d89815d9ea29aea 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) goto error; if (dev->delivery_system == SYS_DVBS) { - dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; - dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; + u32 bit_error = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3] << 0; + + dev->dvbv3_ber = bit_error; + dev->post_bit_error += bit_error; c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue = dev->post_bit_error; dev->block_error += buf[4] << 8 | buf[5] << 0; diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c index 92f4112d2e377a939fe39cdad2eda884f5a9d21f..eaf94b817dbc054f2961154588602e5ee888075a 100644 --- a/drivers/media/firewire/firedtv-fw.c +++ b/drivers/media/firewire/firedtv-fw.c @@ -271,6 +271,8 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) name_len = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)); + if (name_len < 0) + return name_len; for (i = ARRAY_SIZE(model_names); --i; ) if (strlen(model_names[i]) <= name_len && strncmp(name, model_names[i], name_len) == 0) diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 4731e1c72f9601d63597d9f1c3cfd96b92178643..0a434bdce3b3b36cf81d654724f14c6ab5ca9e54 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr, if (rval < 0) { if (rval != -EBUSY && rval != -EAGAIN) pm_runtime_set_active(&client->dev); - pm_runtime_put(&client->dev); + pm_runtime_put_noidle(&client->dev); return -ENODEV; } if (smiapp_read_nvm(sensor, sensor->nvm)) { + pm_runtime_put(&client->dev); dev_err(&client->dev, "nvm read failed\n"); return -ENODEV; } diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index ed518b1f82e4a941799ee2c0061b5dcbf5fbabbf..d04ed438a45deaa8b5f3dda9f5419663c63c26d2 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -568,6 +568,38 @@ static void media_device_release(struct media_devnode *devnode) dev_dbg(devnode->parent, "Media device released\n"); } +static void __media_device_unregister_entity(struct media_entity *entity) +{ + struct media_device *mdev = entity->graph_obj.mdev; + struct media_link *link, *tmp; + struct media_interface *intf; + unsigned int i; + + ida_free(&mdev->entity_internal_idx, entity->internal_idx); + + /* Remove all interface links pointing to this entity */ + list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { + list_for_each_entry_safe(link, tmp, &intf->links, list) { + if (link->entity == entity) + __media_remove_intf_link(link); + } + } + + /* Remove all data links that belong to this entity */ + __media_entity_remove_links(entity); + + /* Remove all pads that belong to this entity */ + for (i = 0; i < entity->num_pads; i++) + media_gobj_destroy(&entity->pads[i].graph_obj); + + /* Remove the entity */ 
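
The cec-api.c hunk above spells out why a plain struct assignment is avoided before copy_to_user(): compilers are free to skip padding bytes on assignment, so a hole in the struct can carry stale kernel memory to user space. The defensive shape of such a path, using a hypothetical struct and helper, is roughly:

    #include <linux/string.h>
    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct sample_reply {
            u8 flags;
            /* 3 padding bytes the compiler may leave untouched on assignment */
            u32 value;
    };

    static long sample_fill_reply(u8 flags, u32 value, void __user *arg)
    {
            struct sample_reply out;

            memset(&out, 0, sizeof(out));   /* clears the padding bytes too */
            out.flags = flags;
            out.value = value;

            return copy_to_user(arg, &out, sizeof(out)) ? -EFAULT : 0;
    }
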
+ media_gobj_destroy(&entity->graph_obj); + + /* invoke entity_notify callbacks to handle entity removal?? */ + + entity->graph_obj.mdev = NULL; +} + /** * media_device_register_entity - Register an entity with a media device * @mdev: The media device @@ -625,6 +657,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, */ ret = media_graph_walk_init(&new, mdev); if (ret) { + __media_device_unregister_entity(entity); mutex_unlock(&mdev->graph_mutex); return ret; } @@ -637,38 +670,6 @@ int __must_check media_device_register_entity(struct media_device *mdev, } EXPORT_SYMBOL_GPL(media_device_register_entity); -static void __media_device_unregister_entity(struct media_entity *entity) -{ - struct media_device *mdev = entity->graph_obj.mdev; - struct media_link *link, *tmp; - struct media_interface *intf; - unsigned int i; - - ida_free(&mdev->entity_internal_idx, entity->internal_idx); - - /* Remove all interface links pointing to this entity */ - list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { - list_for_each_entry_safe(link, tmp, &intf->links, list) { - if (link->entity == entity) - __media_remove_intf_link(link); - } - } - - /* Remove all data links that belong to this entity */ - __media_entity_remove_links(entity); - - /* Remove all pads that belong to this entity */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_destroy(&entity->pads[i].graph_obj); - - /* Remove the entity */ - media_gobj_destroy(&entity->graph_obj); - - /* invoke entity_notify callbacks to handle entity removal?? */ - - entity->graph_obj.mdev = NULL; -} - void media_device_unregister_entity(struct media_entity *entity) { struct media_device *mdev = entity->graph_obj.mdev; diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c index 00329f668b590260713d3dab844179398f55b465..5177479d13d388efa1a2cbd13c87d051afdc0b00 100644 --- a/drivers/media/pci/cx23885/cx23888-ir.c +++ b/drivers/media/pci/cx23885/cx23888-ir.c @@ -1178,8 +1178,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev) return -ENOMEM; spin_lock_init(&state->rx_kfifo_lock); - if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) + if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, + GFP_KERNEL)) { + kfree(state); return -ENOMEM; + } state->dev = dev; sd = &state->sd; diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index d6816effb87866b80eae57d7c45004524d8cc031..d02b5fd940c122400531eac5e80877612ae2092e 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -424,14 +424,15 @@ static void debiirq(unsigned long cookie) case DATA_CI_GET: { u8 *data = av7110->debi_virt; + u8 data_0 = data[0]; - if ((data[0] < 2) && data[2] == 0xff) { + if (data_0 < 2 && data[2] == 0xff) { int flags = 0; if (data[5] > 0) flags |= CA_CI_MODULE_PRESENT; if (data[5] > 5) flags |= CA_CI_MODULE_READY; - av7110->ci_slot[data[0]].flags = flags; + av7110->ci_slot[data_0].flags = flags; } else ci_get_data(&av7110->ci_rbuffer, av7110->debi_virt, diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c index b3dc45b91101ddb0861c41de1c083b70aabe1eb7..9b545c7431685391ffa8f85cfab3c46c3bd24530 100644 --- a/drivers/media/pci/ttpci/budget-core.c +++ b/drivers/media/pci/ttpci/budget-core.c @@ -383,20 +383,25 @@ static int budget_register(struct budget *budget) ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; 
budget->mem_frontend.source = DMX_MEMORY_FE; ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); if (ret < 0) - return ret; + goto err_release_dmx; ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); return 0; + +err_release_dmx: + dvb_dmxdev_release(&budget->dmxdev); + dvb_dmx_release(&budget->demux); + return ret; } static void budget_unregister(struct budget *budget) diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c index 89a86c19579b8ab9d2cbee42a4cc25913dab9b96..50fc71d0cb9f3dfcdec003bc4502572eec67650b 100644 --- a/drivers/media/platform/davinci/vpss.c +++ b/drivers/media/platform/davinci/vpss.c @@ -514,19 +514,31 @@ static void vpss_exit(void) static int __init vpss_init(void) { + int ret; + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) return -EBUSY; oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); if (unlikely(!oper_cfg.vpss_regs_base2)) { - release_mem_region(VPSS_CLK_CTRL, 4); - return -ENOMEM; + ret = -ENOMEM; + goto err_ioremap; } writel(VPSS_CLK_CTRL_VENCCLKEN | - VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); + VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); + + ret = platform_driver_register(&vpss_driver); + if (ret) + goto err_pd_register; + + return 0; - return platform_driver_register(&vpss_driver); +err_pd_register: + iounmap(oper_cfg.vpss_regs_base2); +err_ioremap: + release_mem_region(VPSS_CLK_CTRL, 4); + return ret; } subsys_initcall(vpss_init); module_exit(vpss_exit); diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index b5993532831da6b3aada06390b8338f3b32f66d4..2d25a197dc657c2d0fa5485a558dba40dc35381f 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1259,6 +1259,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, PINCTRL_STATE_IDLE); + if (IS_ERR(pctl->state_idle)) + return PTR_ERR(pctl->state_idle); + return 0; } diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c index f05856013ec36eeb8a52cbe8b53a66d5996f6ce6..105cb6b23c4bbb09185dbc276cca5d0e8a918fbc 100644 --- a/drivers/media/platform/msm/cvp/msm_cvp_common.c +++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c @@ -1749,7 +1749,8 @@ int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst) return rc; error: - cvp_comm_release_persist_buffers(inst); + if (rc != -ENOMEM) + cvp_comm_release_persist_buffers(inst); return rc; } diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 591c6de498f8913b15b43771696d1c1bba089100..20857ae42a77fbd42c377886305cdc72b31b7aee 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2290,7 +2290,7 @@ static int preview_init_entities(struct isp_prev_device *prev) me->ops = &preview_media_ops; ret = media_entity_pads_init(me, PREV_PADS_NUM, pads); if (ret < 0) - return ret; + goto error_handler_free; preview_init_formats(sd, NULL); @@ -2323,6 +2323,8 @@ static int preview_init_entities(struct isp_prev_device *prev) omap3isp_video_cleanup(&prev->video_in); error_video_in: media_entity_cleanup(&prev->subdev.entity); +error_handler_free: + v4l2_ctrl_handler_free(&prev->ctrls); return ret; } diff --git 
a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 96d1b1b3fe8e86f5fff388d5fcc356e83a23a17b..681de42f12e9a81423c9ee26550c904a06bbfa0d 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -208,22 +208,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_info.data.format = ctx->out.fmt->hw_format; dst_info.data.swap = ctx->out.fmt->color_swap; - if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { - if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) { - switch (ctx->in.colorspace) { - case V4L2_COLORSPACE_REC709: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT709_R0; - break; - default: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT601_R0; - break; - } + /* + * CSC mode must only be set when the colorspace families differ between + * input and output. It must remain unset (zeroed) if both are the same. + */ + + if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) { + switch (ctx->in.colorspace) { + case V4L2_COLORSPACE_REC709: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; + break; + default: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0; + break; } } - if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { + if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) { switch (ctx->out.colorspace) { case V4L2_COLORSPACE_REC709: dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h index ca3c204abe420650f89bd2b828bfbf51440c525a..3e4b70eb9ced5857afcb544258137ee5c00433ea 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.h +++ b/drivers/media/platform/rockchip/rga/rga-hw.h @@ -103,6 +103,11 @@ #define RGA_COLOR_FMT_CP_8BPP 15 #define RGA_COLOR_FMT_MASK 15 +#define RGA_COLOR_FMT_IS_YUV(fmt) \ + (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP)) +#define RGA_COLOR_FMT_IS_RGB(fmt) \ + ((fmt) < RGA_COLOR_FMT_YUV422SP) + #define RGA_COLOR_NONE_SWAP 0 #define RGA_COLOR_RB_SWAP 1 #define RGA_COLOR_ALPHA_SWAP 2 diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index be3155275a6bac10b2f20a6054d8e0f8c5df9485..d945323fc437d3b7d73f42639bc960e1b4805994 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx) } static void cal_wr_dma_config(struct cal_ctx *ctx, - unsigned int width) + unsigned int width, unsigned int height) { u32 val; val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); + set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, CAL_WR_DMA_CTRL_DTAG_MASK); set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, @@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) csi2_lane_config(ctx); csi2_ctx_config(ctx); pix_proc_config(ctx); - cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); + cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, + ctx->v_fmt.fmt.pix.height); cal_wr_dma_addr(ctx, addr); csi2_ppi_enable(ctx); diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index a5634ca85a3165c3aa51ca9f829abf1390c9f700..a07caf981e15a54c4a774945a9c7a0f438b57ccb 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ 
b/drivers/media/platform/vsp1/vsp1_dl.c @@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type, if (!pool) return NULL; + pool->vsp1 = vsp1; + spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free); diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index cd476cab978204699c46073dd4ef76c0650896ff..4e70b67ccd181dd562fdf08e4e7c414b51f1cb62 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -87,13 +87,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, // space edge = ktime_add_us(edge, txbuf[i]); delta = ktime_us_delta(edge, ktime_get()); - if (delta > 10) { - spin_unlock_irqrestore(&gpio_ir->lock, flags); - usleep_range(delta, delta + 10); - spin_lock_irqsave(&gpio_ir->lock, flags); - } else if (delta > 0) { + if (delta > 0) udelay(delta); - } } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index c30affbd43a98a0a244faaf468d49e3d0f7bcdbf..cf3df733d9605268f87bbaa780fb1523b6d84cd8 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1245,6 +1245,10 @@ static ssize_t store_protocols(struct device *device, } mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } old_protocols = *current_protocols; new_protocols = old_protocols; @@ -1383,6 +1387,10 @@ static ssize_t store_filter(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } new_filter = *filter; if (fattr->mask) @@ -1497,6 +1505,10 @@ static ssize_t store_wakeup_protocols(struct device *device, int i; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } allowed = dev->allowed_wakeup_protocols; @@ -1556,25 +1568,25 @@ static void rc_dev_release(struct device *device) kfree(dev); } -#define ADD_HOTPLUG_VAR(fmt, val...) \ - do { \ - int err = add_uevent_var(env, fmt, val); \ - if (err) \ - return err; \ - } while (0) - static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); + int ret = 0; - if (dev->rc_map.name) - ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); - if (dev->driver_name) - ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name); - if (dev->device_name) - ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name); + mutex_lock(&dev->lock); - return 0; + if (!dev->registered) + ret = -ENODEV; + if (ret == 0 && dev->rc_map.name) + ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name); + if (ret == 0 && dev->driver_name) + ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name); + if (ret == 0 && dev->device_name) + ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name); + + mutex_unlock(&dev->lock); + + return ret; } /* @@ -1958,14 +1970,14 @@ void rc_unregister_device(struct rc_dev *dev) del_timer_sync(&dev->timer_keyup); del_timer_sync(&dev->timer_repeat); - rc_free_rx_device(dev); - mutex_lock(&dev->lock); if (dev->users && dev->close) dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); + rc_free_rx_device(dev); + /* * lirc device should be freed with dev->registered = false, so * that userspace polling will get notified. 
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index 19c6a0354ce000fa848b4ac154f0a731cd8b6b9c..b84a6f65486103a93696d819f094c5fd0fbe27fc 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf, struct go7007_usb *usb; const struct go7007_usb_board *board; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_host_endpoint *ep; unsigned num_i2c_devs; char *name; int video_pipe, i, v_urb_len; @@ -1148,7 +1149,8 @@ static int go7007_usb_probe(struct usb_interface *intf, if (usb->intr_urb->transfer_buffer == NULL) goto allocfail; - if (go->board_id == GO7007_BOARDID_SENSORAY_2250) + ep = usb->usbdev->ep_in[4]; + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, usb_rcvbulkpipe(usb->usbdev, 4), usb->intr_urb->transfer_buffer, 2*sizeof(u16), diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index ee9c656d121f120a2d056133f6137b3a4f6c1339..2308c0b4f5e7eeb48151475a75d013cddab48740 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -113,7 +113,8 @@ static int usbtv_probe(struct usb_interface *intf, usbtv_audio_fail: /* we must not free at this point */ - usb_get_dev(usbtv->udev); + v4l2_device_get(&usbtv->v4l2_dev); + /* this will undo the v4l2_device_get() */ usbtv_video_free(usbtv); usbtv_video_fail: diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index a4403a57ddc89631ba934058b2c95688a7e47aff..09acaa2cf74a2334c0ad7e81e287536601750c07 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -1433,6 +1433,15 @@ int arizona_dev_init(struct arizona *arizona) arizona_irq_exit(arizona); err_pm: pm_runtime_disable(arizona->dev); + + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } err_reset: arizona_enable_reset(arizona); regulator_disable(arizona->dcvdd); @@ -1455,6 +1464,15 @@ int arizona_dev_exit(struct arizona *arizona) regulator_disable(arizona->dcvdd); regulator_put(arizona->dcvdd); + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } + mfd_remove_devices(arizona->dev); arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 6ea0dd37b4535a1497313d05c60d4ee90598c4ed..fe614ba5fec9074a6a5d965f23f96a09b1af1ca6 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -290,7 +290,11 @@ static void dln2_rx(struct urb *urb) len = urb->actual_length - sizeof(struct dln2_header); if (handle == DLN2_HANDLE_EVENT) { + unsigned long flags; + + spin_lock_irqsave(&dln2->event_cb_lock, flags); dln2_run_event_callbacks(dln2, id, echo, data, len); + spin_unlock_irqrestore(&dln2->event_cb_lock, flags); } else { /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */ if (dln2_transfer_complete(dln2, urb, handle, echo)) diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 742d6c1973f4f372e565a2bc66371d210278aac8..adea7ff63132fcd59438d73474374dcae71a5b6b 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -176,6 +176,9 @@ static const struct pci_device_id 
intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* EBG */ + { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info }, diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 182973df1aed4b7c5f0ce6766f5fb94a2e675ab1..77c965c6a65f13ac2a8e078f32dbd3101b33254a 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0; + if (!cell->enable) { + dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); + return 0; + } + /* only call enable hook if the cell wasn't previously enabled */ if (atomic_inc_return(cell->usage_count) == 1) err = cell->enable(pdev); @@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0; + if (!cell->disable) { + dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); + return 0; + } + /* only disable if no other clients are using it */ if (atomic_dec_return(cell->usage_count) == 0) err = cell->disable(pdev); diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 629e2e156412487d99edf30ae825b9e3cf790072..0baa229d2b7d45f1df6e08c97b7a4367f422b696 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -628,7 +628,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, &afu->dev.kobj, "cr%i", cr->cr); if (rc) - goto err; + goto err1; rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); if (rc) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 84af4d7aafb34d0355719742877260ff58c30aff..e6ad8e5319a6bdcccba6cc4a6962b44a91373889 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -302,7 +302,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd) } } -static void mmc_part_add(struct mmc_card *card, unsigned int size, +static void mmc_part_add(struct mmc_card *card, u64 size, unsigned int part_cfg, char *name, int idx, bool ro, int area_type) { @@ -318,7 +318,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) { int idx; u8 hc_erase_grp_sz, hc_wp_grp_sz; - unsigned int part_size; + u64 part_size; /* * General purpose partition feature support -- @@ -348,8 +348,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] << 8) + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; - part_size *= (size_t)(hc_erase_grp_sz * - hc_wp_grp_sz); + part_size *= (hc_erase_grp_sz * hc_wp_grp_sz); mmc_part_add(card, part_size << 19, EXT_CSD_PART_CONFIG_ACC_GP0 + idx, "gp%d", idx, false, @@ -414,7 +413,7 @@ void mmc_check_bkops_support(struct mmc_card *card, u8 *ext_csd) static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) { int err = 0, idx; - unsigned int part_size; + u64 part_size; struct device_node *np; bool broken_hpi = false; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 50ed0d8d737537769323058e19569774f4418e09..33ed018d3a903a04b54dc127fce5ed1006f1842f 100644 --- a/drivers/mmc/core/queue.c +++ 
b/drivers/mmc/core/queue.c @@ -196,7 +196,7 @@ static void mmc_queue_setup_discard(struct request_queue *q, q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. discard */ if (card->pref_erase > max_discard) - q->limits.discard_granularity = 0; + q->limits.discard_granularity = SECTOR_SIZE; if (mmc_can_secure_erase_trim(card)) blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); } diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 382172fb3da8f55886a27874dbcffd585e4aaf95..74eea8247490d8876352835ba191f6b0667fc1fb 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -222,15 +222,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) DTRAN_CTRL_DM_START); } -static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host) { - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; enum dma_data_direction dir; - spin_lock_irq(&host->lock); - if (!host->data) - goto out; + return false; if (host->data->flags & MMC_DATA_READ) dir = DMA_FROM_DEVICE; @@ -243,6 +240,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) if (dir == DMA_FROM_DEVICE) clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + return true; +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + spin_lock_irq(&host->lock); + if (!renesas_sdhi_internal_dmac_complete(host)) + goto out; + tmio_mmc_do_data_irq(host); out: spin_unlock_irq(&host->lock); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 313dba439bb407a3d31abbc7603cfeb49d987377..86d6bcd7f3e51af51e65ef441e8bc4a485c2ab5b 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1412,7 +1412,7 @@ void sdhci_msm_exit_dbg_mode(struct sdhci_host *host) int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) { unsigned long flags; - int tuning_seq_cnt = 3; + int tuning_seq_cnt = 10; u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt; const u32 *tuning_block_pattern = tuning_block_64; int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */ @@ -1615,6 +1615,22 @@ int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_msm_set_mmc_drv_type(host, opcode, 0); if (tuned_phase_cnt) { + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { + /* + * All phases valid is _almost_ as bad as no phases + * valid. Probably all phases are not really reliable + * but we didn't detect where the unreliable place is. + * That means we'll essentially be guessing and hoping + * we get a good phase. Better to try a few times. 
+ */ + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", + mmc_hostname(mmc)); + if (--tuning_seq_cnt) { + tuned_phase_cnt = 0; + goto retry; + } + } + rc = msm_find_most_appropriate_phase(host, tuned_phases, tuned_phase_cnt); if (rc < 0) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 35168b47afe6cf645e84f00a07b747ee575f91db..a411300f9d6dc9790c2c7a763e434fdea8295510 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -739,7 +739,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && - dmi_match(DMI_BIOS_VENDOR, "LENOVO"); + (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || + dmi_match(DMI_SYS_VENDOR, "IRBIS")); } static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 1dbc9554a0786851647cacacb08bd642337150ec..3ab75d3e2ce326534adb094a326223e2dbf093ae 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -727,7 +727,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) kfree(mtd->eraseregions); kfree(mtd); kfree(cfi->cmdset_priv); - kfree(cfi->cfiq); return NULL; } diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 3ea44cff9b759e3c3fdf6a1680ae70f27af3da57..c29205ee82e20a2893658a4c8cdf610a5f0fdc5b 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -231,12 +231,29 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id; + char *p, *mtd_id, *semicol; + + /* + * Replace the first ';' by a NULL char so strrchr can work + * properly. + */ + semicol = strchr(s, ';'); + if (semicol) + *semicol = '\0'; mtd_id = s; - /* fetch */ - p = strchr(s, ':'); + /* + * fetch . We use strrchr to ignore all ':' that could + * be present in the MTD name, only the last one is interpreted + * as an / separator. + */ + p = strrchr(s, ':'); + + /* Restore the ';' now. */ + if (semicol) + *semicol = ';'; + if (!p) { pr_err("no mtd-id\n"); return -EINVAL; diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 02389528f622d819431fe24b0277031b214dd702..5afc653c09e20ed573c48e8f1ebb14309159599d 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -368,9 +368,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint32_t retlen; int ret = 0; - if (!(file->f_mode & FMODE_WRITE)) - return -EPERM; - if (length > 4096) return -EINVAL; @@ -655,6 +652,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) pr_debug("MTD_ioctl\n"); + /* + * Check the file mode to require "dangerous" commands to have write + * permissions. 
+ */ + switch (cmd) { + /* "safe" commands */ + case MEMGETREGIONCOUNT: + case MEMGETREGIONINFO: + case MEMGETINFO: + case MEMREADOOB: + case MEMREADOOB64: + case MEMLOCK: + case MEMUNLOCK: + case MEMISLOCKED: + case MEMGETOOBSEL: + case MEMGETBADBLOCK: + case MEMSETBADBLOCK: + case OTPSELECT: + case OTPGETREGIONCOUNT: + case OTPGETREGIONINFO: + case OTPLOCK: + case ECCGETLAYOUT: + case ECCGETSTATS: + case MTDFILEMODE: + case BLKPG: + case BLKRRPART: + break; + + /* "dangerous" commands */ + case MEMERASE: + case MEMERASE64: + case MEMWRITEOOB: + case MEMWRITEOOB64: + case MEMWRITE: + if (!(file->f_mode & FMODE_WRITE)) + return -EPERM; + break; + + default: + return -ENOTTY; + } + switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) @@ -702,9 +741,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct erase_info *erase; - if(!(file->f_mode & FMODE_WRITE)) - return -EPERM; - erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); if (!erase) ret = -ENOMEM; @@ -997,9 +1033,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) ret = 0; break; } - - default: - ret = -ENOTTY; } return ret; @@ -1043,6 +1076,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, struct mtd_oob_buf32 buf; struct mtd_oob_buf32 __user *buf_user = argp; + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EPERM; + break; + } + if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index a3f32f939cc1732a1a2938696019dfe9c7bf88a9..6736777a4156794593454fffda69277047ee6bc6 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (pm_runtime_get_sync(&pdev->dev) < 0) { ret = -EINVAL; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "can't enable clock\n"); return ret; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 1f9d64aeb863e2b4e1cfdde76855bf5458f5988a..9fcbcf4b217b66d9e2697bb26606cf012fcd575f 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -466,11 +466,13 @@ struct qcom_nand_host { * among different NAND controllers. 
* @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM + * @is_qpic - whether NAND CTRL is part of qpic IP * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; + bool is_qpic; u32 dev_cmd_reg_start; }; @@ -2766,7 +2768,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) u32 nand_ctrl; /* kill onenand */ - nandc_write(nandc, SFLASHC_BURST_CFG, 0); + if (!nandc->props->is_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), NAND_DEV_CMD_VLD_VAL); @@ -3022,12 +3025,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { static const struct qcom_nandc_props ipq4019_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x0, }; static const struct qcom_nandc_props ipq8074_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x7000, }; diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 5b8502fd50cbc681be0207327aa5dde0775ee771..88075e420f907c01712f5d5a016837cc858ca016 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1947,7 +1947,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(nand); + nand_cleanup(nand); return ret; } diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 98f7d6be8d1fcba102b9c14555536c9e63aef074..e08f6b4637dda6e33713e6bee60bff6d6a78ed45 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -48,6 +48,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) return victim; } +static inline void return_unused_peb(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->free); + ubi->free_count++; +} + /** * return_unused_pool_pebs - returns unused PEB to the free tree. * @ubi: UBI device description object @@ -61,23 +68,10 @@ static void return_unused_pool_pebs(struct ubi_device *ubi, for (i = pool->used; i < pool->size; i++) { e = ubi->lookuptbl[pool->pebs[i]]; - wl_tree_add(e, &ubi->free); - ubi->free_count++; + return_unused_peb(ubi, e); } } -static int anchor_pebs_available(struct rb_root *root) -{ - struct rb_node *p; - struct ubi_wl_entry *e; - - ubi_rb_for_each_entry(p, e, root, u.rb) - if (e->pnum < UBI_FM_MAX_START) - return 1; - - return 0; -} - /** * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. * @ubi: UBI device description object @@ -286,8 +280,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; + struct ubi_wl_entry *anchor; spin_lock(&ubi->wl_lock); + + /* Do we already have an anchor? 
*/ + if (ubi->fm_anchor) { + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* See if we can find an anchor PEB on the list of free PEBs */ + anchor = ubi_wl_get_fm_peb(ubi, 1); + if (anchor) { + ubi->fm_anchor = anchor; + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* No luck, trigger wear leveling to produce a new anchor PEB */ + ubi->fm_do_produce_anchor = 1; if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; @@ -303,7 +315,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) return -ENOMEM; } - wrk->anchor = 1; wrk->func = &wear_leveling_worker; __schedule_ubi_work(ubi, wrk); return 0; @@ -365,6 +376,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi) return_unused_pool_pebs(ubi, &ubi->fm_pool); return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); + if (ubi->fm_anchor) { + return_unused_peb(ubi, ubi->fm_anchor); + ubi->fm_anchor = NULL; + } + if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 8e292992f84c7ad365e09726ac605d9637115b0c..b88ef875236cc2d51d51870b7070802ad594ae63 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1552,14 +1552,6 @@ int ubi_update_fastmap(struct ubi_device *ubi) return 0; } - ret = ubi_ensure_anchor_pebs(ubi); - if (ret) { - up_write(&ubi->fm_eba_sem); - up_write(&ubi->work_sem); - up_write(&ubi->fm_protect); - return ret; - } - new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); if (!new_fm) { up_write(&ubi->fm_eba_sem); @@ -1630,7 +1622,8 @@ int ubi_update_fastmap(struct ubi_device *ubi) } spin_lock(&ubi->wl_lock); - tmp_e = ubi_wl_get_fm_peb(ubi, 1); + tmp_e = ubi->fm_anchor; + ubi->fm_anchor = NULL; spin_unlock(&ubi->wl_lock); if (old_fm) { @@ -1682,6 +1675,9 @@ int ubi_update_fastmap(struct ubi_device *ubi) up_write(&ubi->work_sem); up_write(&ubi->fm_protect); kfree(old_fm); + + ubi_ensure_anchor_pebs(ubi); + return ret; err: diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index d47b9e436e6730af38e414821c0348f1e5ac7550..d248ec371cc172ace7bc2e06a9c494ccf58fea03 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -504,6 +504,8 @@ struct ubi_debug_info { * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap + * @fm_anchor: The next anchor PEB to use for fastmap + * @fm_do_produce_anchor: If true produce an anchor PEB in wl * * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks @@ -612,6 +614,8 @@ struct ubi_device { struct work_struct fm_work; int fm_work_scheduled; int fast_attach; + struct ubi_wl_entry *fm_anchor; + int fm_do_produce_anchor; /* Wear-leveling sub-system's stuff */ struct rb_root used; @@ -802,7 +806,6 @@ struct ubi_attach_info { * @vol_id: the volume ID on which this erasure is being performed * @lnum: the logical eraseblock number * @torture: if the physical eraseblock has to be tortured - * @anchor: produce a anchor PEB to by used by fastmap * * The @func pointer points to the worker function. 
If the @shutdown argument is * not zero, the worker has to free the resources and exit immediately as the @@ -818,7 +821,6 @@ struct ubi_work { int vol_id; int lnum; int torture; - int anchor; }; #include "debug.h" diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6f2ac865ff05e78391a59725a86a2643a0019a23..80d64d7e7a8be44174250a7c6dd58a9630df5e87 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -331,13 +331,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, } } - /* If no fastmap has been written and this WL entry can be used - * as anchor PEB, hold it back and return the second best WL entry - * such that fastmap can use the anchor PEB later. */ - if (prev_e && !ubi->fm_disabled && - !ubi->fm && e->pnum < UBI_FM_MAX_START) - return prev_e; - return e; } @@ -648,9 +641,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; int erase = 0, keep = 0, vol_id = -1, lnum = -1; -#ifdef CONFIG_MTD_UBI_FASTMAP - int anchor = wrk->anchor; -#endif struct ubi_wl_entry *e1, *e2; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; @@ -690,11 +680,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, } #ifdef CONFIG_MTD_UBI_FASTMAP - /* Check whether we need to produce an anchor PEB */ - if (!anchor) - anchor = !anchor_pebs_available(&ubi->free); - - if (anchor) { + if (ubi->fm_do_produce_anchor) { e1 = find_anchor_wl_entry(&ubi->used); if (!e1) goto out_cancel; @@ -705,6 +691,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); + ubi->fm_do_produce_anchor = 0; } else if (!ubi->scrub.rb_node) { #else if (!ubi->scrub.rb_node) { @@ -1037,7 +1024,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) goto out_cancel; } - wrk->anchor = 0; wrk->func = &wear_leveling_worker; if (nested) __schedule_ubi_work(ubi, wrk); @@ -1079,8 +1065,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { spin_lock(&ubi->wl_lock); - wl_tree_add(e, &ubi->free); - ubi->free_count++; + + if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { + ubi->fm_anchor = e; + ubi->fm_do_produce_anchor = 0; + } else { + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } + spin_unlock(&ubi->wl_lock); /* @@ -1724,6 +1717,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (err) goto out_free; +#ifdef CONFIG_MTD_UBI_FASTMAP + ubi_ensure_anchor_pebs(ubi); +#endif return 0; out_free: diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h index a9e2d669acd81d4c7e2bb37c683d489ff6b4e203..c93a53293786358148ad5cc9b8aac10a36cb8f21 100644 --- a/drivers/mtd/ubi/wl.h +++ b/drivers/mtd/ubi/wl.h @@ -2,7 +2,6 @@ #ifndef UBI_WL_H #define UBI_WL_H #ifdef CONFIG_MTD_UBI_FASTMAP -static int anchor_pebs_available(struct rb_root *root); static void update_fastmap_work_fn(struct work_struct *wrk); static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root); static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 11429df743067031b10c7270358e9d58606445e1..a59333b87eafde4573af95e48adee143979df9f5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1123,6 +1123,7 @@ 
static void bond_setup_by_slave(struct net_device *bond_dev, bond_dev->type = slave_dev->type; bond_dev->hard_header_len = slave_dev->hard_header_len; + bond_dev->needed_headroom = slave_dev->needed_headroom; bond_dev->addr_len = slave_dev->addr_len; memcpy(bond_dev->broadcast, slave_dev->broadcast, @@ -2029,7 +2030,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); @@ -2772,6 +2774,9 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_time_in_interval(bond, last_rx, 1)) { bond_propose_link_state(slave, BOND_LINK_UP); commit++; + } else if (slave->link == BOND_LINK_BACK) { + bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; } continue; } @@ -2882,6 +2887,19 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; + case BOND_LINK_FAIL: + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_NOW); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); + + /* A slave has just been enslaved and has become + * the current active slave. + */ + if (rtnl_dereference(bond->curr_active_slave)) + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + continue; + default: netdev_err(bond->dev, "impossible: new_link %d on slave %s\n", slave->link_new_state, slave->dev->name); @@ -2931,8 +2949,6 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); - bond_for_each_slave_rcu(bond, slave, iter) { if (!found && !before && bond_slave_is_up(slave)) before = slave; @@ -4200,13 +4216,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4218,8 +4244,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 11f3993ab7f307ed266bb4febb43095871b3c76f..294be86420b6d3afd82e78be9b1679746fce0cc5 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1335,6 +1335,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return ret; switch (ret) { + case -ETIMEDOUT: + return ret; case -ENOSPC: dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", addr, vid); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c 
b/drivers/net/dsa/mv88e6xxx/chip.c index 43b00e8bcdcd7dab1049671580b8d76f8167f060..6fa8aa69b4180df02cdeab543e918574ce46a374 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2930,7 +2930,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index c281c488a306fb6aed2839b0b0442a2877ad7328..430988f797225474fc718bae4b55e5970e9bd0e6 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, int ret; int i; + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, member, untag); + /* Update the 4K table */ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); if (ret) return ret; - vlan4k.member = member; - vlan4k.untag = untag; + vlan4k.member |= member; + vlan4k.untag |= untag; vlan4k.fid = fid; ret = smi->ops->set_vlan_4k(smi, &vlan4k); if (ret) return ret; + dev_dbg(smi->dev, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + /* Try to find an existing MC entry for this VID */ for (i = 0; i < smi->num_vlan_mc; i++) { struct rtl8366_vlan_mc vlanmc; @@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, if (vid == vlanmc.vid) { /* update the MC entry */ - vlanmc.member = member; - vlanmc.untag = untag; + vlanmc.member |= member; + vlanmc.untag |= untag; vlanmc.fid = fid; ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + + dev_dbg(smi->dev, + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", + vid, vlanmc.member, vlanmc.untag); + break; } } @@ -384,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) dev_err(smi->dev, "port is DSA or CPU port\n"); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { int pvid_val = 0; dev_info(smi->dev, "add VLAN %04x\n", vid); @@ -407,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (ret < 0) return; } - } - ret = rtl8366_set_vlan(smi, port, member, untag, 0); - if (ret) - dev_err(smi->dev, - "failed to set up VLAN %04x", - vid); + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); + } } EXPORT_SYMBOL_GPL(rtl8366_vlan_add); @@ -439,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, return ret; if (vid == vlanmc.vid) { - /* clear VLAN member configurations */ - vlanmc.vid = 0; - vlanmc.priority = 0; - vlanmc.member = 0; - vlanmc.untag = 0; - vlanmc.fid = 0; - + /* Remove this port from the VLAN */ + vlanmc.member &= ~BIT(port); + vlanmc.untag &= ~BIT(port); + /* + * If no ports are members of this VLAN + * anymore then clear the whole member + * config so it can be reused. 
+ */ + if (!vlanmc.member && vlanmc.untag) { + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.fid = 0; + } ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); if (ret) { dev_err(smi->dev, diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 8736718b17359f226ab569f51b37a6536049212e..3c3222e2dcfcf28163586d760b9b05b462c247a7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2647,16 +2647,14 @@ static void ena_fw_reset_device(struct work_struct *work) { struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); - struct pci_dev *pdev = adapter->pdev; - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - dev_err(&pdev->dev, - "device reset schedule while reset bit is off\n"); - return; - } rtnl_lock(); - ena_destroy_device(adapter, false); - ena_restore_device(adapter); + + if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + ena_destroy_device(adapter, false); + ena_restore_device(adapter); + } + rtnl_unlock(); } @@ -2738,7 +2736,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, } u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx = missed_tx; + tx_ring->tx_stats.missed_tx += missed_tx; u64_stats_update_end(&tx_ring->syncp); return rc; @@ -3392,8 +3390,11 @@ static void ena_remove(struct pci_dev *pdev) netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ - del_timer_sync(&adapter->timer_service); + /* Make sure timer and reset routine won't be called after + * freeing device resources. + */ + del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); unregister_netdev(netdev); @@ -3543,6 +3544,9 @@ static void ena_keep_alive_wd(void *adapter_data, rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; u64_stats_update_begin(&adapter->syncp); + /* These stats are accumulated by the device, so the counters indicate + * all drops since last reset. 
+ */ adapter->dev_stats.rx_drops = rx_drops; u64_stats_update_end(&adapter->syncp); } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index dab5891b9714564153b6f90ed2f1dde7ec7b56ed..d48595470ec8221406d75ed92f471ec3a20aa9e3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -774,7 +774,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { - err = EBADRQC; + err = -EBADRQC; goto err_exit; } for (self->aq_nic_cfg->mc_list_count = 0U; diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 0187dbf3b87dfcdaa2dc1bf39f37a4d4bf47c7a6..54cdafdd067db3d19ac81dd7132e09a13fe86445 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(priv->dev, "Failed to request gpio: %d\n", error); + mdiobus_free(bus); return error; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6b761f6b8fd562e023d3d0229590d477968bdec0..9a614c5cdfa221f9dc5d700b16d0082c24d7872a 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2441,8 +2441,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->tx_rings = devm_kcalloc(&pdev->dev, txq, sizeof(struct bcm_sysport_tx_ring), GFP_KERNEL); - if (!priv->tx_rings) - return -ENOMEM; + if (!priv->tx_rings) { + ret = -ENOMEM; + goto err_free_netdev; + } priv->is_lite = params->is_lite; priv->num_rx_desc_words = params->num_rx_desc_words; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ab4d1dacb5854ece9294b3c68335a35429ffe4de..c3f04fb319556b6d1ee3a02b6c868a9351ebac40 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6836,16 +6836,17 @@ static ssize_t bnxt_show_temp(struct device *dev, struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; struct bnxt *bp = dev_get_drvdata(dev); - u32 temp = 0; + u32 len = 0; + int rc; resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) - temp = resp->temp * 1000; /* display millidegree */ + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - - return sprintf(buf, "%u\n", temp); + return rc ?: len; } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); @@ -6865,7 +6866,16 @@ static void bnxt_hwmon_close(struct bnxt *bp) static void bnxt_hwmon_open(struct bnxt *bp) { + struct hwrm_temp_monitor_query_input req = {0}; struct pci_dev *pdev = bp->pdev; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == -EACCES || rc == -EOPNOTSUPP) { + bnxt_hwmon_close(bp); + return; + } bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, DRV_MODULE_NAME, bp, @@ -7024,15 +7034,15 @@ static int __bnxt_open_nic(struct bnxt 
*bp, bool irq_re_init, bool link_re_init) } } - bnxt_enable_napi(bp); - bnxt_debug_dev_init(bp); - rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); - goto open_err; + goto open_err_irq; } + bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); + if (link_re_init) { mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); @@ -7063,10 +7073,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_vf_reps_open(bp); return 0; -open_err: - bnxt_debug_dev_exit(bp); - bnxt_disable_napi(bp); - open_err_irq: bnxt_del_napi(bp); @@ -9128,6 +9134,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (long)pci_resource_start(pdev, 0), dev->dev_addr); pcie_print_link_status(pdev); + pci_save_state(pdev); return 0; init_err_cleanup_tc: @@ -9289,6 +9296,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); } else { pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); err = bnxt_hwrm_func_reset(bp); if (!err && netif_running(netdev)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 63730e449e0886240cbbaaf036a60941469f6af0..1ea81c23039f5277d26187eb86d28aeccd741887 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -471,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev, int max_tx_sch_inputs; /* Get the most up-to-date max_tx_sch_inputs. */ - if (BNXT_NEW_RM(bp)) + if (netif_running(dev) && BNXT_NEW_RM(bp)) bnxt_hwrm_func_resc_qcaps(bp, false); max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; @@ -1369,9 +1369,12 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (!BNXT_SINGLE_PF(bp)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); if (epause->autoneg) { - if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) - return -EINVAL; + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + rc = -EINVAL; + goto pause_exit; + } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; if (bp->hwrm_spec_code >= 0x10201) @@ -1392,11 +1395,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) { - mutex_lock(&bp->link_lock); + if (netif_running(dev)) rc = bnxt_hwrm_set_pause(bp); - mutex_unlock(&bp->link_lock); - } + +pause_exit: + mutex_unlock(&bp->link_lock); return rc; } @@ -1877,6 +1880,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) if (rc != 0) return rc; + if (!dir_entries || !entry_length) + return -EIO; + /* Insert 2 bytes of directory info (count and size of entries) */ if (len < 2) return -EINVAL; @@ -2110,8 +2116,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) struct bnxt *bp = netdev_priv(dev); struct ethtool_eee *eee = &bp->eee; struct bnxt_link_info *link_info = &bp->link_info; - u32 advertising = - _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + u32 advertising; int rc = 0; if (!BNXT_SINGLE_PF(bp)) @@ -2120,19 +2125,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); + advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); if (!edata->eee_enabled) goto eee_ok; if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { netdev_warn(dev, "EEE requires autoneg\n"); - 
return -EINVAL; + rc = -EINVAL; + goto eee_exit; } if (edata->tx_lpi_enabled) { if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || edata->tx_lpi_timer < bp->lpi_tmr_lo)) { netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", bp->lpi_tmr_lo, bp->lpi_tmr_hi); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } else if (!bp->lpi_tmr_hi) { edata->tx_lpi_timer = eee->tx_lpi_timer; } @@ -2142,7 +2151,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) } else if (edata->advertised & ~advertising) { netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", edata->advertised, advertising); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } eee->advertised = edata->advertised; @@ -2154,6 +2164,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) if (netif_running(dev)) rc = bnxt_hwrm_set_link_setting(bp, false, true); +eee_exit: + mutex_unlock(&bp->link_lock); return rc; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index be845df0503999445c4469a7386d86e39feb5f8d..6fcf9646d141b3c34366b46d5b8d6382abb408af 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7219,8 +7219,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) static inline void tg3_reset_task_cancel(struct tg3 *tp) { - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, RESET_TASK_PENDING); + if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + cancel_work_sync(&tp->reset_task); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } @@ -11213,18 +11213,27 @@ static void tg3_reset_task(struct work_struct *work) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, true); - if (err) + if (err) { + tg3_full_unlock(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + /* Clear this flag so that tg3_reset_task_cancel() will not + * call cancel_work_sync() and wait forever. + */ + tg3_flag_clear(tp, RESET_TASK_PENDING); + dev_close(tp->dev); goto out; + } tg3_netif_start(tp); -out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); +out: rtnl_unlock(); } diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 9f4f3c1d504341d6387ee8be598a0a02c3c1fd68..55fe80ca10d39eef074efaeb5bb6ab2a0f60f73a 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & CN23XX_PCIE_SRIOV_FDL_MASK); } else { - ret = EINVAL; + ret = -EINVAL; /* Under some virtual environments, extended PCI regs are * inaccessible, in which case the above read will have failed. diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index dca02b35c231a85a81ec98f7f0ce574fa206e2cd..99eea9e6a8ea6dab8dc7c3c3708d3f114e4849e6 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2015,11 +2015,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) /* Save message data locally to prevent them from * being overwritten by next ndo_set_rx_mode call(). 
*/ - spin_lock(&nic->rx_mode_wq_lock); + spin_lock_bh(&nic->rx_mode_wq_lock); mode = vf_work->mode; mc = vf_work->mc; vf_work->mc = NULL; - spin_unlock(&nic->rx_mode_wq_lock); + spin_unlock_bh(&nic->rx_mode_wq_lock); __nicvf_set_rx_mode_task(mode, mc, nic); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 97d97de9accc5e1c03b163874b1ab22f322c0bf5..bb3ee55cb72cbe26953411968f020d45c5b244f5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1591,13 +1591,16 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id, static int configure_filter_tcb(struct adapter *adap, unsigned int tid, struct filter_entry *f) { - if (f->fs.hitcnts) + if (f->fs.hitcnts) { set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, - TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) | + TCB_TIMESTAMP_V(TCB_TIMESTAMP_M), + TCB_TIMESTAMP_V(0ULL), + 1); + set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W, TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M), - TCB_TIMESTAMP_V(0ULL) | TCB_RTT_TS_RECENT_AGE_V(0ULL), 1); + } if (f->fs.newdmac) set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 3d4a765e9e61dd97f7f649137012f5bd6ca10c65..7801f2aeeb30ed011fe4ea46d0fcdaaef1d17080 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2367,6 +2367,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 01a212097836019b8b8002e6ce53fb5f7f033da5..5242687060b449ff9ca3661ee72f4e9492269282 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2392,7 +2392,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); - netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM); + netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM); if (!netdev) { dev_err(dev, "Can't allocate ethernet device #%d\n", id); return -ENOMEM; @@ -2451,7 +2451,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); - return PTR_ERR(port->reset); + ret = PTR_ERR(port->reset); + goto unprepare; } reset_control_reset(port->reset); usleep_range(100, 500); @@ -2507,23 +2508,24 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port_names[port->id], port); if (ret) - return ret; + goto unprepare; ret = register_netdev(netdev); - if (!ret) { + if (ret) + goto unprepare; + + netdev_info(netdev, + "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", + port->irq, &dmares->start, + &gmacres->start); + ret = gmac_setup_phy(netdev); + if (ret) netdev_info(netdev, - "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", - port->irq, &dmares->start, - &gmacres->start); - ret = gmac_setup_phy(netdev); - if (ret) - netdev_info(netdev, - "PHY init failed, deferring to ifup time\n"); - return 0; - } + "PHY init failed, deferring to ifup time\n"); + return 0; - port->netdev = NULL; - free_netdev(netdev); +unprepare: + clk_disable_unprepare(port->pclk); return ret; } @@ -2532,7 +2534,6 @@ static int 
gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - free_netdev(port->netdev); return 0; } diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496cc3e2b981b02adb3b32275578c3ce..b312cd9bce169d0e75c3ac0c007910fba37720a8 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi #define DSL CONFIG_DE2104X_DSL #endif -#define DE_RX_RING_SIZE 64 +#define DE_RX_RING_SIZE 128 #define DE_TX_RING_SIZE 64 #define DE_RING_BYTES \ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 48c58f93b124bb10fbbbfc98e05e65ad0d89bf75..3b6da228140e3144da33ff73fcbcbb3f5d6326b2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3659,11 +3659,11 @@ fec_probe(struct platform_device *pdev) failed_irq: failed_init: fec_ptp_stop(pdev); - if (fep->reg_phy) - regulator_disable(fep->reg_phy); failed_reset: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); failed_clk_ahb: diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 21d8023535ae4b230aa71595dedefd736ff3f7ef..eba7e54ecf85bca82a69761fe492d9130a211662 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1396,8 +1396,7 @@ static void enable_time_stamp(struct fman *fman) { struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; u16 fm_clk_freq = fman->state->fm_clk_freq; - u32 tmp, intgr, ts_freq; - u64 frac; + u32 tmp, intgr, ts_freq, frac; ts_freq = (u32)(1 << fman->state->count1_micro_bit); /* configure timestamp so that bit 8 will count 1 microsecond diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 1ca543ac8f2cd606e5b6e5e727a4f21b96e8550e..d2de9ea80c43f46375da20048a31d68a369e34d1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->multicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; @@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->unicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index dd6d0526f6c1f562ff858b4c72a9f1ec5691cc67..19f327efdaff3e62b5ae484fa372663f3326a965 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) struct eth_hash_t *hash; /* Allocate address hash table */ - hash = 
kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + hash = kmalloc(sizeof(*hash), GFP_KERNEL); if (!hash) return NULL; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index e1901874c19f09417e47e78d5c1e366b51b21992..9088b4f4b4b8722862165caa8d6571b3ce607456 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, tmp = ioread32be(®s->command_config); tmp &= ~CMD_CFG_PFC_MODE; - priority = 0; iowrite32be(tmp, ®s->command_config); @@ -986,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ee82ee1384eb3160651ce4764f382c3873edaa91..47f6fee1f396436ecb8524675eda721379ee8e9b 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1756,6 +1756,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct fman_port *port; struct fman *fman; struct device_node *fm_node, *port_node; + struct platform_device *fm_pdev; struct resource res; struct resource *dev_res; u32 val; @@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev) goto return_err; } - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + fm_pdev = of_find_device_by_node(fm_node); of_node_put(fm_node); + if (!fm_pdev) { + err = -EINVAL; + goto return_err; + } + + fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; goto return_err; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index f75b9c11b2d293783cab997be83404b735b62ebe..ac5a281e0ec3b5ddf09669d1fa150ddce7ed0cea 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index cf2d1e846a692d0d77829b4cc3dc2a9f253aa3f5..8243501c375743e063e56078f0cb451e1d125fc1 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -844,8 +844,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) continue; err = gfar_parse_group(child, priv, model); - if (err) + if (err) { + of_node_put(child); goto err_grp_init; + } } } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 024b08fafd3b281823f53aeb14bda428fbcbed10..4de65a9de0a6330d55a910d22c018131c556bc74 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2297,8 +2297,10 @@ static int hns_nic_dev_probe(struct 
platform_device *pdev) priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + } /* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@ -2314,7 +2316,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; } ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5e9e45befc875a776f5083ea6133c398278c7688..d8115a9333e05a4f51b1afe4418d20d6766d7d4a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2926,7 +2926,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 1); return rc; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 47b867c64b147a5c7efd7d56a08aa22c319fd446..195108858f38f46f43ccb75e4e1735674010cc7d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - e1000_down(adapter); - e1000_up(adapter); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); } @@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT; - while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) usleep_range(10000, 20000); - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + e1000_down(adapter); e1000_power_down_phy(adapter); e1000_free_irq(adapter); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 80e3eec6134ee82f0ab69725e5eb08695685b11b..a5e5e7e14e6c59c27512f374326e4246928bb2e4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1206,7 +1206,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index eb0ae6ab01e265403eef21c2eed6d79c4ea99253..e75b4c4872c0947e0b0df93eec1d13d51171b24b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ 
-1970,6 +1970,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, return status; } +/** + * i40e_is_aq_api_ver_ge + * @aq: pointer to AdminQ info containing HW API version to compare + * @maj: API major value + * @min: API minor value + * + * Assert whether current HW API version is greater/equal than provided. + **/ +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, + u16 min) +{ + return (aq->api_maj_ver > maj || + (aq->api_maj_ver == maj && aq->api_min_ver >= min)); +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct @@ -2095,18 +2110,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (rx_only_promisc && - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1))) - flags |= I40E_AQC_SET_VSI_PROMISC_TX; + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2203,11 +2216,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) + if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a74b01bf581e99b22d997cd2ea8b716afe35d005..3200c75b9ed2ab6653d5b4d1d1ea8f68fbdb6294 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -14152,6 +14152,9 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + usleep_range(1000, 2000); + /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6a677fd540d649cc8d0f950224a37fe5696d5b64..bc4eda52372af822d1f5889d9a50a7e37810cc11 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) * check for the valid queue id **/ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, - u8 qid) + u16 qid) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); @@ -196,7 +196,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, * * check for the 
valid vector id **/ -static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) +static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) { struct i40e_pf *pf = vf->pf; @@ -441,14 +441,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; u32 msix_vf, size; + int ret = 0; + + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + + if (qvlist_info->num_vectors > msix_vf) { + dev_warn(&pf->pdev->dev, + "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n", + qvlist_info->num_vectors, + msix_vf); + ret = -EINVAL; + goto err_out; + } size = sizeof(struct virtchnl_iwarp_qvlist_info) + (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); + kfree(vf->qvlist_info); vf->qvlist_info = kzalloc(size, GFP_KERNEL); - if (!vf->qvlist_info) - return -ENOMEM; - + if (!vf->qvlist_info) { + ret = -ENOMEM; + goto err_out; + } vf->qvlist_info->num_vectors = qvlist_info->num_vectors; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -459,8 +473,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, v_idx = qv_info->v_idx; /* Validate vector id belongs to this vf */ - if (!i40e_vc_isvalid_vector_id(vf, v_idx)) - goto err; + if (!i40e_vc_isvalid_vector_id(vf, v_idx)) { + ret = -EINVAL; + goto err_free; + } vf->qvlist_info->qv_info[i] = *qv_info; @@ -502,10 +518,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, } return 0; -err: +err_free: kfree(vf->qvlist_info); vf->qvlist_info = NULL; - return -EINVAL; +err_out: + return ret; } /** @@ -3335,7 +3352,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; - goto err; + goto err_out; } if (!vf->adq_enabled) { @@ -3343,15 +3360,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) "VF %d: ADq is not enabled, can't apply cloud filter\n", vf->vf_id); aq_ret = I40E_ERR_PARAM; - goto err; + goto err_out; } if (i40e_validate_cloud_filter(vf, vcf)) { dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; - goto err; + aq_ret = I40E_ERR_PARAM; + goto err_out; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); @@ -3412,13 +3429,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) "VF %d: Failed to add cloud filter, err %s aq_err %s\n", vf->vf_id, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - goto err; + goto err_free; } INIT_HLIST_NODE(&cfilter->cloud_node); hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); + /* release the pointer passing it to the collection */ + cfilter = NULL; vf->num_cloud_filters++; -err: +err_free: + kfree(cfilter); +err_out: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, aq_ret); } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 36db874f3c9288c1d92b3e1fd0c1c1e12a644b97..d85eb80d824971becf1611f3c1580953babf75a4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6226,9 +6226,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); 
netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ccd852ad62a4b109ff26ea40e7587024f8b0d198..d50c5b55da1806083c6c36b2f8ac279585d8aed6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index ee7857298361ded94b87e7e715c87610a2a05924..cf7e10fbab0e706c92626f6816931bfecf4b1372 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -319,15 +319,25 @@ static int orion_mdio_probe(struct platform_device *pdev) init_waitqueue_head(&dev->smi_busy_wait); - for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { - dev->clk[i] = of_clk_get(pdev->dev.of_node, i); - if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { + if (pdev->dev.of_node) { + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + dev->clk[i] = of_clk_get(pdev->dev.of_node, i); + if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk; + } + if (IS_ERR(dev->clk[i])) + break; + clk_prepare_enable(dev->clk[i]); + } + } else { + dev->clk[0] = clk_get(&pdev->dev, NULL); + if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_clk; } - if (IS_ERR(dev->clk[i])) - break; - clk_prepare_enable(dev->clk[i]); + if (!IS_ERR(dev->clk[0])) + clk_prepare_enable(dev->clk[0]); } dev->err_interrupt = platform_get_irq(pdev, 0); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1d55f014725efd75bd94dfbe2484dfae0792f857..b72a4fad7bc8fcf7f293796d07c9ea029cddcfc0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2452,6 +2452,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index f7825c7b92fe31e372345eff922dd15db3245ae6..8d7bb9a8896770ea1e7c32a80bf3b292f5da12c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4311,12 +4311,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev) static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 1a11bc0e16123e918e68e7a8f8bed703825665fa..cfa0bba3940fb7c57f473053189b5077763fd4c2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!buddy->bits[i]) goto err_out_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 76cc10e44080b012d50da503cd6ed60520aa6154..7ddacc9e4fe401890a8962abfa6c69012e57f828 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -217,6 +217,9 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; } + if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type)) + return 0; + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(*rule_p)) { @@ -397,8 +400,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } @@ -415,8 +417,12 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); + + /* must be called after DESTROY bit is set and + * set_rx_mode is called and flushed + */ + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7e6706333fa8d27250e12fc5f276bd7603e4ab0e..51edc507b7b5dddcb6b9af11511ea7000ba016d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -519,7 +519,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -564,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 701624a63d2f4f62f8b080604e206a64107b5b79..1ab40d622ae1e5805c9db30b9d2cd3471c2f1e16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -198,7 +198,7 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw->mode == SRIOV_NONE) + if (esw->mode != SRIOV_OFFLOADS) return -EOPNOTSUPP; switch (attr->id) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 54f1a40a68edde54149cfa1edb46c32bd3f8d1b9..d359e850dbf07f115879be0b4d3e851f04c23484 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -366,10 +366,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e180ec4f1a24851fe67d17097f825b7e3fffa2ac..d8e7ca48753fbd22562f284fe61a4c1004ca1451 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1384,7 +1384,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -1605,11 +1605,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6a79c8e4a7a404a9ae48ca8bf3eae492cc3783ad..9043d2cadd5de330e500f0b4d62fdf7c9ec5ec6d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -744,8 +744,8 @@ nfp_port_get_fecparam(struct net_device *netdev, struct nfp_eth_table_port *eth_port; struct nfp_port *port; - param->active_fec = ETHTOOL_FEC_NONE_BIT; - param->fec = ETHTOOL_FEC_NONE_BIT; + param->active_fec = ETHTOOL_FEC_NONE; + param->fec = ETHTOOL_FEC_NONE; port = nfp_port_from_netdev(netdev); eth_port = nfp_port_get_eth_port(port); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index f9e475075d3ea249225b47538344403c59294db0..61d5d765456870d33bf7de82e196d03747faa0ad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1015,7 +1015,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 71a7af134dd8e74622a8cecb99d04628a7683069..886c7aae662fac3f7bdc84e8f313bd4feab4d62a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -96,6 +96,7 @@ static int 
qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) p_ramrod->personality = PERSONALITY_ETH; break; case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 2a0cbc535a2ed5d527f6a622093d653766c1082d..19673ed929e680c70088c751679b2c6f6a630a98 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -493,13 +493,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); if (ret) - return ret; + goto disable_clk_axi; ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); if (ret) - return ret; + goto disable_clk_cfg_ahb; + + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + if (ret) + goto disable_clk_cfg_ahb; - return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + return 0; + +disable_clk_cfg_ahb: + clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); +disable_clk_axi: + clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); + + return ret; } /* Enable clocks; needs emac_clks_phase1_init to be called before */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index faaf74073a1201abc144b32035cc376550930d72..569e698b5c80737c9482ed675f779b1ab4203ca4 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1445,6 +1445,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1453,15 +1454,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. + * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skip the following + * re-initialization procedure. + */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). 
+ */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 4d75158c64b29976d7cd75826f620377057c6673..826626e870d5cdbb10c42cf28b1ea5f4001772ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; + plat_dat->multicast_filter_bins = 0; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index e4e9a7591efe92fb1b762dbca45c83b2c0fc4ff2..4d617ba11ecb5b490c8ba83ccf350b23268eb94d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -176,6 +176,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, value = GMAC_FRAME_FILTER_PR; } else if (dev->flags & IFF_ALLMULTI) { value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no filter */ + value = GMAC_FRAME_FILTER_PM; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 4d5fb4b51cc4fda15fce1dd54d40498662e8c743..5986fe927ad0b3556c4abbfafb305b8f5847b9de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -694,23 +694,16 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); int ret; - if (!edata->eee_enabled) { + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + if (!edata->eee_enabled) stmmac_disable_eee_mode(priv); - } else { - /* We are asking for enabling the EEE but it is safe - * to verify all by invoking the eee_init function. - * In case of failure it will return an error. 
- */ - edata->eee_enabled = stmmac_eee_init(priv); - if (!edata->eee_enabled) - return -EOPNOTSUPP; - } ret = phy_ethtool_set_eee(dev->phydev, edata); if (ret) return ret; - priv->eee_enabled = edata->eee_enabled; priv->tx_lpi_timer = edata->tx_lpi_timer; return 0; } diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 23417266b7ecc24aca2a0e562cd17985399c9cc3..e66014e0427f7de8997c11c182d88ae258dea6a6 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -296,8 +296,8 @@ spider_net_free_chain(struct spider_net_card *card, descr = descr->next; } while (descr != chain->ring); - dma_free_coherent(&card->pdev->dev, chain->num_desc, - chain->hwring, chain->dma_addr); + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), + chain->hwring, chain->dma_addr); } /** diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 817c290b78cd99efab89601587cab3fbc30d6370..d0b5844c8a31539a674e9447714554f767128756 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -721,7 +721,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4, struct flowi4 *fl4, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -737,6 +738,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->flowi4_proto = IPPROTO_UDP; fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; tos = info->key.tos; if ((tos == 1) && !geneve->collect_md) { @@ -771,7 +774,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6, struct flowi6 *fl6, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -787,6 +791,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->flowi6_proto = IPPROTO_UDP; fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; + fl6->fl6_dport = dport; + fl6->fl6_sport = sport; + prio = info->key.tos; if ((prio == 1) && !geneve->collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); @@ -833,14 +840,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, GENEVE_IPV4_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -875,13 +883,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, 
skb, 1, USHRT_MAX, true); if (geneve->collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -958,13 +967,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); + __be16 sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -974,9 +988,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -987,8 +1005,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; } - info->key.tp_src = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = geneve->info.key.tp_dst; return 0; } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index d73850ebb671f64c59fc67e0c47ff089100415ec..f2fecb6842209a2c958d4f97fad7a93a8453d539 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1187,6 +1187,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index bdb55db4523b15e7989314d105f33640dde37e2c..2dff0e110c6f3ff0f351ad07d1ae9d7940363122 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -378,7 +378,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, } rcu_read_unlock(); - while (unlikely(txq >= ndev->real_num_tx_queues)) + while (txq >= ndev->real_num_tx_queues) txq -= ndev->real_num_tx_queues; return txq; @@ -513,7 +513,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, int rc; skb->dev = vf_netdev; - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); rc = dev_queue_xmit(skb); if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { @@ -543,12 +543,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; - /* if VF is present and up then redirect packets - * already called with rcu_read_lock_bh + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. + * If netpoll is in uses, then VF can not be used either. 
*/ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); if (vf_netdev && netif_running(vf_netdev) && - !netpoll_tx_running(net)) + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net)) return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 71be8524cca87d7f0d5ccd52cda4a89b441fb0fc..a686926bba71ef594d0e3b5546d0b259bd99dd30 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -883,7 +883,9 @@ static int adf7242_rx(struct adf7242_local *lp) int ret; u8 lqi, len_u8, *data; - adf7242_read_reg(lp, 0, &len_u8); + ret = adf7242_read_reg(lp, 0, &len_u8); + if (ret) + return ret; len = len_u8; diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 38a41651e451cfcf4cc2345f5e21f90ee8624c3c..deace0aadad24a68415b0733798cbe06d0bad434 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2923,6 +2923,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv) ); if (!priv->irq_workqueue) { dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); + destroy_workqueue(priv->mlme_workqueue); return -ENOMEM; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 87f605a33c37aba35b896d14f4f0dfaf6a59b688..9fa3c0bd6ec7859f6401b6c8d2701de123595ee9 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -177,12 +177,21 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree(port); } +#define IPVLAN_ALWAYS_ON_OFLOADS \ + (NETIF_F_SG | NETIF_F_HW_CSUM | \ + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) + +#define IPVLAN_ALWAYS_ON \ + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ + #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) @@ -196,7 +205,9 @@ static int ipvlan_init(struct net_device *dev) dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->features |= IPVLAN_ALWAYS_ON; + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; @@ -297,7 +308,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, { struct ipvl_dev *ipvlan = netdev_priv(dev); - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features = netdev_increment_features(ipvlan->phy_dev->features, + features, features); + features |= IPVLAN_ALWAYS_ON; + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); + + return features; } static void ipvlan_change_rx_flags(struct net_device *dev, int change) @@ -802,10 +820,9 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: 
list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; ipvlan->dev->gso_max_segs = dev->gso_max_segs; - netdev_features_change(ipvlan->dev); + netdev_update_features(ipvlan->dev); } break; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 4ad3b877e5fd9afff1d28aca975cee4d9ec32dde..4c5b67a2d63a0970dcfda155167261228ad4f70d 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1085,6 +1085,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) struct macsec_rx_sa *rx_sa; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; + unsigned int len; sci_t sci; u32 pn; bool cbit; @@ -1240,9 +1241,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsc_put(rx_sc); skb_orphan(skb); + len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) - count_rx(dev, skb->len); + count_rx(dev, len); else macsec->secy.netdev->stats.rx_dropped++; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 349123592af0f5387f3dd8aec32eb7c5fdcce9bd..e226a96da3a395d0a323b2fd3f7a37cb4977541e 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1230,6 +1230,9 @@ static void macvlan_port_destroy(struct net_device *dev) static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct nlattr *nla, *head; + int rem, len; + if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; @@ -1277,6 +1280,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } + if (data[IFLA_MACVLAN_MACADDR_DATA]) { + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(nla))) + return -EADDRNOTAVAIL; + } + } + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; @@ -1333,10 +1350,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { - if (nla_type(nla) != IFLA_MACVLAN_MACADDR || - nla_len(nla) != ETH_ALEN) - continue; - addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 1f5fd24cd749e34606537600736a89090e1c7468..2386871e129494fc533a1aaeb176011941f329a1 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -154,6 +154,7 @@ config MDIO_THUNDER depends on 64BIT depends on PCI select MDIO_CAVIUM + select MDIO_DEVRES help This driver supports the MDIO interfaces found on Cavium ThunderX SoCs when the MDIO bus device appears as a PCI diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 54ac599cffb4da40f0b1514dbf2902ac0d2b537c..b884b681d5c5249aa0c4d1565062b722dd093455 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1154,7 +1154,8 @@ void phy_detach(struct phy_device *phydev) phy_led_triggers_unregister(phydev); - module_put(phydev->mdio.dev.driver->owner); + if (phydev->mdio.dev.driver) + module_put(phydev->mdio.dev.driver->owner); /* If the device had no specific driver before (i.e. 
- it * was using the generic driver), we unbind the device diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 53d9562a8818b8bb92136ea38abbf01d4f8fe092..3eb034a5a659b10a6ee8e0e20247d09d58678e09 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -294,7 +294,7 @@ static int __team_options_register(struct team *team, for (i--; i >= 0; i--) __team_option_inst_del_option(team, dst_opts[i]); - i = option_count - 1; + i = option_count; alloc_rollback: for (i--; i >= 0; i--) kfree(dst_opts[i]); @@ -2086,6 +2086,7 @@ static void team_setup_by_port(struct net_device *dev, dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; + dev->needed_headroom = port_dev->needed_headroom; dev->addr_len = port_dev->addr_len; dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 023b8d0bf1754e833e08514b9cf6165ce3240984..8d27786acad9148586dd3293f897da80a621ea38 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -309,7 +309,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) netdev_dbg(dev->net, "asix_get_phy_addr()\n"); - if (ret < 0) { + if (ret < 2) { netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); goto out; } diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 8455f72007b9e7370b917f14952db0354ea5e8b0..a9d0df435e2669764ee8f6724e16fb2e4a4c9150 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1735,6 +1735,7 @@ static const struct driver_info belkin_info = { .status = ax88179_status, .link_reset = ax88179_link_reset, .reset = ax88179_reset, + .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index b91f92e4e5f22d659d89c35d8accc6ef03191b74..915ac75b55fc7c43f55db19b379983216b93de5f 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -625,6 +625,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 5251c5f6f96edf601891d123f534d8aab7306591..bff268b4a9a460e70bf7162203acd64b3356b45d 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1403,8 +1403,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); @@ -2273,12 +2274,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, minor = get_free_serial_index(); if (minor < 0) - goto exit; + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) + goto exit2; /* fill in specific data 
for later use */ serial->minor = minor; @@ -2323,6 +2326,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 299e1e494ac2e67ce4274c34785a06ef39a3a46f..366cf2408414ee1520f60d031a05bae389b569db 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -376,10 +376,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2868,78 +2864,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch (e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = NULL; int ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3711,6 +3641,7 @@ static void lan78xx_stat_monitor(struct timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3759,6 +3690,34 @@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + 
usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3770,18 +3729,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); - dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { @@ -3794,6 +3742,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ea3c8911861471621f6c5a89557fc5f497f96bbb..af58bf54aa9b6ab222388a8cce3052bb47f48d86 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1227,6 +1227,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ @@ -1262,6 +1263,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index b807c91abe1da94fc16c7eca39c0de3a1e2070a3..a22ae3137a3f8b548face7b3478280b087418246 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -213,7 +213,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } - msleep(20); + msleep(40); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 80373a9171dd2d0e06a19cb21aeb37934bea4b37..933d1a74bcdb3669cf3c2bcb430da27aef401402 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -277,12 +277,20 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) return 1; } -static inline void set_ethernet_addr(rtl8150_t * dev) +static void set_ethernet_addr(rtl8150_t *dev) { - u8 node_id[6]; + u8 node_id[ETH_ALEN]; + int ret; + + ret = get_registers(dev, IDR, sizeof(node_id), node_id); - get_registers(dev, IDR, sizeof(node_id), node_id); - memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); + if (ret == sizeof(node_id)) { + ether_addr_copy(dev->netdev->dev_addr, node_id); + } else 
{ + eth_hw_addr_random(dev->netdev); + netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", + dev->netdev->dev_addr); + } } static int rtl8150_set_mac_address(struct net_device *netdev, void *p) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 09f0b53b2b773fe1ea9f370a398c07822ff5242a..abf85f0ab72fc766f6b59aeee154297540a781dc 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -975,6 +975,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -987,12 +988,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -2219,7 +2223,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2256,7 +2260,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 4e9fe75d70675d052ad2f2be3f513b5c20dfef9a..21190dfbabb1684dbb5ee86ba37c8e8ad590eaac 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -199,7 +199,7 @@ config WANXL_BUILD_FIRMWARE depends on WANXL && !PREVENT_FIRMWARE_BUILD help Allows you to rebuild firmware run by the QUICC processor. - It requires as68k, ld68k and hexdump programs. + It requires m68k toolchains and hexdump programs. You should never need this option, say N. 
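The Makefile change that follows switches the wanxl firmware build from the standalone as68k/ld68k tools to an ordinary m68k GCC cross toolchain selected via the CROSS_COMPILE_M68K variable introduced by this patch. As a minimal sketch (assuming CONFIG_WANXL_BUILD_FIRMWARE=y and a cross toolchain installed under the common, but here assumed, m68k-linux-gnu- prefix), the firmware could be rebuilt with an invocation along these lines:

    # hypothetical example; the m68k-linux-gnu- prefix is an assumption,
    # any m68k cross toolchain providing gcc and ld should work
    make CROSS_COMPILE_M68K=m68k-linux-gnu- drivers/net/wan/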
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 9532e69fda8781eb5a18dfd181703b9d399d7a85..0500282e176e0eafaeb6059c856a981f9eeaec38 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -41,17 +41,17 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) - AS68K = $(AS) - LD68K = $(LD) + M68KCC = $(CC) + M68KLD = $(LD) else - AS68K = as68k - LD68K = ld68k + M68KCC = $(CROSS_COMPILE_M68K)gcc + M68KLD = $(CROSS_COMPILE_M68K)ld endif quiet_cmd_build_wanxlfw = BLD FW $@ cmd_build_wanxlfw = \ - $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \ - $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ + $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $(obj)/wanxlfw.o $<; \ + $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \ rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 320039d329c7d17a5473d17d12e77e244a94a79b..2c6e3fa6947a0f6be20aed691f479a8d97ac2617 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -121,6 +121,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, skb_put(skb, sizeof(struct cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); @@ -374,6 +375,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); spin_lock_init(&state(hdlc)->lock); dev->header_ops = &cisco_header_ops; + dev->hard_header_len = sizeof(struct hdlc_header); dev->type = ARPHRD_CISCO; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 038236a9c60eee688f51422ef05cf6d4efa06b6d..03b5f5cce6f473178e263d3aeb352876bed7cc79 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -436,6 +436,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) if (pvc->state.fecn) /* TX Congestion counter */ dev->stats.tx_compressed++; skb->dev = pvc->frad; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); dev_queue_xmit(skb); return NETDEV_TX_OK; } @@ -558,6 +560,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); @@ -1044,7 +1047,7 @@ static void pvc_setup(struct net_device *dev) { dev->type = ARPHRD_DLCI; dev->flags = IFF_POINTOPOINT; - dev->hard_header_len = 10; + dev->hard_header_len = 0; dev->addr_len = 2; netif_keep_dst(dev); } @@ -1096,6 +1099,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) dev->mtu = HDLC_MAX_MTU; dev->min_mtu = 68; dev->max_mtu = HDLC_MAX_MTU; + dev->needed_headroom = 10; dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index ab8b3cbbb205cc42f2bea15259bee0b77c2ff055..20d9b6585fba335ee5fc615ac4edb529d1890c04 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -254,6 +254,7 @@ static void ppp_tx_cp(struct 
net_device *dev, u16 pid, u8 code, skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); skb_queue_tail(&tx_queue, skb); } @@ -386,11 +387,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } for (opt = data; len; len -= opt[1], opt += opt[1]) { - if (len < 2 || len < opt[1]) { - dev->stats.rx_errors++; - kfree(out); - return; /* bad packet, drop silently */ - } + if (len < 2 || opt[1] < 2 || len < opt[1]) + goto err_out; if (pid == PID_LCP) switch (opt[0]) { @@ -398,6 +396,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, continue; /* MRU always OK and > 1500 bytes? */ case LCP_OPTION_ACCM: /* async control character map */ + if (opt[1] < sizeof(valid_accm)) + goto err_out; if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; @@ -409,6 +409,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } break; case LCP_OPTION_MAGIC: + if (len < 6) + goto err_out; if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ @@ -427,6 +429,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); + return; + +err_out: + dev->stats.rx_errors++; + kfree(out); } static int ppp_rx(struct sk_buff *skb) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index ac34257e9f2036787bdffc512ba47441067cdd46..e5fc1b95cea6abc059ef2762468f7070d6d5b6ec 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -160,6 +160,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, if (!netif_running(dev)) goto drop; + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. + */ + if (skb->len < 1) + goto drop; + switch (skb->data[0]) { case X25_IFACE_DATA: break; @@ -195,8 +201,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) struct net_device *dev; int size = skb->len; - skb->protocol = htons(ETH_P_X25); - ptr = skb_push(skb, 2); *ptr++ = size % 256; @@ -207,6 +211,10 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb->protocol = htons(ETH_P_DEC); + + skb_reset_network_header(skb); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); @@ -308,6 +316,7 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; + dev->hard_header_len = 0; dev->mtu = 1000; dev->addr_len = 0; } @@ -334,7 +343,9 @@ static int lapbeth_new_device(struct net_device *dev) * then this driver prepends a length field of 2 bytes, * then the underlying Ethernet device prepends its own header. 
*/ - ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; + ndev->needed_tailroom = dev->needed_tailroom; lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index da2d179430ca5a39e20239f0a7c6215730c6dde3..4c57e79e5779aad51b37f332807b2fe4f0adf43b 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ + AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect + SMCWUSBT-G2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 0baaad90b8d18708ae2058076e04c1f5627128e2..4e980e78ba95c3d955ec922b9da48a58e85e871c 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1521,7 +1521,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, *len += scnprintf(buf + *len, buf_len - *len, "No. Preamble Rate_code "); - for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++) + for (i = 0; i < tpc_stats->num_tx_chain; i++) *len += scnprintf(buf + *len, buf_len - *len, "tpc_value%d ", i); @@ -2365,6 +2365,7 @@ void ath10k_debug_destroy(struct ath10k *ar) ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); + kfree(ar->debug.tpc_stats_final); } int ath10k_debug_register(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 7cff0d52338fe1de23da635d23c0e469593f0c2a..fd011bdabb963ac97095f578d148321aebb48c66 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1329,7 +1329,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1536,7 +1538,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index fac58c3c576a2054a4ef65a2b8d3657896a9037f..3ff65a0a834a2c13b087e6ecaa02963f24b66ea0 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -753,7 +753,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 0 +#define TARGET_10_4_DMA_BURST_SIZE 1 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 
0ecaba824fb288d7eb36705d2ff70b838195a1e4..0cdaecb0e28a9ae2e8d63d0676b8c0499bd66010 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1567,23 +1567,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { int ret; + void *mem; + + mem = kzalloc(buf_len, GFP_KERNEL); + if (!mem) + return -ENOMEM; /* set window register to start read cycle */ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address); if (ret) { ath10k_warn(ar, "failed to set mbox window read address: %d", ret); - return ret; + goto out; } /* read the data */ - ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len); + ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len); if (ret) { ath10k_warn(ar, "failed to read from mbox window data address: %d\n", ret); - return ret; + goto out; } - return 0; + memcpy(buf, mem, buf_len); + +out: + kfree(mem); + + return ret; } static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 3372dfa0deccf076532db157d93f4476f0bdf1af..3f3fbee631c3494e7d562bfe4d0c4898815680be 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4550,16 +4550,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, } pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++; - for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, rate_code[i], type); @@ -4672,7 +4669,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_config_event *ev; @@ -4688,6 +4685,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) return; } + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n", + rate_max, WMI_TPC_RATE_MAX); + rate_max = WMI_TPC_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; @@ -4704,8 +4708,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, @@ -4900,16 +4904,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, } pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++; - for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= 
__le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, rate_code[i], type, pream_idx); @@ -4925,7 +4926,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_final_table_event *ev; @@ -4933,12 +4934,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_pdev_tpc_final_table_event *)skb->data; + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_FINAL_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n", + rate_max, WMI_TPC_FINAL_RATE_MAX); + rate_max = WMI_TPC_FINAL_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; - num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); @@ -4951,8 +4964,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index d2e062eaf56149b3ea76d452d75ecf81f9154a1b..f705f0e1cb5bed8b3139ba2e55b0e39b725e2fa3 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -173,6 +173,7 @@ static int htc_config_pipe_credits(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -208,6 +209,7 @@ static int htc_setup_complete(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -280,6 +282,7 @@ int htc_connect_service(struct htc_target *target, if (!time_left) { dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); + kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index e7a3127395be9c8aecf88d8cff2e12295aa4bc0c..066677bb83eb008f0bfdd542c645b78106581241 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -339,6 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); + kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index d5bb81e887624ca4b17c737f430f9a5a4124d7b4..9d2367133c7c6aaa4edefc1e3601b96e12c63d43 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -30,7 +30,7 @@ #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ -#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 +#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004 #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ #define BRCMF_STA_WME 0x00000002 /* WMM association */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 1de8497d92b8a9506d93b7b7b23cc250ae18065c..dc7c970257d2f71f9843d6c7e6cfae74d832c857 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -653,6 +653,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, int ifidx) { + struct brcmf_fws_hanger_item *hi; bool (*matchfn)(struct sk_buff *, void *) = NULL; struct sk_buff *skb; int prec; @@ -664,6 +665,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); while (skb) { hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + hi = &fws->hanger.items[hslot]; + WARN_ON(skb != hi->pkt); + hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); brcmu_pkt_buf_free_skb(skb); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 96870d1b3b73b8c12c96b95f5a53b74b6df05fd1..a5195bdb4d9bd4fcb0f85b4f2b8a253806c63b90 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3633,7 +3633,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); sdio_claim_host(bus->sdiodev->func1); - brcmf_sdio_wd_timer(bus, false); +#ifdef DEBUG + if (!BRCMF_FWCON_ON() || + bus->console_interval == 0) +#endif + brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); sdio_release_host(bus->sdiodev->func1); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 3d54e80eca4eaf02313dd61ba323e0b3485b87f0..6a757e0d6e8da20781d76fda975c977c4cd655e1 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -357,6 +357,24 @@ int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode) } EXPORT_SYMBOL(cnss_set_fw_log_mode); +int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (plat_priv->device_id != QCA6490_DEVICE_ID || + !plat_priv->fw_pcie_gen_switch) + return -ENOTSUPP; + + if (pcie_gen_speed < QMI_PCIE_GEN_SPEED_1_V01 || + pcie_gen_speed > QMI_PCIE_GEN_SPEED_3_V01) + return -EINVAL; + + cnss_pr_dbg("WLAN provided PCIE gen speed: %d\n", pcie_gen_speed); + plat_priv->pcie_gen_speed = pcie_gen_speed; + return 0; +} +EXPORT_SYMBOL(cnss_set_pcie_gen_speed); + static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv) { int ret = 0; @@ -433,6 +451,8 @@ static int cnss_fw_ready_hdlr(struct 
cnss_plat_data *plat_priv) set_bit(CNSS_FW_READY, &plat_priv->driver_state); clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); + cnss_wlfw_send_pcie_gen_speed_sync(plat_priv); + if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) { clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state); clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index c2b458fdedfc8e9b9d973ab1885c695c7505c684..b6d487d5de8c7eacfb1abe17e25d3c0ab6557279 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -403,6 +403,8 @@ struct cnss_plat_data { int (*get_info_cb)(void *ctx, void *event, int event_len); u8 use_nv_mac; u8 set_wlaon_pwr_ctrl; + u8 fw_pcie_gen_switch; + u8 pcie_gen_speed; }; #ifdef CONFIG_ARCH_QCOM diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index ab1d1056465ab05154ab55818a0c08ef121f3da5..da9ba280f2e7d6ba50599e3079a9942c2e7f7198 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -461,6 +461,10 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv) if (resp->otp_version_valid) plat_priv->otp_version = resp->otp_version; + if (resp->fw_caps_valid) + plat_priv->fw_pcie_gen_switch = + !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01); + cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s, otp_version: 0x%x\n", plat_priv->chip_info.chip_id, plat_priv->chip_info.chip_family, @@ -1296,6 +1300,64 @@ int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv, return ret; } +int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_pcie_gen_switch_req_msg_v01 req; + struct wlfw_pcie_gen_switch_resp_msg_v01 resp; + struct qmi_txn txn; + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 || + !plat_priv->fw_pcie_gen_switch) { + cnss_pr_dbg("PCIE Gen speed not setup\n"); + return 0; + } + + cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n", + plat_priv->pcie_gen_speed, plat_priv->driver_state); + req.pcie_speed = (enum wlfw_pcie_gen_speed_v01) + plat_priv->pcie_gen_speed; + + ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, + wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp); + if (ret < 0) { + cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n", + ret); + goto out; + } + + ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, + QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01, + WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_pcie_gen_switch_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); + if (ret < 0) { + cnss_pr_err("Failed to wait for PCIE Gen switch resp, err: %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n", + plat_priv->pcie_gen_speed, resp.resp.result, + resp.resp.error); + ret = -resp.resp.result; + } +out: + /* Reset PCIE Gen speed after one time use */ + plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01; + return ret; +} + int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv) { struct wlfw_antenna_switch_req_msg_v01 *req; diff --git a/drivers/net/wireless/cnss2/qmi.h 
b/drivers/net/wireless/cnss2/qmi.h index baa5811d835f620c22457e7bce843e8b4034d502..533a0affc47bc1ab0fb6c9ca46f2ad1bc50fec1f 100644 --- a/drivers/net/wireless/cnss2/qmi.h +++ b/drivers/net/wireless/cnss2/qmi.h @@ -71,6 +71,7 @@ int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv); int cnss_register_ims_service(struct cnss_plat_data *plat_priv); void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv); void cnss_ignore_qmi_failure(bool ignore); +int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv); #else #define QMI_WLFW_TIMEOUT_MS 10000 @@ -226,6 +227,9 @@ static inline void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv) {} void cnss_ignore_qmi_failure(bool ignore) {}; +static inline +int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv) {} + #endif /* CONFIG_CNSS2_QMI */ #endif /* _CNSS_QMI_H */ diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index e16f2597c219914a8c1c5d38ce2e393fd75edf31..c1c1cf330de7fd4183e182b003636d6ad3fc6996 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4302,8 +4302,8 @@ il_apm_init(struct il_priv *il) * power savings, even without L1. */ if (il->cfg->set_l0s) { - pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { + ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); + if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { /* L1-ASPM enabled; disable(!) L0S */ il_set_bit(il, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 1fb76d2f5d3fdfe680886148e93e53b4e208d207..8b9d0809daf62f8202b1ba187df7a3a94ea7c8d1 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -953,7 +953,7 @@ struct mwifiex_tkip_param { struct mwifiex_aes_param { u8 pn[WPA_PN_SIZE]; __le16 key_len; - u8 key[WLAN_KEY_LEN_CCMP]; + u8 key[WLAN_KEY_LEN_CCMP_256]; } __packed; struct mwifiex_wapi_param { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 69e3b624adbb9432fe49de863de2d0e95418c739..7003767eef423c6098b63173cafac894153a4243 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -581,6 +581,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_key_material *key = &resp->params.key_material; + int len; + + len = le16_to_cpu(key->key_param_set.key_len); + if (len > sizeof(key->key_param_set.key)) + return -EINVAL; if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { @@ -594,9 +599,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, memset(priv->aes_key.key_param_set.key, 0, sizeof(key->key_param_set.key)); - priv->aes_key.key_param_set.key_len = key->key_param_set.key_len; - memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, - le16_to_cpu(priv->aes_key.key_param_set.key_len)); + priv->aes_key.key_param_set.key_len = cpu_to_le16(len); + memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len); return 0; } @@ -611,9 +615,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_802_11_key_material_v2 
*key_v2; - __le16 len; + int len; key_v2 = &resp->params.key_material_v2; + + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); + if (len > sizeof(key_v2->key_param_set.key_params.aes.key)) + return -EINVAL; + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); @@ -627,12 +636,11 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, return 0; memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, - WLAN_KEY_LEN_CCMP); + sizeof(key_v2->key_param_set.key_params.aes.key)); priv->aes_key_v2.key_param_set.key_params.aes.key_len = - key_v2->key_param_set.key_params.aes.key_len; - len = priv->aes_key_v2.key_param_set.key_params.aes.key_len; + cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, - key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len)); + key_v2->key_param_set.key_params.aes.key, len); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index d44d57e6eb27ae643462a4af6e07cde7a19fee32..97df6b3a472b1c4a138cf836e899f444576d05b1 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -278,6 +278,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) if (!skb) continue; + tid->reorder_buf[i] = NULL; tid->nframes--; dev_kfree_skb(skb); } diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 1893640555c1e0168f5612cf58f0b33cfe4b46ca..3d6c0d8c71d7e9aff168def07e2e50d775f9ed4e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -739,8 +739,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); - if (err) + if (err) { + usb_unanchor_urb(urb); + usb_free_urb(urb); goto err_out; + } usb_free_urb(urb); } return 0; diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index f5acd24d0e2b142d96ef1e24cdd94a7252d66b52..988abb49771f96660e2f691d85551206eb0ac396 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -84,7 +84,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl, break; } - return 0; + return ret; } static void wl1251_event_mbox_dump(struct event_mailbox *mbox) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 2ca5658bbc2abef5a3699b0d191ed3dd05df3777..43c7b37dec0c9d2db8768287c2e8c507328dc7c3 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -3671,8 +3671,10 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(wl->dev); goto out; + } ret = wlcore_cmd_regdomain_config_locked(wl); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index b6e19c2d66b0a80c61b789ccf3f574cad300a955..250bcbf4ea2f2e0069d69868b7359ee6213b446f 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -877,6 +877,7 @@ void wl1271_tx_work(struct work_struct *work) ret = wlcore_tx_work_locked(wl); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); wl12xx_queue_recovery_work(wl); goto out; } diff --git 
a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 6b4675a9494b2c3b8471c33a9da37278de418b74..c8e84276e63979071884f5b66d3009315fc3d557 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1337,12 +1339,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2142,28 +2147,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ -static int xennet_remove(struct xenbus_device *dev) +static void xennet_bus_close(struct xenbus_device *dev) { - struct netfront_info *info = dev_get_drvdata(&dev->dev); - - dev_dbg(&dev->dev, "%s\n", dev->nodename); + int ret; - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + +static int xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev_get_drvdata(&dev->dev); + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 9d9c8d57a042d45084082e10b30f686d82b7edb4..64b58455e620be65a3e9164c2b9bb87c13e5c755 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -209,6 +209,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: + kfree_skb(skb); return -ENODEV; } } diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 01acb6e533655d6b6041cbbde43af8c1364aec60..c4b6e29c071920f9c5411a702b95f5d79b1763de 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -981,7 +981,7 @@ static int st95hf_in_send_cmd(struct 
nfc_digital_dev *ddev, rc = down_killable(&stcontext->exchange_lock); if (rc) { WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); - return rc; + goto free_skb_resp; } rc = st95hf_spi_send(&stcontext->spicontext, skb->data, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0d60f2f8f3eecf8f33e045e20a15204e92108ef0..b633ea40430ee0d31b6a3b42dc5829c3ef7b9eca 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -255,11 +255,8 @@ void nvme_complete_rq(struct request *req) trace_nvme_complete_rq(req); if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && - blk_path_error(status)) { - nvme_failover_req(req); + if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) return; - } if (!blk_queue_dying(req->q)) { nvme_req(req)->retries++; @@ -1602,7 +1599,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); - revalidate_disk(ns->head->disk); + nvme_mpath_update_disk_size(ns->head->disk); } #endif } @@ -2608,10 +2605,26 @@ static int nvme_dev_open(struct inode *inode, struct file *file) return -EWOULDBLOCK; } + nvme_get_ctrl(ctrl); + if (!try_module_get(ctrl->ops->module)) { + nvme_put_ctrl(ctrl); + return -EINVAL; + } + file->private_data = ctrl; return 0; } +static int nvme_dev_release(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + module_put(ctrl->ops->module); + nvme_put_ctrl(ctrl); + return 0; +} + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) { struct nvme_ns *ns; @@ -2672,6 +2685,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, static const struct file_operations nvme_dev_fops = { .owner = THIS_MODULE, .open = nvme_dev_open, + .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = nvme_dev_ioctl, }; @@ -2859,6 +2873,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + /* Can't delete non-created controllers */ + if (!ctrl->created) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -3579,6 +3597,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->async_event_work); nvme_start_queues(ctrl); } + ctrl->created = true; } EXPORT_SYMBOL_GPL(nvme_start_ctrl); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index bcd09d3a44dad5cf0d8d99de5c96f25a1aa1ff84..05dd46f98441448799516c76a0d5ab7c80fb5001 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -577,7 +577,6 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * which is require to set the queue live in the appropinquate states. 
*/ switch (ctrl->state) { - case NVME_CTRL_NEW: case NVME_CTRL_CONNECTING: if (req->cmd->common.opcode == nvme_fabrics_command && req->cmd->fabrics.fctype == nvme_fabrics_type_connect) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index ed43b06353a391849d68aa91eedd72ae2ef84f1e..ed88d50217724e6db40362b1aaca80e5c2daea14 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1716,7 +1716,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; goto out_on_error; } @@ -1726,7 +1726,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); @@ -1791,6 +1791,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op; int i; + cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (!aen_op->fcp_req.private) @@ -3293,12 +3294,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { if (lport->localport.node_name != laddr.nn || - lport->localport.port_name != laddr.pn) + lport->localport.port_name != laddr.pn || + lport->localport.port_state != FC_OBJSTATE_ONLINE) continue; list_for_each_entry(rport, &lport->endp_list, endp_list) { if (rport->remoteport.node_name != raddr.nn || - rport->remoteport.port_name != raddr.pn) + rport->remoteport.port_name != raddr.pn || + rport->remoteport.port_state != FC_OBJSTATE_ONLINE) continue; /* if fail to get reference fall through. Will error */ diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2e63c1106030b9b83772e94e55aef8ba4495b7e7..e71075338ff5c1346c99492155724f465ad5babf 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -73,17 +73,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } } -void nvme_failover_req(struct request *req) +bool nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status; unsigned long flags; - spin_lock_irqsave(&ns->head->requeue_lock, flags); - blk_steal_bios(&ns->head->requeue_list, req); - spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); - switch (status & 0x7ff) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: @@ -111,15 +106,17 @@ void nvme_failover_req(struct request *req) nvme_mpath_clear_current_path(ns); break; default: - /* - * Reset the controller for any non-ANA error as we don't know - * what caused the error. - */ - nvme_reset_ctrl(ns->ctrl); - break; + /* This was a non-ANA error so follow the normal error path. 
*/ + return false; } + spin_lock_irqsave(&ns->head->requeue_lock, flags); + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); + return true; } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index cc4273f1198943867e6dc0388af18dcd3f0fff1e..9c2e7a151e40099964a69a5620e6034778d9b0a4 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -206,6 +206,7 @@ struct nvme_ctrl { struct nvme_command ka_cmd; struct work_struct fw_act_work; unsigned long events; + bool created; #ifdef CONFIG_NVME_MULTIPATH /* asymmetric namespace access: */ @@ -477,7 +478,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -void nvme_failover_req(struct request *req); +bool nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); @@ -503,6 +504,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) kblockd_schedule_work(&head->requeue_work); } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ + struct block_device *bdev = bdget_disk(disk, 0); + + if (bdev) { + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); + bdput(bdev); + } +} + extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; @@ -521,8 +532,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } -static inline void nvme_failover_req(struct request *req) +static inline bool nvme_failover_req(struct request *req) { + return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { @@ -568,6 +580,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) { } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f393a6193252e1f1345a8bdcb5ad1a06bf886879..077c678166651b38197bd72c19e2e83fe4cdc51f 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -118,6 +118,7 @@ struct nvme_rdma_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; + struct mutex teardown_lock; bool use_inline_data; }; @@ -739,6 +740,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; @@ -880,6 +882,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); if (ctrl->ctrl.admin_tagset) @@ -887,11 +890,13 @@ static void 
nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, nvme_cancel_request, &ctrl->ctrl); blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); @@ -902,6 +907,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); } + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) @@ -1955,6 +1961,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, return ERR_PTR(-ENOMEM); ctrl->ctrl.opts = opts; INIT_LIST_HEAD(&ctrl->list); + mutex_init(&ctrl->teardown_lock); if (opts->mask & NVMF_OPT_TRSVCID) port = opts->trsvcid; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 776b7e9e23b9efc5e29bb671e1d444c05f9b1e6e..f28df233dfcd0e12d4c9514c3d870d3d019608dd 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -307,6 +307,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work) static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); @@ -316,6 +319,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 29b4b236afd85fc7dc668d4ed60f66a093174d17..77e4d184bc995d30d20ec79b059bdf9a5c8ee33e 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1986,9 +1986,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { - spin_lock(&fod->flock); + spin_lock_irqsave(&fod->flock, flags); fod->abort = true; - spin_unlock(&fod->flock); + spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 08f997a390d5da6e49c711b53cf3ea560424f1f7..cfd26437aeaea5d5fc6a87225319684206dfe3a6 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -83,6 +83,7 @@ enum nvmet_rdma_queue_state { struct nvmet_rdma_queue { struct rdma_cm_id *cm_id; + struct ib_qp *qp; struct nvmet_port *port; struct ib_cq *cq; atomic_t sq_wr_avail; @@ -471,7 +472,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, if (ndev->srq) ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL); else - ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL); + ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); if (unlikely(ret)) pr_err("post_recv cmd failed\n"); @@ -510,7 +511,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); if (rsp->n_rdma) { - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); } @@ -594,7 +595,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(rsp->n_rdma 
<= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); rsp->n_rdma = 0; @@ -737,7 +738,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) } if (nvmet_rdma_need_data_in(rsp)) { - if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, + if (rdma_rw_ctx_post(&rsp->rw, queue->qp, queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { @@ -1020,6 +1021,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } + queue->qp = queue->cm_id->qp; atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); @@ -1048,11 +1050,10 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { - struct ib_qp *qp = queue->cm_id->qp; - - ib_drain_qp(qp); - rdma_destroy_id(queue->cm_id); - ib_destroy_qp(qp); + ib_drain_qp(queue->qp); + if (queue->cm_id) + rdma_destroy_id(queue->cm_id); + ib_destroy_qp(queue->qp); ib_free_cq(queue->cq); } @@ -1286,9 +1287,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { - schedule_work(&queue->release_work); - /* Destroying rdma_cm id is not needed here */ - return 0; + /* + * Don't destroy the cm_id in free path, as we implicitly + * destroy the cm_id here with non-zero ret code. + */ + queue->cm_id = NULL; + goto free_queue; } mutex_lock(&nvmet_rdma_queue_mutex); @@ -1297,6 +1301,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, return 0; +free_queue: + nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 6dd1780a5885ddadddafe2efddc273dbba440908..0f19cc75cc0cfd4fa504026652c967d35ea878ae 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1291,7 +1291,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ** (one that doesn't overlap memory or LMMIO space) in the ** IBASE and IMASK registers. */ - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index a3ad2fe185b9c517923fc0a26897af0e289dcdd6..3c8ffd62dc0069a0f397a8a0d95cda671be22df4 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) + __must_hold(&pci_lock) { - DECLARE_WAITQUEUE(wait, current); - - __add_wait_queue(&pci_cfg_wait, &wait); do { - set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); - schedule(); + wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. 
*/ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5cb40b2518f9376dbe7edd4bf11d303daf97c025..87a2829dffd440380b454eb888a87ba42fabb057 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -323,12 +323,8 @@ void pci_bus_add_device(struct pci_dev *dev) dev->match_driver = true; retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) { + if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - return; - } pci_dev_assign_added(dev, true); } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index e292801fff7fd8ed4e434958cc4c5363457d19dc..1bdac298a943fcf55fbd0832bebaee3adb127a48 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -45,7 +45,13 @@ #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -76,6 +82,18 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 @@ -275,6 +293,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; @@ -319,9 +338,29 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) val &= ~BIT(0); writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + /* enable external reference clock */ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); ret = reset_control_deassert(res->phy_reset); diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c index d3bc48683db77acdf069d8b66cd4aaa9ad864c09..c123c1efc24becaee511b55c80714ecca6f861fc 100644 --- a/drivers/pci/controller/pci-msm.c +++ b/drivers/pci/controller/pci-msm.c @@ -7298,12 +7298,10 @@ int msm_pcie_pm_control(enum 
msm_pcie_pm_opt pm_opt, u32 busnr, void *user, break; } - if (msm_pcie_dev[rc_idx].link_status != - MSM_PCIE_LINK_DISABLED) { + if (msm_pcie_dev[rc_idx].power_on) { PCIE_ERR(&msm_pcie_dev[rc_idx], - "PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n", - rc_idx, msm_pcie_dev[rc_idx].link_status, - msm_pcie_dev[rc_idx].num_active_ep); + "PCIe: RC%d: requested to resume when link is already powered on. Number of active EP(s): %d\n", + rc_idx, msm_pcie_dev[rc_idx].num_active_ep); break; } diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 6f86583605a46992ef3e73cf4e8e064cf67ad3da..097c02197ec8fe52b0a45bf68694385b7b542636 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2400,7 +2400,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(dev, "fail to enable pcie controller: %d\n", err); - goto teardown_msi; + goto pm_runtime_put; } err = tegra_pcie_request_resources(pcie); @@ -2440,7 +2440,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) pm_runtime_put: pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); -teardown_msi: tegra_pcie_msi_teardown(pcie); put_resources: tegra_pcie_put_resources(pcie); diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c index ec394f6a19c8e6b50cd22c4bea0c5e2e01c59ac8..ae7affcb1a811f88786aa662398e08cfa77ee1d4 100644 --- a/drivers/pci/controller/pcie-cadence-host.c +++ b/drivers/pci/controller/pcie-cadence-host.c @@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index ad39b404f10a49f14553ab189e6ed9a5edd2753b..3d1b004a58f8f453c097d3ecb7678842287c0ea9 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -718,6 +718,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); return -ENODEV; } @@ -820,6 +821,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd) static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); + struct fwnode_handle *fn = vmd->irq_domain->fwnode; sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); @@ -828,6 +830,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_teardown_dma_ops(vmd); vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index c94c135254479848fe31fa16dc783eb23d90fad7..be35bbfa6968731054e86e670fd05738a2f7db91 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c 
+++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev) struct acpiphp_context *context; acpi_lock_hp_context(); + context = acpiphp_get_context(adev); - if (!context || context->func.parent->is_going_away) { - acpi_unlock_hp_context(); - return NULL; + if (!context) + goto unlock; + + if (context->func.parent->is_going_away) { + acpiphp_put_context(context); + context = NULL; + goto unlock; } + get_bridge(context->func.parent); acpiphp_put_context(context); + +unlock: acpi_unlock_hp_context(); return context; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 07940d1d83b70bdbdbb382b6f13a03cb58dde30e..005817e40ad39d022d4447ff2372a4bdbb54390f 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -530,7 +530,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); struct device *parent = pdev->dev.parent; - u16 status, events; + u16 status, events = 0; /* * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4). @@ -553,6 +553,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) } } +read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); if (status == (u16) ~0) { ctrl_info(ctrl, "%s: no response from device\n", __func__); @@ -565,24 +566,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) * Slot Status contains plain status bits as well as event * notification bits; right now we only want the event bits. */ - events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | - PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | - PCI_EXP_SLTSTA_DLLSC); + status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | + PCI_EXP_SLTSTA_DLLSC; /* * If we've already reported a power fault, don't report it again * until we've done something to handle it. */ if (ctrl->power_fault_detected) - events &= ~PCI_EXP_SLTSTA_PFD; + status &= ~PCI_EXP_SLTSTA_PFD; + events |= status; if (!events) { if (parent) pm_runtime_put(parent); return IRQ_NONE; } - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + if (status) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + + /* + * In MSI mode, all event bits must be zero before the port + * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). + * So re-read the Slot Status register in case a bit was set + * between read and write. 
+ */ + if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) + goto read_status; + } + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); if (parent) pm_runtime_put(parent); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 6e50f84733b752cd28a15daf2eea76937d0aa90f..279f9f0197b0191f450a125ce3c7de72d4dd1a17 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1164,6 +1164,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); + cnt += sprintf(buffer + cnt, "\n"); return cnt; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0862cb6338496cbf4c03a7a298b481cb5caa39f6..af2149632102a8ded4844287b356bbdb5f5fa64d 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2307,6 +2307,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this @@ -4321,6 +4334,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) if (ACPI_FAILURE(status)) return -ENODEV; + acpi_put_table(header); + /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); @@ -5053,7 +5068,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if (pdev->device == 0x7340 && pdev->revision != 0xc5) + if ((pdev->device == 0x7312 && pdev->revision != 0x00) || + (pdev->device == 0x7340 && pdev->revision != 0xc5)) return; pci_info(pdev, "disabling ATS\n"); @@ -5064,6 +5080,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 137bf0cee897cf791f33885f2df5aece15464ee7..8fc9a4e911e3a2d6419ba82964560a2396780b3d 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) pci_disable_rom(pdev); } EXPORT_SYMBOL(pci_unmap_rom); - -/** - * pci_platform_rom - provides a pointer to any ROM image provided by the - * platform - * @pdev: pointer to pci device struct - * @size: pointer to receive size of pci window over ROM - */ -void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) -{ - if (pdev->rom && pdev->romlen) { - 
*size = pdev->romlen; - return phys_to_virt((phys_addr_t)pdev->rom); - } - - return NULL; -} -EXPORT_SYMBOL(pci_platform_rom); diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index a32897f83ee517312a0d48a8e9449ea68c15687d..fb7478b6c4f9d60d639c72f06884c98264d3bb57 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -303,13 +303,16 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + kfree(slot); goto err; } err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) + if (err) { + kobject_put(&slot->kobj); goto err; + } INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); @@ -328,7 +331,6 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index cf515928fed09653d3ab92c87253f84bd5cf23c5..68107611c70a2d0381649cda38f93316f2f26094 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -311,8 +311,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6), QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0), @@ -338,7 +338,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0), QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1), @@ -347,7 +346,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19), - QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7), }; static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { @@ -355,6 +353,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6), QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2), QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36), + QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a), }; static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { @@ -365,7 +365,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4), - QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4), }; static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { @@ -818,6 +817,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .mask_pcs_ready = PHYSTATUS, }; +static const char * const ipq8074_pciephy_clk_l[] = { + "aux", "cfg_ahb", +}; /* list of 
resets */ static const char * const ipq8074_pciephy_reset_l[] = { "phy", "common", @@ -835,8 +837,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), .pcs_tbl = ipq8074_pcie_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), - .clk_list = NULL, - .num_clks = 0, + .clk_list = ipq8074_pciephy_clk_l, + .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), .vreg_list = NULL, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 5d78d43ba9fc5e3a6d55349d0048e84cae821dc9..6b3aaf521e588053fa15841f08aa167c6bf651fa 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -77,6 +77,8 @@ #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc /* Only for QMP V2 PHY - TX registers */ +#define QSERDES_TX_EMP_POST1_LVL 0x018 +#define QSERDES_TX_SLEW_CNTL 0x040 #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054 #define QSERDES_TX_DEBUG_BUS_SEL 0x064 #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068 diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index b8b226a2001465da56beb1246abeacfb8377ddc1..1feb1e1bf85e24f30f2ae4fed14c4c4a3d165033 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -717,7 +717,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy) struct phy_usb_instance *inst = phy_get_drvdata(phy); struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); - return exynos5420_usbdrd_phy_calibrate(phy_drd); + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) + return exynos5420_usbdrd_phy_calibrate(phy_drd); + return 0; } static const struct phy_ops exynos5_usbdrd_phy_ops = { diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c index f6f72339bbc32ace9605fed669f9d34e42bfae52..bb7fdf491c1c2ae4bea42d95d6cc4eb0af28f949 100644 --- a/drivers/phy/samsung/phy-s5pv210-usb2.c +++ b/drivers/phy/samsung/phy-s5pv210-usb2.c @@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) udelay(10); rst &= ~rstbits; writel(rst, drv->reg_phy + S5PV210_UPHYRST); + /* The following delay is necessary for the reset sequence to be + * completed + */ + udelay(80); } else { pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); pwr |= phypwr; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c index 43231fd065a186b4bf0729fff828f9e368be29e4..1a9450ef932b5fd1028bcfe5cdb6915e0fd0698d 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c @@ -418,7 +418,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = { MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)), MPP_MODE(15, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS), - MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)), + MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)), MPP_MODE(16, MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS), MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)), diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 7ec72ff2419a025ce72d86bacad93cd0584a8dfd..04a4e761e9a9c772bd5b9fa4c000496356cd908f 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, /* If pinconf isn't supported, don't parse 
properties in below. */ if (!PCS_HAS_PINCONF) - return 0; + return -ENOTSUPP; /* cacluate how much properties are supported in current node */ for (i = 0; i < ARRAY_SIZE(prop2); i++) { @@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, nconfs++; } if (!nconfs) - return 0; + return -ENOTSUPP; func->conf = devm_kcalloc(pcs->dev, nconfs, sizeof(struct pcs_conf_vals), @@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF && function) { res = pcs_parse_pinconf(pcs, np, function, map); - if (res) + if (res == 0) + *num_maps = 2; + else if (res == -ENOTSUPP) + *num_maps = 1; + else goto free_pingroups; - *num_maps = 2; } else { *num_maps = 1; } diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index bb646fb97b72eed295a94a27b5ba6eb0faa13781..273ed10ecd3b87cae22453dfbcc71079e5489c79 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -87,6 +87,28 @@ config IPA3 Kernel and user-space processes can call the IPA driver to configure IPA core. +config IPA + tristate "IPA support" + depends on SPS && NET + help + This driver supports the Internet Packet Accelerator (IPA3) core. + IPA is a programmable protocol processor HW block. + It is designed to support generic HW processing of UL/DL IP packets + for various use cases independent of radio technology. + The driver support client connection and configuration + for the IPA core. + Kernel and user-space processes can call the IPA driver + to configure IPA core. + +config RMNET_IPA + tristate "IPA RMNET WWAN Network Device" + depends on IPA && QCOM_QMI_HELPERS + help + This WWAN Network Driver implements network stack class device. + It supports Embedded data transfer from A7 to Q6. Configures IPA HW + for RmNet Data Driver and also exchange of QMI messages between + A7 and Q6 IPA-driver. + config IPA_DEBUG bool "IPA DEBUG for non-perf build" depends on IPA3 @@ -117,7 +139,7 @@ config RMNET_IPA3 config ECM_IPA tristate "STD ECM LAN Driver support" - depends on IPA3 + depends on IPA || IPA3 help Enables LAN between applications processor and a tethered host using the STD ECM protocol. @@ -126,7 +148,7 @@ config ECM_IPA config RNDIS_IPA tristate "RNDIS_IPA Network Interface Driver support" - depends on IPA3 + depends on IPA || IPA3 help Enables LAN between applications processor and a tethered host using the RNDIS protocol. diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile index d1c26b6bed6dbe9b2108442e0ec51e70c4c2a1a2..d1e4d52dcb80be1e0b73c15aabe7b88d964bc7c5 100644 --- a/drivers/platform/msm/ipa/Makefile +++ b/drivers/platform/msm/ipa/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common obj-$(CONFIG_IPA_UT) += test/ diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 639231528cf10937011baa918ce6c8894d1c3aba..2dab4cecae3186ea78e2b16fba8516658cef4bdc 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -14,7 +14,6 @@ #include #include #include "ipa_api.h" -#include "ipa_v3/ipa_i.h" /* * The following for adding code (ie. for EMULATION) not found on x86. 
@@ -335,7 +334,7 @@ u8 *ipa_pad_to_32(u8 *dest) return dest; } - i = (long)dest & 0x7; + i = (long)dest & 0x3; if (i) for (j = 0; j < (4 - i); j++) @@ -384,6 +383,55 @@ int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr) return 0; } +/** + * ipa_connect() - low-level IPA client connect + * @in: [in] input parameters from client + * @sps: [out] sps output from IPA needed by client for sps_connect + * @clnt_hdl: [out] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to connect to + * IPA in BAM-BAM mode. these peripherals are USB and HSIC. this api + * expects caller to take responsibility to add any needed headers, routing + * and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps, + u32 *clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_connect, in, sps, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_connect); + +/** + * ipa_disconnect() - low-level IPA client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to disconnect + * from IPA in BAM-BAM mode. this api expects caller to take responsibility to + * free any needed headers, routing and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disconnect(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disconnect, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disconnect); + + /** * ipa_clear_endpoint_delay() - Clear ep_delay. * @clnt_hdl: [in] IPA client handle @@ -2834,18 +2882,23 @@ enum ipa_client_type ipa_get_client_mapping(int pipe_idx) EXPORT_SYMBOL(ipa_get_client_mapping); /** - * ipa_get_rm_resource_from_ep() - this function is part of the deprecated - * RM mechanism but is still used by some drivers so we kept the definition. + * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. 
*/ - enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx) { - IPAERR("IPA RM is not supported idx=%d\n", pipe_idx); - return -EFAULT; -} -EXPORT_SYMBOL(ipa_get_rm_resource_from_ep); + int ret; + IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx); + return ret; +} +EXPORT_SYMBOL(ipa_get_rm_resource_from_ep); /** * ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt @@ -3187,6 +3240,10 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) /* call probe based on IPA HW version */ switch (ipa_api_hw_type) { + case IPA_HW_v2_6L: + result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl, + ipa_plat_drv_match); + break; case IPA_HW_v3_0: case IPA_HW_v3_1: case IPA_HW_v3_5: @@ -3739,6 +3796,18 @@ int ipa_get_prot_id(enum ipa_client_type client) } EXPORT_SYMBOL(ipa_get_prot_id); +/** + * ipa_pm_is_used() - Returns if IPA PM framework is used + */ +bool ipa_pm_is_used(void) +{ + bool ret; + + IPA_API_DISPATCH_RETURN(ipa_pm_is_used); + + return ret; +} + static const struct dev_pm_ops ipa_pm_ops = { .suspend_late = ipa_ap_suspend, .resume_early = ipa_ap_resume, diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index 216b0520fc8a7b5a3a98c7e7fe68a5ee77b96398..901b4c3f27b183e3a5160e967adfe71d3716cd1a 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ #include @@ -12,6 +12,12 @@ #define _IPA_API_H_ struct ipa_api_controller { + + int (*ipa_connect)(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl); + + int (*ipa_disconnect)(u32 clnt_hdl); + int (*ipa_reset_endpoint)(u32 clnt_hdl); int (*ipa_clear_endpoint_delay)(u32 clnt_hdl); @@ -344,6 +350,8 @@ struct ipa_api_controller { enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx); + enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx); + bool (*ipa_get_modem_cfg_emb_pipe_flt)(void); enum ipa_transport_type (*ipa_get_transport_type)(void); @@ -436,6 +444,7 @@ struct ipa_api_controller { struct ipa_smmu_out_params *out); int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res); + bool (*ipa_pm_is_used)(void); int (*ipa_wigig_internal_init)( struct ipa_wdi_uc_ready_params *inout, ipa_wigig_misc_int_cb int_notify, @@ -484,6 +493,19 @@ struct ipa_api_controller { int (*ipa_get_prot_id)(enum ipa_client_type client); }; +#ifdef CONFIG_IPA +int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +#else +static inline int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +#endif /* (CONFIG_IPA) */ + #ifdef CONFIG_IPA3 int ipa3_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c index d183dfa08736e8aa4424ad646d83fdebe9f7ae9b..b91d00599cb2aa24381ddc8987d7fd41b7f5217f 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -831,7 +831,9 @@ void ecm_ipa_cleanup(void *priv) ecm_ipa_rules_destroy(ecm_ipa_ctx); ecm_ipa_debugfs_destroy(ecm_ipa_ctx); + ECM_IPA_DEBUG("ECM_IPA unregister_netdev 
started\n"); unregister_netdev(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("ECM_IPA unregister_netdev completed\n"); free_netdev(ecm_ipa_ctx->net); ECM_IPA_INFO("ECM_IPA was destroyed successfully\n"); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 47bfe38f8c930869cf5d28bd92658fa0b396d0fa..1213f9e627ef45b0e9e17b6341767fe4c8a2d07d 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -31,6 +31,7 @@ #define DEBUGFS_DIR_NAME "rndis_ipa" #define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation" #define NETDEV_NAME "rndis" +#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD #define IPV4_HDR_NAME "rndis_eth_ipv4" #define IPV6_HDR_NAME "rndis_eth_ipv6" #define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS @@ -159,6 +160,7 @@ enum rndis_ipa_operation { * @rx_dropped: number of filtered out Rx packets * @rx_dump_enable: dump all Rx packets * @icmp_filter: allow all ICMP packet to pass through the filters + * @rm_enable: flag that enable/disable Resource manager request prior to Tx * @deaggregation_enable: enable/disable IPA HW deaggregation logic * @during_xmit_error: flags that indicate that the driver is in a middle * of error handling in Tx path @@ -194,6 +196,7 @@ struct rndis_ipa_dev { u32 rx_dropped; bool rx_dump_enable; bool icmp_filter; + bool rm_enable; bool deaggregation_enable; bool during_xmit_error; struct dentry *directory; @@ -255,10 +258,18 @@ static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx); static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net); static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode); static int rndis_ipa_deregister_properties(char *netdev_name); +static void rndis_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, + unsigned long data); +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx); static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx); static bool rx_filter(struct sk_buff *skb); static bool tx_filter(struct sk_buff *skb); +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx); +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx); +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx); static netdev_tx_t rndis_ipa_start_xmit (struct sk_buff *skb, struct net_device *net); static int rndis_ipa_debugfs_atomic_open @@ -541,6 +552,7 @@ int rndis_ipa_init(struct ipa_usb_init_params *params) rndis_ipa_ctx->tx_filter = false; rndis_ipa_ctx->rx_filter = false; rndis_ipa_ctx->icmp_filter = true; + rndis_ipa_ctx->rm_enable = true; rndis_ipa_ctx->tx_dropped = 0; rndis_ipa_ctx->rx_dropped = 0; rndis_ipa_ctx->tx_dump_enable = false; @@ -589,7 +601,8 @@ int rndis_ipa_init(struct ipa_usb_init_params *params) } RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr); - if (ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS, + if ((ipa_get_hw_type() >= IPA_HW_v3_0) && + ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS, &rndis_ipa_ctx->is_vlan_mode)) { RNDIS_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n"); goto fail_get_vlan_mode; @@ -749,12 +762,15 @@ int rndis_ipa_pipe_connect_notify( return -EINVAL; } - result = rndis_ipa_register_pm_client(rndis_ipa_ctx); + if (ipa_pm_is_used()) + result = rndis_ipa_register_pm_client(rndis_ipa_ctx); + else + result = 
rndis_ipa_create_rm_resource(rndis_ipa_ctx); if (result) { - RNDIS_IPA_ERROR("fail on PM register\n"); - goto fail_register_pm; + RNDIS_IPA_ERROR("fail on RM create\n"); + goto fail_create_rm; } - RNDIS_IPA_DEBUG("PM client was registered\n"); + RNDIS_IPA_DEBUG("RM resource was created\n"); rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl; rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl; @@ -832,8 +848,11 @@ int rndis_ipa_pipe_connect_notify( return 0; fail: - rndis_ipa_deregister_pm_client(rndis_ipa_ctx); -fail_register_pm: + if (ipa_pm_is_used()) + rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + else + rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); +fail_create_rm: return result; } EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify); @@ -951,11 +970,11 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, goto out; } - ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl); - if (unlikely(ret)) { - RNDIS_IPA_DEBUG("Failed activate PM client\n"); + ret = resource_request(rndis_ipa_ctx); + if (ret) { + RNDIS_IPA_DEBUG("Waiting to resource\n"); netif_stop_queue(net); - goto fail_pm_activate; + goto resource_busy; } if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >= @@ -984,8 +1003,8 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, fail_tx_packet: rndis_ipa_xmit_error(skb); out: - ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl); -fail_pm_activate: + resource_release(rndis_ipa_ctx); +resource_busy: RNDIS_IPA_DEBUG ("packet Tx done - %s\n", (status == NETDEV_TX_OK) ? "OK" : "FAIL"); @@ -1073,6 +1092,50 @@ static void rndis_ipa_tx_timeout(struct net_device *net) net->stats.tx_errors++; } +/** + * rndis_ipa_rm_notify() - callback supplied to IPA resource manager + * for grant/release events + * user_data: the driver context supplied to IPA resource manager during call + * to ipa_rm_create_resource(). + * event: the event notified to us by IPA resource manager (Release/Grant) + * data: reserved field supplied by IPA resource manager + * + * This callback shall be called based on resource request/release sent + * to the IPA resource manager. + * In case the queue was stopped during EINPROGRESS for Tx path and the + * event received is Grant then the queue shall be restarted. + * In case the event notified is a release notification the netdev discard it. + */ +static void rndis_ipa_rm_notify( + void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = user_data; + + RNDIS_IPA_LOG_ENTRY(); + + if (event == IPA_RM_RESOURCE_RELEASED) { + RNDIS_IPA_DEBUG("Resource Released\n"); + return; + } + + if (event != IPA_RM_RESOURCE_GRANTED) { + RNDIS_IPA_ERROR + ("Unexceoted event receieved from RM (%d\n)", event); + return; + } + RNDIS_IPA_DEBUG("Resource Granted\n"); + + if (netif_queue_stopped(rndis_ipa_ctx->net)) { + RNDIS_IPA_DEBUG("starting queue\n"); + netif_start_queue(rndis_ipa_ctx->net); + } else { + RNDIS_IPA_DEBUG("queue already awake\n"); + } + + RNDIS_IPA_LOG_EXIT(); +} + /** * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from * tethered PC (USB->IPA). 
@@ -1292,12 +1355,15 @@ int rndis_ipa_pipe_disconnect_notify(void *private) rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts; atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0); - retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + if (ipa_pm_is_used()) + retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + else + retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); if (retval) { - RNDIS_IPA_ERROR("Fail to deregister PM\n"); + RNDIS_IPA_ERROR("Fail to clean RM\n"); return retval; } - RNDIS_IPA_DEBUG("PM was successfully deregistered\n"); + RNDIS_IPA_DEBUG("RM was successfully destroyed\n"); spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, @@ -1389,8 +1455,9 @@ void rndis_ipa_cleanup(void *private) rndis_ipa_debugfs_destroy(rndis_ipa_ctx); RNDIS_IPA_DEBUG("debugfs remove was done\n"); + RNDIS_IPA_DEBUG("RNDIS_IPA netdev unregistered started\n"); unregister_netdev(rndis_ipa_ctx->net); - RNDIS_IPA_DEBUG("netdev unregistered\n"); + RNDIS_IPA_DEBUG("RNDIS_IPA netdev unregistered completed\n"); spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, @@ -1765,7 +1832,86 @@ static int rndis_ipa_deregister_properties(char *netdev_name) return 0; } +/** + * rndis_ipa_create_rm_resource() -creates the resource representing + * this Netdev and supply notification callback for resource event + * such as Grant/Release + * @rndis_ipa_ctx: this driver context + * + * In order make sure all needed resources are available during packet + * transmit this Netdev shall use Request/Release mechanism of + * the IPA resource manager. + * This mechanism shall iterate over a dependency graph and make sure + * all dependent entities are ready to for packet Tx + * transfer (Apps->IPA->USB). + * In this function the resource representing the Netdev is created + * in addition to the basic dependency between the Netdev and the USB client. + * Hence, USB client, is a dependency for the Netdev and may be notified in + * case of packet transmit from this Netdev to tethered Host. + * As implied from the "may" in the above sentence there is a scenario where + * the USB is not notified. This is done thanks to the IPA resource manager + * inactivity timer. + * The inactivity timer allow the Release requests to be delayed in order + * prevent ping-pong with the USB and other dependencies. 
+ */ +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + struct ipa_rm_create_params create_params = {0}; + struct ipa_rm_perf_profile profile; + int result; + + RNDIS_IPA_LOG_ENTRY(); + + create_params.name = DRV_RESOURCE_ID; + create_params.reg_params.user_data = rndis_ipa_ctx; + create_params.reg_params.notify_cb = rndis_ipa_rm_notify; + result = ipa_rm_create_resource(&create_params); + if (result) { + RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n"); + goto fail_rm_create; + } + RNDIS_IPA_DEBUG("RM client was created\n"); + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile); + result = ipa_rm_inactivity_timer_init + (DRV_RESOURCE_ID, + INACTIVITY_MSEC_DELAY); + if (result) { + RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n"); + goto fail_inactivity_timer; + } + + RNDIS_IPA_DEBUG("rm_it client was created\n"); + + result = ipa_rm_add_dependency_sync + (DRV_RESOURCE_ID, + IPA_RM_RESOURCE_USB_CONS); + + if (result && result != -EINPROGRESS) + RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n", + result); + else + RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n"); + + result = ipa_rm_add_dependency_sync + (IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result && result != -EINPROGRESS) + RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n", + result); + else + RNDIS_IPA_DEBUG("USB/APPS dependency was set\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail_inactivity_timer: +fail_rm_create: + return result; +} static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event) { @@ -1790,6 +1936,64 @@ static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event) RNDIS_IPA_LOG_EXIT(); } +/** + * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy + * the resource done on rndis_ipa_create_rm_resource() + * @rndis_ipa_ctx: this driver context + * + * This function shall delete the dependency create between + * the Netdev to the USB. + * In addition the inactivity time shall be destroy and the resource shall + * be deleted. 
+ */ +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result; + + RNDIS_IPA_LOG_ENTRY(); + + result = ipa_rm_delete_dependency + (DRV_RESOURCE_ID, + IPA_RM_RESOURCE_USB_CONS); + if (result && result != -EINPROGRESS) { + RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n"); + goto bail; + } + RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n"); + + result = ipa_rm_delete_dependency + (IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result == -EINPROGRESS) { + RNDIS_IPA_DEBUG("RM dependency deletion is in progress"); + } else if (result) { + RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n"); + goto bail; + } else { + RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n"); + } + + result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID); + if (result) { + RNDIS_IPA_ERROR("Fail to destroy inactivity timern"); + goto bail; + } + RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroy\n"); + + result = ipa_rm_delete_resource(DRV_RESOURCE_ID); + if (result) { + RNDIS_IPA_ERROR("resource deletion failed\n"); + goto bail; + } + RNDIS_IPA_DEBUG + ("Netdev RM resource was deleted (resid:%d)\n", + DRV_RESOURCE_ID); + + RNDIS_IPA_LOG_EXIT(); + +bail: + return result; +} static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx) { @@ -1818,6 +2022,52 @@ static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx) return 0; } +/** + * resource_request() - request for the Netdev resource + * @rndis_ipa_ctx: main driver context + * + * This function shall send the IPA resource manager inactivity time a request + * to Grant the Netdev producer. + * In case the resource is already Granted the function shall return immediately + * and "pet" the inactivity timer. + * In case the resource was not already Granted this function shall + * return EINPROGRESS and the Netdev shall stop the send queue until + * the IPA resource manager notify it that the resource is + * granted (done in a differ context) + */ +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result = 0; + + if (!rm_enabled(rndis_ipa_ctx)) + return result; + + if (ipa_pm_is_used()) + return ipa_pm_activate(rndis_ipa_ctx->pm_hdl); + + return ipa_rm_inactivity_timer_request_resource( + DRV_RESOURCE_ID); + +} + +/** + * resource_release() - release the Netdev resource + * @rndis_ipa_ctx: main driver context + * + * start the inactivity timer count down.by using the IPA resource + * manager inactivity time. + * The actual resource release shall occur only if no request shall be done + * during the INACTIVITY_MSEC_DELAY. + */ +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + if (!rm_enabled(rndis_ipa_ctx)) + return; + if (ipa_pm_is_used()) + ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl); + else + ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID); +} /** * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with @@ -1905,6 +2155,19 @@ static bool tx_filter(struct sk_buff *skb) return true; } +/** + * rm_enabled() - allow the use of resource manager Request/Release to + * be bypassed + * @rndis_ipa_ctx: main driver context + * + * By disabling the resource manager flag the Request for the Netdev resource + * shall be bypassed and the packet shall be sent. + * accordingly, Release request shall be bypass as well. 
+ */ +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + return rndis_ipa_ctx->rm_enable; +} /** * rndis_ipa_ep_registers_cfg() - configure the USB endpoints @@ -2180,6 +2443,14 @@ static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) goto fail_file; } + file = debugfs_create_bool + ("rm_enable", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs rm file\n"); + goto fail_file; + } + file = debugfs_create_u32 ("outstanding_high", flags_read_write, rndis_ipa_ctx->directory, diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index 3d71e390a829c8281eb22b770690d57f709b242b..4a313ad450c485fc9d2f6245971d52a787dc5d95 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. */ #include @@ -428,6 +428,8 @@ int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); const char *ipa_get_version_string(enum ipa_hw_type ver); int ipa_start_gsi_channel(u32 clnt_hdl); +bool ipa_pm_is_used(void); + int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, struct sg_table *in_sgt_ptr); int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr); diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b70f49a9e1e6f5f506596c27db2d765293a0dbd8 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_IPA) += ipat.o +ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ + ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \ + ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \ + ipa_wdi3_i.o + +obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c new file mode 100644 index 0000000000000000000000000000000000000000..7e8e083d8715474093b0d95732dc5c6820762ab7 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -0,0 +1,5049 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define CREATE_TRACE_POINTS +#include "ipa_trace.h" + +#define IPA_SUMMING_THRESHOLD (0x10) +#define IPA_PIPE_MEM_START_OFST (0x0) +#define IPA_PIPE_MEM_SIZE (0x0) +#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \ + x == IPA_MODE_MOBILE_AP_WAN || \ + x == IPA_MODE_MOBILE_AP_WLAN) +#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL) +#define IPA_A5_MUX_HEADER_LENGTH (8) +#define IPA_ROUTING_RULE_BYTE_SIZE (4) +#define IPA_BAM_CNFG_BITS_VALv1_1 (0x7FFFE004) +#define IPA_BAM_CNFG_BITS_VALv2_0 (0xFFFFE004) +#define IPA_STATUS_CLEAR_OFST (0x3f28) +#define IPA_STATUS_CLEAR_SIZE (32) + +#define IPA_AGGR_MAX_STR_LENGTH (10) + +#define CLEANUP_TAG_PROCESS_TIMEOUT 150 + +#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048 + +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0 +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1 +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2 +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3 + +#define MAX_POLLING_ITERATION 40 +#define MIN_POLLING_ITERATION 1 +#define ONE_MSEC 1 + +#define IPA_AGGR_STR_IN_BYTES(str) \ + (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) + +#define IPA_SPS_PROD_TIMEOUT_MSEC 100 + +#ifdef CONFIG_COMPAT +#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR, \ + compat_uptr_t) +#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_RT_TBL, \ + compat_uptr_t) +#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_COPY_HDR, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_TX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_RX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ + compat_uptr_t) +#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HDR, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_MEM, \ + compat_uptr_t) +#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_INIT_NAT, \ + compat_uptr_t) +#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_DMA, \ + compat_uptr_t) +#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_DEL_NAT, \ + compat_uptr_t) +#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_NAT_OFFSET, \ + compat_uptr_t) +#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_PULL_MSG, \ + compat_uptr_t) +#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_ADD_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_DEL_DEPENDENCY, \ + compat_uptr_t) +#define 
IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GENERATE_FLT_EQ, \ + compat_uptr_t) +#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_RT_TBL_INDEX, \ + compat_uptr_t) +#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_WRITE_QMAPID, \ + compat_uptr_t) +#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ + compat_uptr_t) +#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_RT_RULE, \ + compat_uptr_t) + +/** + * struct ipa_ioc_nat_alloc_mem32 - nat table memory allocation + * properties + * @dev_name: input parameter, the name of table + * @size: input parameter, size of table in bytes + * @offset: output parameter, offset into page in case of system memory + */ +struct ipa_ioc_nat_alloc_mem32 { + char dev_name[IPA_RESOURCE_NAME_MAX]; + compat_size_t size; + compat_off_t offset; +}; +#endif + +static void ipa_start_tag_process(struct work_struct *work); +static DECLARE_WORK(ipa_tag_work, ipa_start_tag_process); + +static void ipa_sps_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work, + ipa_sps_release_resource); + +static struct ipa_plat_drv_res ipa_res = {0, }; + +static struct msm_bus_scale_pdata *bus_scale_table; + +static struct clk *ipa_clk_src; +static struct clk *ipa_clk; +static struct clk *smmu_clk; +static struct clk *sys_noc_ipa_axi_clk; +static struct clk *ipa_cnoc_clk; +static struct clk *ipa_inactivity_clk; + +struct ipa_context *ipa_ctx; +static struct device *master_dev; +static struct platform_device *ipa_pdev; +static struct { + bool present; + bool arm_smmu; + bool fast_map; + bool s1_bypass; + u32 ipa_base; + u32 ipa_size; +} smmu_info; + +static char *active_clients_table_buf; + +static u32 register_ipa_bus_hdl; + +int ipa2_active_clients_log_print_buffer(char *buf, int size) +{ + int i; + int nbytes; + int cnt = 0; + int start_idx; + int end_idx; + + start_idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) % + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + end_idx = ipa_ctx->ipa2_active_clients_logging.log_head; + for (i = start_idx; i != end_idx; + i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) { + nbytes = scnprintf(buf + cnt, size - cnt, "%s\n", + ipa_ctx->ipa2_active_clients_logging + .log_buffer[i]); + cnt += nbytes; + } + + return cnt; +} + +int ipa2_active_clients_log_print_table(char *buf, int size) +{ + int i; + struct ipa2_active_client_htable_entry *iterator; + int cnt = 0; + + cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n"); + hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, i, + iterator, list) { + switch (iterator->type) { + case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d ENDPOINT\n", + iterator->id_string, iterator->count); + break; + case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE: + cnt += scnprintf(buf + cnt, size - 
cnt, + "%-40s %-3d SIMPLE\n", + iterator->id_string, iterator->count); + break; + case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d RESOURCE\n", + iterator->id_string, iterator->count); + break; + case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SPECIAL\n", + iterator->id_string, iterator->count); + break; + default: + IPAERR("Trying to print illegal active_clients type"); + break; + } + } + cnt += scnprintf(buf + cnt, size - cnt, + "\nTotal active clients count: %d\n", + ipa_ctx->ipa_active_clients.cnt); + + return cnt; +} + + +static int ipa2_clean_modem_rule(void) +{ + struct ipa_install_fltr_rule_req_msg_v01 *req; + int val = 0; + + req = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req->filter_spec_list_valid = false; + req->filter_spec_list_len = 0; + req->source_pipe_index_valid = 0; + val = qmi_filter_request_send(req); + kfree(req); + + return val; +} + +static int ipa2_active_clients_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + ipa_active_clients_lock(); + ipa2_active_clients_log_print_table(active_clients_table_buf, + IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE); + IPAERR("%s", active_clients_table_buf); + ipa_active_clients_unlock(); + + return NOTIFY_DONE; +} + +static struct notifier_block ipa2_active_clients_panic_blk = { + .notifier_call = ipa2_active_clients_panic_notifier, +}; + +static int ipa2_active_clients_log_insert(const char *string) +{ + int head; + int tail; + + head = ipa_ctx->ipa2_active_clients_logging.log_head; + tail = ipa_ctx->ipa2_active_clients_logging.log_tail; + + if (!ipa_ctx->ipa2_active_clients_logging.log_rdy) + return -EPERM; + memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_', + IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN); + strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string, + (size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN); + head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + if (tail == head) + tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + + ipa_ctx->ipa2_active_clients_logging.log_tail = tail; + ipa_ctx->ipa2_active_clients_logging.log_head = head; + + return 0; +} + +static int ipa2_active_clients_log_init(void) +{ + int i; + + ipa_ctx->ipa2_active_clients_logging.log_buffer[0] = kcalloc( + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES, + sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]), + GFP_KERNEL); + active_clients_table_buf = kzalloc(sizeof( + char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL); + if (ipa_ctx->ipa2_active_clients_logging.log_buffer == NULL) { + IPAERR("Active Clients Logging memory allocation failed"); + goto bail; + } + for (i = 0; i < IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) { + ipa_ctx->ipa2_active_clients_logging.log_buffer[i] = + ipa_ctx->ipa2_active_clients_logging.log_buffer[0] + + (IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN * i); + } + ipa_ctx->ipa2_active_clients_logging.log_head = 0; + ipa_ctx->ipa2_active_clients_logging.log_tail = + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + hash_init(ipa_ctx->ipa2_active_clients_logging.htable); + atomic_notifier_chain_register(&panic_notifier_list, + &ipa2_active_clients_panic_blk); + ipa_ctx->ipa2_active_clients_logging.log_rdy = true; + + return 0; + +bail: + return -ENOMEM; +} + +void ipa2_active_clients_log_clear(void) +{ + ipa_active_clients_lock(); + 
ipa_ctx->ipa2_active_clients_logging.log_head = 0; + ipa_ctx->ipa2_active_clients_logging.log_tail = + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + ipa_active_clients_unlock(); +} + +static void ipa2_active_clients_log_destroy(void) +{ + ipa_ctx->ipa2_active_clients_logging.log_rdy = false; + kfree(active_clients_table_buf); + active_clients_table_buf = NULL; + kfree(ipa_ctx->ipa2_active_clients_logging.log_buffer[0]); + ipa_ctx->ipa2_active_clients_logging.log_head = 0; + ipa_ctx->ipa2_active_clients_logging.log_tail = + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; +} + +static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX]; + +struct iommu_domain *ipa2_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type) +{ + if (VALID_IPA_SMMU_CB_TYPE(cb_type) && smmu_cb[cb_type].valid) + return smmu_cb[cb_type].iommu_domain; + + IPAERR("cb_type(%d) not valid\n", cb_type); + return NULL; +} + +struct iommu_domain *ipa2_get_smmu_domain(void) +{ + return ipa2_get_smmu_domain_by_type(IPA_SMMU_CB_AP); +} + +struct iommu_domain *ipa2_get_uc_smmu_domain(void) +{ + return ipa2_get_smmu_domain_by_type(IPA_SMMU_CB_UC); +} + +struct iommu_domain *ipa2_get_wlan_smmu_domain(void) +{ + return ipa2_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN); +} + +struct device *ipa2_get_dma_dev(void) +{ + return ipa_ctx->pdev; +} + +/** + * ipa2_get_smmu_ctx()- Return the smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(enum ipa_smmu_cb_type cb_type) +{ + return &smmu_cb[cb_type]; +} + + +/** + * ipa2_get_wlan_smmu_ctx()- Return the wlan smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_WLAN]; +} + +/** + * ipa2_get_uc_smmu_ctx()- Return the uc smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_UC]; +} + +static int ipa_open(struct inode *inode, struct file *filp) +{ + struct ipa_context *ctx = NULL; + + IPADBG_LOW("ENTER\n"); + ctx = container_of(inode->i_cdev, struct ipa_context, cdev); + filp->private_data = ctx; + + return 0; +} + +/** + * ipa_flow_control() - Enable/Disable flow control on a particular client. + * Return codes: + * None + */ +void ipa_flow_control(enum ipa_client_type ipa_client, + bool enable, uint32_t qmap_id) +{ + struct ipa_ep_cfg_ctrl ep_ctrl = {0}; + int ep_idx; + struct ipa_ep_context *ep; + + /* Check if tethered flow control is needed or not.*/ + if (!ipa_ctx->tethered_flow_control) { + IPADBG("Apps flow control is not needed\n"); + return; + } + + /* Check if ep is valid. */ + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPADBG("Invalid IPA client\n"); + return; + } + + ep = &ipa_ctx->ep[ep_idx]; + if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) { + IPADBG("EP not valid/Not applicable for client.\n"); + return; + } + + spin_lock(&ipa_ctx->disconnect_lock); + /* Check if the QMAP_ID matches. 
*/ + if (ep->cfg.meta.qmap_id != qmap_id) { + IPADBG("Flow control ind not for same flow: %u %u\n", + ep->cfg.meta.qmap_id, qmap_id); + spin_unlock(&ipa_ctx->disconnect_lock); + return; + } + if (!ep->disconnect_in_progress) { + if (enable) { + IPADBG("Enabling Flow\n"); + ep_ctrl.ipa_ep_delay = false; + IPA_STATS_INC_CNT(ipa_ctx->stats.flow_enable); + } else { + IPADBG("Disabling Flow\n"); + ep_ctrl.ipa_ep_delay = true; + IPA_STATS_INC_CNT(ipa_ctx->stats.flow_disable); + } + ep_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(ep_idx, &ep_ctrl); + } else { + IPADBG("EP disconnect is in progress\n"); + } + spin_unlock(&ipa_ctx->disconnect_lock); +} + +static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != WAN_UPSTREAM_ROUTE_ADD && + type != WAN_UPSTREAM_ROUTE_DEL && + type != WAN_EMBMS_CONNECT) { + IPAERR("Wrong type given. buff %p type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, + bool is_cache) +{ + int retval; + struct ipa_wan_msg *wan_msg; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg cache_wan_msg; + + wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL); + if (!wan_msg) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user(wan_msg, (const void __user *)usr_param, + sizeof(struct ipa_wan_msg))) { + kfree(wan_msg); + return -EFAULT; + } + + memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg)); + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + retval = ipa2_send_msg(&msg_meta, wan_msg, ipa_wan_msg_free_cb); + if (retval) { + IPAERR("ipa2_send_msg failed: %d\n", retval); + kfree(wan_msg); + return retval; + } + + if (is_cache) { + mutex_lock(&ipa_ctx->ipa_cne_evt_lock); + + /* cache the cne event */ + memcpy(&ipa_ctx->ipa_cne_evt_req_cache[ + ipa_ctx->num_ipa_cne_evt_req].wan_msg, + &cache_wan_msg, + sizeof(cache_wan_msg)); + + memcpy(&ipa_ctx->ipa_cne_evt_req_cache[ + ipa_ctx->num_ipa_cne_evt_req].msg_meta, + &msg_meta, + sizeof(struct ipa_msg_meta)); + + ipa_ctx->num_ipa_cne_evt_req++; + ipa_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE; + mutex_unlock(&ipa_ctx->ipa_cne_evt_lock); + } + + return 0; +} + + +static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int retval = 0; + u32 pyld_sz; + u8 header[192] = { 0 }; + u8 *param = NULL; + struct ipa_ioc_nat_alloc_mem nat_mem; + struct ipa_ioc_v4_nat_init nat_init; + struct ipa_ioc_v4_nat_del nat_del; + struct ipa_ioc_rm_dependency rm_depend; + size_t sz; + int pre_entry; + + IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd)); + + if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC) + return -ENOTTY; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + switch (cmd) { + case IPA_IOC_ALLOC_NAT_MEM: + if (copy_from_user(&nat_mem, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa2_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, &nat_mem, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_V4_INIT_NAT: + if (copy_from_user(&nat_init, (const void __user *)arg, + sizeof(struct ipa_ioc_v4_nat_init))) { + retval = -EFAULT; + break; + } + if (ipa2_nat_init_cmd(&nat_init)) { + retval = -EFAULT; + break; + } + break; + + 
case IPA_IOC_NAT_DMA: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_dma_cmd))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_nat_dma_cmd *)header)->entries; + pyld_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + pre_entry * sizeof(struct ipa_ioc_nat_dma_one); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_nat_dma_cmd *)param)->entries, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_V4_DEL_NAT: + if (copy_from_user(&nat_del, (const void __user *)arg, + sizeof(struct ipa_ioc_v4_nat_del))) { + retval = -EFAULT; + break; + } + if (ipa2_nat_del_cmd(&nat_del)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr *)header)->num_hdrs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr) + + pre_entry * sizeof(struct ipa_hdr_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr *)param)->num_hdrs, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_hdr_usr((struct ipa_ioc_add_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr) + + pre_entry * sizeof(struct ipa_hdr_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_hdr_by_user((struct ipa_ioc_del_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_RT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void 
__user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_RT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_mdfy_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_mdfy); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_RT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_rt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_rt_rule *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_FLT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + 
break; + } + break; + + case IPA_IOC_DEL_FLT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_flt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_flt_rule *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_FLT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_mdfy_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_mdfy); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_COMMIT_HDR: + retval = ipa2_commit_hdr(); + break; + case IPA_IOC_RESET_HDR: + retval = ipa2_reset_hdr(false); + break; + case IPA_IOC_COMMIT_RT: + retval = ipa2_commit_rt(arg); + break; + case IPA_IOC_RESET_RT: + retval = ipa2_reset_rt(arg, false); + break; + case IPA_IOC_COMMIT_FLT: + retval = ipa2_commit_flt(arg); + break; + case IPA_IOC_RESET_FLT: + retval = ipa2_reset_flt(arg, false); + break; + case IPA_IOC_GET_RT_TBL: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + if (ipa2_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_RT_TBL: + retval = ipa2_put_rt_tbl(arg); + break; + case IPA_IOC_GET_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + if (ipa2_get_hdr((struct ipa_ioc_get_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_HDR: + retval = ipa2_put_hdr(arg); + break; + case IPA_IOC_SET_FLT: + retval = ipa_cfg_filter(arg); + break; + case IPA_IOC_COPY_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + 
} + if (ipa2_copy_hdr((struct ipa_ioc_copy_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + if (ipa_query_intf((struct ipa_ioc_query_intf *)header)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_TX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_tx_props); + if (copy_from_user(header, (const void __user *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_tx_props *) + header)->num_tx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_tx_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa_query_intf_tx_props( + (struct ipa_ioc_query_intf_tx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_RX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_rx_props); + if (copy_from_user(header, (const void __user *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_rx_props *) + header)->num_rx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_rx_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa_query_intf_rx_props( + (struct ipa_ioc_query_intf_rx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_ext_props); + if (copy_from_user(header, (const void __user *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_ext_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user 
*)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa_query_intf_ext_props( + (struct ipa_ioc_query_intf_ext_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PULL_MSG: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_msg_meta))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_msg_meta *)header)->msg_len; + pyld_sz = sizeof(struct ipa_msg_meta) + + pre_entry; + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_msg_meta *)param)->msg_len + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_msg_meta *)param)->msg_len, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa_pull_msg((struct ipa_msg_meta *)param, + (char *)param + sizeof(struct ipa_msg_meta), + ((struct ipa_msg_meta *)param)->msg_len) != + ((struct ipa_msg_meta *)param)->msg_len) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_RM_ADD_DEPENDENCY: + if (copy_from_user(&rm_depend, (const void __user *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_add_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_RM_DEL_DEPENDENCY: + if (copy_from_user(&rm_depend, (const void __user *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_delete_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_GENERATE_FLT_EQ: + { + struct ipa_ioc_generate_flt_eq flt_eq; + + if (copy_from_user(&flt_eq, (const void __user *)arg, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + if (ipa_generate_flt_eq(flt_eq.ip, &flt_eq.attrib, + &flt_eq.eq_attrib)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, &flt_eq, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + break; + } + case IPA_IOC_QUERY_EP_MAPPING: + { + retval = ipa2_get_ep_mapping(arg); + break; + } + case IPA_IOC_QUERY_RT_TBL_INDEX: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + if (ipa2_query_rt_index( + (struct ipa_ioc_get_rt_tbl_indx *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_WRITE_QMAPID: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + if (ipa2_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD: + 
retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true); + if (retval) { + IPAERR("ipa_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL: + retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true); + if (retval) { + IPAERR("ipa_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED: + retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT, false); + if (retval) { + IPAERR("ipa_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_ADD_HDR_PROC_CTX: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr_proc_ctx *) + header)->num_proc_ctxs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs, pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_hdr_proc_ctx( + (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_DEL_HDR_PROC_CTX: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *) + param)->num_hdls != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr_proc_ctx *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_hdr_proc_ctx_by_user( + (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GET_HW_VERSION: + pyld_sz = sizeof(enum ipa_hw_type); + param = kmemdup(&ipa_ctx->ipa_hw_type, pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + memcpy(param, &ipa_ctx->ipa_hw_type, pyld_sz); + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_CLEANUP: + /*Route and filter rules will also be clean*/ + IPADBG("Got IPA_IOC_CLEANUP\n"); + retval = ipa2_reset_hdr(true); + memset(&nat_del, 0, sizeof(nat_del)); + nat_del.table_index = 0; + retval = ipa2_nat_del_cmd(&nat_del); + retval = ipa2_clean_modem_rule(); + break; + + case IPA_IOC_QUERY_WLAN_CLIENT: + IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n"); + retval = ipa2_resend_wlan_msg(); + break; + + default: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -ENOTTY; + } + kfree(param); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 
retval; +} + +/** + * ipa_setup_dflt_rt_tables() - Setup default routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa_setup_dflt_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) { + IPAERR("fail to alloc mem\n"); + return -ENOMEM; + } + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS; + rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl; + + if (ipa2_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa2_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* + * because these tables are the very first to be added, they will both + * have the same index (0) which is essential for programming the + * "route" end-point config + */ + + kfree(rt_rule); + + return 0; +} + +static int ipa_setup_exception_path(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + struct ipa_route route = { 0 }; + int ret; + + /* install the basic exception header */ + hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add), GFP_KERNEL); + if (!hdr) { + IPAERR("fail to alloc exception hdr\n"); + return -ENOMEM; + } + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { + strlcpy(hdr_entry->name, IPA_A5_MUX_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + /* set template for the A5_MUX hdr in header addition block */ + hdr_entry->hdr_len = IPA_A5_MUX_HEADER_LENGTH; + } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH; + } else { + WARN_ON(1); + } + + if (ipa2_add_hdr(hdr)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl; + + /* set the route register to pass exception packets to Apps */ + route.route_def_pipe = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + route.route_frag_def_pipe = ipa2_get_ep_mapping( + IPA_CLIENT_APPS_LAN_CONS); + route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl; + + if (ipa_cfg_route(&route)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static int ipa_init_smem_region(int memory_region_size, + int memory_region_offset) +{ + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; + struct ipa_desc desc; + struct ipa_mem_buffer mem; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + int rc; + + if (memory_region_size == 0) + return 0; + + memset(&desc, 0, sizeof(desc)); + memset(&mem, 0, sizeof(mem)); + + mem.size = memory_region_size; + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), + flag); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->size = mem.size; + cmd->system_addr = mem.phys_base; + cmd->local_addr = ipa_ctx->smem_restricted_bytes + + memory_region_offset; + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = cmd; + desc.len = sizeof(*cmd); + desc.type = IPA_IMM_CMD_DESC; + + + rc = ipa_send_cmd(1, &desc); + if (rc) { + IPAERR("failed to send immediate command (error %d)\n", rc); + rc = -EFAULT; + } + + kfree(cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + + return rc; +} + +/** + * ipa_init_q6_smem() - Initialize Q6 general memory and + * header memory regions in IPA. + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate dma memory + * -EFAULT: failed to send IPA command to initialize the memory + */ +int ipa_init_q6_smem(void) +{ + int rc; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0) { + rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) - + IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE, + IPA_MEM_PART(modem_ofst)); + } else { + rc = ipa_init_smem_region(IPA_MEM_PART(modem_size), + IPA_MEM_PART(modem_ofst)); + } + + if (rc) { + IPAERR("failed to initialize Modem RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_size), + IPA_MEM_PART(modem_hdr_ofst)); + if (rc) { + IPAERR("failed to initialize Modem HDRs RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size), + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + if (rc) { + IPAERR("failed to initialize Modem proc ctx RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size), + IPA_MEM_PART(modem_comp_decomp_ofst)); + if (rc) { + IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return rc; +} + +static void ipa_free_buffer(void *user1, int user2) +{ + kfree(user1); +} + +int ipa_q6_monitor_holb_mitigation(bool enable) +{ + int ep_idx; + int client_idx; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) { + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + /* Send a command to Uc to enable/disable + * holb monitoring. + */ + ipa_uc_monitor_holb(client_idx, enable); + } + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +static int ipa_q6_avoid_holb(bool zip_pipes) +{ + u32 reg_val; + int ep_idx; + int client_idx; + struct ipa_ep_cfg_ctrl avoid_holb; + + memset(&avoid_holb, 0, sizeof(avoid_holb)); + avoid_holb.ipa_ep_suspend = true; + + /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + /* Skip the processing for non Q6 pipes. 
*/ + if (!IPA_CLIENT_IS_Q6_CONS(client_idx)) + continue; + /* Skip the processing for NON-ZIP pipes. */ + else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) + continue; + /* Skip the processing for ZIP pipes. */ + else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) + continue; + + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + /* + * ipa2_cfg_ep_holb is not used here because we are + * setting HOLB on Q6 pipes, and from APPS perspective + * they are not valid, therefore, the above function + * will fail. + */ + reg_val = 0; + IPA_SETFIELD_IN_REG(reg_val, 0, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx), + reg_val); + + reg_val = 0; + IPA_SETFIELD_IN_REG(reg_val, 1, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx), + reg_val); + + ipa2_cfg_ep_ctrl(ep_idx, &avoid_holb); + } + + return 0; +} + +static u32 ipa_get_max_flt_rt_cmds(u32 num_pipes) +{ + u32 max_cmds = 0; + + /* As many filter tables as there are pipes, x2 for IPv4 and IPv6 */ + max_cmds += num_pipes * 2; + + /* For each of the Modem routing tables */ + max_cmds += (IPA_MEM_PART(v4_modem_rt_index_hi) - + IPA_MEM_PART(v4_modem_rt_index_lo) + 1); + + max_cmds += (IPA_MEM_PART(v6_modem_rt_index_hi) - + IPA_MEM_PART(v6_modem_rt_index_lo) + 1); + + return max_cmds; +} + +static int ipa_q6_clean_q6_tables(void) +{ + struct ipa_desc *desc; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; + int pipe_idx; + int num_cmds = 0; + int index; + int retval; + struct ipa_mem_buffer mem = { NULL }; + u32 *entry; + u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes); + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base, + GFP_ATOMIC); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size 4\n"); + return -ENOMEM; + } + + mem.size = 4; + entry = mem.base; + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + + desc = kcalloc(max_cmds, sizeof(struct ipa_desc), GFP_KERNEL); + if (!desc) { + IPAERR("failed to allocate memory\n"); + retval = -ENOMEM; + goto bail_dma; + } + + cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + flag); + if (!cmd) { + IPAERR("failed to allocate memory\n"); + retval = -ENOMEM; + goto bail_desc; + } + + /* + * Iterating over all the pipes which are either invalid but connected + * or connected but not configured by AP. 
+ */ + for (pipe_idx = 0; pipe_idx < ipa_ctx->ipa_num_pipes; pipe_idx++) { + if (!ipa_ctx->ep[pipe_idx].valid || + ipa_ctx->ep[pipe_idx].skip_ep_cfg) { + /* + * Need to point v4 and v6 fltr tables to an empty + * table + */ + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = + ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + 8 + pipe_idx * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = + ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + 8 + pipe_idx * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + } + + /* Need to point v4/v6 modem routing tables to an empty table */ + for (index = IPA_MEM_PART(v4_modem_rt_index_lo); + index <= IPA_MEM_PART(v4_modem_rt_index_hi); + index++) { + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_ofst) + index * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + + for (index = IPA_MEM_PART(v6_modem_rt_index_lo); + index <= IPA_MEM_PART(v6_modem_rt_index_hi); + index++) { + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_ofst) + index * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + + retval = ipa_send_cmd(num_cmds, desc); + if (retval) { + IPAERR("failed to send immediate command (error %d)\n", retval); + retval = -EFAULT; + } + + kfree(cmd); + +bail_desc: + kfree(desc); + +bail_dma: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + return retval; +} + +static void ipa_q6_disable_agg_reg(struct ipa_register_write *reg_write, + int ep_idx) +{ + reg_write->skip_pipeline_clear = 0; + + reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(ep_idx); + reg_write->value = + (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + reg_write->value_mask = + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + + reg_write->value |= + ((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) << + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT); + reg_write->value_mask |= + ((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK << + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT)); +} + +static int ipa_q6_set_ex_path_dis_agg(void) +{ + int ep_idx; + int client_idx; + struct ipa_desc *desc; + int num_descs = 0; + int index; + struct ipa_register_write *reg_write; + int retval; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc), + GFP_KERNEL); + if (!desc) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + + /* Set the exception path to AP */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + if (ipa_ctx->ep[ep_idx].valid && + ipa_ctx->ep[ep_idx].skip_ep_cfg) { + ipa_assert_on(num_descs >= ipa_ctx->ipa_num_pipes); + reg_write = kzalloc(sizeof(*reg_write), flag); + + if (!reg_write) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + reg_write->skip_pipeline_clear = 0; + reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx); + reg_write->value = + (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) & + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) << + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT; + reg_write->value_mask = + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK << + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT; + + desc[num_descs].opcode = IPA_REGISTER_WRITE; + desc[num_descs].pyld = reg_write; + desc[num_descs].len = sizeof(*reg_write); + desc[num_descs].type = IPA_IMM_CMD_DESC; + desc[num_descs].callback = ipa_free_buffer; + desc[num_descs].user1 = reg_write; + num_descs++; + } + } + + /* Disable AGGR on IPA->Q6 pipes */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) || + IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) { + reg_write = kzalloc(sizeof(*reg_write), flag); + + if (!reg_write) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + + ipa_q6_disable_agg_reg(reg_write, ep_idx); + + desc[num_descs].opcode = IPA_REGISTER_WRITE; + desc[num_descs].pyld = reg_write; + desc[num_descs].len = sizeof(*reg_write); + desc[num_descs].type = IPA_IMM_CMD_DESC; + desc[num_descs].callback = ipa_free_buffer; + desc[num_descs].user1 = reg_write; + num_descs++; + } + } + + /* Will wait 150msecs for IPA tag process completion */ + retval = ipa_tag_process(desc, num_descs, + msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT)); + if (retval) { + IPAERR("TAG process failed! (error %d)\n", retval); + /* For timeout error ipa_free_buffer cb will free user1 */ + if (retval != -ETIME) { + for (index = 0; index < num_descs; index++) + kfree(desc[index].user1); + retval = -EINVAL; + } + } + + kfree(desc); + + return retval; +} + +/** + * ipa_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration + * in IPA HW before modem shutdown. This is performed in + * case of SSR. + * + * Return codes: + * 0: success + * This is a mandatory procedure, in case one of the steps fails, the + * AP needs to restart. + */ +int ipa_q6_pre_shutdown_cleanup(void) +{ + /* If uC has notified the APPS upon a ZIP engine error, + * APPS need to assert (This is a non recoverable error). + */ + if (ipa_ctx->uc_ctx.uc_zip_error) + ipa_assert(); + + IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6"); + + /* + * Do not delay Q6 pipes here. This may result in IPA reading a + * DMA_TASK with lock bit set and then Q6 pipe delay is set. In this + * situation IPA will be remain locked as the DMA_TASK with unlock + * bit will not be read by IPA as pipe delay is enabled. IPA uC will + * wait for pipe to be empty before issuing a BAM pipe reset. 
+ */ + + if (ipa_q6_monitor_holb_mitigation(false)) { + IPAERR("Failed to disable HOLB monitoring on Q6 pipes\n"); + ipa_assert(); + } + + if (ipa_q6_avoid_holb(false)) { + IPAERR("Failed to set HOLB on Q6 pipes\n"); + ipa_assert(); + } + if (ipa_q6_clean_q6_tables()) { + IPAERR("Failed to clean Q6 tables\n"); + ipa_assert(); + } + if (ipa_q6_set_ex_path_dis_agg()) { + IPAERR("Failed to disable aggregation on Q6 pipes\n"); + ipa_assert(); + } + + ipa_ctx->q6_proxy_clk_vote_valid = true; + return 0; +} + +/** + * ipa_q6_post_shutdown_cleanup() - A cleanup for the Q6 pipes + * in IPA HW after modem shutdown. This is performed + * in case of SSR. + * + * Return codes: + * 0: success + * This is a mandatory procedure, in case one of the steps fails, the + * AP needs to restart. + */ +int ipa_q6_post_shutdown_cleanup(void) +{ + int client_idx; + int res; + + /* + * Do not delay Q6 pipes here. This may result in IPA reading a + * DMA_TASK with lock bit set and then Q6 pipe delay is set. In this + * situation IPA will remain locked as the DMA_TASK with unlock + * bit will not be read by IPA as pipe delay is enabled. IPA uC will + * wait for pipe to be empty before issuing a BAM pipe reset. + */ + + if (ipa_q6_avoid_holb(true)) { + IPAERR("Failed to set HOLB on Q6 ZIP pipes\n"); + ipa_assert(); + } + + if (!ipa_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded, won't reset Q6 pipes\n"); + return 0; + } + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) + if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) || + IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) || + IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) || + IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) { + res = ipa_uc_reset_pipe(client_idx); + if (res) + ipa_assert(); + } + return 0; +} + +int _ipa_init_sram_v2(void) +{ + void __iomem *ipa_sram_mmio; + unsigned long phys_addr; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; + struct ipa_desc desc = {0}; + struct ipa_mem_buffer mem; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + int rc = 0; + + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0( + ipa_ctx->smem_restricted_bytes / 4); + + ipa_sram_mmio = ioremap(phys_addr, + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + +#define IPA_SRAM_SET(ofst, val) iowrite32(val, ipa_sram_mmio + (ofst - 4)) + + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(uc_info_ofst), IPA_MEM_CANARY_VAL); + + + iounmap(ipa_sram_mmio); + + mem.size = IPA_STATUS_CLEAR_SIZE; + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->size = mem.size; + cmd->system_addr = mem.phys_base; + cmd->local_addr = IPA_STATUS_CLEAR_OFST; + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc.type = IPA_IMM_CMD_DESC; + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_sram_v2_5(void) +{ + void __iomem *ipa_sram_mmio; + unsigned long phys_addr; + + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_SW_FIRST_v2_5; + + ipa_sram_mmio = ioremap(phys_addr, + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + +#define IPA_SRAM_SET(ofst, val) iowrite32(val, ipa_sram_mmio + (ofst - 4)) + + IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4, + IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(end_ofst), IPA_MEM_CANARY_VAL); + + iounmap(ipa_sram_mmio); + + return 0; +} + +static inline void ipa_sram_set_canary(void __iomem *sram_mmio, int offset) +{ + /* Set 4 bytes of CANARY before the offset */ + iowrite32(IPA_MEM_CANARY_VAL, sram_mmio + (offset - 4)); +} + +int _ipa_init_sram_v2_6L(void) +{ + void __iomem *ipa_sram_mmio; + unsigned long phys_addr; + + phys_addr = ipa_ctx->ipa_wrapper_base + + 
ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_SW_FIRST_v2_5; + + ipa_sram_mmio = ioremap(phys_addr, + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + /* Consult with ipa_ram_mmap.h on the location of the CANARY values */ + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_comp_decomp_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_comp_decomp_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst)); + + iounmap(ipa_sram_mmio); + + return 0; +} + +int _ipa_init_hdr_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_hdr_init_local *cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + int rc = 0; + + mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("Failed to alloc header init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->hdr_table_src_addr = mem.phys_base; + cmd->size_hdr_table = mem.size; + cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_ofst); + + desc.opcode = IPA_HDR_INIT_LOCAL; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hdr_init_local); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_hdr_v2_5(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_hdr_init_local *cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("Failed to alloc header init command object\n"); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + return -ENOMEM; + } + + cmd->hdr_table_src_addr = mem.phys_base; + cmd->size_hdr_table = mem.size; + cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_ofst); + + desc.opcode = IPA_HDR_INIT_LOCAL; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hdr_init_local); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + kfree(cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + return -EFAULT; + } + + kfree(cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) + + IPA_MEM_PART(apps_hdr_proc_ctx_size); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + memset(&desc, 0, sizeof(desc)); + + dma_cmd = kzalloc(sizeof(*dma_cmd), flag); + if (dma_cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + dma_free_coherent(ipa_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -ENOMEM; + } + + dma_cmd->system_addr = mem.phys_base; + dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_proc_ctx_ofst); + dma_cmd->size = mem.size; + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = (void *)dma_cmd; + desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + kfree(dma_cmd); + dma_free_coherent(ipa_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -EFAULT; + } + + ipa_write_reg(ipa_ctx->mmio, + IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST, + dma_cmd->local_addr); + + kfree(dma_cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + return 0; +} + +int _ipa_init_hdr_v2_6L(void) +{ + /* Same implementation as IPAv2 */ + return _ipa_init_hdr_v2(); +} + +int _ipa_init_rt4_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v4_routing_init *v4_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v4_modem_rt_index_lo); + i <= IPA_MEM_PART(v4_modem_rt_index_hi); + i++) + ipa_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i); + IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]); + + mem.size = IPA_MEM_PART(v4_rt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + for (i = 0; i < IPA_MEM_PART(v4_num_index); i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v4_cmd = kzalloc(sizeof(*v4_cmd), flag); + if (v4_cmd == NULL) { + IPAERR("Failed to alloc v4 routing init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V4_ROUTING_INIT; + v4_cmd->ipv4_rules_addr = mem.phys_base; + v4_cmd->size_ipv4_rules = mem.size; + v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_ofst); + IPADBG("putting Routing IPv4 rules to phys 0x%x", + v4_cmd->ipv4_addr); + + desc.pyld = (void *)v4_cmd; + desc.len = sizeof(struct ipa_ip_v4_routing_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v4_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_rt6_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v6_routing_init *v6_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v6_modem_rt_index_lo); + i <= IPA_MEM_PART(v6_modem_rt_index_hi); + i++) + ipa_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i); + IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]); + + mem.size = IPA_MEM_PART(v6_rt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + for (i = 0; i < IPA_MEM_PART(v6_num_index); i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v6_cmd = kzalloc(sizeof(*v6_cmd), flag); + if (v6_cmd == NULL) { + IPAERR("Failed to alloc v6 routing init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V6_ROUTING_INIT; + v6_cmd->ipv6_rules_addr = mem.phys_base; + v6_cmd->size_ipv6_rules = mem.size; + v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_ofst); + IPADBG("putting Routing IPv6 rules to phys 0x%x", + v6_cmd->ipv6_addr); + + desc.pyld = (void *)v6_cmd; + desc.len = sizeof(struct ipa_ip_v6_routing_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v6_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_flt4_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v4_filter_init *v4_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + mem.size = IPA_MEM_PART(v4_flt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + + *entry = ((0xFFFFF << 1) | 0x1); + entry++; + + for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v4_cmd = kzalloc(sizeof(*v4_cmd), flag); + if (v4_cmd == NULL) { + IPAERR("Failed to alloc v4 fliter init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V4_FILTER_INIT; + v4_cmd->ipv4_rules_addr = mem.phys_base; + v4_cmd->size_ipv4_rules = mem.size; + v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst); + IPADBG("putting Filtering IPv4 rules to phys 0x%x", + v4_cmd->ipv4_addr); + + desc.pyld = (void *)v4_cmd; + desc.len = sizeof(struct ipa_ip_v4_filter_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v4_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_flt6_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v6_filter_init *v6_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + mem.size = IPA_MEM_PART(v6_flt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + + *entry = (0xFFFFF << 1) | 0x1; + entry++; + + for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v6_cmd = kzalloc(sizeof(*v6_cmd), flag); + if (v6_cmd == NULL) { + IPAERR("Failed to alloc v6 fliter init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V6_FILTER_INIT; + v6_cmd->ipv6_rules_addr = mem.phys_base; + v6_cmd->size_ipv6_rules = mem.size; + v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst); + IPADBG("putting Filtering IPv6 rules to phys 0x%x", + v6_cmd->ipv6_addr); + + desc.pyld = (void *)v6_cmd; + desc.len = sizeof(struct ipa_ip_v6_filter_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v6_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +static int ipa_setup_apps_pipes(void) +{ + struct ipa_sys_connect_params sys_in; + int result = 0; + + /* CMD OUT (A5->IPA) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_CMD_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS; + sys_in.skip_ep_cfg = true; + if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) { + IPAERR(":setup sys pipe failed.\n"); + result = -EPERM; + goto fail_cmd; + } + IPADBG("Apps to IPA cmd pipe is connected\n"); + + ipa_ctx->ctrl->ipa_init_sram(); + IPADBG("SRAM initialized\n"); + + ipa_ctx->ctrl->ipa_init_hdr(); + IPADBG("HDR initialized\n"); + + 
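/* + * Initialize the IPA-local v4/v6 routing and filtering tables; every + * rule entry starts out pointing at the shared empty table so nothing + * is steered through uninitialized entries before the apps pipes are + * brought up. + */ +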
ipa_ctx->ctrl->ipa_init_rt4(); + IPADBG("V4 RT initialized\n"); + + ipa_ctx->ctrl->ipa_init_rt6(); + IPADBG("V6 RT initialized\n"); + + ipa_ctx->ctrl->ipa_init_flt4(); + IPADBG("V4 FLT initialized\n"); + + ipa_ctx->ctrl->ipa_init_flt6(); + IPADBG("V6 FLT initialized\n"); + + if (ipa_setup_exception_path()) { + IPAERR(":fail to setup excp path\n"); + result = -EPERM; + goto fail_schedule_delayed_work; + } + IPADBG("Exception path was successfully set"); + + if (ipa_setup_dflt_rt_tables()) { + IPAERR(":fail to setup dflt routes\n"); + result = -EPERM; + goto fail_schedule_delayed_work; + } + IPADBG("default routing was set\n"); + + /* LAN IN (IPA->A5) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_CONS; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { + sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1; + sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_A5_MUX_HEADER_LENGTH; + } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + sys_in.notify = ipa_lan_rx_cb; + sys_in.priv = NULL; + sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH; + sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD; + sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2; + sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL; + } else { + WARN_ON(1); + } + + /** + * ipa_lan_rx_cb() intended to notify the source EP about packet + * being received on the LAN_CONS via calling the source EP call-back. + * There could be a race condition with calling this call-back. Other + * thread may nullify it - e.g. on EP disconnect. 
+ * This lock intended to protect the access to the source EP call-back + */ + spin_lock_init(&ipa_ctx->disconnect_lock); + if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) { + IPAERR(":setup sys pipe failed.\n"); + result = -EPERM; + goto fail_schedule_delayed_work; + } + + /* LAN-WAN OUT (A5->IPA) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD; + sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) { + IPAERR(":setup sys pipe failed.\n"); + result = -EPERM; + goto fail_data_out; + } + + return 0; + +fail_data_out: + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in); +fail_schedule_delayed_work: + if (ipa_ctx->dflt_v6_rt_rule_hdl) + __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl); + if (ipa_ctx->dflt_v4_rt_rule_hdl) + __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl); + if (ipa_ctx->excp_hdr_hdl) + __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false); + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd); +fail_cmd: + return result; +} + +static void ipa_teardown_apps_pipes(void) +{ + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out); + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in); + __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl); + __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl); + __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false); + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd); +} + +#ifdef CONFIG_COMPAT +static long compat_ipa_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int retval = 0; + struct ipa_ioc_nat_alloc_mem32 nat_mem32; + struct ipa_ioc_nat_alloc_mem nat_mem; + + switch (cmd) { + case IPA_IOC_ADD_HDR32: + cmd = IPA_IOC_ADD_HDR; + break; + case IPA_IOC_DEL_HDR32: + cmd = IPA_IOC_DEL_HDR; + break; + case IPA_IOC_ADD_RT_RULE32: + cmd = IPA_IOC_ADD_RT_RULE; + break; + case IPA_IOC_DEL_RT_RULE32: + cmd = IPA_IOC_DEL_RT_RULE; + break; + case IPA_IOC_ADD_FLT_RULE32: + cmd = IPA_IOC_ADD_FLT_RULE; + break; + case IPA_IOC_DEL_FLT_RULE32: + cmd = IPA_IOC_DEL_FLT_RULE; + break; + case IPA_IOC_GET_RT_TBL32: + cmd = IPA_IOC_GET_RT_TBL; + break; + case IPA_IOC_COPY_HDR32: + cmd = IPA_IOC_COPY_HDR; + break; + case IPA_IOC_QUERY_INTF32: + cmd = IPA_IOC_QUERY_INTF; + break; + case IPA_IOC_QUERY_INTF_TX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_TX_PROPS; + break; + case IPA_IOC_QUERY_INTF_RX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_RX_PROPS; + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS32: + cmd = IPA_IOC_QUERY_INTF_EXT_PROPS; + break; + case IPA_IOC_GET_HDR32: + cmd = IPA_IOC_GET_HDR; + break; + case IPA_IOC_ALLOC_NAT_MEM32: + if (copy_from_user(&nat_mem32, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_alloc_mem32))) { + retval = -EFAULT; + goto ret; + } + memcpy(nat_mem.dev_name, nat_mem32.dev_name, + IPA_RESOURCE_NAME_MAX); + nat_mem.size = (size_t)nat_mem32.size; + nat_mem.offset = (off_t)nat_mem32.offset; + + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa2_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + goto ret; + } + nat_mem32.offset = (compat_off_t)nat_mem.offset; + if (copy_to_user((void __user *)arg, (u8 *)&nat_mem32, + sizeof(struct ipa_ioc_nat_alloc_mem32))) { + retval = -EFAULT; + } +ret: + return retval; + case IPA_IOC_V4_INIT_NAT32: + cmd = IPA_IOC_V4_INIT_NAT; + break; + case IPA_IOC_NAT_DMA32: + cmd = IPA_IOC_NAT_DMA; + break; + case IPA_IOC_V4_DEL_NAT32: + cmd = IPA_IOC_V4_DEL_NAT; + break; + case IPA_IOC_GET_NAT_OFFSET32: + cmd = 
IPA_IOC_GET_NAT_OFFSET; + break; + case IPA_IOC_PULL_MSG32: + cmd = IPA_IOC_PULL_MSG; + break; + case IPA_IOC_RM_ADD_DEPENDENCY32: + cmd = IPA_IOC_RM_ADD_DEPENDENCY; + break; + case IPA_IOC_RM_DEL_DEPENDENCY32: + cmd = IPA_IOC_RM_DEL_DEPENDENCY; + break; + case IPA_IOC_GENERATE_FLT_EQ32: + cmd = IPA_IOC_GENERATE_FLT_EQ; + break; + case IPA_IOC_QUERY_RT_TBL_INDEX32: + cmd = IPA_IOC_QUERY_RT_TBL_INDEX; + break; + case IPA_IOC_WRITE_QMAPID32: + cmd = IPA_IOC_WRITE_QMAPID; + break; + case IPA_IOC_MDFY_FLT_RULE32: + cmd = IPA_IOC_MDFY_FLT_RULE; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL; + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32: + cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED; + break; + case IPA_IOC_MDFY_RT_RULE32: + cmd = IPA_IOC_MDFY_RT_RULE; + break; + case IPA_IOC_COMMIT_HDR: + case IPA_IOC_RESET_HDR: + case IPA_IOC_COMMIT_RT: + case IPA_IOC_RESET_RT: + case IPA_IOC_COMMIT_FLT: + case IPA_IOC_RESET_FLT: + case IPA_IOC_DUMP: + case IPA_IOC_PUT_RT_TBL: + case IPA_IOC_PUT_HDR: + case IPA_IOC_SET_FLT: + case IPA_IOC_QUERY_EP_MAPPING: + break; + default: + return -ENOIOCTLCMD; + } + return ipa_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static const struct file_operations ipa_drv_fops = { + .owner = THIS_MODULE, + .open = ipa_open, + .read = ipa_read, + .unlocked_ioctl = ipa_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_ipa_ioctl, +#endif +}; + +static int ipa_get_clks(struct device *dev) +{ + ipa_clk = clk_get(dev, "core_clk"); + if (IS_ERR(ipa_clk)) { + if (ipa_clk != ERR_PTR(-EPROBE_DEFER)) + IPAERR("fail to get ipa clk\n"); + return PTR_ERR(ipa_clk); + } + + if (smmu_info.present && smmu_info.arm_smmu) { + smmu_clk = clk_get(dev, "smmu_clk"); + if (IS_ERR(smmu_clk)) { + if (smmu_clk != ERR_PTR(-EPROBE_DEFER)) + IPAERR("fail to get smmu clk\n"); + return PTR_ERR(smmu_clk); + } + + if (clk_get_rate(smmu_clk) == 0) { + long rate = clk_round_rate(smmu_clk, 1000); + + clk_set_rate(smmu_clk, rate); + } + } + + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) { + ipa_cnoc_clk = clk_get(dev, "iface_clk"); + if (IS_ERR(ipa_cnoc_clk)) { + ipa_cnoc_clk = NULL; + IPAERR("fail to get cnoc clk\n"); + return -ENODEV; + } + + ipa_clk_src = clk_get(dev, "core_src_clk"); + if (IS_ERR(ipa_clk_src)) { + ipa_clk_src = NULL; + IPAERR("fail to get ipa clk src\n"); + return -ENODEV; + } + + sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk"); + if (IS_ERR(sys_noc_ipa_axi_clk)) { + sys_noc_ipa_axi_clk = NULL; + IPAERR("fail to get sys_noc_ipa_axi clk\n"); + return -ENODEV; + } + + ipa_inactivity_clk = clk_get(dev, "inactivity_clk"); + if (IS_ERR(ipa_inactivity_clk)) { + ipa_inactivity_clk = NULL; + IPAERR("fail to get inactivity clk\n"); + return -ENODEV; + } + } + + return 0; +} + +void _ipa_enable_clks_v2_0(void) +{ + IPADBG_LOW("enabling gcc_ipa_clk\n"); + if (ipa_clk) { + clk_prepare(ipa_clk); + clk_enable(ipa_clk); + IPADBG_LOW("curr_ipa_clk_rate=%d", ipa_ctx->curr_ipa_clk_rate); + clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate); + ipa_uc_notify_clk_state(true); + } else { + WARN_ON(1); + } + + if (smmu_clk) + clk_prepare_enable(smmu_clk); + /* Enable the BAM IRQ. 
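+ * The calls below also take the apps pipes out of suspend now that the
+ * IPA clocks are running.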
*/ + ipa_sps_irq_control_all(true); + ipa_suspend_apps_pipes(false); +} + +void _ipa_enable_clks_v1_1(void) +{ + + if (ipa_cnoc_clk) { + clk_prepare(ipa_cnoc_clk); + clk_enable(ipa_cnoc_clk); + clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE); + } else { + WARN_ON(1); + } + + if (ipa_clk_src) + clk_set_rate(ipa_clk_src, + ipa_ctx->curr_ipa_clk_rate); + else + WARN_ON(1); + + if (ipa_clk) + clk_prepare(ipa_clk); + else + WARN_ON(1); + + if (sys_noc_ipa_axi_clk) + clk_prepare(sys_noc_ipa_axi_clk); + else + WARN_ON(1); + + if (ipa_inactivity_clk) + clk_prepare(ipa_inactivity_clk); + else + WARN_ON(1); + + if (ipa_clk) + clk_enable(ipa_clk); + else + WARN_ON(1); + + if (sys_noc_ipa_axi_clk) + clk_enable(sys_noc_ipa_axi_clk); + else + WARN_ON(1); + + if (ipa_inactivity_clk) + clk_enable(ipa_inactivity_clk); + else + WARN_ON(1); + +} + +static unsigned int ipa_get_bus_vote(void) +{ + unsigned int idx = 1; + + if (ipa_ctx->curr_ipa_clk_rate == ipa_ctx->ctrl->ipa_clk_rate_svs) { + idx = 1; + } else if (ipa_ctx->curr_ipa_clk_rate == + ipa_ctx->ctrl->ipa_clk_rate_nominal) { + if (ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2) + idx = 1; + else + idx = 2; + } else if (ipa_ctx->curr_ipa_clk_rate == + ipa_ctx->ctrl->ipa_clk_rate_turbo) { + idx = ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1; + } else { + WARN_ON(1); + } + + IPADBG("curr %d idx %d\n", ipa_ctx->curr_ipa_clk_rate, idx); + + return idx; +} + +/** + * ipa_enable_clks() - Turn on IPA clocks + * + * Return codes: + * None + */ +void ipa_enable_clks(void) +{ + IPADBG("enabling IPA clocks and bus voting\n"); + + ipa_ctx->ctrl->ipa_enable_clks(); + + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl, + ipa_get_bus_vote())) + WARN_ON(1); +} + +void _ipa_disable_clks_v1_1(void) +{ + + if (ipa_inactivity_clk) + clk_disable_unprepare(ipa_inactivity_clk); + else + WARN_ON(1); + + if (sys_noc_ipa_axi_clk) + clk_disable_unprepare(sys_noc_ipa_axi_clk); + else + WARN_ON(1); + + if (ipa_clk) + clk_disable_unprepare(ipa_clk); + else + WARN_ON(1); + + if (ipa_cnoc_clk) + clk_disable_unprepare(ipa_cnoc_clk); + else + WARN_ON(1); + +} + +void _ipa_disable_clks_v2_0(void) +{ + IPADBG_LOW("disabling gcc_ipa_clk\n"); + ipa_suspend_apps_pipes(true); + ipa_sps_irq_control_all(false); + ipa_uc_notify_clk_state(false); + if (ipa_clk) + clk_disable_unprepare(ipa_clk); + else + WARN_ON(1); + + if (smmu_clk) + clk_disable_unprepare(smmu_clk); +} + +/** + * ipa_disable_clks() - Turn off IPA clocks + * + * Return codes: + * None + */ +void ipa_disable_clks(void) +{ + IPADBG_LOW("disabling IPA clocks and bus voting\n"); + + ipa_ctx->ctrl->ipa_disable_clks(); + + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl, + 0)) + WARN_ON(1); +} + +/** + * ipa_start_tag_process() - Send TAG packet and wait for it to come back + * + * This function is called prior to clock gating when active client counter + * is 1. TAG process ensures that there are no packets inside IPA HW that + * were not submitted to peer's BAM. During TAG process all aggregation frames + * are (force) closed. 
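+ * The extra active-clients reference taken on behalf of the TAG process in
+ * ipa2_dec_client_disable_clks() is dropped at the end of this work
+ * function, which is what finally allows the IPA clocks to be gated.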
+ * + * Return codes: + * None + */ +static void ipa_start_tag_process(struct work_struct *work) +{ + int res; + + IPADBG("starting TAG process\n"); + /* close aggregation frames on all pipes */ + res = ipa_tag_aggr_force_close(-1); + if (res) + IPAERR("ipa_tag_aggr_force_close failed %d\n", res); + + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS"); + + IPADBG("TAG process done\n"); +} + +/** + * ipa2_active_clients_log_mod() - Log a modification in the active clients + * reference count + * + * This method logs any modification in the active clients reference count: + * It logs the modification in the circular history buffer + * It logs the modification in the hash table - looking for an entry, + * creating one if needed and deleting one if needed. + * + * @id: ipa2_active client logging info struct to hold the log information + * @inc: a boolean variable to indicate whether the modification is an increase + * or decrease + * @int_ctx: a boolean variable to indicate whether this call is being made from + * an interrupt context and therefore should allocate GFP_ATOMIC memory + * + * Method process: + * - Hash the unique identifier string + * - Find the hash in the table + * 1)If found, increase or decrease the reference count + * 2)If not found, allocate a new hash table entry struct and initialize it + * - Remove and deallocate unneeded data structure + * - Log the call in the circular history buffer (unless it is a simple call) + */ +static void ipa2_active_clients_log_mod( + struct ipa_active_client_logging_info *id, + bool inc, bool int_ctx) +{ + char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]; + unsigned long long t; + unsigned long nanosec_rem; + struct ipa2_active_client_htable_entry *hentry; + struct ipa2_active_client_htable_entry *hfound; + u32 hkey; + char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN]; + + hfound = NULL; + memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN); + strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN); + hkey = jhash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN, + 0); + hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable, + hentry, list, hkey) { + if (!strcmp(hentry->id_string, id->id_string)) { + hentry->count = hentry->count + (inc ? 1 : -1); + hfound = hentry; + } + } + if (hfound == NULL) { + hentry = NULL; + hentry = kzalloc(sizeof( + struct ipa2_active_client_htable_entry), + int_ctx ? GFP_ATOMIC : GFP_KERNEL); + if (hentry == NULL) { + IPAERR("failed allocating active clients hash entry"); + return; + } + hentry->type = id->type; + strlcpy(hentry->id_string, id->id_string, + IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN); + INIT_HLIST_NODE(&hentry->list); + hentry->count = inc ? 1 : -1; + hash_add(ipa_ctx->ipa2_active_clients_logging.htable, + &hentry->list, hkey); + } else if (hfound->count == 0) { + hash_del(&hfound->list); + kfree(hfound); + } + + if (id->type != SIMPLE) { + t = local_clock(); + nanosec_rem = do_div(t, 1000000000) / 1000; + snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN, + inc ? 
"[%5lu.%06lu] ^ %s, %s: %d" : + "[%5lu.%06lu] v %s, %s: %d", + (unsigned long)t, nanosec_rem, + id->id_string, id->file, id->line); + ipa2_active_clients_log_insert(temp_str); + } +} + +void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx) +{ + ipa2_active_clients_log_mod(id, false, int_ctx); +} + +void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx) +{ + ipa2_active_clients_log_mod(id, true, int_ctx); +} + +/** + * ipa_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA2_ACTIVE_CLIENTS_INC_XXXX(); + * + * Return codes: + * None + */ +void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id) +{ + ipa_active_clients_lock(); + ipa2_active_clients_log_inc(id, false); + ipa_ctx->ipa_active_clients.cnt++; + if (ipa_ctx->ipa_active_clients.cnt == 1) + ipa_enable_clks(); + IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); + ipa_active_clients_unlock(); +} + +/** + * ipa_inc_client_enable_clks_no_block() - Only increment the number of active + * clients if no asynchronous actions should be done. Asynchronous actions are + * locking a mutex and waking up IPA HW. + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * + * + * Return codes: 0 for success + * -EPERM if an asynchronous action should have been done + */ +int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id) +{ + int res = 0; + unsigned long flags; + + if (ipa_active_clients_trylock(&flags) == 0) + return -EPERM; + + if (ipa_ctx->ipa_active_clients.cnt == 0) { + res = -EPERM; + goto bail; + } + + ipa2_active_clients_log_inc(id, true); + + ipa_ctx->ipa_active_clients.cnt++; + IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); +bail: + ipa_active_clients_trylock_unlock(&flags); + + return res; +} + +/** + * ipa_dec_client_disable_clks() - Decrease active clients counter + * + * In case that there are no active clients this function also starts + * TAG process. When TAG progress ends ipa clocks will be gated. 
+ * start_tag_process_again flag is set during this function to signal TAG + * process to start again as there was another client that may send data to ipa + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA2_ACTIVE_CLIENTS_DEC_XXXX(); + * + * Return codes: + * None + */ +void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id) +{ + struct ipa_active_client_logging_info log_info; + + ipa_active_clients_lock(); + ipa2_active_clients_log_dec(id, false); + ipa_ctx->ipa_active_clients.cnt--; + IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); + if (ipa_ctx->ipa_active_clients.cnt == 0) { + if (ipa_ctx->tag_process_before_gating) { + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, + "TAG_PROCESS"); + ipa2_active_clients_log_inc(&log_info, false); + ipa_ctx->tag_process_before_gating = false; + /* + * When TAG process ends, active clients will be + * decreased + */ + ipa_ctx->ipa_active_clients.cnt = 1; + queue_work(ipa_ctx->power_mgmt_wq, &ipa_tag_work); + } else { + ipa_disable_clks(); + } + } + ipa_active_clients_unlock(); +} + +/** + * ipa_inc_acquire_wakelock() - Increase active clients counter, and + * acquire wakelock if necessary + * + * Return codes: + * None + */ +void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client) +{ + unsigned long flags; + + if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX) + return; + spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags); + if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client)) + IPAERR("client enum %d mask already set. ref cnt = %d\n", + ref_client, ipa_ctx->wakelock_ref_cnt.cnt); + ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client); + if (ipa_ctx->wakelock_ref_cnt.cnt) + __pm_stay_awake(ipa_ctx->w_lock); + IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n", + ipa_ctx->wakelock_ref_cnt.cnt, ref_client); + spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags); +} + +/** + * ipa_dec_release_wakelock() - Decrease active clients counter + * + * In case if the ref count is 0, release the wakelock. 
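+ * Each ref_client owns one bit in wakelock_ref_cnt.cnt, so the wakelock is
+ * held as long as any client bit is set. Illustrative pairing, using
+ * IPA_WAKELOCK_REF_CLIENT_SPS (one of the clients referenced in this file):
+ *
+ *	ipa_inc_acquire_wakelock(IPA_WAKELOCK_REF_CLIENT_SPS);
+ *	... AP is kept awake while this vote is held ...
+ *	ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_SPS);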
+ * + * Return codes: + * None + */ +void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client) +{ + unsigned long flags; + + if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX) + return; + spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags); + ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client); + IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n", + ipa_ctx->wakelock_ref_cnt.cnt, ref_client); + if (ipa_ctx->wakelock_ref_cnt.cnt == 0) + __pm_relax(ipa_ctx->w_lock); + spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags); +} + +static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res) +{ + void __iomem *ipa_bam_mmio; + int reg_val; + int retval = 0; + + ipa_bam_mmio = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST, + IPA_BAM_REMAP_SIZE); + if (!ipa_bam_mmio) + return -ENOMEM; + switch (ipa_ctx->ipa_hw_type) { + case IPA_HW_v1_1: + reg_val = IPA_BAM_CNFG_BITS_VALv1_1; + break; + case IPA_HW_v2_0: + case IPA_HW_v2_5: + case IPA_HW_v2_6L: + reg_val = IPA_BAM_CNFG_BITS_VALv2_0; + break; + default: + retval = -EPERM; + goto fail; + } + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5) + ipa_write_reg(ipa_bam_mmio, IPA_BAM_CNFG_BITS_OFST, reg_val); +fail: + iounmap(ipa_bam_mmio); + + return retval; +} + +int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + enum ipa_voltage_level needed_voltage; + u32 clk_rate; + + IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u", + floor_voltage, bandwidth_mbps); + + if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED || + floor_voltage >= IPA_VOLTAGE_MAX) { + IPAERR("bad voltage\n"); + return -EINVAL; + } + + if (ipa_ctx->enable_clock_scaling) { + IPADBG_LOW("Clock scaling is enabled\n"); + if (bandwidth_mbps >= + ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo) + needed_voltage = IPA_VOLTAGE_TURBO; + else if (bandwidth_mbps >= + ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal) + needed_voltage = IPA_VOLTAGE_NOMINAL; + else + needed_voltage = IPA_VOLTAGE_SVS; + } else { + IPADBG_LOW("Clock scaling is disabled\n"); + needed_voltage = IPA_VOLTAGE_NOMINAL; + } + + needed_voltage = max(needed_voltage, floor_voltage); + switch (needed_voltage) { + case IPA_VOLTAGE_SVS: + clk_rate = ipa_ctx->ctrl->ipa_clk_rate_svs; + break; + case IPA_VOLTAGE_NOMINAL: + clk_rate = ipa_ctx->ctrl->ipa_clk_rate_nominal; + break; + case IPA_VOLTAGE_TURBO: + clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo; + break; + default: + IPAERR("bad voltage\n"); + WARN_ON(1); + return -EFAULT; + } + + if (clk_rate == ipa_ctx->curr_ipa_clk_rate) { + IPADBG_LOW("Same voltage\n"); + return 0; + } + + ipa_active_clients_lock(); + ipa_ctx->curr_ipa_clk_rate = clk_rate; + IPADBG_LOW("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate); + if (ipa_ctx->ipa_active_clients.cnt > 0) { + struct ipa_active_client_logging_info log_info; + + /* + * clk_set_rate should be called with unlocked lock to allow + * clients to get a reference to IPA clock synchronously. + * Hold a reference to IPA clock here to make sure clock + * state does not change during set_rate. 
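+ * (The count is bumped directly here rather than through
+ * ipa2_inc_client_enable_clks() because the active-clients lock is already
+ * held; the extra vote is dropped below via IPA_ACTIVE_CLIENTS_DEC_SIMPLE().)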
+ */ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + ipa_ctx->ipa_active_clients.cnt++; + ipa2_active_clients_log_inc(&log_info, false); + ipa_active_clients_unlock(); + + clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate); + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + if (msm_bus_scale_client_update_request( + ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote())) + WARN_ON(1); + /* remove the vote added here */ + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + } else { + IPADBG_LOW("clocks are gated, not setting rate\n"); + ipa_active_clients_unlock(); + } + IPADBG_LOW("Done\n"); + return 0; +} + +static int ipa_init_flt_block(void) +{ + int result = 0; + + /* + * SW workaround for Improper Filter Behavior when neither Global nor + * Pipe Rules are present => configure dummy global filter rule + * always which results in a miss + */ + struct ipa_ioc_add_flt_rule *rules; + struct ipa_flt_rule_add *rule; + struct ipa_ioc_get_rt_tbl rt_lookup; + enum ipa_ip_type ip; + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v1_1) { + size_t sz = sizeof(struct ipa_ioc_add_flt_rule) + + sizeof(struct ipa_flt_rule_add); + + rules = kmalloc(sz, GFP_KERNEL); + if (rules == NULL) { + IPAERR("fail to alloc mem for dummy filter rule\n"); + return -ENOMEM; + } + + IPADBG("Adding global rules for IPv4 and IPv6"); + for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) { + memset(&rt_lookup, 0, + sizeof(struct ipa_ioc_get_rt_tbl)); + rt_lookup.ip = ip; + strlcpy(rt_lookup.name, IPA_DFLT_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + ipa2_get_rt_tbl(&rt_lookup); + ipa2_put_rt_tbl(rt_lookup.hdl); + + memset(rules, 0, sz); + rule = &rules->rules[0]; + rules->commit = 1; + rules->ip = ip; + rules->global = 1; + rules->num_rules = 1; + rule->at_rear = 1; + if (ip == IPA_IP_v4) { + rule->rule.attrib.attrib_mask = + IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR; + rule->rule.attrib.u.v4.protocol = + IPA_INVALID_L4_PROTOCOL; + rule->rule.attrib.u.v4.dst_addr_mask = ~0; + rule->rule.attrib.u.v4.dst_addr = ~0; + } else if (ip == IPA_IP_v6) { + rule->rule.attrib.attrib_mask = + IPA_FLT_NEXT_HDR | IPA_FLT_DST_ADDR; + rule->rule.attrib.u.v6.next_hdr = + IPA_INVALID_L4_PROTOCOL; + rule->rule.attrib.u.v6.dst_addr_mask[0] = ~0; + rule->rule.attrib.u.v6.dst_addr_mask[1] = ~0; + rule->rule.attrib.u.v6.dst_addr_mask[2] = ~0; + rule->rule.attrib.u.v6.dst_addr_mask[3] = ~0; + rule->rule.attrib.u.v6.dst_addr[0] = ~0; + rule->rule.attrib.u.v6.dst_addr[1] = ~0; + rule->rule.attrib.u.v6.dst_addr[2] = ~0; + rule->rule.attrib.u.v6.dst_addr[3] = ~0; + } else { + result = -EINVAL; + WARN_ON(1); + break; + } + rule->rule.action = IPA_PASS_TO_ROUTING; + rule->rule.rt_tbl_hdl = rt_lookup.hdl; + rule->rule.retain_hdr = true; + + if (ipa2_add_flt_rule(rules) || + rules->rules[0].status) { + + result = -EINVAL; + WARN_ON(1); + break; + } + } + kfree(rules); + } + return result; +} + +static void ipa_sps_process_irq_schedule_rel(void) +{ + queue_delayed_work(ipa_ctx->sps_power_mgmt_wq, + &ipa_sps_release_resource_work, + msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC)); +} + +/** + * ipa_suspend_handler() - Handles the suspend interrupt: + * wakes up the suspended peripheral by requesting its consumer + * @interrupt: Interrupt type + * @private_data: The client's private data + * @interrupt_data: Interrupt specific information data + */ +void ipa_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + enum ipa_rm_resource_name resource; + u32 suspend_data = + ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints; + u32 bmsk = 1; + u32 i = 0; + int res; + 
struct ipa_ep_cfg_holb holb_cfg; + + IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data); + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.tmr_val = 0; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if ((suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) { + if (IPA_CLIENT_IS_APPS_CONS(ipa_ctx->ep[i].client)) { + /* + * pipe will be unsuspended as part of + * enabling IPA clocks + */ + mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock); + if (!atomic_read( + &ipa_ctx->sps_pm.dec_clients) + ) { + IPA_ACTIVE_CLIENTS_INC_EP( + ipa_ctx->ep[i].client); + IPADBG("Pipes un-suspended.\n"); + IPADBG("Enter poll mode.\n"); + atomic_set( + &ipa_ctx->sps_pm.dec_clients, + 1); + /* + * acquire wake lock as long as suspend + * vote is held + */ + ipa_inc_acquire_wakelock( + IPA_WAKELOCK_REF_CLIENT_SPS); + ipa_sps_process_irq_schedule_rel(); + } + mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock); + } else { + resource = ipa2_get_rm_resource_from_ep(i); + res = ipa_rm_request_resource_with_timer( + resource); + if ((res == -EPERM) && + IPA_CLIENT_IS_CONS( + ipa_ctx->ep[i].client)) { + holb_cfg.en = 1; + res = ipa2_cfg_ep_holb_by_client( + ipa_ctx->ep[i].client, &holb_cfg); + if (res) { + IPAERR("holb en fail\n"); + IPAERR("IPAHW stall\n"); + ipa_assert(); + } + } + } + } + bmsk = bmsk << 1; + } +} + +/** + * ipa2_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. + * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ +int ipa2_restore_suspend_handler(void) +{ + int result = 0; + + result = ipa2_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ); + if (result) { + IPAERR("remove handler for suspend interrupt failed\n"); + return -EPERM; + } + + result = ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa_suspend_handler, true, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -EPERM; + } + + return result; +} + +static int apps_cons_release_resource(void) +{ + return 0; +} + +static int apps_cons_request_resource(void) +{ + return 0; +} + +static void ipa_sps_release_resource(struct work_struct *work) +{ + mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock); + /* check whether still need to decrease client usage */ + if (atomic_read(&ipa_ctx->sps_pm.dec_clients)) { + if (atomic_read(&ipa_ctx->sps_pm.eot_activity)) { + IPADBG("EOT pending Re-scheduling\n"); + ipa_sps_process_irq_schedule_rel(); + } else { + atomic_set(&ipa_ctx->sps_pm.dec_clients, 0); + ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_SPS); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE"); + } + } + atomic_set(&ipa_ctx->sps_pm.eot_activity, 0); + mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock); +} + +static int ipa_create_apps_resource(void) +{ + struct ipa_rm_create_params apps_cons_create_params; + struct ipa_rm_perf_profile profile; + int result = 0; + + memset(&apps_cons_create_params, 0, + sizeof(apps_cons_create_params)); + apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS; + apps_cons_create_params.request_resource = apps_cons_request_resource; + apps_cons_create_params.release_resource = apps_cons_release_resource; + result = ipa_rm_create_resource(&apps_cons_create_params); + if (result) { + IPAERR("ipa_rm_create_resource failed\n"); + return result; + } + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); + + return result; +} + + +/** + * ipa_init() - Initialize the IPA Driver + * 
@resource_p: contains platform-specific values from the DTS file
+ * @pdev: The platform device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * - Allocate memory for the driver context data struct
+ * - Initialize the ipa_ctx with:
+ * 1) parsed values from the DTS file
+ * 2) parameters passed to the module initialization
+ * 3) read HW values (such as core memory size)
+ * - Map IPA core registers to CPU memory
+ * - Restart IPA core (HW reset)
+ * - Register IPA BAM with the SPS driver and get a BAM handle
+ * - Set configuration for IPA BAM via BAM_CNFG_BITS
+ * - Initialize the look-aside caches (kmem_cache/slab) for filter,
+ * routing and IPA-tree
+ * - Create memory pool with 4 objects for DMA operations (each object
+ * is 512 bytes long); these objects will be used for tx (A5->IPA)
+ * - Initialize list heads (routing, filter, hdr, system pipes)
+ * - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+ * - Initialize spinlocks (for list related to A5<->IPA pipes)
+ * - Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
+ * - Initialize Red-Black-Tree(s) for handles of header, routing rule,
+ * routing table, filtering rule
+ * - Set up all A5<->IPA pipes by calling ipa_setup_apps_pipes
+ * - Prepare the descriptors for System pipes
+ * - Initialize the filter block by committing IPv4 and IPv6 default rules
+ * - Create empty routing table in system memory (no committing)
+ * - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms
+ * - Create a char-device for IPA
+ * - Initialize IPA RM (resource manager)
+ */
+static int ipa_init(const struct ipa_plat_drv_res *resource_p,
+ struct device *ipa_dev)
+{
+ int result = 0;
+ int i;
+ struct sps_bam_props bam_props = { 0 };
+ struct ipa_flt_tbl *flt_tbl;
+ struct ipa_rt_tbl_set *rset;
+ struct ipa_active_client_logging_info log_info;
+
+ IPADBG("IPA Driver initialization started\n");
+
+ /*
+ * since structure alignment is implementation dependent, add test to
+ * avoid different and incompatible data layouts
+ */
+ BUILD_BUG_ON(sizeof(struct ipa_hw_pkt_status) != IPA_PKT_STATUS_SIZE);
+
+ ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+ if (!ipa_ctx) {
+ IPAERR(":kzalloc err.\n");
+ result = -ENOMEM;
+ goto fail_mem_ctx;
+ }
+
+ ipa_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
+ if (ipa_ctx->logbuf == NULL)
+ IPADBG("failed to create IPC log, continue...\n");
+
+ ipa_ctx->pdev = ipa_dev;
+ ipa_ctx->uc_pdev = ipa_dev;
+ ipa_ctx->smmu_present = smmu_info.present;
+ if (!ipa_ctx->smmu_present)
+ ipa_ctx->smmu_s1_bypass = true;
+ else
+ ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
+ ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+ ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+ ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+ ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
+ ipa_ctx->ipa_uc_monitor_holb =
+ resource_p->ipa_uc_monitor_holb;
+ ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+ ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
+ ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+ ipa_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
+ ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+ ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+ ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+ ipa_ctx->use_dma_zone = resource_p->use_dma_zone;
+ ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+
+ /* Setting up IPA RX Polling Timeout Seconds */
+ ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec, + &ipa_ctx->ipa_rx_max_timeout_usec, + resource_p->ipa_rx_polling_sleep_msec); + + /* Setting up ipa polling iteration */ + if ((resource_p->ipa_polling_iteration >= MIN_POLLING_ITERATION) + && (resource_p->ipa_polling_iteration <= MAX_POLLING_ITERATION)) + ipa_ctx->ipa_polling_iteration = + resource_p->ipa_polling_iteration; + else + ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION; + + /* default aggregation parameters */ + ipa_ctx->aggregation_type = IPA_MBIM_16; + ipa_ctx->aggregation_byte_limit = 1; + ipa_ctx->aggregation_time_limit = 0; + ipa_ctx->ipa2_active_clients_logging.log_rdy = false; + + ipa_ctx->ctrl = kzalloc(sizeof(*ipa_ctx->ctrl), GFP_KERNEL); + if (!ipa_ctx->ctrl) { + IPAERR("memory allocation error for ctrl\n"); + result = -ENOMEM; + goto fail_mem_ctrl; + } + result = ipa_controller_static_bind(ipa_ctx->ctrl, + ipa_ctx->ipa_hw_type); + if (result) { + IPAERR("fail to static bind IPA ctrl.\n"); + result = -EFAULT; + goto fail_bind; + } + + IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n", + ipa_ctx->hdr_tbl_lcl, ipa_ctx->ip4_rt_tbl_lcl, + ipa_ctx->ip6_rt_tbl_lcl, ipa_ctx->ip4_flt_tbl_lcl, + ipa_ctx->ip6_flt_tbl_lcl); + + if (bus_scale_table) { + IPADBG("Use bus scaling info from device tree\n"); + ipa_ctx->ctrl->msm_bus_data_ptr = bus_scale_table; + } + + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) { + /* get BUS handle */ + /* Check if bus handle is already registered */ + if (!register_ipa_bus_hdl) + ipa_ctx->ipa_bus_hdl = + msm_bus_scale_register_client( + ipa_ctx->ctrl->msm_bus_data_ptr); + else + ipa_ctx->ipa_bus_hdl = register_ipa_bus_hdl; + + if (!ipa_ctx->ipa_bus_hdl) { + IPAERR("fail to register with bus mgr!\n"); + result = -EPROBE_DEFER; + bus_scale_table = NULL; + goto fail_bus_reg; + } + } else { + IPADBG("Skipping bus scaling registration on Virtual plat\n"); + } + + result = ipa2_active_clients_log_init(); + if (result) + goto fail_init_active_client; + + /* get IPA clocks */ + result = ipa_get_clks(master_dev); + if (result) + goto fail_clk; + + /* Enable ipa_ctx->enable_clock_scaling */ + ipa_ctx->enable_clock_scaling = 1; + ipa_ctx->curr_ipa_clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo; + + /* enable IPA clocks explicitly to allow the initialization */ + ipa_enable_clks(); + + /* setup IPA register access */ + ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + + ipa_ctx->ctrl->ipa_reg_base_ofst, + resource_p->ipa_mem_size); + if (!ipa_ctx->mmio) { + IPAERR(":ipa-base ioremap err.\n"); + result = -EFAULT; + goto fail_remap; + } + + result = ipa_init_hw(); + if (result) { + IPAERR(":error initializing HW.\n"); + result = -ENODEV; + goto fail_init_hw; + } + IPADBG("IPA HW initialization sequence completed"); + + ipa_ctx->ipa_num_pipes = ipa_get_num_pipes(); + ipa_ctx->ctrl->ipa_sram_read_settings(); + IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n", + ipa_ctx->smem_sz, ipa_ctx->smem_restricted_bytes); + + if (ipa_ctx->smem_reqd_sz > + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes) { + IPAERR("SW expect more core memory, needed %d, avail %d\n", + ipa_ctx->smem_reqd_sz, ipa_ctx->smem_sz - + ipa_ctx->smem_restricted_bytes); + result = -ENOMEM; + goto fail_init_hw; + } + + mutex_init(&ipa_ctx->ipa_active_clients.mutex); + spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock); + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); + ipa2_active_clients_log_inc(&log_info, false); + ipa_ctx->ipa_active_clients.cnt = 1; + + /* Create workqueues for power 
management */ + ipa_ctx->power_mgmt_wq = + create_singlethread_workqueue("ipa_power_mgmt"); + if (!ipa_ctx->power_mgmt_wq) { + IPAERR("failed to create power mgmt wq\n"); + result = -ENOMEM; + goto fail_init_hw; + } + + ipa_ctx->sps_power_mgmt_wq = + create_singlethread_workqueue("sps_ipa_power_mgmt"); + if (!ipa_ctx->sps_power_mgmt_wq) { + IPAERR("failed to create sps power mgmt wq\n"); + result = -ENOMEM; + goto fail_create_sps_wq; + } + + /* register IPA with SPS driver */ + bam_props.phys_addr = resource_p->bam_mem_base; + bam_props.virt_size = resource_p->bam_mem_size; + bam_props.irq = resource_p->bam_irq; + bam_props.num_pipes = ipa_ctx->ipa_num_pipes; + bam_props.summing_threshold = IPA_SUMMING_THRESHOLD; + bam_props.event_threshold = IPA_EVENT_THRESHOLD; + bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING; + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP; + if (ipa_ctx->ipa_bam_remote_mode) + bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE; + if (!ipa_ctx->smmu_s1_bypass) + bam_props.options |= SPS_BAM_SMMU_EN; + bam_props.options |= SPS_BAM_CACHED_WP; + bam_props.ee = resource_p->ee; + bam_props.ipc_loglevel = 3; + + result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle); + if (result) { + IPAERR(":bam register err.\n"); + result = -EPROBE_DEFER; + goto fail_register_bam_device; + } + IPADBG("IPA BAM is registered\n"); + + if (ipa_setup_bam_cfg(resource_p)) { + IPAERR(":bam cfg err.\n"); + result = -ENODEV; + goto fail_flt_rule_cache; + } + + /* init the lookaside cache */ + ipa_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT", + sizeof(struct ipa_flt_entry), 0, 0, NULL); + if (!ipa_ctx->flt_rule_cache) { + IPAERR(":ipa flt cache create failed\n"); + result = -ENOMEM; + goto fail_flt_rule_cache; + } + ipa_ctx->rt_rule_cache = kmem_cache_create("IPA_RT", + sizeof(struct ipa_rt_entry), 0, 0, NULL); + if (!ipa_ctx->rt_rule_cache) { + IPAERR(":ipa rt cache create failed\n"); + result = -ENOMEM; + goto fail_rt_rule_cache; + } + ipa_ctx->hdr_cache = kmem_cache_create("IPA_HDR", + sizeof(struct ipa_hdr_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_cache) { + IPAERR(":ipa hdr cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_cache; + } + ipa_ctx->hdr_offset_cache = + kmem_cache_create("IPA_HDR_OFFSET", + sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_offset_cache) { + IPAERR(":ipa hdr off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_offset_cache; + } + ipa_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX", + sizeof(struct ipa_hdr_proc_ctx_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_proc_ctx_cache) { + IPAERR(":ipa hdr proc ctx cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_cache; + } + ipa_ctx->hdr_proc_ctx_offset_cache = + kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET", + sizeof(struct ipa_hdr_proc_ctx_offset_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_proc_ctx_offset_cache) { + IPAERR(":ipa hdr proc ctx off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_offset_cache; + } + ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL", + sizeof(struct ipa_rt_tbl), 0, 0, NULL); + if (!ipa_ctx->rt_tbl_cache) { + IPAERR(":ipa rt tbl cache create failed\n"); + result = -ENOMEM; + goto fail_rt_tbl_cache; + } + ipa_ctx->tx_pkt_wrapper_cache = + kmem_cache_create("IPA_TX_PKT_WRAPPER", + sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL); + if (!ipa_ctx->tx_pkt_wrapper_cache) { + IPAERR(":ipa tx pkt wrapper cache create failed\n"); + result = -ENOMEM; + 
goto fail_tx_pkt_wrapper_cache; + } + ipa_ctx->rx_pkt_wrapper_cache = + kmem_cache_create("IPA_RX_PKT_WRAPPER", + sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL); + if (!ipa_ctx->rx_pkt_wrapper_cache) { + IPAERR(":ipa rx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_rx_pkt_wrapper_cache; + } + + /* Setup DMA pool */ + ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev, + IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec), + 0, 0); + if (!ipa_ctx->dma_pool) { + IPAERR("cannot alloc DMA pool.\n"); + result = -ENOMEM; + goto fail_dma_pool; + } + + ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl; + ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl; + + /* init the various list heads */ + INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list); + INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list); + INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list); + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]); + INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list); + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i]); + INIT_LIST_HEAD( + &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list); + INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list); + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl; + + flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl; + } + + rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + + INIT_LIST_HEAD(&ipa_ctx->intf_list); + INIT_LIST_HEAD(&ipa_ctx->msg_list); + INIT_LIST_HEAD(&ipa_ctx->pull_msg_list); + init_waitqueue_head(&ipa_ctx->msg_waitq); + mutex_init(&ipa_ctx->msg_lock); + + /* store wlan client-connect-msg-list */ + INIT_LIST_HEAD(&ipa_ctx->msg_wlan_client_list); + mutex_init(&ipa_ctx->msg_wlan_client_lock); + + mutex_init(&ipa_ctx->lock); + mutex_init(&ipa_ctx->nat_mem.lock); + mutex_init(&ipa_ctx->ipa_cne_evt_lock); + + idr_init(&ipa_ctx->ipa_idr); + spin_lock_init(&ipa_ctx->idr_lock); + + /* wlan related member */ + memset(&ipa_ctx->wc_memb, 0, sizeof(ipa_ctx->wc_memb)); + spin_lock_init(&ipa_ctx->wc_memb.wlan_spinlock); + spin_lock_init(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock); + INIT_LIST_HEAD(&ipa_ctx->wc_memb.wlan_comm_desc_list); + /* + * setup an empty routing table in system memory, this will be used + * to delete a routing table cleanly and safely + */ + ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE; + + ipa_ctx->empty_rt_tbl_mem.base = + dma_zalloc_coherent(ipa_ctx->pdev, + ipa_ctx->empty_rt_tbl_mem.size, + &ipa_ctx->empty_rt_tbl_mem.phys_base, + GFP_KERNEL); + if (!ipa_ctx->empty_rt_tbl_mem.base) { + IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n", + ipa_ctx->empty_rt_tbl_mem.size); + result = -ENOMEM; + goto fail_apps_pipes; + } + IPADBG("empty routing table was allocated in system memory"); + + /* setup the A5-IPA pipes */ + if (ipa_setup_apps_pipes()) { + IPAERR(":failed to setup IPA-Apps pipes.\n"); + result = 
-ENODEV; + goto fail_empty_rt_tbl; + } + IPADBG("IPA System2Bam pipes were connected\n"); + + if (ipa_init_flt_block()) { + IPAERR("fail to setup dummy filter rules\n"); + result = -ENODEV; + goto fail_empty_rt_tbl; + } + IPADBG("filter block was set with dummy filter rules"); + + /* setup the IPA pipe mem pool */ + if (resource_p->ipa_pipe_mem_size) + ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst, + resource_p->ipa_pipe_mem_size); + + ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME); + + result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err.\n"); + result = -ENODEV; + goto fail_alloc_chrdev_region; + } + + ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num, + ipa_ctx, DRV_NAME); + if (IS_ERR(ipa_ctx->dev)) { + IPAERR(":device_create err.\n"); + result = -ENODEV; + goto fail_device_create; + } + + cdev_init(&ipa_ctx->cdev, &ipa_drv_fops); + ipa_ctx->cdev.owner = THIS_MODULE; + ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */ + + result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1); + if (result) { + IPAERR(":cdev_add err=%d\n", -result); + result = -ENODEV; + goto fail_cdev_add; + } + IPADBG("ipa cdev added successful. major:%d minor:%d\n", + MAJOR(ipa_ctx->dev_num), + MINOR(ipa_ctx->dev_num)); + + if (create_nat_device()) { + IPAERR("unable to create nat device\n"); + result = -ENODEV; + goto fail_nat_dev_add; + } + + /* Register a wakeup source. */ + ipa_ctx->w_lock = + wakeup_source_register(&ipa_pdev->dev, "IPA_WS"); + if (!ipa_ctx->w_lock) { + IPAERR("IPA wakeup source register failed\n"); + result = -ENOMEM; + goto fail_w_source_register; + } + + spin_lock_init(&ipa_ctx->wakelock_ref_cnt.spinlock); + + /* Initialize the SPS PM lock. */ + mutex_init(&ipa_ctx->sps_pm.sps_pm_lock); + + /* Initialize IPA RM (resource manager) */ + result = ipa_rm_initialize(); + if (result) { + IPAERR("RM initialization failed (%d)\n", -result); + result = -ENODEV; + goto fail_ipa_rm_init; + } + IPADBG("IPA resource manager initialized"); + + result = ipa_create_apps_resource(); + if (result) { + IPAERR("Failed to create APPS_CONS resource\n"); + result = -ENODEV; + goto fail_create_apps_resource; + } + + /*register IPA IRQ handler*/ + result = ipa_interrupts_init(resource_p->ipa_irq, 0, + master_dev); + if (result) { + IPAERR("ipa interrupts initialization failed\n"); + result = -ENODEV; + goto fail_ipa_interrupts_init; + } + + /*add handler for suspend interrupt*/ + result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa_suspend_handler, false, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -ENODEV; + goto fail_add_interrupt_handler; + } + + if (ipa_ctx->use_ipa_teth_bridge) { + /* Initialize the tethering bridge driver */ + result = teth_bridge_driver_init(); + if (result) { + IPAERR(":teth_bridge init failed (%d)\n", -result); + result = -ENODEV; + goto fail_add_interrupt_handler; + } + IPADBG("teth_bridge initialized"); + } + + ipa_debugfs_init(); + + result = ipa_uc_interface_init(); + if (result) + IPAERR(":ipa Uc interface init failed (%d)\n", -result); + else + IPADBG(":ipa Uc interface init ok\n"); + + result = ipa2_wdi_init(); + if (result) + IPAERR(":wdi init failed (%d)\n", -result); + else + IPADBG(":wdi init ok\n"); + + result = ipa_ntn_init(); + if (result) + IPAERR(":ntn init failed (%d)\n", -result); + else + IPADBG(":ntn init ok\n"); + + ipa_ctx->q6_proxy_clk_vote_valid = true; + + ipa_register_panic_hdlr(); + + pr_info("IPA driver 
initialization was successful.\n"); + + return 0; + +fail_add_interrupt_handler: + free_irq(resource_p->ipa_irq, master_dev); +fail_ipa_interrupts_init: + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); +fail_create_apps_resource: + ipa_rm_exit(); +fail_ipa_rm_init: + wakeup_source_unregister(ipa_ctx->w_lock); + ipa_ctx->w_lock = NULL; +fail_w_source_register: +fail_nat_dev_add: + cdev_del(&ipa_ctx->cdev); +fail_cdev_add: + device_destroy(ipa_ctx->class, ipa_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(ipa_ctx->dev_num, 1); +fail_alloc_chrdev_region: + if (ipa_ctx->pipe_mem_pool) + gen_pool_destroy(ipa_ctx->pipe_mem_pool); +fail_empty_rt_tbl: + ipa_teardown_apps_pipes(); + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->empty_rt_tbl_mem.size, + ipa_ctx->empty_rt_tbl_mem.base, + ipa_ctx->empty_rt_tbl_mem.phys_base); +fail_apps_pipes: + idr_destroy(&ipa_ctx->ipa_idr); +fail_dma_pool: + kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache); +fail_rx_pkt_wrapper_cache: + kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache); +fail_tx_pkt_wrapper_cache: + kmem_cache_destroy(ipa_ctx->rt_tbl_cache); +fail_rt_tbl_cache: + kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_offset_cache); +fail_hdr_proc_ctx_offset_cache: + kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_cache); +fail_hdr_proc_ctx_cache: + kmem_cache_destroy(ipa_ctx->hdr_offset_cache); +fail_hdr_offset_cache: + kmem_cache_destroy(ipa_ctx->hdr_cache); +fail_hdr_cache: + kmem_cache_destroy(ipa_ctx->rt_rule_cache); +fail_rt_rule_cache: + kmem_cache_destroy(ipa_ctx->flt_rule_cache); +fail_flt_rule_cache: + sps_deregister_bam_device(ipa_ctx->bam_handle); +fail_register_bam_device: + destroy_workqueue(ipa_ctx->sps_power_mgmt_wq); +fail_create_sps_wq: + destroy_workqueue(ipa_ctx->power_mgmt_wq); +fail_init_hw: + iounmap(ipa_ctx->mmio); +fail_remap: + ipa_disable_clks(); +fail_clk: + ipa2_active_clients_log_destroy(); +fail_init_active_client: + msm_bus_scale_unregister_client(ipa_ctx->ipa_bus_hdl); + if (bus_scale_table) { + msm_bus_cl_clear_pdata(bus_scale_table); + bus_scale_table = NULL; + register_ipa_bus_hdl = 0; + } +fail_bus_reg: +fail_bind: + kfree(ipa_ctx->ctrl); +fail_mem_ctrl: + ipc_log_context_destroy(ipa_ctx->logbuf); + kfree(ipa_ctx); + ipa_ctx = NULL; +fail_mem_ctx: + return result; +} + +static int get_ipa_dts_configuration(struct platform_device *pdev, + struct ipa_plat_drv_res *ipa_drv_res) +{ + int result; + struct resource *resource; + + /* initialize ipa_res */ + ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST; + ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE; + ipa_drv_res->ipa_hw_type = 0; + ipa_drv_res->ipa_hw_mode = 0; + ipa_drv_res->ipa_uc_monitor_holb = false; + ipa_drv_res->ipa_bam_remote_mode = false; + ipa_drv_res->modem_cfg_emb_pipe_flt = false; + ipa_drv_res->ipa_wdi2 = false; + ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + + /* Get IPA HW Version */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", + &ipa_drv_res->ipa_hw_type); + if ((result) || (ipa_drv_res->ipa_hw_type == 0)) { + IPAERR(":get resource failed for ipa-hw-ver!\n"); + return -ENODEV; + } + IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type); + + /* Get IPA HW mode */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode", + &ipa_drv_res->ipa_hw_mode); + if (result) + IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n"); + else + IPADBG(": found ipa_drv_res->ipa_hw_mode = %d", + ipa_drv_res->ipa_hw_mode); + + /* Check 
ipa_uc_monitor_holb enabled or disabled */ + ipa_drv_res->ipa_uc_monitor_holb = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-uc-monitor-holb"); + IPADBG(": ipa uc monitor holb = %s\n", + ipa_drv_res->ipa_uc_monitor_holb + ? "Enabled" : "Disabled"); + + /* Get IPA WAN / LAN RX pool sizes */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-ring-size", + &ipa_drv_res->wan_rx_ring_size); + if (result) + IPADBG("using default for wan-rx-ring-size = %u\n", + ipa_drv_res->wan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u", + ipa_drv_res->wan_rx_ring_size); + + result = of_property_read_u32(pdev->dev.of_node, + "qcom,lan-rx-ring-size", + &ipa_drv_res->lan_rx_ring_size); + if (result) + IPADBG("using default for lan-rx-ring-size = %u\n", + ipa_drv_res->lan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u", + ipa_drv_res->lan_rx_ring_size); + + ipa_drv_res->use_ipa_teth_bridge = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-ipa-tethering-bridge"); + IPADBG(": using TBDr = %s", + ipa_drv_res->use_ipa_teth_bridge + ? "True" : "False"); + + ipa_drv_res->ipa_bam_remote_mode = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-bam-remote-mode"); + IPADBG(": ipa bam remote mode = %s\n", + ipa_drv_res->ipa_bam_remote_mode + ? "True" : "False"); + + ipa_drv_res->modem_cfg_emb_pipe_flt = + of_property_read_bool(pdev->dev.of_node, + "qcom,modem-cfg-emb-pipe-flt"); + IPADBG(": modem configure embedded pipe filtering = %s\n", + ipa_drv_res->modem_cfg_emb_pipe_flt + ? "True" : "False"); + + ipa_drv_res->ipa_wdi2 = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-wdi2"); + IPADBG(": WDI-2.0 = %s\n", + ipa_drv_res->ipa_wdi2 + ? "True" : "False"); + + ipa_drv_res->skip_uc_pipe_reset = + of_property_read_bool(pdev->dev.of_node, + "qcom,skip-uc-pipe-reset"); + IPADBG(": skip uC pipe reset = %s\n", + ipa_drv_res->skip_uc_pipe_reset + ? "True" : "False"); + + ipa_drv_res->use_dma_zone = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-dma-zone"); + IPADBG(": use dma zone = %s\n", + ipa_drv_res->use_dma_zone + ? "True" : "False"); + + ipa_drv_res->tethered_flow_control = + of_property_read_bool(pdev->dev.of_node, + "qcom,tethered-flow-control"); + IPADBG(": Use apps based flow control = %s\n", + ipa_drv_res->tethered_flow_control + ? 
"True" : "False"); + + /* Get IPA wrapper address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-base"); + if (!resource) { + IPAERR(":get resource failed for ipa-base!\n"); + return -ENODEV; + } + ipa_drv_res->ipa_mem_base = resource->start; + ipa_drv_res->ipa_mem_size = resource_size(resource); + IPADBG(": ipa-base = 0x%x, size = 0x%x\n", + ipa_drv_res->ipa_mem_base, + ipa_drv_res->ipa_mem_size); + + smmu_info.ipa_base = ipa_drv_res->ipa_mem_base; + smmu_info.ipa_size = ipa_drv_res->ipa_mem_size; + + /* Get IPA BAM address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "bam-base"); + if (!resource) { + IPAERR(":get resource failed for bam-base!\n"); + return -ENODEV; + } + ipa_drv_res->bam_mem_base = resource->start; + ipa_drv_res->bam_mem_size = resource_size(resource); + IPADBG(": bam-base = 0x%x, size = 0x%x\n", + ipa_drv_res->bam_mem_base, + ipa_drv_res->bam_mem_size); + + /* Get IPA pipe mem start ofst */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-pipe-mem"); + if (!resource) { + IPADBG(":not using pipe memory - resource nonexisting\n"); + } else { + ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start; + ipa_drv_res->ipa_pipe_mem_size = resource_size(resource); + IPADBG(":using pipe memory - at 0x%x of size 0x%x\n", + ipa_drv_res->ipa_pipe_mem_start_ofst, + ipa_drv_res->ipa_pipe_mem_size); + } + + /* Get IPA IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "ipa-irq"); + if (!resource) { + IPAERR(":get resource failed for ipa-irq!\n"); + return -ENODEV; + } + ipa_drv_res->ipa_irq = resource->start; + IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq); + + /* Get IPA BAM IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "bam-irq"); + if (!resource) { + IPAERR(":get resource failed for bam-irq!\n"); + return -ENODEV; + } + ipa_drv_res->bam_irq = resource->start; + IPADBG(":ibam-irq = %d\n", ipa_drv_res->bam_irq); + + result = of_property_read_u32(pdev->dev.of_node, "qcom,ee", + &ipa_drv_res->ee); + if (result) + ipa_drv_res->ee = 0; + + /* Get IPA RX Polling Timeout Seconds */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,rx-polling-sleep-ms", + &ipa_drv_res->ipa_rx_polling_sleep_msec); + + if (result) { + ipa_drv_res->ipa_rx_polling_sleep_msec = ONE_MSEC; + IPADBG("using default polling timeout of 1MSec\n"); + } else { + IPADBG(": found ipa_drv_res->ipa_rx_polling_sleep_sec = %d", + ipa_drv_res->ipa_rx_polling_sleep_msec); + } + + /* Get IPA Polling Iteration */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,ipa-polling-iteration", + &ipa_drv_res->ipa_polling_iteration); + if (result) { + ipa_drv_res->ipa_polling_iteration = MAX_POLLING_ITERATION; + IPADBG("using default polling iteration\n"); + } else { + IPADBG(": found ipa_drv_res->ipa_polling_iteration = %d", + ipa_drv_res->ipa_polling_iteration); + } + + return 0; +} + +static int ipa_smmu_wlan_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_WLAN); + int fast = 0; + int bypass = 0; + u32 iova_ap_mapping[2]; + + IPADBG("WLAN CB PROBE dev=%pK retrieving IOMMU mapping\n", dev); + + cb->iommu_domain = iommu_get_domain_for_dev(dev); + if (IS_ERR_OR_NULL(cb->iommu_domain)) { + IPAERR("could not get iommu domain\n"); + return -EINVAL; + } + IPADBG("WLAN CB PROBE mapping retrieved\n"); + + cb->dev = dev; + cb->valid = true; + + cb->va_start = cb->va_end = cb->va_size = 0; + if (of_property_read_u32_array( + dev->of_node, 
"qcom,iommu-dma-addr-pool", + iova_ap_mapping, 2) == 0) { + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + } + + IPADBG("WLAN CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n", + dev, cb->va_start, cb->va_size); + + /* + * Prior to these calls to iommu_domain_get_attr(), these + * attributes were set in this function relative to dtsi values + * defined for this driver. In other words, if corresponding ipa + * driver owned values were found in the dtsi, they were read and + * set here. + * + * In this new world, the developer will use iommu owned dtsi + * settings to set them there. This new logic below, simply + * checks to see if they've been set in dtsi. If so, the logic + * further below acts accordingly... + */ + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass); + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast); + + IPADBG( + "WLAN CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n", + dev, bypass, fast); + + return 0; +} + +static int ipa_smmu_uc_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_UC); + int fast = 0; + int bypass = 0; + u32 iova_ap_mapping[2]; + + IPADBG("UC CB PROBE sub pdev=%p\n", dev); + + if (dma_set_mask(dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set mask failed\n"); + return -EOPNOTSUPP; + } + + IPADBG("UC CB PROBE dev=%pK retrieving IOMMU mapping\n", dev); + + cb->iommu_domain = iommu_get_domain_for_dev(dev); + if (IS_ERR_OR_NULL(cb->iommu_domain)) { + IPAERR("could not get iommu domain\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + IPADBG("UC CB PROBE mapping retrieved\n"); + + cb->dev = dev; + cb->valid = true; + + cb->va_start = cb->va_end = cb->va_size = 0; + + if (of_property_read_u32_array( + dev->of_node, "qcom,iommu-dma-addr-pool", + iova_ap_mapping, 2) == 0) { + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + } + + IPADBG("UC CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n", + dev, cb->va_start, cb->va_size); + + /* + * Prior to these calls to iommu_domain_get_attr(), these + * attributes were set in this function relative to dtsi values + * defined for this driver. In other words, if corresponding ipa + * driver owned values were found in the dtsi, they were read and + * set here. + * + * In this new world, the developer will use iommu owned dtsi + * settings to set them there. This new logic below, simply + * checks to see if they've been set in dtsi. If so, the logic + * further below acts accordingly... 
+	 */
+
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
+
+	IPADBG("UC CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
+		dev, bypass, fast);
+
+	ipa_ctx->uc_pdev = dev;
+
+	IPADBG("UC CB PROBE pdev=%p attached\n", dev);
+	return 0;
+}
+
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int result;
+	int fast = 0;
+	int bypass = 0;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("AP CB probe: dev=%pK\n", dev);
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+		IPAERR("DMA set mask failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	IPADBG("AP CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
+	cb->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
+		IPAERR("could not get iommu domain\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+
+	IPADBG("AP CB PROBE mapping retrieved\n");
+
+	cb->dev = dev;
+	cb->valid = true;
+
+	cb->va_start = cb->va_end = cb->va_size = 0;
+	if (of_property_read_u32_array(
+		dev->of_node, "qcom,iommu-dma-addr-pool",
+		iova_ap_mapping, 2) == 0) {
+		cb->va_start = iova_ap_mapping[0];
+		cb->va_size = iova_ap_mapping[1];
+		cb->va_end = cb->va_start + cb->va_size;
+	}
+
+
+	IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+	/*
+	 * Prior to these calls to iommu_domain_get_attr(), these
+	 * attributes were set in this function relative to dtsi values
+	 * defined for this driver. In other words, if corresponding ipa
+	 * driver owned values were found in the dtsi, they were read and
+	 * set here.
+	 *
+	 * In this new world, the developer will use iommu owned dtsi
+	 * settings to set them there. This new logic below simply
+	 * checks to see if they've been set in dtsi. If so, the logic
+	 * further below acts accordingly...
+	 */
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
+
+	IPADBG("AP CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
+		dev, bypass, fast);
+
+	if (!smmu_info.s1_bypass) {
+		IPAERR("map IPA region to AP_CB IOMMU\n");
+		result = ipa_iommu_map(cb->iommu_domain,
+			rounddown(smmu_info.ipa_base, PAGE_SIZE),
+			rounddown(smmu_info.ipa_base, PAGE_SIZE),
+			roundup(smmu_info.ipa_size, PAGE_SIZE),
+			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+		if (result) {
+			IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
+				result);
+			cb->valid = false;
+			return result;
+		}
+	}
+
+	smmu_info.present = true;
+
+	if (!bus_scale_table)
+		bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);
+
+	/* Proceed to real initialization */
+	result = ipa_init(&ipa_res, dev);
+	if (result) {
+		IPAERR("ipa_init failed\n");
+		cb->valid = false;
+		return result;
+	}
+
+	return result;
+}
+
+int ipa_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct device *dev = &pdev_p->dev;
+
+	IPADBG("IPA driver probing started\n");
+
+	/*
+	 * Due to late initialization of msm_bus in kernel >= 4.14, add
+	 * mechanism to defer IPA probing until msm_bus is initialized
+	 * successfully.
+ */ + if (of_device_is_compatible(dev->of_node, "qcom,ipa")) { + if (!ipa_pdev) + ipa_pdev = pdev_p; + if (!bus_scale_table) + bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev); + } + if (bus_scale_table != NULL) { + if (of_device_is_compatible(dev->of_node, "qcom,ipa")) { + /* + * Register with bus client to check if msm_bus + * is completely initialized. + */ + register_ipa_bus_hdl = + msm_bus_scale_register_client( + bus_scale_table); + if (!register_ipa_bus_hdl) { + IPAERR("fail to register with bus mgr!\n"); + bus_scale_table = NULL; + return -EPROBE_DEFER; + } + } + } else { + return -EPROBE_DEFER; + } + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) + return ipa_smmu_ap_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) + return ipa_smmu_wlan_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) + return ipa_smmu_uc_cb_probe(dev); + + master_dev = dev; + if (!ipa_pdev) + ipa_pdev = pdev_p; + + result = get_ipa_dts_configuration(pdev_p, &ipa_res); + if (result) { + IPAERR("IPA dts parsing failed\n"); + return result; + } + + result = ipa2_bind_api_controller(ipa_res.ipa_hw_type, api_ctrl); + if (result) { + IPAERR("IPA API binding failed\n"); + return result; + } + + if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) { + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,smmu-s1-bypass")) + smmu_info.s1_bypass = true; + smmu_info.arm_smmu = true; + pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n", + smmu_info.s1_bypass, smmu_info.fast_map); + result = of_platform_populate(pdev_p->dev.of_node, + pdrv_match, NULL, &pdev_p->dev); + + } else { + if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pdev_p->dev, + DMA_BIT_MASK(32))) { + IPAERR("DMA set mask failed\n"); + return -EOPNOTSUPP; + } + + if (!bus_scale_table) + bus_scale_table = msm_bus_cl_get_pdata(pdev_p); + + /* Proceed to real initialization */ + result = ipa_init(&ipa_res, dev); + if (result) { + IPAERR("ipa_init failed\n"); + return result; + } + } + IPADBG("IPA PROBE SUCCESSFUL, result %d\n", result); + + return result; +} + +/** + * ipa2_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP. + * This will postpone the suspend operation until IPA is no longer used by AP. + */ +int ipa2_ap_suspend(struct device *dev) +{ + int i; + + IPADBG("Enter...\n"); + + /* In case there is a tx/rx handler in polling mode fail to suspend */ + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if (ipa_ctx->ep[i].sys && + atomic_read(&ipa_ctx->ep[i].sys->curr_polling_state)) { + IPAERR("EP %d is in polling state, do not suspend\n", + i); + return -EAGAIN; + } + } + + /* release SPS IPA resource without waiting for inactivity timer */ + atomic_set(&ipa_ctx->sps_pm.eot_activity, 0); + ipa_sps_release_resource(NULL); + IPADBG("Exit\n"); + + return 0; +} + +/** + * ipa2_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Always returns 0 since resume should always succeed. 
+ */ +int ipa2_ap_resume(struct device *dev) +{ + return 0; +} + +struct ipa_context *ipa_get_ctx(void) +{ + return ipa_ctx; +} + +int ipa_iommu_map(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, size_t size, int prot) +{ + struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); + struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx(); + + IPADBG("domain =0x%p iova 0x%lx\n", domain, iova); + IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size); + + /* Checking the address overlapping */ + if (domain == ipa2_get_smmu_domain()) { + if (iova >= ap_cb->va_start && iova < ap_cb->va_end) + IPAERR("iommu AP overlap addr 0x%lx\n", iova); + } else if (domain == ipa2_get_wlan_smmu_domain()) { + /* wlan is one time map */ + } else if (domain == ipa2_get_uc_smmu_domain()) { + if (iova >= uc_cb->va_start && iova < uc_cb->va_end) + IPAERR("iommu uC overlap addr 0x%lx\n", iova); + } else { + IPAERR("Unexpected domain 0x%p\n", domain); + ipa_assert(); + return -EFAULT; + } + + return iommu_map(domain, iova, paddr, size, prot); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA HW device driver"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c new file mode 100644 index 0000000000000000000000000000000000000000..89e58a059086a030ab2465e9afd76dcab9906a64 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c @@ -0,0 +1,930 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2017, 2020, The Linux Foundation. All rights reserved. + */ +#include +#include +#include +#include "ipa_i.h" + +/* + * These values were determined empirically and shows good E2E bi- + * directional throughputs + */ +#define IPA_HOLB_TMR_EN 0x1 +#define IPA_HOLB_TMR_DIS 0x0 +#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff + +#define IPA_PKT_FLUSH_TO_US 100 + +int ipa_enable_data_path(u32 clnt_hdl) +{ + struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + int res = 0; + + IPADBG("Enabling data path\n"); + /* From IPA 2.0, disable HOLB */ + if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) && + IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* Enable the pipe */ + if (IPA_CLIENT_IS_CONS(ep->client) && + (ep->keep_ipa_awake || + ipa_ctx->resume_on_connect[ep->client] || + !ipa_should_pipe_be_suspended(ep->client))) { + memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + return res; +} + +int ipa_disable_data_path(u32 clnt_hdl) +{ + struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 aggr_init; + int res = 0; + + IPADBG("Disabling data path\n"); + /* On IPA 2.0, enable HOLB in order to prevent IPA from stalling */ + if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) && + IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = 0; + res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* Suspend the pipe */ + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = true; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + udelay(IPA_PKT_FLUSH_TO_US); + aggr_init = ipa_read_reg(ipa_ctx->mmio, + 
IPA_ENDP_INIT_AGGR_N_OFST_v2_0(clnt_hdl)); + if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >> + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) { + res = ipa_tag_aggr_force_close(clnt_hdl); + if (res) { + IPAERR("tag process timeout, client:%d err:%d\n", + clnt_hdl, res); + ipa_assert(); + } + } + + return res; +} + +int ipa2_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask) +{ + struct ipa_enable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + req.source_pipe_bitmask = source_pipe_bitmask; + if (throttle_source) { + req.throttle_source_valid = 1; + req.throttle_source = 1; + } + result = qmi_enable_force_clear_datapath_send(&req); + if (result) { + IPAERR("qmi_enable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +int ipa2_disable_force_clear(u32 request_id) +{ + struct ipa_disable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + result = qmi_disable_force_clear_datapath_send(&req); + if (result) { + IPAERR("qmi_disable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +static int ipa2_smmu_map_peer_bam(unsigned long dev) +{ + phys_addr_t base; + u32 size; + struct iommu_domain *smmu_domain; + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); + + if (!ipa_ctx->smmu_s1_bypass) { + if (ipa_ctx->peer_bam_map_cnt == 0) { + if (sps_get_bam_addr(dev, &base, &size)) { + IPAERR("Fail to get addr\n"); + return -EINVAL; + } + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + if (ipa_iommu_map(smmu_domain, + cb->va_end, + rounddown(base, PAGE_SIZE), + roundup(size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE | + IOMMU_MMIO)) { + IPAERR("Fail to ipa_iommu_map\n"); + return -EINVAL; + } + } + + ipa_ctx->peer_bam_iova = cb->va_end; + ipa_ctx->peer_bam_pa = base; + ipa_ctx->peer_bam_map_size = size; + ipa_ctx->peer_bam_dev = dev; + + IPADBG("Peer bam %lu mapped\n", dev); + } else { + WARN_ON(dev != ipa_ctx->peer_bam_dev); + } + + ipa_ctx->peer_bam_map_cnt++; + } + + return 0; +} + +static int ipa_connect_configure_sps(const struct ipa_connect_params *in, + struct ipa_ep_context *ep, int ipa_ep_idx) +{ + int result = -EFAULT; + + /* Default Config */ + ep->ep_hdl = sps_alloc_endpoint(); + + if (ipa2_smmu_map_peer_bam(in->client_bam_hdl)) { + IPAERR("fail to iommu map peer BAM.\n"); + return -EFAULT; + } + + if (ep->ep_hdl == NULL) { + IPAERR("SPS EP alloc failed EP.\n"); + return -EFAULT; + } + + result = sps_get_config(ep->ep_hdl, + &ep->connect); + if (result) { + IPAERR("fail to get config.\n"); + return -EFAULT; + } + + /* Specific Config */ + if (IPA_CLIENT_IS_CONS(in->client)) { + ep->connect.mode = SPS_MODE_SRC; + ep->connect.destination = + in->client_bam_hdl; + ep->connect.dest_iova = ipa_ctx->peer_bam_iova; + ep->connect.source = ipa_ctx->bam_handle; + ep->connect.dest_pipe_index = + in->client_ep_idx; + ep->connect.src_pipe_index = ipa_ep_idx; + } else { + ep->connect.mode = SPS_MODE_DEST; + ep->connect.source = in->client_bam_hdl; + ep->connect.source_iova = ipa_ctx->peer_bam_iova; + ep->connect.destination = ipa_ctx->bam_handle; + ep->connect.src_pipe_index = in->client_ep_idx; + ep->connect.dest_pipe_index = ipa_ep_idx; + } + + return 0; +} + +static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in, + struct sps_mem_buffer *mem_buff_ptr, + 
bool *fifo_in_pipe_mem_ptr, + u32 *fifo_pipe_mem_ofst_ptr, + u32 fifo_size, int ipa_ep_idx) +{ + dma_addr_t dma_addr; + u32 ofst; + int result = -EFAULT; + struct iommu_domain *smmu_domain; + + mem_buff_ptr->size = fifo_size; + if (in->pipe_mem_preferred) { + if (ipa_pipe_mem_alloc(&ofst, fifo_size)) { + IPAERR("FIFO pipe mem alloc fail ep %u\n", + ipa_ep_idx); + mem_buff_ptr->base = + dma_alloc_coherent(ipa_ctx->pdev, + mem_buff_ptr->size, + &dma_addr, GFP_KERNEL); + } else { + memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer)); + result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst, + fifo_size, 1); + WARN_ON(result); + *fifo_in_pipe_mem_ptr = true; + dma_addr = mem_buff_ptr->phys_base; + *fifo_pipe_mem_ofst_ptr = ofst; + } + } else { + mem_buff_ptr->base = + dma_alloc_coherent(ipa_ctx->pdev, mem_buff_ptr->size, + &dma_addr, GFP_KERNEL); + } + if (ipa_ctx->smmu_s1_bypass) { + mem_buff_ptr->phys_base = dma_addr; + } else { + mem_buff_ptr->iova = dma_addr; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + mem_buff_ptr->phys_base = + iommu_iova_to_phys(smmu_domain, dma_addr); + } + } + if (mem_buff_ptr->base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -EFAULT; + } + + return 0; +} + +/** + * ipa2_connect() - low-level IPA client connect + * @in: [in] input parameters from client + * @sps: [out] sps output from IPA needed by client for sps_connect + * @clnt_hdl: [out] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to connect to + * IPA in BAM-BAM mode. these peripherals are USB and HSIC. this api + * expects caller to take responsibility to add any needed headers, routing + * and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_connect(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa_ep_context *ep; + struct ipa_ep_cfg_status ep_status; + unsigned long base; + struct iommu_domain *smmu_domain; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + IPADBG("connecting client\n"); + + if (in == NULL || sps == NULL || clnt_hdl == NULL || + in->client >= IPA_CLIENT_MAX || + in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa2_get_ep_mapping(in->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_INC_EP(in->client); + + + ep->skip_ep_cfg = in->skip_ep_cfg; + ep->valid = 1; + ep->client = in->client; + ep->client_notify = in->notify; + ep->priv = in->priv; + ep->keep_ipa_awake = in->keep_ipa_awake; + + /* Notify uc to start monitoring holb on USB BAM Producer pipe. 
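+	 * ("holb" = head-of-line blocking; monitoring is stopped again from
+	 * ipa2_disconnect()/ipa2_disable_endpoint().)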
*/ + if (IPA_CLIENT_IS_USB_CONS(in->client)) { + ipa_uc_monitor_holb(in->client, true); + IPADBG("Enabling holb monitor for client:%d", in->client); + } + + result = ipa_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto ipa_cfg_ep_fail; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + /* Setting EP status 0 */ + memset(&ep_status, 0, sizeof(ep_status)); + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep_status)) { + IPAERR("fail to configure status of EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + result = ipa_connect_configure_sps(in, ep, ipa_ep_idx); + if (result) { + IPAERR("fail to configure SPS.\n"); + goto ipa_cfg_ep_fail; + } + + if (!ipa_ctx->smmu_s1_bypass && + (in->desc.base == NULL || + in->data.base == NULL)) { + IPAERR(" allocate FIFOs data_fifo=0x%p desc_fifo=0x%p.\n", + in->data.base, in->desc.base); + goto desc_mem_alloc_fail; + } + + if (in->desc.base == NULL) { + result = ipa_connect_allocate_fifo(in, &ep->connect.desc, + &ep->desc_fifo_in_pipe_mem, + &ep->desc_fifo_pipe_mem_ofst, + in->desc_fifo_sz, ipa_ep_idx); + if (result) { + IPAERR("fail to allocate DESC FIFO.\n"); + goto desc_mem_alloc_fail; + } + } else { + IPADBG("client allocated DESC FIFO\n"); + ep->connect.desc = in->desc; + ep->desc_fifo_client_allocated = true; + } + IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base, + ep->connect.desc.size); + + if (in->data.base == NULL) { + result = ipa_connect_allocate_fifo(in, &ep->connect.data, + &ep->data_fifo_in_pipe_mem, + &ep->data_fifo_pipe_mem_ofst, + in->data_fifo_sz, ipa_ep_idx); + if (result) { + IPAERR("fail to allocate DATA FIFO.\n"); + goto data_mem_alloc_fail; + } + } else { + IPADBG("client allocated DATA FIFO\n"); + ep->connect.data = in->data; + ep->data_fifo_client_allocated = true; + } + IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base, + ep->connect.data.size); + + if (!ipa_ctx->smmu_s1_bypass) { + ep->connect.data.iova = ep->connect.data.phys_base; + base = ep->connect.data.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + if (ipa_iommu_map(smmu_domain, + rounddown(base, PAGE_SIZE), + rounddown(base, PAGE_SIZE), + roundup(ep->connect.data.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE)) { + IPAERR("Fail to ipa_iommu_map data FIFO\n"); + goto iommu_map_data_fail; + } + } + ep->connect.desc.iova = ep->connect.desc.phys_base; + base = ep->connect.desc.iova; + if (smmu_domain != NULL) { + if (ipa_iommu_map(smmu_domain, + rounddown(base, PAGE_SIZE), + rounddown(base, PAGE_SIZE), + roundup(ep->connect.desc.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE)) { + IPAERR("Fail to ipa_iommu_map desc FIFO\n"); + goto iommu_map_desc_fail; + } + } + } + + if ((ipa_ctx->ipa_hw_type == IPA_HW_v2_0 || + ipa_ctx->ipa_hw_type == IPA_HW_v2_5 || + ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) && + IPA_CLIENT_IS_USB_CONS(in->client)) + ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD; + else + ep->connect.event_thresh = IPA_EVENT_THRESHOLD; + ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */ + + result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client); + if (result) { + IPAERR("sps_connect fails.\n"); + goto sps_connect_fail; + } + + sps->ipa_bam_hdl = 
ipa_ctx->bam_handle; + sps->ipa_ep_idx = ipa_ep_idx; + *clnt_hdl = ipa_ep_idx; + memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer)); + memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer)); + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client)) + ipa_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->client); + + IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx); + + return 0; + +sps_connect_fail: + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.desc.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.desc.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } +iommu_map_desc_fail: + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.data.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.data.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } +iommu_map_data_fail: + if (!ep->data_fifo_client_allocated) { + if (!ep->data_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.data.size, + ep->connect.data.base, + ep->connect.data.phys_base); + else + ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst, + ep->connect.data.size); + } +data_mem_alloc_fail: + if (!ep->desc_fifo_client_allocated) { + if (!ep->desc_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); + else + ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst, + ep->connect.desc.size); + } +desc_mem_alloc_fail: + sps_free_endpoint(ep->ep_hdl); +ipa_cfg_ep_fail: + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_EP(in->client); +fail: + return result; +} + +static int ipa2_smmu_unmap_peer_bam(unsigned long dev) +{ + size_t len; + struct iommu_domain *smmu_domain; + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); + + if (!ipa_ctx->smmu_s1_bypass) { + WARN_ON(dev != ipa_ctx->peer_bam_dev); + ipa_ctx->peer_bam_map_cnt--; + if (ipa_ctx->peer_bam_map_cnt == 0) { + len = roundup(ipa_ctx->peer_bam_map_size + + ipa_ctx->peer_bam_pa - + rounddown(ipa_ctx->peer_bam_pa, + PAGE_SIZE), PAGE_SIZE); + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + if (iommu_unmap(smmu_domain, + cb->va_end, len) != len) { + IPAERR("Fail to iommu_unmap\n"); + return -EINVAL; + } + IPADBG("Peer bam %lu unmapped\n", dev); + } + } + } + + return 0; +} + +/** + * ipa2_disconnect() - low-level IPA client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to disconnect + * from IPA in BAM-BAM mode. this api expects caller to take responsibility to + * free any needed headers, routing and filtering tables and rules as needed. 
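+ * Descriptor and data FIFOs that were allocated by ipa2_connect() on behalf
+ * of the client are freed here; FIFOs that the client allocated itself are
+ * left untouched.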
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disconnect(u32 clnt_hdl) +{ + int result; + struct ipa_ep_context *ep; + unsigned long peer_bam; + unsigned long base; + struct iommu_domain *smmu_domain; + struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0}; + int res; + enum ipa_client_type client_type; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + client_type = ipa2_get_client_mapping(clnt_hdl); + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(client_type); + + /* For USB 2.0 controller, first the ep will be disabled. + * so this sequence is not needed again when disconnecting the pipe. + */ + if (!ep->ep_disabled) { + /* Set Disconnect in Progress flag. */ + spin_lock(&ipa_ctx->disconnect_lock); + ep->disconnect_in_progress = true; + spin_unlock(&ipa_ctx->disconnect_lock); + + /* Notify uc to stop monitoring holb on USB BAM + * Producer pipe. + */ + if (IPA_CLIENT_IS_USB_CONS(ep->client)) { + ipa_uc_monitor_holb(ep->client, false); + IPADBG("Disabling holb monitor for client: %d\n", + ep->client); + } + + result = ipa_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", + result, clnt_hdl); + return -EPERM; + } + } + + result = sps_disconnect(ep->ep_hdl); + if (result) { + IPAERR("SPS disconnect failed.\n"); + return -EPERM; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) + peer_bam = ep->connect.destination; + else + peer_bam = ep->connect.source; + + if (ipa2_smmu_unmap_peer_bam(peer_bam)) { + IPAERR("fail to iommu unmap peer BAM.\n"); + return -EPERM; + } + + if (!ep->desc_fifo_client_allocated && + ep->connect.desc.base) { + if (!ep->desc_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); + else + ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst, + ep->connect.desc.size); + } + + if (!ep->data_fifo_client_allocated && + ep->connect.data.base) { + if (!ep->data_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.data.size, + ep->connect.data.base, + ep->connect.data.phys_base); + else + ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst, + ep->connect.data.size); + } + + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.desc.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.desc.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } + + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.data.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.data.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } + + result = sps_free_endpoint(ep->ep_hdl); + if (result) { + IPAERR("SPS de-alloc EP failed.\n"); + return -EPERM; + } + + ipa_delete_dflt_flt_rules(clnt_hdl); + + /* If APPS flow control is not enabled, send a message to modem to + * enable flow control honoring. + */ + if (!ipa_ctx->tethered_flow_control && ep->qmi_request_sent) { + /* Send a message to modem to disable flow control honoring. 
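+		 * This clears the force-clear request that may have been sent
+		 * earlier from ipa2_clear_endpoint_delay() (see
+		 * ep->qmi_request_sent above).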
+		 */
+		req.request_id = clnt_hdl;
+		res = qmi_disable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("disable_force_clear_datapath failed %d\n",
+				res);
+		}
+	}
+
+	spin_lock(&ipa_ctx->disconnect_lock);
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+	spin_unlock(&ipa_ctx->disconnect_lock);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa2_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_reset_endpoint(u32 clnt_hdl)
+{
+	int res;
+	struct ipa_ep_context *ep;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
+		IPAERR("Bad parameters.\n");
+		return -EFAULT;
+	}
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	res = sps_disconnect(ep->ep_hdl);
+	if (res) {
+		IPAERR("sps_disconnect() failed, res=%d.\n", res);
+		goto bail;
+	} else {
+		res = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect,
+			ep->client);
+		if (res) {
+			IPAERR("sps_connect() failed, res=%d.\n", res);
+			goto bail;
+		}
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return res;
+}
+
+/**
+ * ipa2_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * ep delay on IPA consumer pipe before disconnect in BAM-BAM mode. this api
+ * expects caller to take responsibility to free any needed headers, routing
+ * and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_clear_endpoint_delay(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (!ipa_ctx->tethered_flow_control) {
+		IPADBG("APPS flow control is not enabled\n");
+		/* Send a message to modem to disable flow control honoring. */
+		req.request_id = clnt_hdl;
+		req.source_pipe_bitmask = 1 << clnt_hdl;
+		res = qmi_enable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("enable_force_clear_datapath failed %d\n",
+				res);
+		}
+		ep->qmi_request_sent = true;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	/* Set disconnect in progress flag so further flow control events are
+	 * not honored.
+	 */
+	spin_lock(&ipa_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa_ctx->disconnect_lock);
+
+	/* If flow is disabled at this point, restore the ep state.*/
+	ep_ctrl.ipa_ep_delay = false;
+	ep_ctrl.ipa_ep_suspend = false;
+	ipa2_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa2_disable_endpoint() - low-level IPA client disable endpoint
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to
+ * disable the pipe from IPA in BAM-BAM mode.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl)
+{
+	int result;
+	struct ipa_ep_context *ep;
+	enum ipa_client_type client_type;
+	unsigned long bam;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+	client_type = ipa2_get_client_mapping(clnt_hdl);
+	IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	/* Set Disconnect in Progress flag. */
+	spin_lock(&ipa_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa_ctx->disconnect_lock);
+
+	/* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
+	if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+		ipa_uc_monitor_holb(ep->client, false);
+		IPADBG("Disabling holb monitor for client: %d\n", ep->client);
+	}
+
+	result = ipa_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			clnt_hdl);
+		goto fail;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		bam = ep->connect.source;
+	else
+		bam = ep->connect.destination;
+
+	result = sps_pipe_reset(bam, clnt_hdl);
+	if (result) {
+		IPAERR("SPS pipe reset failed.\n");
+		goto fail;
+	}
+
+	ep->ep_disabled = true;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+	return 0;
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+	return -EPERM;
+}
+
+
+/**
+ * ipa_sps_connect_safe() - connect endpoint from BAM perspective
+ * @h: [in] sps pipe handle
+ * @connect: [in] sps connect parameters
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * This function connects a BAM pipe using SPS driver sps_connect() API
+ * and by requesting uC interface to reset the pipe, avoids an IPA HW
+ * limitation that does not allow resetting a BAM pipe during traffic in
+ * IPA TX command queue.
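+ * The uC pipe reset is skipped when the IPA HW is newer than v2.5 or when
+ * skip_uc_pipe_reset is set.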
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect, + enum ipa_client_type ipa_client) +{ + int res; + + if (ipa_ctx->ipa_hw_type > IPA_HW_v2_5 || ipa_ctx->skip_uc_pipe_reset) { + IPADBG("uC pipe reset is not required\n"); + } else { + res = ipa_uc_reset_pipe(ipa_client); + if (res) + return res; + } + return sps_connect(h, connect); +} +EXPORT_SYMBOL(ipa_sps_connect_safe); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..11be67b8e8ef980e504a3e721258f8dbebed4320 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -0,0 +1,2327 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_MAX_MSG_LEN 4096 +#define IPA_DBG_CNTR_ON 127265 +#define IPA_DBG_CNTR_OFF 127264 +#define IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE ((IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN \ + * IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \ + + IPA_MAX_MSG_LEN) + +#define RX_MIN_POLL_CNT "Rx Min Poll Count" +#define RX_MAX_POLL_CNT "Rx Max Poll Count" +#define MAX_COUNT_LENGTH 6 +#define MAX_POLLING_ITERATION 40 +#define MIN_POLLING_ITERATION 1 + +#define IPA_DUMP_STATUS_FIELD(f) \ + pr_err(#f "=0x%x\n", status->f) + +static const char * const ipa_excp_name[] = { + __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0), + __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP), +}; + +static const char * const ipa_status_excp_name[] = { + __stringify_1(IPA_EXCP_DEAGGR), + __stringify_1(IPA_EXCP_REPLICATION), + __stringify_1(IPA_EXCP_IP), + __stringify_1(IPA_EXCP_IHL), + __stringify_1(IPA_EXCP_FRAG_MISS), + __stringify_1(IPA_EXCP_SW), + __stringify_1(IPA_EXCP_NAT), + __stringify_1(IPA_EXCP_NONE), +}; + +static const char * const ipa_event_name[] = { + __stringify(WLAN_CLIENT_CONNECT), + __stringify(WLAN_CLIENT_DISCONNECT), + __stringify(WLAN_CLIENT_POWER_SAVE_MODE), + __stringify(WLAN_CLIENT_NORMAL_MODE), + __stringify(SW_ROUTING_ENABLE), + __stringify(SW_ROUTING_DISABLE), + __stringify(WLAN_AP_CONNECT), + __stringify(WLAN_AP_DISCONNECT), + __stringify(WLAN_STA_CONNECT), + __stringify(WLAN_STA_DISCONNECT), + __stringify(WLAN_CLIENT_CONNECT_EX), + __stringify(WLAN_SWITCH_TO_SCC), + __stringify(WLAN_SWITCH_TO_MCC), + __stringify(WLAN_WDI_ENABLE), + __stringify(WLAN_WDI_DISABLE), + __stringify(WAN_UPSTREAM_ROUTE_ADD), + __stringify(WAN_UPSTREAM_ROUTE_DEL), + __stringify(WAN_EMBMS_CONNECT), + __stringify(WAN_XLAT_CONNECT), + __stringify(ECM_CONNECT), + __stringify(ECM_DISCONNECT), + __stringify(IPA_TETHERING_STATS_UPDATE_STATS), + __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS), + __stringify(IPA_QUOTA_REACH), + __stringify(IPA_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING), + __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT), + __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT), + __stringify(ADD_BRIDGE_VLAN_MAPPING), + 
__stringify(DEL_BRIDGE_VLAN_MAPPING), + __stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_GSB_CONNECT), + __stringify(IPA_GSB_DISCONNECT), +}; + +static const char * const ipa_hdr_l2_type_name[] = { + __stringify(IPA_HDR_L2_NONE), + __stringify(IPA_HDR_L2_ETHERNET_II), + __stringify(IPA_HDR_L2_802_3), +}; + +static const char *const ipa_hdr_proc_type_name[] = { + __stringify(IPA_HDR_PROC_NONE), + __stringify(IPA_HDR_PROC_ETHII_TO_ETHII), + __stringify(IPA_HDR_PROC_ETHII_TO_802_3), + __stringify(IPA_HDR_PROC_802_3_TO_ETHII), + __stringify(IPA_HDR_PROC_802_3_TO_802_3), +}; + +static struct dentry *dent; +static struct dentry *dfile_gen_reg; +static struct dentry *dfile_ep_reg; +static struct dentry *dfile_keep_awake; +static struct dentry *dfile_ep_holb; +static struct dentry *dfile_hdr; +static struct dentry *dfile_proc_ctx; +static struct dentry *dfile_ip4_rt; +static struct dentry *dfile_ip6_rt; +static struct dentry *dfile_ip4_flt; +static struct dentry *dfile_ip6_flt; +static struct dentry *dfile_stats; +static struct dentry *dfile_wstats; +static struct dentry *dfile_wdi_stats; +static struct dentry *dfile_ntn_stats; +static struct dentry *dfile_dbg_cnt; +static struct dentry *dfile_msg; +static struct dentry *dfile_ip4_nat; +static struct dentry *dfile_rm_stats; +static struct dentry *dfile_status_stats; +static struct dentry *dfile_active_clients; +static struct dentry *dfile_ipa_rx_poll_timeout; +static struct dentry *dfile_ipa_poll_iteration; + +static char dbg_buff[IPA_MAX_MSG_LEN]; +static char *active_clients_buf; +static s8 ep_reg_idx; +static void *ipa_ipc_low_buff; + +int _ipa_read_gen_reg_v1_1(char *buff, int max_len) +{ + return scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_VERSION=0x%x\n" + "IPA_COMP_HW_VERSION=0x%x\n" + "IPA_ROUTE=0x%x\n" + "IPA_FILTER=0x%x\n" + "IPA_SHARED_MEM_SIZE=0x%x\n", + ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST), + ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST), + ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1), + ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1), + ipa_read_reg(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v1_1)); +} + +int _ipa_read_gen_reg_v2_0(char *buff, int max_len) +{ + return scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_VERSION=0x%x\n" + "IPA_COMP_HW_VERSION=0x%x\n" + "IPA_ROUTE=0x%x\n" + "IPA_FILTER=0x%x\n" + "IPA_SHARED_MEM_RESTRICTED=0x%x\n" + "IPA_SHARED_MEM_SIZE=0x%x\n", + ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST), + ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST), + ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1), + ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1), + ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0), + ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0)); +} + +static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + nbytes = ipa_ctx->ctrl->ipa_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_write_ep_holb(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ep_cfg_holb holb; + u32 en; + u32 tmr_val; + u32 ep_idx; + unsigned long missing; + char *sptr, *token; + + if (sizeof(dbg_buff) < count + 
1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &ep_idx)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &en)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &tmr_val)) + return -EINVAL; + + holb.en = en; + holb.tmr_val = tmr_val; + + ipa2_cfg_ep_holb(ep_idx, &holb); + + return count; +} + +static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option >= ipa_ctx->ipa_num_pipes) { + IPAERR("bad pipe specified %u\n", option); + return count; + } + + ep_reg_idx = option; + + return count; +} + +int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe) +{ + return scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_ROUTE_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n", + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe)) + ); +} + +int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe) +{ + return scnprintf( + dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_ROUTE_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n" + "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CFG_%u=0x%x\n", + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CFG_n_OFST(pipe))); +} + +static ssize_t ipa_read_ep_reg(struct 
file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int start_idx; + int end_idx; + int size = 0; + int ret; + loff_t pos; + + /* negative ep_reg_idx means all registers */ + if (ep_reg_idx < 0) { + start_idx = 0; + end_idx = ipa_ctx->ipa_num_pipes; + } else { + start_idx = ep_reg_idx; + end_idx = start_idx + 1; + } + pos = *ppos; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + for (i = start_idx; i < end_idx; i++) { + + nbytes = ipa_ctx->ctrl->ipa_read_ep_reg(dbg_buff, + IPA_MAX_MSG_LEN, i); + + *ppos = pos; + ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff, + nbytes); + if (ret < 0) { + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return ret; + } + + size += ret; + ubuf += nbytes; + count -= nbytes; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + *ppos = pos + size; + return size; +} + +static ssize_t ipa_write_keep_awake(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option == 1) + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + else if (option == 0) + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + else + return -EFAULT; + + return count; +} + +static ssize_t ipa_read_keep_awake(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + ipa_active_clients_lock(); + if (ipa_ctx->ipa_active_clients.cnt) + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA APPS power state is ON\n"); + else + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA APPS power state is OFF\n"); + ipa_active_clients_unlock(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int nbytes = 0; + int i = 0; + struct ipa_hdr_entry *entry; + + mutex_lock(&ipa_ctx->lock); + + if (ipa_ctx->hdr_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + + list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (entry->cookie != IPA_HDR_COOKIE) + continue; + nbytes = scnprintf( + dbg_buff, + IPA_MAX_MSG_LEN, + "name:%s len=%d ref=%d partial=%d type=%s ", + entry->name, + entry->hdr_len, + entry->ref_cnt, + entry->is_partial, + ipa_hdr_l2_type_name[entry->type]); + + if (entry->is_hdr_proc_ctx) { + nbytes += scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "phys_base=0x%pa ", + &entry->phys_base); + } else { + nbytes += scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "ofst=%u ", + entry->offset_entry->offset >> 2); + } + for (i = 0; i < entry->hdr_len; i++) { + scnprintf(dbg_buff + nbytes + i * 2, + IPA_MAX_MSG_LEN - nbytes - i * 2, + "%02x", entry->hdr[i]); + } + scnprintf(dbg_buff + nbytes + entry->hdr_len * 2, + IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2, + "\n"); + pr_err("%s\n", dbg_buff); + } + mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +static int ipa_attrib_dump(struct ipa_rule_attrib *attrib, + enum ipa_ip_type ip) +{ + __be32 addr[4]; + __be32 mask[4]; + int nbytes = 0; + int i; + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tos_value:%d ", attrib->tos_value); + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + 
"tos_mask:%d ", attrib->tos_mask); + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "protocol:%d ", attrib->u.v4.protocol); + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.src_addr); + mask[0] = htonl(attrib->u.v4.src_addr_mask); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "src_addr:%pI4 src_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.src_addr[i]); + mask[i] = htonl(attrib->u.v6.src_addr_mask[i]); + } + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "src_addr:%pI6 src_addr_mask:%pI6 ", + addr + 0, mask + 0); + } else { + WARN_ON(1); + } + } + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "dst_addr:%pI4 dst_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.dst_addr[i]); + mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]); + } + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "dst_addr:%pI6 dst_addr_mask:%pI6 ", + addr + 0, mask + 0); + } else { + WARN_ON(1); + } + } + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "src_port_range:%u %u ", + attrib->src_port_lo, + attrib->src_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_port_range:%u %u ", + attrib->dst_port_lo, + attrib->dst_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_TYPE) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "type:%d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_CODE) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "code:%d ", attrib->code); + + if (attrib->attrib_mask & IPA_FLT_SPI) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "spi:%x ", attrib->spi); + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "src_port:%u ", attrib->src_port); + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_port:%u ", attrib->dst_port); + + if (attrib->attrib_mask & IPA_FLT_TC) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tc:%d ", attrib->u.v6.tc); + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "flow_label:%x ", attrib->u.v6.flow_label); + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "next_hdr:%d ", attrib->u.v6.next_hdr); + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "metadata:%x metadata_mask:%x", + attrib->meta_data, attrib->meta_data_mask); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "frg "); + + if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "src_mac_addr:%pM ", attrib->src_mac_addr); + } + + 
if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_mac_addr:%pM ", attrib->dst_mac_addr); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "ether_type:%x ", attrib->ether_type); + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "l2tp inner ip type: %d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_addr:%pI4 dst_addr_mask:%pI4 ", + addr, mask); + } + + pr_err("%s\n", dbg_buff); + return 0; +} + +static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) +{ + uint8_t addr[16]; + uint8_t mask[16]; + int nbytes = 0; + int i; + int j; + + if (attrib->tos_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tos_value:%d ", attrib->tos_eq); + + if (attrib->protocol_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "protocol:%d ", attrib->protocol_eq); + + if (attrib->num_ihl_offset_range_16 > + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) { + IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS, + attrib->num_ihl_offset_range_16); + return -EPERM; + } + + for (i = 0; i < attrib->num_ihl_offset_range_16; i++) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ", + attrib->ihl_offset_range_16[i].offset, + attrib->ihl_offset_range_16[i].range_low, + attrib->ihl_offset_range_16[i].range_high); + } + + if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) { + IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32); + return -EPERM; + } + + for (i = 0; i < attrib->num_offset_meq_32; i++) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ", + attrib->offset_meq_32[i].offset, + attrib->offset_meq_32[i].mask, + attrib->offset_meq_32[i].value); + } + + if (attrib->tc_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tc:%d ", attrib->tc_eq); + + if (attrib->fl_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "flow_label:%d ", attrib->fl_eq); + + if (attrib->ihl_offset_eq_16_present) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ihl_ofst_eq16:%d val:0x%x) ", + attrib->ihl_offset_eq_16.offset, + attrib->ihl_offset_eq_16.value); + } + + if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) { + IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32); + return -EPERM; + } + + for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ", + attrib->ihl_offset_meq_32[i].offset, + attrib->ihl_offset_meq_32[i].mask, + attrib->ihl_offset_meq_32[i].value); + } + + if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) { + IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128); + return -EPERM; + } + + for (i = 0; i < 
attrib->num_offset_meq_128; i++) { + for (j = 0; j < 16; j++) { + addr[j] = attrib->offset_meq_128[i].value[j]; + mask[j] = attrib->offset_meq_128[i].mask[j]; + } + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ", + attrib->offset_meq_128[i].offset, + mask + 0, + addr + 0); + } + + if (attrib->metadata_meq32_present) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(metadata: ofst:%u mask:0x%x val:0x%x) ", + attrib->metadata_meq32.offset, + attrib->metadata_meq32.mask, + attrib->metadata_meq32.value); + } + + if (attrib->ipv4_frag_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "frg "); + + pr_err("%s\n", dbg_buff); + return 0; +} + +static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i = 0; + int nbytes = 0; + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + struct ipa_rt_tbl_set *set; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + u32 ofst; + u32 ofst_words; + + set = &ipa_ctx->rt_tbl_set[ip]; + + mutex_lock(&ipa_ctx->lock); + + if (ip == IPA_IP_v6) { + if (ipa_ctx->ip6_rt_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + } else if (ip == IPA_IP_v4) { + if (ipa_ctx->ip4_rt_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + } + + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + i = 0; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + if (entry->proc_ctx) { + ofst = entry->proc_ctx->offset_entry->offset; + ofst_words = + (ofst + + ipa_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa2_get_ep_mapping(entry->rule.dst), + !ipa_ctx->hdr_tbl_lcl); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "proc_ctx[32B]:%u attrib_mask:%08x ", + ofst_words, + entry->rule.attrib.attrib_mask); + } else { + if (entry->hdr) + ofst = entry->hdr->offset_entry->offset; + else + ofst = 0; + + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa2_get_ep_mapping(entry->rule.dst), + !ipa_ctx->hdr_tbl_lcl); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr_ofst[words]:%u attrib_mask:%08x ", + ofst >> 2, + entry->rule.attrib.attrib_mask); + } + + pr_err("%s\n", dbg_buff); + ipa_attrib_dump(&entry->rule.attrib, ip); + i++; + } + } + mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +static ssize_t ipa_read_proc_ctx(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_hdr_proc_ctx_tbl *tbl; + struct ipa_hdr_proc_ctx_entry *entry; + u32 ofst_words; + + tbl = &ipa_ctx->hdr_proc_ctx_tbl; + + mutex_lock(&ipa_ctx->lock); + + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) + pr_info("Table resides on local memory\n"); + else + pr_info("Table resides on system(ddr) memory\n"); + + list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) { + ofst_words = 
(entry->offset_entry->offset + + ipa_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + if (entry->hdr->is_hdr_proc_ctx) { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr_phys_base:0x%pa\n", + &entry->hdr->phys_base); + } else { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr[words]:%u\n", + entry->hdr->offset_entry->offset >> 2); + } + } + mutex_unlock(&ipa_ctx->lock); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i; + int j; + int nbytes = 0; + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + struct ipa_rt_tbl *rt_tbl; + u32 rt_tbl_idx; + u32 bitmap; + bool eq; + int res = 0; + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + mutex_lock(&ipa_ctx->lock); + i = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->cookie != IPA_FLT_COOKIE) + continue; + if (entry->rule.eq_attrib_type) { + rt_tbl_idx = entry->rule.rt_tbl_idx; + bitmap = entry->rule.eq_attrib.rule_eq_bitmap; + eq = true; + } else { + rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl); + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; + else + rt_tbl_idx = rt_tbl->idx; + bitmap = entry->rule.attrib.attrib_mask; + eq = false; + } + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d ", + i, entry->rule.action, rt_tbl_idx); + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "attrib_mask:%08x retain_hdr:%d eq:%d ", + bitmap, entry->rule.retain_hdr, eq); + if (eq) { + res = ipa_attrib_dump_eq( + &entry->rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } else + ipa_attrib_dump( + &entry->rule.attrib, ip); + i++; + } + + for (j = 0; j < ipa_ctx->ipa_num_pipes; j++) { + tbl = &ipa_ctx->flt_tbl[j][ip]; + i = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->cookie != IPA_FLT_COOKIE) + continue; + if (entry->rule.eq_attrib_type) { + rt_tbl_idx = entry->rule.rt_tbl_idx; + bitmap = entry->rule.eq_attrib.rule_eq_bitmap; + eq = true; + } else { + rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl); + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; + else + rt_tbl_idx = rt_tbl->idx; + bitmap = entry->rule.attrib.attrib_mask; + eq = false; + } + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + j, i, entry->rule.action, rt_tbl_idx); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "attrib_mask:%08x retain_hdr:%d ", + bitmap, entry->rule.retain_hdr); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "eq:%d ", eq); + pr_err("%s\n", dbg_buff); + if (eq) { + res = ipa_attrib_dump_eq( + &entry->rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } else + ipa_attrib_dump( + &entry->rule.attrib, ip); + i++; + } + } +bail: + mutex_unlock(&ipa_ctx->lock); + + return res; +} + +static 
ssize_t ipa_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int cnt = 0; + uint connect = 0; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) + connect |= (ipa_ctx->ep[i].valid << i); + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "sw_tx=%u\n" + "hw_tx=%u\n" + "tx_non_linear=%u\n" + "tx_compl=%u\n" + "wan_rx=%u\n" + "stat_compl=%u\n" + "lan_aggr_close=%u\n" + "wan_aggr_close=%u\n" + "act_clnt=%u\n" + "con_clnt_bmap=0x%x\n" + "wan_rx_empty=%u\n" + "wan_repl_rx_empty=%u\n" + "lan_rx_empty=%u\n" + "lan_repl_rx_empty=%u\n" + "flow_enable=%u\n" + "flow_disable=%u\n", + ipa_ctx->stats.tx_sw_pkts, + ipa_ctx->stats.tx_hw_pkts, + ipa_ctx->stats.tx_non_linear, + ipa_ctx->stats.tx_pkts_compl, + ipa_ctx->stats.rx_pkts, + ipa_ctx->stats.stat_compl, + ipa_ctx->stats.aggr_close, + ipa_ctx->stats.wan_aggr_close, + ipa_ctx->ipa_active_clients.cnt, + connect, + ipa_ctx->stats.wan_rx_empty, + ipa_ctx->stats.wan_repl_rx_empty, + ipa_ctx->stats.lan_rx_empty, + ipa_ctx->stats.lan_repl_rx_empty, + ipa_ctx->stats.flow_enable, + ipa_ctx->stats.flow_disable); + cnt += nbytes; + + for (i = 0; i < MAX_NUM_EXCP; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, + "lan_rx_excp[%u:%20s]=%u\n", i, + ipa_status_excp_name[i], + ipa_ctx->stats.rx_excp_pkts[i]); + cnt += nbytes; + } + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "sw_tx=%u\n" + "hw_tx=%u\n" + "rx=%u\n" + "rx_repl_repost=%u\n" + "rx_q_len=%u\n" + "act_clnt=%u\n" + "con_clnt_bmap=0x%x\n", + ipa_ctx->stats.tx_sw_pkts, + ipa_ctx->stats.tx_hw_pkts, + ipa_ctx->stats.rx_pkts, + ipa_ctx->stats.rx_repl_repost, + ipa_ctx->stats.rx_q_len, + ipa_ctx->ipa_active_clients.cnt, + connect); + cnt += nbytes; + + for (i = 0; i < MAX_NUM_EXCP; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, + "rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i], + ipa_ctx->stats.rx_excp_pkts[i]); + cnt += nbytes; + } + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_wstats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + +#define HEAD_FRMT_STR "%25s\n" +#define FRMT_STR "%25s %10u\n" +#define FRMT_STR1 "%25s %10u\n\n" + + int cnt = 0; + int nbytes; + int ipa_ep_idx; + enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD; + struct ipa_ep_context *ep; + + do { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:"); + cnt += nbytes; + + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Avail Fifo Desc:", + atomic_read(&ep->avail_fifo_desc)); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Status Rcvd:", + ep->wstats.rx_pkts_status_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + 
FRMT_STR, "Rx DH Processed:", + ep->wstats.rx_hd_processed); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail); + cnt += nbytes; + + } while (0); + + client = IPA_CLIENT_WLAN1_CONS; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN1_CONS Stats:"); + cnt += nbytes; + while (1) { + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Tx Pkts Dropped:", + ep->wstats.tx_pkts_dropped); + cnt += nbytes; + +nxt_clnt_cons: + switch (client) { + case IPA_CLIENT_WLAN1_CONS: + client = IPA_CLIENT_WLAN2_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN2_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN2_CONS: + client = IPA_CLIENT_WLAN3_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN3_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN3_CONS: + client = IPA_CLIENT_WLAN4_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN4_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN4_CONS: + default: + break; + } + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Allocated:", + ipa_ctx->wc_memb.wlan_comm_total_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Avail:", ipa_ctx->wc_memb.wlan_comm_free_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1, + "Total Tx Pkts Freed:", ipa_ctx->wc_memb.total_tx_pkts_freed); + cnt += nbytes; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ +#define TX_STATS(y) \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + struct IpaHwStatsNTNInfoData_t stats; + int nbytes; + int cnt = 0; + + if (!ipa2_get_ntn_stats(&stats)) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX tail_ptr_val=%u\n" + "TX num_db_fired=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX 
bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_unexpected_db=%u\n" + "TX num_bam_int_handled=%u\n" + "TX num_bam_int_in_non_running_state=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX num_bam_int_handled_while_wait_for_bam=%u\n" + "TX num_bam_int_handled_while_not_in_bam=%u\n", + TX_STATS(num_pkts_processed), + TX_STATS(tail_ptr_val), + TX_STATS(num_db_fired), + TX_STATS(tx_comp_ring_stats.ringFull), + TX_STATS(tx_comp_ring_stats.ringEmpty), + TX_STATS(tx_comp_ring_stats.ringUsageHigh), + TX_STATS(tx_comp_ring_stats.ringUsageLow), + TX_STATS(tx_comp_ring_stats.RingUtilCount), + TX_STATS(bam_stats.bamFifoFull), + TX_STATS(bam_stats.bamFifoEmpty), + TX_STATS(bam_stats.bamFifoUsageHigh), + TX_STATS(bam_stats.bamFifoUsageLow), + TX_STATS(bam_stats.bamUtilCount), + TX_STATS(num_db), + TX_STATS(num_unexpected_db), + TX_STATS(num_bam_int_handled), + TX_STATS(num_bam_int_in_non_running_state), + TX_STATS(num_qmb_int_handled), + TX_STATS(num_bam_int_handled_while_wait_for_bam), + TX_STATS(num_bam_int_handled_while_not_in_bam)); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX max_outstanding_pkts=%u\n" + "RX num_pkts_processed=%u\n" + "RX rx_ring_rp_value=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_bam_int_handled=%u\n" + "RX num_db=%u\n" + "RX num_unexpected_db=%u\n" + "RX num_pkts_in_dis_uninit_state=%u\n" + "num_ic_inj_vdev_change=%u\n" + "num_ic_inj_fw_desc_change=%u\n", + RX_STATS(max_outstanding_pkts), + RX_STATS(num_pkts_processed), + RX_STATS(rx_ring_rp_value), + RX_STATS(rx_ind_ring_stats.ringFull), + RX_STATS(rx_ind_ring_stats.ringEmpty), + RX_STATS(rx_ind_ring_stats.ringUsageHigh), + RX_STATS(rx_ind_ring_stats.ringUsageLow), + RX_STATS(rx_ind_ring_stats.RingUtilCount), + RX_STATS(bam_stats.bamFifoFull), + RX_STATS(bam_stats.bamFifoEmpty), + RX_STATS(bam_stats.bamFifoUsageHigh), + RX_STATS(bam_stats.bamFifoUsageLow), + RX_STATS(bam_stats.bamUtilCount), + RX_STATS(num_bam_int_handled), + RX_STATS(num_db), + RX_STATS(num_unexpected_db), + RX_STATS(num_pkts_in_dis_uninit_state), + RX_STATS(num_bam_int_handled_while_not_in_bam), + RX_STATS(num_bam_int_handled_while_in_bam_state)); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read NTN stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct IpaHwStatsWDIInfoData_t stats; + int nbytes; + int cnt = 0; + + if (!ipa2_get_wdi_stats(&stats)) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX copy_engine_doorbell_value=%u\n" + "TX num_db_fired=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_unexpected_db=%u\n" + "TX num_bam_int_handled=%u\n" + "TX num_bam_int_in_non_running_state=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX num_bam_int_handled_while_wait_for_bam=%u\n", + stats.tx_ch_stats.num_pkts_processed, + 
stats.tx_ch_stats.copy_engine_doorbell_value, + stats.tx_ch_stats.num_db_fired, + stats.tx_ch_stats.tx_comp_ring_stats.ringFull, + stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty, + stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh, + stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow, + stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount, + stats.tx_ch_stats.bam_stats.bamFifoFull, + stats.tx_ch_stats.bam_stats.bamFifoEmpty, + stats.tx_ch_stats.bam_stats.bamFifoUsageHigh, + stats.tx_ch_stats.bam_stats.bamFifoUsageLow, + stats.tx_ch_stats.bam_stats.bamUtilCount, + stats.tx_ch_stats.num_db, + stats.tx_ch_stats.num_unexpected_db, + stats.tx_ch_stats.num_bam_int_handled, + stats.tx_ch_stats.num_bam_int_in_non_running_state, + stats.tx_ch_stats.num_qmb_int_handled, + stats.tx_ch_stats.num_bam_int_handled_while_wait_for_bam); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX max_outstanding_pkts=%u\n" + "RX num_pkts_processed=%u\n" + "RX rx_ring_rp_value=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_bam_int_handled=%u\n" + "RX num_db=%u\n" + "RX num_unexpected_db=%u\n" + "RX num_pkts_in_dis_uninit_state=%u\n" + "RX num_ic_inj_vdev_change=%u\n" + "RX num_ic_inj_fw_desc_change=%u\n" + "RX num_qmb_int_handled=%u\n" + "RX reserved1=%u\n" + "RX reserved2=%u\n", + stats.rx_ch_stats.max_outstanding_pkts, + stats.rx_ch_stats.num_pkts_processed, + stats.rx_ch_stats.rx_ring_rp_value, + stats.rx_ch_stats.rx_ind_ring_stats.ringFull, + stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow, + stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount, + stats.rx_ch_stats.bam_stats.bamFifoFull, + stats.rx_ch_stats.bam_stats.bamFifoEmpty, + stats.rx_ch_stats.bam_stats.bamFifoUsageHigh, + stats.rx_ch_stats.bam_stats.bamFifoUsageLow, + stats.rx_ch_stats.bam_stats.bamUtilCount, + stats.rx_ch_stats.num_bam_int_handled, + stats.rx_ch_stats.num_db, + stats.rx_ch_stats.num_unexpected_db, + stats.rx_ch_stats.num_pkts_in_dis_uninit_state, + stats.rx_ch_stats.num_ic_inj_vdev_change, + stats.rx_ch_stats.num_ic_inj_fw_desc_change, + stats.rx_ch_stats.num_qmb_int_handled, + stats.rx_ch_stats.reserved1, + stats.rx_ch_stats.reserved2); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read WDI stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +void _ipa_write_dbg_cnt_v1_1(int option) +{ + if (option == 1) + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0), + IPA_DBG_CNTR_ON); + else + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0), + IPA_DBG_CNTR_OFF); +} + +void _ipa_write_dbg_cnt_v2_0(int option) +{ + if (option == 1) + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0), + IPA_DBG_CNTR_ON); + else + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0), + IPA_DBG_CNTR_OFF); +} + +static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + u32 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtou32(dbg_buff, 0, &option)) + return 
-EFAULT; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_ctx->ctrl->ipa_write_dbg_cnt(option); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return count; +} + +int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len) +{ + int regval; + + regval = ipa_read_reg(ipa_ctx->mmio, + IPA_DEBUG_CNT_REG_N_OFST_v1_1(0)); + + return scnprintf(buf, max_len, + "IPA_DEBUG_CNT_REG_0=0x%x\n", regval); +} + +int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len) +{ + int regval; + + regval = ipa_read_reg(ipa_ctx->mmio, + IPA_DEBUG_CNT_REG_N_OFST_v2_0(0)); + + return scnprintf(buf, max_len, + "IPA_DEBUG_CNT_REG_0=0x%x\n", regval); +} + +static ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + nbytes = ipa_ctx->ctrl->ipa_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_read_msg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int cnt = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(ipa_event_name); i++) { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "msg[%u:%27s] W:%u R:%u\n", i, + ipa_event_name[i], + ipa_ctx->stats.msg_w[i], + ipa_ctx->stats.msg_r[i]); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_nat4(struct file *file, + char __user *ubuf, size_t count, + loff_t *ppos) +{ + +#define ENTRY_U32_FIELDS 8 +#define NAT_ENTRY_ENABLE 0x8000 +#define NAT_ENTRY_RST_FIN_BIT 0x4000 +#define BASE_TABLE 0 +#define EXPANSION_TABLE 1 + + u32 *base_tbl, *indx_tbl; + u32 tbl_size, *tmp; + u32 value, i, j, rule_id; + u16 enable, tbl_entry, flag; + u32 no_entries = 0; + int nbytes = 0; + + mutex_lock(&ipa_ctx->nat_mem.lock); + value = ipa_ctx->nat_mem.public_ip_addr; + pr_err( + "Table IP Address:%d.%d.%d.%d\n", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + + pr_err("Table Size:%d\n", + ipa_ctx->nat_mem.size_base_tables); + + if (!ipa_ctx->nat_mem.size_expansion_tables) + pr_err("Expansion Table Size:%d\n", + ipa_ctx->nat_mem.size_expansion_tables); + else + pr_err("Expansion Table Size:%d\n", + ipa_ctx->nat_mem.size_expansion_tables-1); + + if (!ipa_ctx->nat_mem.is_sys_mem) + pr_err("Not supported for local(shared) memory\n"); + + /* Print Base tables */ + rule_id = 0; + for (j = 0; j < 2; j++) { + if (j == BASE_TABLE) { + tbl_size = ipa_ctx->nat_mem.size_base_tables; + base_tbl = (u32 *)ipa_ctx->nat_mem.ipv4_rules_addr; + + pr_err("\nBase Table:\n"); + } else { + if (!ipa_ctx->nat_mem.size_expansion_tables) + continue; + tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1; + base_tbl = + (u32 *)ipa_ctx->nat_mem.ipv4_expansion_rules_addr; + + pr_err("\nExpansion Base Table:\n"); + } + + if (base_tbl != NULL) { + for (i = 0; i <= tbl_size; i++, rule_id++) { + tmp = base_tbl; + value = tmp[4]; + enable = ((value & 0xFFFF0000) >> 16); + + if (enable & NAT_ENTRY_ENABLE) { + no_entries++; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Rule:%d ", rule_id); + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Private_IP:%d.%d.%d.%d ", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Target_IP:%d.%d.%d.%d 
", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Next_Index:%d Public_Port:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Private_Port:%d Target_Port:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + flag = ((value & 0xFFFF0000) >> 16); + if (flag & NAT_ENTRY_RST_FIN_BIT) { + nbytes = scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "IP_CKSM_delta:0x%x Flags:%s ", + (value & 0x0000FFFF), + "Direct_To_A5"); + } else { + nbytes = scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "IP_CKSM_delta:0x%x Flags:%s ", + (value & 0x0000FFFF), + "Fwd_to_route"); + } + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Time_stamp:0x%x Proto:%d ", + (value & 0x00FFFFFF), + ((value & 0xFF000000) >> 24)); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Prev_Index:%d Indx_tbl_entry:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "TCP_UDP_cksum_delta:0x%x ", + ((value & 0xFFFF0000) >> 16)); + pr_err("%s\n", dbg_buff); + } + + base_tbl += ENTRY_U32_FIELDS; + + } + } + } + + /* Print Index tables */ + rule_id = 0; + for (j = 0; j < 2; j++) { + if (j == BASE_TABLE) { + tbl_size = ipa_ctx->nat_mem.size_base_tables; + indx_tbl = (u32 *)ipa_ctx->nat_mem.index_table_addr; + + pr_err("\nIndex Table:\n"); + } else { + if (!ipa_ctx->nat_mem.size_expansion_tables) + continue; + tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1; + indx_tbl = + (u32 *)ipa_ctx->nat_mem.index_table_expansion_addr; + + pr_err("\nExpansion Index Table:\n"); + } + + if (indx_tbl != NULL) { + for (i = 0; i <= tbl_size; i++, rule_id++) { + tmp = indx_tbl; + value = *tmp; + tbl_entry = (value & 0x0000FFFF); + + if (tbl_entry) { + value = *tmp; + pr_err( + "Rule:%d Table_Ent:%d Next_Index:%d\n", + rule_id, + tbl_entry, + ((value & 0xFFFF0000) >> 16)); + } + + indx_tbl++; + } + } + } + pr_err("Current No. 
Nat Entries: %d\n", no_entries); + mutex_unlock(&ipa_ctx->nat_mem.lock); + + return 0; +} + +static ssize_t ipa_rm_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int result, nbytes, cnt = 0; + + result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN); + if (result < 0) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Error in printing RM stat %d\n", result); + cnt += nbytes; + } else + cnt += result; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static void ipa_dump_status(struct ipa_hw_pkt_status *status) +{ + IPA_DUMP_STATUS_FIELD(status_opcode); + IPA_DUMP_STATUS_FIELD(exception); + IPA_DUMP_STATUS_FIELD(status_mask); + IPA_DUMP_STATUS_FIELD(pkt_len); + IPA_DUMP_STATUS_FIELD(endp_src_idx); + IPA_DUMP_STATUS_FIELD(endp_dest_idx); + IPA_DUMP_STATUS_FIELD(metadata); + + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5) { + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_local); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_global); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_pipe_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_match); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_rule_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.ret_hdr); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.tag_f_1); + } else { + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_local); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_global); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_pipe_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.ret_hdr); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_rule_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.tag_f_1); + } + + IPA_DUMP_STATUS_FIELD(tag_f_2); + IPA_DUMP_STATUS_FIELD(time_day_ctr); + IPA_DUMP_STATUS_FIELD(nat_hit); + IPA_DUMP_STATUS_FIELD(nat_tbl_idx); + IPA_DUMP_STATUS_FIELD(nat_type); + IPA_DUMP_STATUS_FIELD(route_local); + IPA_DUMP_STATUS_FIELD(route_tbl_idx); + IPA_DUMP_STATUS_FIELD(route_match); + IPA_DUMP_STATUS_FIELD(ucp); + IPA_DUMP_STATUS_FIELD(route_rule_idx); + IPA_DUMP_STATUS_FIELD(hdr_local); + IPA_DUMP_STATUS_FIELD(hdr_offset); + IPA_DUMP_STATUS_FIELD(frag_hit); + IPA_DUMP_STATUS_FIELD(frag_rule); +} + +static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ipa_status_stats *stats; + int i, j; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -EFAULT; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if (!ipa_ctx->ep[i].sys || !ipa_ctx->ep[i].sys->status_stat) + continue; + + memcpy(stats, ipa_ctx->ep[i].sys->status_stat, sizeof(*stats)); + stats->curr = (stats->curr + IPA_MAX_STATUS_STAT_NUM - 1) + % IPA_MAX_STATUS_STAT_NUM; + pr_err("Statuses for pipe %d\n", i); + for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) { + pr_err("curr=%d\n", stats->curr); + ipa_dump_status(&stats->status[stats->curr]); + pr_err("\n\n\n"); + stats->curr = (stats->curr + 1) % + IPA_MAX_STATUS_STAT_NUM; + } + } + + kfree(stats); + return 0; +} + +static ssize_t ipa2_print_active_clients_log(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int cnt; + int table_size; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE); + ipa_active_clients_lock(); + cnt = ipa2_active_clients_log_print_buffer(active_clients_buf, + IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - IPA_MAX_MSG_LEN); + table_size = ipa2_active_clients_log_print_table(active_clients_buf 
+ + cnt, IPA_MAX_MSG_LEN); + ipa_active_clients_unlock(); + + return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf, + cnt + table_size); +} + +static ssize_t ipa2_clear_active_clients_log(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + ipa2_active_clients_log_clear(); + + return count; +} + +static ssize_t ipa_read_rx_polling_timeout(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int min_cnt; + int max_cnt; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE); + min_cnt = scnprintf(active_clients_buf, + IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + "Rx Min Poll count = %u\n", + ipa_ctx->ipa_rx_min_timeout_usec); + + max_cnt = scnprintf(active_clients_buf + min_cnt, + IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + "Rx Max Poll count = %u\n", + ipa_ctx->ipa_rx_max_timeout_usec); + + return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf, + min_cnt + max_cnt); +} + +static ssize_t ipa_write_rx_polling_timeout(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + s8 polltime = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + if (copy_from_user(dbg_buff, ubuf, count)) + return -EFAULT; + + dbg_buff[count] = '\0'; + + if (kstrtos8(dbg_buff, 0, &polltime)) + return -EFAULT; + + ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec, + &ipa_ctx->ipa_rx_max_timeout_usec, polltime); + return count; +} + +static ssize_t ipa_read_polling_iteration(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int cnt; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE); + + cnt = scnprintf(active_clients_buf, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + "Polling Iteration count = %u\n", + ipa_ctx->ipa_polling_iteration); + + return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf, + cnt); +} + +static ssize_t ipa_write_polling_iteration(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + s8 iteration_cnt = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + if (copy_from_user(dbg_buff, ubuf, count)) + return -EFAULT; + + dbg_buff[count] = '\0'; + + if (kstrtos8(dbg_buff, 0, &iteration_cnt)) + return -EFAULT; + + if ((iteration_cnt >= MIN_POLLING_ITERATION) && + (iteration_cnt <= MAX_POLLING_ITERATION)) + ipa_ctx->ipa_polling_iteration = iteration_cnt; + else + ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION; + + return count; +} + +static ssize_t ipa_enable_ipc_low(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + mutex_lock(&ipa_ctx->lock); + if (option) { + if (!ipa_ipc_low_buff) { + ipa_ipc_low_buff = + ipc_log_context_create(IPA_IPC_LOG_PAGES, + "ipa_low", 0); + if (ipa_ipc_low_buff == NULL) + IPADBG("failed to get logbuf_low\n"); + } + 
ipa_ctx->logbuf_low = ipa_ipc_low_buff; + } else { + ipa_ctx->logbuf_low = NULL; + } + mutex_unlock(&ipa_ctx->lock); + + return count; +} + +static const struct file_operations ipa_gen_reg_ops = { + .read = ipa_read_gen_reg, +}; + +static const struct file_operations ipa_ep_reg_ops = { + .read = ipa_read_ep_reg, + .write = ipa_write_ep_reg, +}; + +static const struct file_operations ipa_keep_awake_ops = { + .read = ipa_read_keep_awake, + .write = ipa_write_keep_awake, +}; + +static const struct file_operations ipa_ep_holb_ops = { + .write = ipa_write_ep_holb, +}; + +static const struct file_operations ipa_hdr_ops = { + .read = ipa_read_hdr, +}; + +static const struct file_operations ipa_rt_ops = { + .read = ipa_read_rt, + .open = simple_open, +}; + +static const struct file_operations ipa_proc_ctx_ops = { + .read = ipa_read_proc_ctx, +}; + +static const struct file_operations ipa_flt_ops = { + .read = ipa_read_flt, + .open = simple_open, +}; + +static const struct file_operations ipa_stats_ops = { + .read = ipa_read_stats, +}; + +static const struct file_operations ipa_wstats_ops = { + .read = ipa_read_wstats, +}; + +static const struct file_operations ipa_wdi_ops = { + .read = ipa_read_wdi, +}; + +static const struct file_operations ipa_ntn_ops = { + .read = ipa_read_ntn, +}; + +static const struct file_operations ipa_msg_ops = { + .read = ipa_read_msg, +}; + +static const struct file_operations ipa_dbg_cnt_ops = { + .read = ipa_read_dbg_cnt, + .write = ipa_write_dbg_cnt, +}; + +static const struct file_operations ipa_nat4_ops = { + .read = ipa_read_nat4, +}; + +static const struct file_operations ipa_rm_stats = { + .read = ipa_rm_read_stats, +}; + +static const struct file_operations ipa_status_stats_ops = { + .read = ipa_status_stats_read, +}; + +static const struct file_operations ipa2_active_clients = { + .read = ipa2_print_active_clients_log, + .write = ipa2_clear_active_clients_log, +}; + +static const struct file_operations ipa_ipc_low_ops = { + .write = ipa_enable_ipc_low, +}; + +static const struct file_operations ipa_rx_poll_time_ops = { + .read = ipa_read_rx_polling_timeout, + .write = ipa_write_rx_polling_timeout, +}; + +static const struct file_operations ipa_poll_iteration_ops = { + .read = ipa_read_polling_iteration, + .write = ipa_write_polling_iteration, +}; + +void ipa_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0664; + const mode_t write_only_mode = 0220; + struct dentry *file; + + dent = debugfs_create_dir("ipa", NULL); + if (IS_ERR(dent)) { + IPAERR("fail to create folder in debug_fs.\n"); + return; + } + + file = debugfs_create_u32("hw_type", read_only_mode, + dent, &ipa_ctx->ipa_hw_type); + if (!file) { + IPAERR("could not create hw_type file\n"); + goto fail; + } + + + dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, + NULL, &ipa_gen_reg_ops); + if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) { + IPAERR("fail to create file for debug_fs gen_reg\n"); + goto fail; + } + + dfile_active_clients = debugfs_create_file("active_clients", + read_write_mode, dent, NULL, &ipa2_active_clients); + if (!dfile_active_clients || IS_ERR(dfile_active_clients)) { + IPAERR("fail to create file for debug_fs active_clients\n"); + goto fail; + } + + active_clients_buf = NULL; + active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + GFP_KERNEL); + if (active_clients_buf == NULL) + IPAERR("fail to allocate active clients memory buffer"); + + dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, + NULL, 
&ipa_ep_reg_ops); + if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) { + IPAERR("fail to create file for debug_fs ep_reg\n"); + goto fail; + } + + dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode, + dent, NULL, &ipa_keep_awake_ops); + if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) { + IPAERR("fail to create file for debug_fs dfile_keep_awake\n"); + goto fail; + } + + dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent, + NULL, &ipa_ep_holb_ops); + if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) { + IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n"); + goto fail; + } + + dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, NULL, + &ipa_hdr_ops); + if (!dfile_hdr || IS_ERR(dfile_hdr)) { + IPAERR("fail to create file for debug_fs hdr\n"); + goto fail; + } + + dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent, + NULL, &ipa_proc_ctx_ops); + if (!dfile_hdr || IS_ERR(dfile_hdr)) { + IPAERR("fail to create file for debug_fs proc_ctx\n"); + goto fail; + } + + dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa_rt_ops); + if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) { + IPAERR("fail to create file for debug_fs ip4 rt\n"); + goto fail; + } + + dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa_rt_ops); + if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) { + IPAERR("fail to create file for debug_fs ip6:w rt\n"); + goto fail; + } + + dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa_flt_ops); + if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) { + IPAERR("fail to create file for debug_fs ip4 flt\n"); + goto fail; + } + + dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa_flt_ops); + if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) { + IPAERR("fail to create file for debug_fs ip6 flt\n"); + goto fail; + } + + dfile_stats = debugfs_create_file("stats", read_only_mode, dent, NULL, + &ipa_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + IPAERR("fail to create file for debug_fs stats\n"); + goto fail; + } + + dfile_wstats = debugfs_create_file("wstats", read_only_mode, + dent, NULL, &ipa_wstats_ops); + if (!dfile_wstats || IS_ERR(dfile_wstats)) { + IPAERR("fail to create file for debug_fs wstats\n"); + goto fail; + } + + dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, + NULL, &ipa_wdi_ops); + if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) { + IPAERR("fail to create file for debug_fs wdi stats\n"); + goto fail; + } + + dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, + NULL, &ipa_ntn_ops); + if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) { + IPAERR("fail to create file for debug_fs ntn stats\n"); + goto fail; + } + + dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, + NULL, &ipa_dbg_cnt_ops); + if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) { + IPAERR("fail to create file for debug_fs dbg_cnt\n"); + goto fail; + } + + dfile_msg = debugfs_create_file("msg", read_only_mode, dent, NULL, + &ipa_msg_ops); + if (!dfile_msg || IS_ERR(dfile_msg)) { + IPAERR("fail to create file for debug_fs msg\n"); + goto fail; + } + + dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent, + NULL, &ipa_nat4_ops); + if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) { + IPAERR("fail to create file for debug_fs ip4 nat\n"); + goto fail; + } + + dfile_rm_stats = debugfs_create_file("rm_stats", + read_only_mode, dent, NULL, &ipa_rm_stats); + 
if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) { + IPAERR("fail to create file for debug_fs rm_stats\n"); + goto fail; + } + + dfile_status_stats = debugfs_create_file("status_stats", + read_only_mode, dent, NULL, &ipa_status_stats_ops); + if (!dfile_status_stats || IS_ERR(dfile_status_stats)) { + IPAERR("fail to create file for debug_fs status_stats\n"); + goto fail; + } + + dfile_ipa_rx_poll_timeout = debugfs_create_file("ipa_rx_poll_time", + read_write_mode, dent, NULL, &ipa_rx_poll_time_ops); + if (!dfile_ipa_rx_poll_timeout || IS_ERR(dfile_ipa_rx_poll_timeout)) { + IPAERR("fail to create file for debug_fs rx poll timeout\n"); + goto fail; + } + + dfile_ipa_poll_iteration = debugfs_create_file("ipa_poll_iteration", + read_write_mode, dent, NULL, &ipa_poll_iteration_ops); + if (!dfile_ipa_poll_iteration || IS_ERR(dfile_ipa_poll_iteration)) { + IPAERR("fail to create file for debug_fs poll iteration\n"); + goto fail; + } + + file = debugfs_create_u32("enable_clock_scaling", read_write_mode, + dent, &ipa_ctx->enable_clock_scaling); + if (!file) { + IPAERR("could not create enable_clock_scaling file\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps", + read_write_mode, dent, + &ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal); + if (!file) { + IPAERR("could not create bw_threshold_nominal_mbps\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps", + read_write_mode, dent, + &ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo); + if (!file) { + IPAERR("could not create bw_threshold_turbo_mbps\n"); + goto fail; + } + + file = debugfs_create_file("enable_low_prio_print", write_only_mode, + dent, NULL, &ipa_ipc_low_ops); + if (!file) { + IPAERR("could not create enable_low_prio_print file\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(dent); +} + +void ipa_debugfs_remove(void) +{ + if (IS_ERR(dent)) { + IPAERR("Debugfs: folder was not created.\n"); + return; + } + if (active_clients_buf != NULL) { + kfree(active_clients_buf); + active_clients_buf = NULL; + } + debugfs_remove_recursive(dent); +} + +#else /* !CONFIG_DEBUG_FS */ +void ipa_debugfs_init(void) {} +void ipa_debugfs_remove(void) {} +int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len) +{ + return 0; +} +int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe) +{ + return 0; +} +int _ipa_read_gen_reg_v1_1(char *buff, int max_len) +{ + return 0; +} +void _ipa_write_dbg_cnt_v1_1(int option) {} +int _ipa_read_gen_reg_v2_0(char *buff, int max_len) +{ + return 0; +} +int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe) +{ + return 0; +} +void _ipa_write_dbg_cnt_v2_0(int option) {} +int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len) +{ + return 0; +} +#endif diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c new file mode 100644 index 0000000000000000000000000000000000000000..0313b19b41416b2d84fc9af201ef439f1e859510 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c @@ -0,0 +1,902 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010 +#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050 +#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8 +#define IPA_DMA_MAX_PKT_SZ 0xFFFF +#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \ + sizeof(struct sps_iovec) - 1) +#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \ + sizeof(struct sps_iovec) - 1) + +#define IPADMA_DRV_NAME "ipa_dma" + +#define IPADMA_DBG(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_ERR(fmt, args...) \ + do { \ + pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_FUNC_ENTRY() \ + IPADMA_DBG_LOW("ENTRY\n") + +#define IPADMA_FUNC_EXIT() \ + IPADMA_DBG_LOW("EXIT\n") + + +#ifdef CONFIG_DEBUG_FS +#define IPADMA_MAX_MSG_LEN 1024 +static char dbg_buff[IPADMA_MAX_MSG_LEN]; +static void ipa_dma_debugfs_init(void); +static void ipa_dma_debugfs_destroy(void); +#else +static void ipa_dma_debugfs_init(void) {} +static void ipa_dma_debugfs_destroy(void) {} +#endif + +/** + * struct ipa_dma_xfer_wrapper - IPADMA transfer descr wrapper + * @phys_addr_src: physical address of the source data to copy + * @phys_addr_dest: physical address to store the copied data + * @len: len in bytes to copy + * @link: linked to the wrappers list on the proper(sync/async) cons pipe + * @xfer_done: completion object for sync_memcpy completion + * @callback: IPADMA client provided completion callback + * @user1: cookie1 for above callback + * + * This struct can wrap both sync and async memcpy transfers descriptors. + */ +struct ipa_dma_xfer_wrapper { + u64 phys_addr_src; + u64 phys_addr_dest; + u16 len; + struct list_head link; + struct completion xfer_done; + void (*callback)(void *user1); + void *user1; +}; + +/** + * struct ipa_dma_ctx -IPADMA driver context information + * @is_enabled:is ipa_dma enabled? 
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy + * @ipa_dma_xfer_wrapper_cache: cache of ipa_dma_xfer_wrapper structs + * @sync_lock: lock for synchronisation in sync_memcpy + * @async_lock: lock for synchronisation in async_memcpy + * @enable_lock: lock for is_enabled + * @pending_lock: lock for synchronize is_enable and pending_cnt + * @done: no pending works-ipadma can be destroyed + * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer + * @ipa_dma_async_prod_hdl:handle of async memcpy producer + * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer + * @sync_memcpy_pending_cnt: number of pending sync memcopy operations + * @async_memcpy_pending_cnt: number of pending async memcopy operations + * @uc_memcpy_pending_cnt: number of pending uc memcopy operations + * @total_sync_memcpy: total number of sync memcpy (statistics) + * @total_async_memcpy: total number of async memcpy (statistics) + * @total_uc_memcpy: total number of uc memcpy (statistics) + */ +struct ipa_dma_ctx { + bool is_enabled; + bool destroy_pending; + struct kmem_cache *ipa_dma_xfer_wrapper_cache; + struct mutex sync_lock; + spinlock_t async_lock; + struct mutex enable_lock; + spinlock_t pending_lock; + struct completion done; + u32 ipa_dma_sync_prod_hdl; + u32 ipa_dma_async_prod_hdl; + u32 ipa_dma_sync_cons_hdl; + u32 ipa_dma_async_cons_hdl; + atomic_t sync_memcpy_pending_cnt; + atomic_t async_memcpy_pending_cnt; + atomic_t uc_memcpy_pending_cnt; + atomic_t total_sync_memcpy; + atomic_t total_async_memcpy; + atomic_t total_uc_memcpy; +}; +static struct ipa_dma_ctx *ipa_dma_ctx; + +/** + * ipa2_dma_init() -Initialize IPADMA. + * + * This function initialize all IPADMA internal data and connect in dma: + * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS + * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS + * + * Return codes: 0: success + * -EFAULT: IPADMA is already initialized + * -ENOMEM: allocating memory error + * -EPERM: pipe connection failed + */ +int ipa2_dma_init(void) +{ + struct ipa_dma_ctx *ipa_dma_ctx_t; + struct ipa_sys_connect_params sys_in; + int res = 0; + + IPADMA_FUNC_ENTRY(); + + if (ipa_dma_ctx) { + IPADMA_ERR("Already initialized.\n"); + return -EFAULT; + } + ipa_dma_ctx_t = kzalloc(sizeof(*(ipa_dma_ctx)), GFP_KERNEL); + + if (!ipa_dma_ctx_t) { + IPADMA_ERR("kzalloc error.\n"); + return -ENOMEM; + } + + ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache = + kmem_cache_create("IPA_DMA_XFER_WRAPPER", + sizeof(struct ipa_dma_xfer_wrapper), 0, 0, NULL); + if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) { + IPAERR(":failed to create ipa dma xfer wrapper cache.\n"); + res = -ENOMEM; + goto fail_mem_ctrl; + } + + mutex_init(&ipa_dma_ctx_t->enable_lock); + spin_lock_init(&ipa_dma_ctx_t->async_lock); + mutex_init(&ipa_dma_ctx_t->sync_lock); + spin_lock_init(&ipa_dma_ctx_t->pending_lock); + init_completion(&ipa_dma_ctx_t->done); + ipa_dma_ctx_t->is_enabled = false; + ipa_dma_ctx_t->destroy_pending = false; + atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0); + + /* IPADMA SYNC PROD-source for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + 
	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+		IPADMA_ERR(":setup sync prod pipe failed\n");
+		res = -EPERM;
+		goto fail_sync_prod;
+	}
+
+	/* IPADMA SYNC CONS-destination for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = NULL;
+	sys_in.priv = NULL;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+		IPADMA_ERR(":setup sync cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_sync_cons;
+	}
+
+	IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+	/* IPADMA ASYNC PROD-source for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	sys_in.notify = NULL;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+		IPADMA_ERR(":setup async prod pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_prod;
+	}
+
+	/* IPADMA ASYNC CONS-destination for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = ipa_dma_async_memcpy_notify_cb;
+	sys_in.priv = NULL;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+		IPADMA_ERR(":setup async cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_cons;
+	}
+	ipa_dma_debugfs_init();
+	ipa_dma_ctx = ipa_dma_ctx_t;
+	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+	IPADMA_FUNC_EXIT();
+	return res;
+fail_async_cons:
+	ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+	ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+	ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+	kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+	kfree(ipa_dma_ctx_t);
+	ipa_dma_ctx = NULL;
+	return res;
+
+}
+
+
+/**
+ * ipa2_dma_enable() - Vote for IPA clocks.
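+ *
+ * A rough lifecycle sketch (illustrative only; the calling code and its
+ * error handling are assumptions, not part of this driver): call
+ * ipa2_dma_init() once, bracket bursts of copies with
+ * ipa2_dma_enable()/ipa2_dma_disable(), and tear everything down with
+ * ipa2_dma_destroy():
+ *
+ *	rc = ipa2_dma_init();
+ *	if (rc)
+ *		return rc;
+ *	if (!ipa2_dma_enable()) {
+ *		... sync/async memcpy calls go here ...
+ *		ipa2_dma_disable();
+ *	}
+ *	ipa2_dma_destroy();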
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			enabled
+ */
+int ipa2_dma_enable(void)
+{
+	IPADMA_FUNC_ENTRY();
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_dma_ctx->enable_lock);
+	if (ipa_dma_ctx->is_enabled) {
+		IPADMA_ERR("Already enabled.\n");
+		mutex_unlock(&ipa_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+	ipa_dma_ctx->is_enabled = true;
+	mutex_unlock(&ipa_dma_ctx->enable_lock);
+
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa_dma_work_pending(void)
+{
+	if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending sync\n");
+		return true;
+	}
+	if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending async\n");
+		return true;
+	}
+	if (atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending uc\n");
+		return true;
+	}
+	IPADMA_DBG_LOW("no pending work\n");
+	return false;
+}
+
+/**
+ * ipa2_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: cannot disable ipa_dma as there are pending
+ *			memcpy operations
+ */
+int ipa2_dma_disable(void)
+{
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_dma_ctx->enable_lock);
+	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+	if (!ipa_dma_ctx->is_enabled) {
+		IPADMA_ERR("Already disabled.\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	if (ipa_dma_work_pending()) {
+		IPADMA_ERR("There is pending work, can't disable.\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa_dma_ctx->enable_lock);
+		return -EFAULT;
+	}
+	ipa_dma_ctx->is_enabled = false;
+	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+	mutex_unlock(&ipa_dma_ctx->enable_lock);
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa2_dma_sync_memcpy() - Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
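+ *
+ * Blocks until the copy completes. A minimal usage sketch (illustrative
+ * only; dst_pa and src_pa stand for physical addresses the caller obtained
+ * from a prior DMA mapping, and the error handling is an assumption):
+ *
+ *	int rc = ipa2_dma_sync_memcpy(dst_pa, src_pa, len);
+ *
+ *	if (rc)
+ *		pr_err("IPA DMA sync copy failed %d\n", rc);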
+ * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -SPS_ERROR: on sps faliures + * -EFAULT: other + */ +int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len) +{ + int ep_idx; + int res; + int i = 0; + struct ipa_sys_context *cons_sys; + struct ipa_sys_context *prod_sys; + struct sps_iovec iov; + struct ipa_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa_dma_xfer_wrapper *head_descr = NULL; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + if (((u32)src != src) || ((u32)dest != dest)) { + IPADMA_ERR("Bad addr - only 32b addr supported for BAM"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa_dma_ctx->sync_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) >= + IPA_DMA_MAX_PENDING_SYNC) { + atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); + IPADMA_ERR("Reached pending requests limit\n"); + return -EFAULT; + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + return -EFAULT; + } + cons_sys = ipa_ctx->ep[ep_idx].sys; + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + IPADMA_ERR("failed to alloc xfer descr wrapper\n"); + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + init_completion(&xfer_descr->xfer_done); + + mutex_lock(&ipa_dma_ctx->sync_lock); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, NULL, 0); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); + goto fail_sps_send; + } + res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, + NULL, SPS_IOVEC_FLAG_EOT); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); + ipa_assert(); + } + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + + /* in case we are not the head of the list, wait for head to wake us */ + if (xfer_descr != head_descr) { + mutex_unlock(&ipa_dma_ctx->sync_lock); + wait_for_completion(&xfer_descr->xfer_done); + mutex_lock(&ipa_dma_ctx->sync_lock); + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + ipa_assert_on(xfer_descr != head_descr); + } + mutex_unlock(&ipa_dma_ctx->sync_lock); + + do { + /* wait for transfer to complete */ + res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov); + if (res) + 
IPADMA_ERR("Failed: get_iovec, returned %d loop#:%d\n" + , res, i); + + usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX, + IPA_DMA_POLLING_MAX_SLEEP_RX); + i++; + } while (iov.addr == 0); + + mutex_lock(&ipa_dma_ctx->sync_lock); + list_del(&head_descr->link); + cons_sys->len--; + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); + /* wake the head of the list */ + if (!list_empty(&cons_sys->head_desc_list)) { + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + complete(&head_descr->xfer_done); + } + mutex_unlock(&ipa_dma_ctx->sync_lock); + + ipa_assert_on(dest != iov.addr); + ipa_assert_on(len != iov.size); + atomic_inc(&ipa_dma_ctx->total_sync_memcpy); + atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + + IPADMA_FUNC_EXIT(); + return res; + +fail_sps_send: + list_del(&xfer_descr->link); + cons_sys->len--; + mutex_unlock(&ipa_dma_ctx->sync_lock); + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + return res; +} + +/** + * ipa2_dma_async_memcpy()- Perform asynchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * @user_cb: callback function to notify the client when the copy was done. + * @user_param: cookie for user_cb. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -SPS_ERROR: on sps faliures + * -EFAULT: descr fifo is full. 
+ */ +int ipa2_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param) +{ + int ep_idx; + int res = 0; + struct ipa_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa_sys_context *prod_sys; + struct ipa_sys_context *cons_sys; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + if (((u32)src != src) || ((u32)dest != dest)) { + IPADMA_ERR("Bad addr - only 32b addr supported for BAM"); + return -EINVAL; + } + if (!user_cb) { + IPADMA_ERR("null pointer: user_cb\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa_dma_ctx->async_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) >= + IPA_DMA_MAX_PENDING_ASYNC) { + atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); + IPADMA_ERR("Reached pending requests limit\n"); + return -EFAULT; + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + return -EFAULT; + } + cons_sys = ipa_ctx->ep[ep_idx].sys; + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + IPADMA_ERR("failed to alloc xfrer descr wrapper\n"); + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + xfer_descr->callback = user_cb; + xfer_descr->user1 = user_param; + + spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, xfer_descr, 0); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); + goto fail_sps_send; + } + res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, + NULL, SPS_IOVEC_FLAG_EOT); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); + ipa_assert(); + goto fail_sps_send; + } + spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); + IPADMA_FUNC_EXIT(); + return res; + +fail_sps_send: + list_del(&xfer_descr->link); + spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + return res; +} + +/** + * ipa2_dma_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. 
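+ *
+ * The copy is delegated to the IPA uC through ipa_uc_memcpy(); unlike
+ * ipa2_dma_async_memcpy() there is no completion callback, the call
+ * returns when ipa_uc_memcpy() does.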
+ * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -EBADF: IPA uC is not loaded + */ +int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa_dma_ctx->uc_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + + res = ipa_uc_memcpy(dest, src, len); + if (res) { + IPADMA_ERR("ipa_uc_memcpy failed %d\n", res); + goto dec_and_exit; + } + + atomic_inc(&ipa_dma_ctx->total_uc_memcpy); + res = 0; +dec_and_exit: + atomic_dec(&ipa_dma_ctx->uc_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + IPADMA_FUNC_EXIT(); + return res; +} + +/** + * ipa2_dma_destroy() -teardown IPADMA pipes and release ipadma. + * + * this is a blocking function, returns just after destroying IPADMA. + */ +void ipa2_dma_destroy(void) +{ + int res = 0; + + IPADMA_FUNC_ENTRY(); + if (!ipa_dma_ctx) { + IPADMA_ERR("IPADMA isn't initialized\n"); + return; + } + + if (ipa_dma_work_pending()) { + ipa_dma_ctx->destroy_pending = true; + IPADMA_DBG("There are pending memcpy, wait for completion\n"); + wait_for_completion(&ipa_dma_ctx->done); + } + + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n"); + ipa_dma_ctx->ipa_dma_async_cons_hdl = 0; + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC CONS failed\n"); + ipa_dma_ctx->ipa_dma_sync_cons_hdl = 0; + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n"); + ipa_dma_ctx->ipa_dma_async_prod_hdl = 0; + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC PROD failed\n"); + ipa_dma_ctx->ipa_dma_sync_prod_hdl = 0; + + ipa_dma_debugfs_destroy(); + kmem_cache_destroy(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache); + kfree(ipa_dma_ctx); + ipa_dma_ctx = NULL; + + IPADMA_FUNC_EXIT(); +} + +/** + * ipa_dma_async_memcpy_notify_cb() -Callback function which will be called by + * IPA driver after getting notify from SPS driver or poll mode on Rx operation + * is completed (data was written to dest descriptor on async_cons ep). + * + * @priv -not in use. + * @evt - event name - IPA_RECIVE. + * @data -the iovec. 
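+ *
+ * For every completed transfer on the ASYNC_CONS pipe this callback pops the
+ * oldest pending ipa_dma_xfer_wrapper, checks it against the returned iovec
+ * and then invokes the user_cb registered in ipa2_dma_async_memcpy().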
+ */ +void ipa_dma_async_memcpy_notify_cb(void *priv + , enum ipa_dp_evt_type evt, unsigned long data) +{ + int ep_idx = 0; + struct sps_iovec *iov = (struct sps_iovec *) data; + struct ipa_dma_xfer_wrapper *xfer_descr_expected; + struct ipa_sys_context *sys; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + sys = ipa_ctx->ep[ep_idx].sys; + + spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); + xfer_descr_expected = list_first_entry(&sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + list_del(&xfer_descr_expected->link); + sys->len--; + spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); + + ipa_assert_on(xfer_descr_expected->phys_addr_dest != iov->addr); + ipa_assert_on(xfer_descr_expected->len != iov->size); + + atomic_inc(&ipa_dma_ctx->total_async_memcpy); + atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); + xfer_descr_expected->callback(xfer_descr_expected->user1); + + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, + xfer_descr_expected); + + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + + IPADMA_FUNC_EXIT(); +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dent; +static struct dentry *dfile_info; + +static ssize_t ipa_dma_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + + if (!ipa_dma_ctx) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Not initialized\n"); + } else { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Status:\n IPADMA is %s\n", + (ipa_dma_ctx->is_enabled) ? "Enabled" : "Disabled"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Statistics:\n total sync memcpy: %d\n ", + atomic_read(&ipa_dma_ctx->total_sync_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "total async memcpy: %d\n ", + atomic_read(&ipa_dma_ctx->total_async_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending sync memcpy jobs: %d\n ", + atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending async memcpy jobs: %d\n", + atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending uc memcpy jobs: %d\n", + atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt)); + } + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + s8 in_num = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &in_num)) + return -EFAULT; + switch (in_num) { + case 0: + if (ipa_dma_work_pending()) + IPADMA_ERR("Note, there are pending memcpy\n"); + + atomic_set(&ipa_dma_ctx->total_async_memcpy, 0); + atomic_set(&ipa_dma_ctx->total_sync_memcpy, 0); + break; + default: + IPADMA_ERR("invalid argument: To reset statistics echo 0\n"); + break; + } + return count; +} + +static const struct file_operations ipadma_stats_ops = { + .read = ipa_dma_debugfs_read, + .write = ipa_dma_debugfs_reset_statistics, +}; + +static void ipa_dma_debugfs_init(void) +{ + const mode_t read_write_mode = 0666; + + dent = 
debugfs_create_dir("ipa_dma", NULL); + if (IS_ERR(dent)) { + IPADMA_ERR("fail to create folder ipa_dma\n"); + return; + } + + dfile_info = + debugfs_create_file("info", read_write_mode, dent, + NULL, &ipadma_stats_ops); + if (!dfile_info || IS_ERR(dfile_info)) { + IPADMA_ERR("fail to create file stats\n"); + goto fail; + } + return; +fail: + debugfs_remove_recursive(dent); +} + +static void ipa_dma_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} + +#endif /* !CONFIG_DEBUG_FS */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..b3f66161f263be919b48648d27e318637d8c1694 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -0,0 +1,3702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_trace.h" + +#define IPA_WAN_AGGR_PKT_CNT 5 +#define IPA_LAST_DESC_CNT 0xFFFF +#define POLLING_INACTIVITY_RX 40 +#define POLLING_INACTIVITY_TX 40 +#define POLLING_MIN_SLEEP_TX 400 +#define POLLING_MAX_SLEEP_TX 500 +/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_MTU 1500 +#define IPA_GENERIC_AGGR_BYTE_LIMIT 6 +#define IPA_GENERIC_AGGR_TIME_LIMIT 1 +#define IPA_GENERIC_AGGR_PKT_LIMIT 0 + +#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192 +#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\ + (X) + NET_SKB_PAD) +\ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\ + (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X))) +#define IPA_GENERIC_RX_BUFF_LIMIT (\ + IPA_REAL_GENERIC_RX_BUFF_SZ(\ + IPA_GENERIC_RX_BUFF_BASE_SZ) -\ + IPA_GENERIC_RX_BUFF_BASE_SZ) + +#define IPA_RX_BUFF_CLIENT_HEADROOM 256 + +/* less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000) + +#define IPA_WLAN_RX_POOL_SZ 100 +#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5 +#define IPA_WLAN_RX_BUFF_SZ 2048 +#define IPA_WLAN_COMM_RX_POOL_LOW 100 +#define IPA_WLAN_COMM_RX_POOL_HIGH 900 + +#define IPA_ODU_RX_BUFF_SZ 2048 +#define IPA_ODU_RX_POOL_SZ 32 +#define IPA_SIZE_DL_CSUM_META_TRAILER 8 + +#define IPA_HEADROOM 128 + +static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags); +static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys); +static void ipa_replenish_rx_cache(struct ipa_sys_context *sys); +static void replenish_rx_work_func(struct work_struct *work); +static void ipa_wq_handle_rx(struct work_struct *work); +static void ipa_wq_handle_tx(struct work_struct *work); +static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size); +static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, + u32 size); +static int ipa_assign_policy(struct ipa_sys_connect_params *in, + struct ipa_sys_context *sys); +static void ipa_cleanup_rx(struct ipa_sys_context *sys); +static void ipa_wq_rx_avail(struct work_struct *work); +static void ipa_alloc_wlan_rx_common_cache(u32 size); +static void ipa_cleanup_wlan_rx_common_cache(void); +static void ipa_wq_repl_rx(struct work_struct *work); +static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys, + struct sps_iovec *iovec); + +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit); +static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys); + +static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt) +{ + struct ipa_tx_pkt_wrapper 
*tx_pkt_expected; + int i; + + for (i = 0; i < cnt; i++) { + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + spin_unlock_bh(&sys->spinlock); + return; + } + tx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_tx_pkt_wrapper, + link); + list_del(&tx_pkt_expected->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + if (!tx_pkt_expected->no_unmap_dma) { + if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa_ctx->pdev, + tx_pkt_expected->mem.phys_base, + tx_pkt_expected->mem.size, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa_ctx->pdev, + tx_pkt_expected->mem.phys_base, + tx_pkt_expected->mem.size, + DMA_TO_DEVICE); + } + } + if (tx_pkt_expected->callback) + tx_pkt_expected->callback(tx_pkt_expected->user1, + tx_pkt_expected->user2); + if (tx_pkt_expected->cnt > 1 && + tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) { + if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) { + dma_pool_free(ipa_ctx->dma_pool, + tx_pkt_expected->mult.base, + tx_pkt_expected->mult.phys_base); + } else { + dma_unmap_single(ipa_ctx->pdev, + tx_pkt_expected->mult.phys_base, + tx_pkt_expected->mult.size, + DMA_TO_DEVICE); + kfree(tx_pkt_expected->mult.base); + } + } + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected); + } +} + +static void ipa_wq_write_done_status(int src_pipe) +{ + struct ipa_tx_pkt_wrapper *tx_pkt_expected; + struct ipa_sys_context *sys; + u32 cnt; + + WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes); + + if (!ipa_ctx->ep[src_pipe].status.status_en) + return; + + sys = ipa_ctx->ep[src_pipe].sys; + if (!sys) + return; + + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + spin_unlock_bh(&sys->spinlock); + return; + } + tx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_tx_pkt_wrapper, + link); + cnt = tx_pkt_expected->cnt; + spin_unlock_bh(&sys->spinlock); + ipa_wq_write_done_common(sys, cnt); +} + +/** + * ipa_write_done() - this function will be (eventually) called when a Tx + * operation is complete + * * @work: work_struct used by the work queue + * + * Will be called in deferred context. + * - invoke the callback supplied by the client who sent this command + * - iterate over all packets and validate that + * the order for sent packet is the same as expected + * - delete all the tx packet descriptors from the system + * pipe context (not needed anymore) + * - return the tx buffer back to dma_pool + */ +static void ipa_wq_write_done(struct work_struct *work) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + u32 cnt; + struct ipa_sys_context *sys; + + tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work); + cnt = tx_pkt->cnt; + sys = tx_pkt->sys; + + ipa_wq_write_done_common(sys, cnt); +} + +static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all, + bool in_poll_state) +{ + struct sps_iovec iov; + int ret; + int cnt = 0; + + while ((in_poll_state ? 
atomic_read(&sys->curr_polling_state) : + !atomic_read(&sys->curr_polling_state))) { + if (cnt && !process_all) + break; + ret = sps_get_iovec(sys->ep->ep_hdl, &iov); + if (ret) { + IPAERR("sps_get_iovec failed %d\n", ret); + break; + } + + if (iov.addr == 0) + break; + + ipa_wq_write_done_common(sys, 1); + cnt++; + } + + return cnt; +} + +/** + * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode + */ +static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys) +{ + int ret; + + if (!atomic_read(&sys->curr_polling_state)) { + IPAERR("already in intr mode\n"); + goto fail; + } + + ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + goto fail; + } + sys->event.options = SPS_O_EOT; + ret = sps_register_event(sys->ep->ep_hdl, &sys->event); + if (ret) { + IPAERR("sps_register_event() failed %d\n", ret); + goto fail; + } + sys->ep->connect.options = + SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT; + ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + goto fail; + } + atomic_set(&sys->curr_polling_state, 0); + ipa_handle_tx_core(sys, true, false); + return; + +fail: + queue_delayed_work(sys->wq, &sys->switch_to_intr_work, + msecs_to_jiffies(1)); +} + +static void ipa_handle_tx(struct ipa_sys_context *sys) +{ + int inactive_cycles = 0; + int cnt; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + do { + cnt = ipa_handle_tx_core(sys, true, true); + if (cnt == 0) { + inactive_cycles++; + usleep_range(POLLING_MIN_SLEEP_TX, + POLLING_MAX_SLEEP_TX); + } else { + inactive_cycles = 0; + } + } while (inactive_cycles <= POLLING_INACTIVITY_TX); + + ipa_tx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +static void ipa_wq_handle_tx(struct work_struct *work) +{ + struct ipa_sys_context *sys; + + sys = container_of(work, struct ipa_sys_context, work); + + ipa_handle_tx(sys); +} + +/** + * ipa_send_one() - Send a single descriptor + * @sys: system pipe context + * @desc: descriptor to send + * @in_atomic: whether caller is in atomic context + * + * - Allocate tx_packet wrapper + * - transfer data to the IPA + * - after the transfer was done the SPS will + * notify the sending user via ipa_sps_irq_comp_tx() + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc, + bool in_atomic) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + int result; + u16 sps_flags = SPS_IOVEC_FLAG_EOT; + dma_addr_t dma_address; + u16 len; + gfp_t mem_flag = GFP_ATOMIC; + struct sps_iovec iov; + int ret; + + if (unlikely(!in_atomic)) + mem_flag = GFP_KERNEL; + + tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag); + if (!tx_pkt) { + IPAERR("failed to alloc tx wrapper\n"); + goto fail_mem_alloc; + } + + if (!desc->dma_address_valid) { + dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld, + desc->len, DMA_TO_DEVICE); + } else { + dma_address = desc->dma_address; + tx_pkt->no_unmap_dma = true; + } + if (dma_mapping_error(ipa_ctx->pdev, dma_address)) { + IPAERR("dma_map_single failed\n"); + goto fail_dma_map; + } + + INIT_LIST_HEAD(&tx_pkt->link); + tx_pkt->type = desc->type; + tx_pkt->cnt = 1; /* only 1 desc in this "set" */ + + tx_pkt->mem.phys_base = dma_address; + tx_pkt->mem.base = desc->pyld; + tx_pkt->mem.size = desc->len; + tx_pkt->sys = sys; + tx_pkt->callback = desc->callback; + tx_pkt->user1 = desc->user1; + tx_pkt->user2 = desc->user2; + + /* + * Special treatment for 
immediate commands, where the structure of the + * descriptor is different + */ + if (desc->type == IPA_IMM_CMD_DESC) { + sps_flags |= SPS_IOVEC_FLAG_IMME; + len = desc->opcode; + IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n", + desc->opcode, desc->len, sps_flags); + IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len); + } else { + len = desc->len; + } + + INIT_WORK(&tx_pkt->work, ipa_wq_write_done); + + spin_lock_bh(&sys->spinlock); + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + do { + ret = sps_get_iovec(sys->ep->ep_hdl, &iov); + if (ret) { + IPADBG("sps_get_iovec failed %d\n", ret); + break; + } + if ((iov.addr == 0x0) && (iov.size == 0x0)) + break; + } while (1); + } + result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt, + sps_flags); + if (result) { + IPAERR("sps_transfer_one failed rc=%d\n", result); + goto fail_sps_send; + } + + spin_unlock_bh(&sys->spinlock); + + return 0; + +fail_sps_send: + list_del(&tx_pkt->link); + spin_unlock_bh(&sys->spinlock); + dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE); +fail_dma_map: + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt); +fail_mem_alloc: + return -EFAULT; +} + +/** + * ipa_send() - Send multiple descriptors in one HW transaction + * @sys: system pipe context + * @num_desc: number of packets + * @desc: packets to send (may be immediate command or data) + * @in_atomic: whether caller is in atomic context + * + * This function is used for system-to-bam connection. + * - SPS driver expect struct sps_transfer which will contain all the data + * for a transaction + * - ipa_tx_pkt_wrapper will be used for each ipa + * descriptor (allocated from wrappers cache) + * - The wrapper struct will be configured for each ipa-desc payload and will + * contain information which will be later used by the user callbacks + * - each transfer will be made by calling to sps_transfer() + * - Each packet (command or data) that will be sent will also be saved in + * ipa_sys_context for later check that all data was sent + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc, + bool in_atomic) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + struct ipa_tx_pkt_wrapper *next_pkt; + struct sps_transfer transfer = { 0 }; + struct sps_iovec *iovec; + dma_addr_t dma_addr; + int i = 0; + int j; + int result; + uint size = num_desc * sizeof(struct sps_iovec); + gfp_t mem_flag = GFP_ATOMIC; + struct sps_iovec iov; + int ret; + gfp_t flag; + + if (unlikely(!in_atomic)) + mem_flag = GFP_KERNEL; + + flag = mem_flag | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + if (num_desc == IPA_NUM_DESC_PER_SW_TX) { + transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, + &dma_addr); + if (!transfer.iovec) { + IPAERR("fail to alloc dma mem for sps xfr buff\n"); + return -EFAULT; + } + } else { + transfer.iovec = kmalloc(size, flag); + if (!transfer.iovec) { + IPAERR("fail to alloc mem for sps xfr buff "); + IPAERR("num_desc = %d size = %d\n", num_desc, size); + return -EFAULT; + } + dma_addr = dma_map_single(ipa_ctx->pdev, + transfer.iovec, size, DMA_TO_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) { + IPAERR("dma_map_single failed for sps xfr buff\n"); + kfree(transfer.iovec); + return -EFAULT; + } + } + + transfer.iovec_phys = dma_addr; + transfer.iovec_count = num_desc; + spin_lock_bh(&sys->spinlock); + + for (i = 0; i < num_desc; i++) { + tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, + mem_flag); + if (!tx_pkt) { + IPAERR("failed to alloc tx wrapper\n"); + goto failure; + } + /* + * first desc of set is "special" as it holds the count and + * other info + */ + if (i == 0) { + transfer.user = tx_pkt; + tx_pkt->mult.phys_base = dma_addr; + tx_pkt->mult.base = transfer.iovec; + tx_pkt->mult.size = size; + tx_pkt->cnt = num_desc; + INIT_WORK(&tx_pkt->work, ipa_wq_write_done); + } + + iovec = &transfer.iovec[i]; + iovec->flags = 0; + + INIT_LIST_HEAD(&tx_pkt->link); + tx_pkt->type = desc[i].type; + + if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) { + tx_pkt->mem.base = desc[i].pyld; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + dma_map_single(ipa_ctx->pdev, + tx_pkt->mem.base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } else { + tx_pkt->mem.base = desc[i].frag; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + skb_frag_dma_map(ipa_ctx->pdev, + desc[i].frag, + 0, tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } + + if (dma_mapping_error(ipa_ctx->pdev, tx_pkt->mem.phys_base)) { + IPAERR("dma_map_single failed\n"); + goto failure_dma_map; + } + + tx_pkt->sys = sys; + tx_pkt->callback = desc[i].callback; + tx_pkt->user1 = desc[i].user1; + tx_pkt->user2 = desc[i].user2; + + /* + * Point the iovec to the buffer and + * add this packet to system pipe context. 
+ */ + iovec->addr = tx_pkt->mem.phys_base; + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + + /* + * Special treatment for immediate commands, where the structure + * of the descriptor is different + */ + if (desc[i].type == IPA_IMM_CMD_DESC) { + iovec->size = desc[i].opcode; + iovec->flags |= SPS_IOVEC_FLAG_IMME; + IPA_DUMP_BUFF(desc[i].pyld, + tx_pkt->mem.phys_base, desc[i].len); + } else { + iovec->size = desc[i].len; + } + + if (i == (num_desc - 1)) { + iovec->flags |= SPS_IOVEC_FLAG_EOT; + /* "mark" the last desc */ + tx_pkt->cnt = IPA_LAST_DESC_CNT; + } + } + + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + do { + ret = sps_get_iovec(sys->ep->ep_hdl, &iov); + if (ret) { + IPADBG("sps_get_iovec failed %d\n", ret); + break; + } + if ((iov.addr == 0x0) && (iov.size == 0x0)) + break; + } while (1); + } + result = sps_transfer(sys->ep->ep_hdl, &transfer); + if (result) { + IPAERR("sps_transfer failed rc=%d\n", result); + goto failure; + } + + spin_unlock_bh(&sys->spinlock); + return 0; + +failure_dma_map: + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt); + +failure: + tx_pkt = transfer.user; + for (j = 0; j < i; j++) { + next_pkt = list_next_entry(tx_pkt, link); + list_del(&tx_pkt->link); + if (!tx_pkt->no_unmap_dma) { + if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } + } + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt); + tx_pkt = next_pkt; + } + if (transfer.iovec_phys) { + if (num_desc == IPA_NUM_DESC_PER_SW_TX) { + dma_pool_free(ipa_ctx->dma_pool, transfer.iovec, + transfer.iovec_phys); + } else { + dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys, + size, DMA_TO_DEVICE); + kfree(transfer.iovec); + } + } + spin_unlock_bh(&sys->spinlock); + return -EFAULT; +} + +/** + * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver + * after an immediate command is complete. + * @user1: pointer to the descriptor of the transfer + * @user2: + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa_send_cmd()) + */ +static void ipa_sps_irq_cmd_ack(void *user1, int user2) +{ + struct ipa_desc *desc = (struct ipa_desc *)user1; + + if (!desc) { + IPAERR("desc is NULL\n"); + WARN_ON(1); + return; + } + IPADBG_LOW("got ack for cmd=%d\n", desc->opcode); + complete(&desc->xfer_done); +} + +/** + * ipa_send_cmd - send immediate commands + * @num_desc: number of descriptors within the desc struct + * @descr: descriptor structure + * + * Function will block till command gets ACK from IPA HW, caller needs + * to free any resources it allocated after function returns + * The callback in ipa_desc should not be set by the caller + * for this function. 
+ */ +int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr) +{ + struct ipa_desc *desc; + int i, result = 0; + struct ipa_sys_context *sys; + int ep_idx; + + for (i = 0; i < num_desc; i++) + IPADBG_LOW("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + sys = ipa_ctx->ep[ep_idx].sys; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (num_desc == 1) { + init_completion(&descr->xfer_done); + + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa_sps_irq_cmd_ack; + descr->user1 = descr; + if (ipa_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&descr->xfer_done); + } else { + desc = &descr[num_desc - 1]; + init_completion(&desc->xfer_done); + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa_sps_irq_cmd_ack; + desc->user1 = desc; + if (ipa_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&desc->xfer_done); + } + +bail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa_sps_irq_tx_notify() - Callback function which will be called by + * the SPS driver to start a Tx poll operation. + * Called in an interrupt context. + * @notify: SPS driver supplied notification struct + * + * This function defer the work for this event to the tx workqueue. + */ +static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify) +{ + struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user; + int ret; + + IPADBG_LOW("event %d notified\n", notify->event_id); + + switch (notify->event_id) { + case SPS_EVENT_EOT: + if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + if (!atomic_read(&sys->curr_polling_state)) { + ret = sps_get_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + break; + } + sys->ep->connect.options = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + ret = sps_set_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + break; + } + atomic_set(&sys->curr_polling_state, 1); + queue_work(sys->wq, &sys->work); + } + break; + default: + IPAERR("received unexpected event id %d\n", notify->event_id); + } +} + +/** + * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by + * the SPS driver after a Tx operation is complete. + * Called in an interrupt context. + * @notify: SPS driver supplied notification struct + * + * This function defer the work for this event to the tx workqueue. + * This event will be later handled by ipa_write_done. 
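+ *
+ * notify->data.transfer.user carries the ipa_tx_pkt_wrapper that was passed
+ * to sps_transfer_one(), so the handler can queue that packet's work item
+ * directly on its system pipe workqueue.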
+ */ +static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + + IPADBG_LOW("event %d notified\n", notify->event_id); + + switch (notify->event_id) { + case SPS_EVENT_EOT: + tx_pkt = notify->data.transfer.user; + if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + queue_work(tx_pkt->sys->wq, &tx_pkt->work); + break; + default: + IPAERR("received unexpected event id %d\n", notify->event_id); + } +} + +/** + * ipa_poll_pkt() - Poll packet from SPS BAM + * return 0 to caller on poll successfully + * else -EIO + * + */ +static int ipa_poll_pkt(struct ipa_sys_context *sys, + struct sps_iovec *iov) +{ + int ret; + + ret = sps_get_iovec(sys->ep->ep_hdl, iov); + if (ret) { + IPAERR("sps_get_iovec failed %d\n", ret); + return ret; + } + + if (iov->addr == 0) + return -EIO; + + return 0; +} + +/** + * ipa_handle_rx_core() - The core functionality of packet reception. This + * function is read from multiple code paths. + * + * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN + * endpoint. The function runs as long as there are packets in the pipe. + * For each packet: + * - Disconnect the packet from the system pipe linked list + * - Unmap the packets skb, make it non DMAable + * - Free the packet from the cache + * - Prepare a proper skb + * - Call the endpoints notify function, passing the skb in the parameters + * - Replenish the rx cache + */ +static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all, + bool in_poll_state) +{ + struct sps_iovec iov; + int ret; + int cnt = 0; + + while ((in_poll_state ? atomic_read(&sys->curr_polling_state) : + !atomic_read(&sys->curr_polling_state))) { + if (cnt && !process_all) + break; + + ret = ipa_poll_pkt(sys, &iov); + if (ret) + break; + + if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client)) + ipa_dma_memcpy_notify(sys, &iov); + else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client)) + ipa_wlan_wq_rx_common(sys, iov.size); + else + ipa_wq_rx_common(sys, iov.size); + + cnt++; + } + + return cnt; +} + +/** + * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode + */ +static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys) +{ + int ret; + + if (!sys->ep || !sys->ep->valid) { + IPAERR("EP Not Valid, no need to cleanup.\n"); + return; + } + + ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + goto fail; + } + + if (!atomic_read(&sys->curr_polling_state) && + ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) { + IPADBG("already in intr mode\n"); + return; + } + + if (!atomic_read(&sys->curr_polling_state)) { + IPAERR("already in intr mode\n"); + goto fail; + } + + sys->event.options = SPS_O_EOT; + ret = sps_register_event(sys->ep->ep_hdl, &sys->event); + if (ret) { + IPAERR("sps_register_event() failed %d\n", ret); + goto fail; + } + sys->ep->connect.options = + SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT; + ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + goto fail; + } + atomic_set(&sys->curr_polling_state, 0); + if (!sys->ep->napi_enabled) + ipa_handle_rx_core(sys, true, false); + ipa_dec_release_wakelock(sys->ep->wakelock_client); + return; + +fail: + queue_delayed_work(sys->wq, &sys->switch_to_intr_work, + msecs_to_jiffies(1)); +} + + +/** + * ipa_sps_irq_control() - Function to enable or disable BAM IRQ. 
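+ *
+ * When enable is true the pipe is re-armed for SPS_O_EOT interrupts; when
+ * false it is switched to SPS_O_POLL so that no further BAM interrupts are
+ * raised for this endpoint.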
+ */ +static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable) +{ + int ret; + + /* + * Do not change sps config in case we are in polling mode as this + * indicates that sps driver already notified EOT event and sps config + * should not change until ipa driver processes the packet. + */ + if (atomic_read(&sys->curr_polling_state)) { + IPADBG("in polling mode, do not change config\n"); + return; + } + + if (enable) { + ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + return; + } + sys->event.options = SPS_O_EOT; + ret = sps_register_event(sys->ep->ep_hdl, &sys->event); + if (ret) { + IPAERR("sps_register_event() failed %d\n", ret); + return; + } + sys->ep->connect.options = + SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT; + ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + return; + } + } else { + ret = sps_get_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + return; + } + sys->ep->connect.options = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + ret = sps_set_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + return; + } + } +} + +void ipa_sps_irq_control_all(bool enable) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx, client_num; + + IPADBG("\n"); + + for (client_num = 0; + client_num < IPA_CLIENT_MAX; client_num++) { + if (!IPA_CLIENT_IS_APPS_CONS(client_num)) + continue; + + ipa_ep_idx = ipa_get_ep_mapping(client_num); + if (ipa_ep_idx == -1) { + IPADBG_LOW("Invalid client.\n"); + continue; + } + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (!ep->valid) { + IPAERR("EP (%d) not allocated.\n", ipa_ep_idx); + continue; + } + ipa_sps_irq_control(ep->sys, enable); + } +} + +/** + * ipa_rx_notify() - Callback function which is called by the SPS driver when a + * a packet is received + * @notify: SPS driver supplied notification information + * + * Called in an interrupt context, therefore the majority of the work is + * deffered using a work queue. + * + * After receiving a packet, the driver goes to polling mode and keeps pulling + * packets until the rx buffer is empty, then it goes back to interrupt mode. + * This comes to prevent the CPU from handling too many interrupts when the + * throughput is high. 
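+ *
+ * Concretely, on SPS_EVENT_EOT the pipe options are switched from SPS_O_EOT
+ * to SPS_O_POLL, a wakelock is acquired and sys->work is queued; the worker
+ * then drains the pipe before switching back to interrupt mode.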
+ */ +static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify) +{ + struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user; + int ret; + + IPADBG("event %d notified\n", notify->event_id); + + switch (notify->event_id) { + case SPS_EVENT_EOT: + if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + + if (atomic_read(&sys->curr_polling_state)) { + sys->ep->eot_in_poll_err++; + break; + } + + ret = sps_get_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + break; + } + sys->ep->connect.options = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + ret = sps_set_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + break; + } + ipa_inc_acquire_wakelock(sys->ep->wakelock_client); + atomic_set(&sys->curr_polling_state, 1); + trace_intr_to_poll(sys->ep->client); + queue_work(sys->wq, &sys->work); + break; + default: + IPAERR("received unexpected event id %d\n", notify->event_id); + } +} + +static void switch_to_intr_tx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work); + ipa_handle_tx(sys); +} + +/** + * ipa_handle_rx() - handle packet reception. This function is executed in the + * context of a work queue. + * @work: work struct needed by the work queue + * + * ipa_handle_rx_core() is run in polling mode. After all packets has been + * received, the driver switches back to interrupt mode. + */ +static void ipa_handle_rx(struct ipa_sys_context *sys) +{ + int inactive_cycles = 0; + int cnt; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + do { + cnt = ipa_handle_rx_core(sys, true, true); + if (cnt == 0) { + inactive_cycles++; + trace_idle_sleep_enter(sys->ep->client); + usleep_range(ipa_ctx->ipa_rx_min_timeout_usec, + ipa_ctx->ipa_rx_max_timeout_usec); + trace_idle_sleep_exit(sys->ep->client); + } else { + inactive_cycles = 0; + } + + /* if pipe is out of buffers there is no point polling for + * completed descs; release the worker so delayed work can + * run in a timely manner + */ + if (sys->len == 0) + break; + + } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration); + + trace_poll_to_intr(sys->ep->client); + ipa_rx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa2_rx_poll() - Poll the rx packets from IPA HW. 
This + * function is exectued in the softirq context + * + * if input budget is zero, the driver switches back to + * interrupt mode + * + * return number of polled packets, on error 0(zero) + */ +int ipa2_rx_poll(u32 clnt_hdl, int weight) +{ + struct ipa_ep_context *ep; + int ret; + int cnt = 0; + unsigned int delay = 1; + struct sps_iovec iov; + + IPADBG("\n"); + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm 0x%x\n", clnt_hdl); + return cnt; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + while (cnt < weight && + atomic_read(&ep->sys->curr_polling_state)) { + + ret = ipa_poll_pkt(ep->sys, &iov); + if (ret) + break; + + ipa_wq_rx_common(ep->sys, iov.size); + cnt += IPA_WAN_AGGR_PKT_CNT; + } + + if (cnt == 0 || cnt < weight) { + ep->inactive_cycles++; + ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0); + + if (ep->inactive_cycles > 3 || ep->sys->len == 0) { + ep->switch_to_intr = true; + delay = 0; + } else if (cnt < weight) { + delay = 0; + } + queue_delayed_work(ep->sys->wq, + &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay)); + } else + ep->inactive_cycles = 0; + + return cnt; +} + +static void switch_to_intr_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work); + + if (sys->ep->napi_enabled) { + if (sys->ep->switch_to_intr) { + ipa_rx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI"); + sys->ep->switch_to_intr = false; + sys->ep->inactive_cycles = 0; + } else + sys->ep->client_notify(sys->ep->priv, + IPA_CLIENT_START_POLL, 0); + } else + ipa_handle_rx(sys); +} + +/** + * ipa_update_repl_threshold()- Update the repl_threshold for the client. + * + * Return value: None. + */ +void ipa_update_repl_threshold(enum ipa_client_type ipa_client) +{ + int ep_idx; + struct ipa_ep_context *ep; + + /* Check if ep is valid. */ + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPADBG("Invalid IPA client\n"); + return; + } + + ep = &ipa_ctx->ep[ep_idx]; + if (!ep->valid) { + IPADBG("EP not valid/Not applicable for client.\n"); + return; + } + /* + * Determine how many buffers/descriptors remaining will + * cause to drop below the yellow WM bar. + */ + if (ep->sys->rx_buff_sz) + ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys) + / ep->sys->rx_buff_sz; + else + ep->rx_replenish_threshold = 0; +} + +/** + * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform + * IPA EP configuration + * @sys_in: [in] input needed to setup BAM pipe and configure EP + * @clnt_hdl: [out] client handle + * + * - configure the end-point registers with the supplied + * parameters from the user. + * - call SPS APIs to create a system-to-bam connection with IPA. 
+ * - allocate descriptor FIFO + * - register callback function(ipa_sps_irq_rx_notify or + * ipa_sps_irq_tx_notify - depends on client type) in case the driver is + * not configured to pulling mode + * + * Returns: 0 on success, negative on failure + */ +int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + dma_addr_t dma_addr; + char buff[IPA_RESOURCE_NAME_MAX]; + struct iommu_domain *smmu_domain; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) { + IPAERR("bad parm client:%d fifo_sz:%d\n", + sys_in->client, sys_in->desc_fifo_sz); + goto fail_gen; + } + + ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + goto fail_gen; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + + if (ep->valid == 1) { + if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) { + IPAERR("EP already allocated.\n"); + goto fail_and_disable_clocks; + } else { + if (ipa2_cfg_ep_hdr(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr)) { + IPAERR("fail to configure hdr prop of EP.\n"); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa2_cfg_ep_cfg(ipa_ep_idx, + &sys_in->ipa_ep_cfg.cfg)) { + IPAERR("fail to configure cfg prop of EP.\n"); + result = -EFAULT; + goto fail_and_disable_clocks; + } + IPADBG("client %d (ep: %d) overlay ok sys=%p\n", + sys_in->client, ipa_ep_idx, ep->sys); + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + *clnt_hdl = ipa_ep_idx; + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + return 0; + } + } + + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); + + if (!ep->sys) { + ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL); + if (!ep->sys) { + IPAERR("failed to sys ctx for client %d\n", + sys_in->client); + result = -ENOMEM; + goto fail_and_disable_clocks; + } + + ep->sys->ep = ep; + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d", + sys_in->client); + ep->sys->wq = alloc_workqueue(buff, + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!ep->sys->wq) { + IPAERR("failed to create wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq; + } + + snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d", + sys_in->client); + ep->sys->repl_wq = alloc_workqueue(buff, + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!ep->sys->repl_wq) { + IPAERR("failed to create rep wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq2; + } + + INIT_LIST_HEAD(&ep->sys->head_desc_list); + INIT_LIST_HEAD(&ep->sys->rcycl_list); + spin_lock_init(&ep->sys->spinlock); + } else { + memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep)); + } + + ep->skip_ep_cfg = sys_in->skip_ep_cfg; + if (ipa_assign_policy(sys_in, ep->sys)) { + IPAERR("failed to sys ctx for client %d\n", sys_in->client); + result = -ENOMEM; + goto fail_gen2; + } + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->napi_enabled = sys_in->napi_enabled; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = sys_in->keep_ipa_awake; + atomic_set(&ep->avail_fifo_desc, + ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1)); + + if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) && + ep->sys->status_stat == NULL) { + 
ep->sys->status_stat = + kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL); + if (!ep->sys->status_stat) { + IPAERR("no memory\n"); + goto fail_gen2; + } + } + + result = ipa_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("skipping ep configuration\n"); + } + + /* Default Config */ + ep->ep_hdl = sps_alloc_endpoint(); + if (ep->ep_hdl == NULL) { + IPAERR("SPS EP allocation failed.\n"); + goto fail_gen2; + } + + result = sps_get_config(ep->ep_hdl, &ep->connect); + if (result) { + IPAERR("fail to get config.\n"); + goto fail_sps_cfg; + } + + /* Specific Config */ + if (IPA_CLIENT_IS_CONS(sys_in->client)) { + ep->connect.mode = SPS_MODE_SRC; + ep->connect.destination = SPS_DEV_HANDLE_MEM; + ep->connect.source = ipa_ctx->bam_handle; + ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++; + ep->connect.src_pipe_index = ipa_ep_idx; + /* + * Determine how many buffers/descriptors remaining will + * cause to drop below the yellow WM bar. + */ + if (ep->sys->rx_buff_sz) + ep->rx_replenish_threshold = + ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz; + else + ep->rx_replenish_threshold = 0; + /* Only when the WAN pipes are setup, actual threshold will + * be read from the register. So update LAN_CONS ep again with + * right value. + */ + if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS) + ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS); + } else { + ep->connect.mode = SPS_MODE_DEST; + ep->connect.source = SPS_DEV_HANDLE_MEM; + ep->connect.destination = ipa_ctx->bam_handle; + ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++; + ep->connect.dest_pipe_index = ipa_ep_idx; + } + + IPADBG("client:%d ep:%d", + sys_in->client, ipa_ep_idx); + + IPADBG("dest_pipe_index:%d src_pipe_index:%d\n", + ep->connect.dest_pipe_index, + ep->connect.src_pipe_index); + + ep->connect.options = ep->sys->sps_option; + ep->connect.desc.size = sys_in->desc_fifo_sz; + ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev, + ep->connect.desc.size, &dma_addr, GFP_KERNEL); + if (ipa_ctx->smmu_s1_bypass) { + ep->connect.desc.phys_base = dma_addr; + } else { + ep->connect.desc.iova = dma_addr; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + ep->connect.desc.phys_base = + iommu_iova_to_phys(smmu_domain, dma_addr); + } + } + if (ep->connect.desc.base == NULL) { + IPAERR("fail to get DMA desc memory.\n"); + goto fail_sps_cfg; + } + + ep->connect.event_thresh = IPA_EVENT_THRESHOLD; + + result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client); + if (result) { + IPAERR("sps_connect fails.\n"); + goto fail_sps_connect; + } + + ep->sys->event.options = SPS_O_EOT; + ep->sys->event.mode = SPS_TRIGGER_CALLBACK; + ep->sys->event.xfer_done = NULL; + ep->sys->event.user = ep->sys; + ep->sys->event.callback = ep->sys->sps_callback; + result = sps_register_event(ep->ep_hdl, &ep->sys->event); + if (result < 0) { + IPAERR("register event error %d\n", result); + goto fail_register_event; + } + + *clnt_hdl = ipa_ep_idx; + + if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) { + ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1; + ep->sys->repl.cache = 
kcalloc(ep->sys->repl.capacity, + sizeof(void *), GFP_KERNEL); + if (!ep->sys->repl.cache) { + IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx); + ep->sys->repl_hdlr = ipa_replenish_rx_cache; + ep->sys->repl.capacity = 0; + } else { + atomic_set(&ep->sys->repl.head_idx, 0); + atomic_set(&ep->sys->repl.tail_idx, 0); + ipa_wq_repl_rx(&ep->sys->repl_work); + } + } + + if (IPA_CLIENT_IS_CONS(sys_in->client)) + ipa_replenish_rx_cache(ep->sys); + + if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) { + ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW); + atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt); + } + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) { + if (ipa_ctx->modem_cfg_emb_pipe_flt && + sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa_install_dflt_flt_rules(ipa_ep_idx); + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_register_event: + sps_disconnect(ep->ep_hdl); +fail_sps_connect: + dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); +fail_sps_cfg: + sps_free_endpoint(ep->ep_hdl); +fail_gen2: + destroy_workqueue(ep->sys->repl_wq); +fail_wq2: + destroy_workqueue(ep->sys->wq); +fail_wq: + kfree(ep->sys); + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +/** + * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP + * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe + * + * Returns: 0 on success, negative on failure + */ +int ipa2_teardown_sys_pipe(u32 clnt_hdl) +{ + struct ipa_ep_context *ep; + int empty; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_disable_data_path(clnt_hdl); + if (ep->napi_enabled) { + ep->switch_to_intr = true; + do { + usleep_range(95, 105); + } while (atomic_read(&ep->sys->curr_polling_state)); + } + + if (IPA_CLIENT_IS_PROD(ep->client)) { + do { + spin_lock_bh(&ep->sys->spinlock); + empty = list_empty(&ep->sys->head_desc_list); + spin_unlock_bh(&ep->sys->spinlock); + if (!empty) + usleep_range(95, 105); + else + break; + } while (1); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + cancel_delayed_work_sync(&ep->sys->replenish_rx_work); + cancel_delayed_work_sync(&ep->sys->switch_to_intr_work); + } + + flush_workqueue(ep->sys->wq); + sps_disconnect(ep->ep_hdl); + dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); + sps_free_endpoint(ep->ep_hdl); + if (ep->sys->repl_wq) + flush_workqueue(ep->sys->repl_wq); + if (IPA_CLIENT_IS_CONS(ep->client)) + ipa_cleanup_rx(ep->sys); + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) { + if (ipa_ctx->modem_cfg_emb_pipe_flt && + ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa_delete_dflt_flt_rules(clnt_hdl); + } + + if (IPA_CLIENT_IS_WLAN_CONS(ep->client)) + atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt); + + 
memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats)); + + if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt)) + ipa_cleanup_wlan_rx_common_cache(); + + ep->valid = 0; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +/** + * ipa_tx_comp_usr_notify_release() - Callback function which will call the + * user supplied callback function to release the skb, or release it on + * its own if no callback function was supplied. + * @user1 + * @user2 + * + * This notified callback is for the destination client. + * This function is supplied in ipa_connect. + */ +static void ipa_tx_comp_usr_notify_release(void *user1, int user2) +{ + struct sk_buff *skb = (struct sk_buff *)user1; + int ep_idx = user2; + + IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx); + + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl); + + if (ipa_ctx->ep[ep_idx].client_notify) + ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)skb); + else + dev_kfree_skb_any(skb); +} + +static void ipa_tx_cmd_comp(void *user1, int user2) +{ + kfree(user1); +} + +/** + * ipa2_tx_dp() - Data-path tx handler + * @dst: [in] which IPA destination to route tx packets to + * @skb: [in] the packet to send + * @metadata: [in] TX packet meta-data + * + * Data-path tx handler, this is used for both SW data-path which by-passes most + * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If + * dst is a "valid" CONS type, then SW data-path is used. If dst is the + * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else + * is an error. For errors, client needs to free the skb as needed. For success, + * IPA driver will later invoke client callback if one was supplied. That + * callback should free the skb. If no callback supplied, IPA driver will free + * the skb internally + * + * The function will use two descriptors for this send command + * (for A5_WLAN_AMPDU_PROD only one desciprtor will be sent), + * the first descriptor will be used to inform the IPA hardware that + * apps need to push data into the IPA (IP_PACKET_INIT immediate command). + * Once this send was done from SPS point-of-view the IPA driver will + * get notified by the supplied callback - ipa_sps_irq_tx_comp() + * + * ipa_sps_irq_tx_comp will call to the user supplied + * callback (from ipa_connect) + * + * Returns: 0 on success, negative on failure + */ +int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *meta) +{ + struct ipa_desc *desc; + struct ipa_desc _desc[2]; + int dst_ep_idx; + struct ipa_ip_packet_init *cmd; + struct ipa_sys_context *sys; + int src_ep_idx; + int num_frags, f; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (skb->len == 0) { + IPAERR("packet size is 0\n"); + return -EINVAL; + } + + num_frags = skb_shinfo(skb)->nr_frags; + if (num_frags) { + /* 1 desc is needed for the linear portion of skb; + * 1 desc may be needed for the PACKET_INIT; + * 1 desc for each frag + */ + desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC); + if (!desc) { + IPAERR("failed to alloc desc array\n"); + goto fail_mem; + } + } else { + memset(_desc, 0, 2 * sizeof(struct ipa_desc)); + desc = &_desc[0]; + } + + /* + * USB_CONS: PKT_INIT ep_idx = dst pipe + * Q6_CONS: PKT_INIT ep_idx = sender pipe + * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe + * + * LAN TX: all PKT_INIT + * WAN TX: PKT_INIT (cmd) + HW (data) + * + */ + if (IPA_CLIENT_IS_CONS(dst)) { + src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_LAN_WAN_PROD); + goto fail_gen; + } + dst_ep_idx = ipa2_get_ep_mapping(dst); + } else { + src_ep_idx = ipa2_get_ep_mapping(dst); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", dst); + goto fail_gen; + } + if (meta && meta->pkt_init_dst_ep_valid) + dst_ep_idx = meta->pkt_init_dst_ep; + else + dst_ep_idx = -1; + } + + sys = ipa_ctx->ep[src_ep_idx].sys; + + if (!sys->ep->valid) { + IPAERR("pipe not valid\n"); + goto fail_gen; + } + + if (dst_ep_idx != -1) { + /* SW data path */ + cmd = kzalloc(sizeof(struct ipa_ip_packet_init), flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_gen; + } + + cmd->destination_pipe_index = dst_ep_idx; + desc[0].opcode = IPA_IP_PACKET_INIT; + desc[0].pyld = cmd; + desc[0].len = sizeof(struct ipa_ip_packet_init); + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = ipa_tx_cmd_comp; + desc[0].user1 = cmd; + desc[1].pyld = skb->data; + desc[1].len = skb_headlen(skb); + desc[1].type = IPA_DATA_DESC_SKB; + desc[1].callback = ipa_tx_comp_usr_notify_release; + desc[1].user1 = skb; + desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid && + meta->pkt_init_dst_ep_remote) ? 
+ src_ep_idx : + dst_ep_idx; + if (meta && meta->dma_address_valid) { + desc[1].dma_address_valid = true; + desc[1].dma_address = meta->dma_address; + } + + for (f = 0; f < num_frags; f++) { + desc[2+f].frag = &skb_shinfo(skb)->frags[f]; + desc[2+f].type = IPA_DATA_DESC_SKB_PAGED; + desc[2+f].len = skb_frag_size(desc[2+f].frag); + } + + /* don't free skb till frag mappings are released */ + if (num_frags) { + desc[2+f-1].callback = desc[1].callback; + desc[2+f-1].user1 = desc[1].user1; + desc[2+f-1].user2 = desc[1].user2; + desc[1].callback = NULL; + } + + if (ipa_send(sys, num_frags + 2, desc, true)) { + IPAERR("fail to send skb %p num_frags %u SWP\n", + skb, num_frags); + goto fail_send; + } + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts); + } else { + /* HW data path */ + desc[0].pyld = skb->data; + desc[0].len = skb_headlen(skb); + desc[0].type = IPA_DATA_DESC_SKB; + desc[0].callback = ipa_tx_comp_usr_notify_release; + desc[0].user1 = skb; + desc[0].user2 = src_ep_idx; + + if (meta && meta->dma_address_valid) { + desc[0].dma_address_valid = true; + desc[0].dma_address = meta->dma_address; + } + + if (num_frags == 0) { + if (ipa_send_one(sys, desc, true)) { + IPAERR("fail to send skb %p HWP\n", skb); + goto fail_gen; + } + } else { + for (f = 0; f < num_frags; f++) { + desc[1+f].frag = &skb_shinfo(skb)->frags[f]; + desc[1+f].type = IPA_DATA_DESC_SKB_PAGED; + desc[1+f].len = skb_frag_size(desc[1+f].frag); + } + + /* don't free skb till frag mappings are released */ + desc[1+f-1].callback = desc[0].callback; + desc[1+f-1].user1 = desc[0].user1; + desc[1+f-1].user2 = desc[0].user2; + desc[0].callback = NULL; + + if (ipa_send(sys, num_frags + 1, desc, true)) { + IPAERR("fail to send skb %p num_frags %u HWP\n", + skb, num_frags); + goto fail_gen; + } + } + + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts); + } + + if (num_frags) { + kfree(desc); + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear); + } + + return 0; + +fail_send: + kfree(cmd); +fail_gen: + if (num_frags) + kfree(desc); +fail_mem: + return -EFAULT; +} + +static void ipa_wq_handle_rx(struct work_struct *work) +{ + struct ipa_sys_context *sys; + + sys = container_of(work, struct ipa_sys_context, work); + + if (sys->ep->napi_enabled) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI"); + sys->ep->client_notify(sys->ep->priv, + IPA_CLIENT_START_POLL, 0); + } else + ipa_handle_rx(sys); +} + +static void ipa_wq_repl_rx(struct work_struct *work) +{ + struct ipa_sys_context *sys; + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + u32 next; + u32 curr; + + sys = container_of(work, struct ipa_sys_context, repl_work); + curr = atomic_read(&sys->repl.tail_idx); + +begin: + while (1) { + next = (curr + 1) % sys->repl.capacity; + if (next == atomic_read(&sys->repl.head_idx)) + goto fail_kmem_cache_alloc; + + rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) { + pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n", + __func__, sys); + goto fail_kmem_cache_alloc; + } + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + pr_err_ratelimited("%s fail alloc skb sys=%p\n", + __func__, sys); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { + pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n", + __func__, (void *)rx_pkt->data.dma_addr, + ptr, sys); + goto fail_dma_mapping; + } + + sys->repl.cache[curr] = rx_pkt; + curr = next; + /* ensure write is done before setting tail index */ + mb(); + atomic_set(&sys->repl.tail_idx, next); + } + + return; + +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (atomic_read(&sys->repl.tail_idx) == + atomic_read(&sys->repl.head_idx)) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty); + else + WARN_ON(1); + pr_err_ratelimited("%s sys=%p repl ring empty\n", + __func__, sys); + goto begin; + } +} + +static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys) +{ + struct ipa_rx_pkt_wrapper *rx_pkt = NULL; + struct ipa_rx_pkt_wrapper *tmp; + int ret; + u32 rx_len_cached = 0; + + IPADBG_LOW("\n"); + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + rx_len_cached = sys->len; + + if (rx_len_cached < sys->rx_pool_sz) { + list_for_each_entry_safe(rx_pkt, tmp, + &ipa_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + + if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0) + ipa_ctx->wc_memb.wlan_comm_free_cnt--; + + INIT_LIST_HEAD(&rx_pkt->link); + rx_pkt->len = 0; + rx_pkt->sys = sys; + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, + IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + goto fail_sps_transfer; + } + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + + if (rx_len_cached >= sys->rx_pool_sz) { + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + return; + } + } + } + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + if (rx_len_cached < sys->rx_pool_sz && + ipa_ctx->wc_memb.wlan_comm_total_cnt < + IPA_WLAN_COMM_RX_POOL_HIGH) { + ipa_replenish_rx_cache(sys); + ipa_ctx->wc_memb.wlan_comm_total_cnt += + (sys->rx_pool_sz - rx_len_cached); + } + + return; + +fail_sps_transfer: + list_del(&rx_pkt->link); + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); +} + +static void ipa_cleanup_wlan_rx_common_cache(void) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + struct ipa_rx_pkt_wrapper *tmp; + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + list_for_each_entry_safe(rx_pkt, tmp, + &ipa_ctx->wc_memb.wlan_comm_desc_list, link) 
{ + list_del(&rx_pkt->link); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + ipa_ctx->wc_memb.wlan_comm_free_cnt--; + ipa_ctx->wc_memb.wlan_comm_total_cnt--; + } + ipa_ctx->wc_memb.total_tx_pkts_freed = 0; + + if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0) + IPAERR("wlan comm buff free cnt: %d\n", + ipa_ctx->wc_memb.wlan_comm_free_cnt); + + if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0) + IPAERR("wlan comm buff total cnt: %d\n", + ipa_ctx->wc_memb.wlan_comm_total_cnt); + + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + +} + +static void ipa_alloc_wlan_rx_common_cache(u32 size) +{ + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + int rx_len_cached = 0; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN | + (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt; + while (rx_len_cached < size) { + rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) { + IPAERR("failed to alloc rx wrapper\n"); + goto fail_kmem_cache_alloc; + } + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + + rx_pkt->data.skb = + ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ, + flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, + IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %p for %p\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + list_add_tail(&rx_pkt->link, + &ipa_ctx->wc_memb.wlan_comm_desc_list); + rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt; + + ipa_ctx->wc_memb.wlan_comm_free_cnt++; + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + } + + return; + +fail_dma_mapping: + dev_kfree_skb_any(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + return; +} + + +/** + * ipa_replenish_rx_cache() - Replenish the Rx packets cache. + * + * The function allocates buffers in the rx_pkt_wrapper_cache cache until there + * are IPA_RX_POOL_CEIL buffers in the cache. + * - Allocate a buffer in the cache + * - Initialized the packets link + * - Initialize the packets work struct + * - Allocate the packets socket buffer (skb) + * - Fill the packets skb with data + * - Make the packet DMAable + * - Add the packet to the system pipe linked list + * - Initiate a SPS transfer so that SPS driver will use this packet later. + */ +static void ipa_replenish_rx_cache(struct ipa_sys_context *sys) +{ + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN | + (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) { + IPAERR("failed to alloc rx wrapper\n"); + goto fail_kmem_cache_alloc; + } + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %p for %p\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + spin_unlock_bh(&sys->spinlock); + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + goto fail_sps_transfer; + } + } + + return; + +fail_sps_transfer: + spin_lock_bh(&sys->spinlock); + list_del(&rx_pkt->link); + rx_len_cached = --sys->len; + spin_unlock_bh(&sys->spinlock); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +} + +static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys) +{ + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + if (list_empty(&sys->rcycl_list)) + goto fail_kmem_cache_alloc; + + spin_lock_bh(&sys->spinlock); + rx_pkt = list_first_entry(&sys->rcycl_list, + struct ipa_rx_pkt_wrapper, link); + list_del(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); + INIT_LIST_HEAD(&rx_pkt->link); + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, + ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure for rx_pkt\n"); + goto fail_dma_mapping; + } + + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + spin_unlock_bh(&sys->spinlock); + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + goto fail_sps_transfer; + } + } + + return; +fail_sps_transfer: + spin_lock_bh(&sys->spinlock); + rx_len_cached = --sys->len; + list_del(&rx_pkt->link); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); +fail_dma_mapping: + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->rcycl_list); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +} + +static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + 
int ret; + int rx_len_cached = 0; + u32 curr; + + rx_len_cached = sys->len; + curr = atomic_read(&sys->repl.head_idx); + + while (rx_len_cached < sys->rx_pool_sz) { + if (curr == atomic_read(&sys->repl.tail_idx)) { + queue_work(sys->repl_wq, &sys->repl_work); + break; + } + + rx_pkt = sys->repl.cache[curr]; + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + spin_unlock_bh(&sys->spinlock); + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + list_del(&rx_pkt->link); + break; + } + rx_len_cached = ++sys->len; + sys->repl_trig_cnt++; + curr = (curr + 1) % sys->repl.capacity; + /* ensure write is done before setting head index */ + mb(); + atomic_set(&sys->repl.head_idx, curr); + } + + if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0) + queue_work(sys->repl_wq, &sys->repl_work); + + if (rx_len_cached <= sys->ep->rx_replenish_threshold) { + if (rx_len_cached == 0) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty); + else + WARN_ON(1); + } + sys->repl_trig_cnt = 0; + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); + } +} + +static void replenish_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + sys->repl_hdlr(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa_cleanup_rx() - release RX queue resources + * + */ +static void ipa_cleanup_rx(struct ipa_sys_context *sys) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + struct ipa_rx_pkt_wrapper *r; + u32 head; + u32 tail; + + spin_lock_bh(&sys->spinlock); + list_for_each_entry_safe(rx_pkt, r, + &sys->head_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + + list_for_each_entry_safe(rx_pkt, r, + &sys->rcycl_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + spin_unlock_bh(&sys->spinlock); + + if (sys->repl.cache) { + head = atomic_read(&sys->repl.head_idx); + tail = atomic_read(&sys->repl.tail_idx); + while (head != tail) { + rx_pkt = sys->repl.cache[head]; + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + head = (head + 1) % sys->repl.capacity; + } + kfree(sys->repl.cache); + } +} + +static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len) +{ + struct sk_buff *skb2 = NULL; + + skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL); + if (likely(skb2)) { + /* Set the data pointer */ + skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM); + memcpy(skb2->data, skb->data, len); + skb2->len = len; + skb_set_tail_pointer(skb2, len); + } + + return skb2; +} + +static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa_sys_context *sys) +{ + struct ipa_hw_pkt_status *status; + 
struct sk_buff *skb2; + int pad_len_byte; + int len; + unsigned char *buf; + int src_pipe; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + u32 skb2_len; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + + if (skb->len == 0) { + IPAERR("ZLT\n"); + sys->free_skb(skb); + goto out; + } + + if (sys->len_partial) { + IPADBG_LOW("len_partial %d\n", sys->len_partial); + buf = skb_push(skb, sys->len_partial); + memcpy(buf, sys->prev_skb->data, sys->len_partial); + sys->len_partial = 0; + sys->free_skb(sys->prev_skb); + sys->prev_skb = NULL; + goto begin; + } + + /* this pipe has TX comp (status only) + mux-ed LAN RX data + * (status+data) + */ + if (sys->len_rem) { + IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len, + sys->len_pad); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + sys->len_rem, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, sys->len_rem), + skb->data, sys->len_rem); + skb_trim(skb2, + skb2->len - sys->len_pad); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + if (sys->drop_packet) + dev_kfree_skb_any(skb2); + else + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + sys->len_pad = 0; + } else { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + skb->len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, skb->len), + skb->data, skb->len); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + sys->free_skb(skb); + goto out; + } + } + +begin: + while (skb->len) { + sys->drop_packet = false; + IPADBG_LOW("LEN_REM %d\n", skb->len); + + if (skb->len < IPA_PKT_STATUS_SIZE) { + WARN_ON(sys->prev_skb != NULL); + IPADBG("status straddles buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + goto out; + } + + status = (struct ipa_hw_pkt_status *)skb->data; + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + *status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if (status->status_opcode != + IPA_HW_STATUS_OPCODE_DROPPED_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_XLAT_PACKET) { + IPAERR("unsupported opcode(%d)\n", + status->status_opcode); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + continue; + } + IPA_STATS_EXCP_CNT(status->exception, + ipa_ctx->stats.rx_excp_pkts); + if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes || + status->endp_src_idx >= ipa_ctx->ipa_num_pipes) { + IPAERR("status fields invalid\n"); + IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); + WARN_ON(1); + ipa_assert(); + } + if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) { + struct ipa_tag_completion *comp; + + IPADBG_LOW("TAG packet arrived\n"); + if (status->tag_f_2 == IPA_COOKIE) { + skb_pull(skb, 
IPA_PKT_STATUS_SIZE); + if (skb->len < sizeof(comp)) { + IPAERR("TAG arrived without packet\n"); + goto out; + } + memcpy(&comp, skb->data, sizeof(comp)); + skb_pull(skb, sizeof(comp) + + IPA_SIZE_DL_CSUM_META_TRAILER); + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + continue; + } else { + IPADBG("ignoring TAG with wrong cookie\n"); + } + } + if (status->pkt_len == 0) { + IPADBG("Skip aggr close status\n"); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close); + IPA_STATS_DEC_CNT( + ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]); + continue; + } + if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) { + /* RX data */ + src_pipe = status->endp_src_idx; + + /* + * A packet which is received back to the AP after + * there was no route match. + */ + if (!status->exception && !status->route_match) + sys->drop_packet = true; + + if (skb->len == IPA_PKT_STATUS_SIZE && + !status->exception) { + WARN_ON(sys->prev_skb != NULL); + IPADBG_LOW("Ins header in next buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + goto out; + } + + pad_len_byte = ((status->pkt_len + 3) & ~3) - + status->pkt_len; + + len = status->pkt_len + pad_len_byte + + IPA_SIZE_DL_CSUM_META_TRAILER; + IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte, + status->pkt_len, len); + + if (status->exception == + IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) { + IPADBG_LOW("Dropping packet"); + IPADBG_LOW(" on DeAggr Exception\n"); + sys->drop_packet = true; + } + + skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE; + skb2_len = min(skb2_len, skb->len); + skb2 = ipa_skb_copy_for_client(skb, skb2_len); + if (likely(skb2)) { + if (skb->len < len + IPA_PKT_STATUS_SIZE) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, len); + sys->prev_skb = skb2; + sys->len_rem = len - skb->len + + IPA_PKT_STATUS_SIZE; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, status->pkt_len + + IPA_PKT_STATUS_SIZE); + IPADBG_LOW("rx avail for %d\n", + status->endp_dest_idx); + if (sys->drop_packet) { + dev_kfree_skb_any(skb2); + } else if (status->pkt_len > + IPA_GENERIC_AGGR_BYTE_LIMIT * + 1024) { + IPAERR("packet size invalid\n"); + IPAERR("STATUS opcode=%d\n", + status->status_opcode); + IPAERR("src=%d dst=%d len=%d\n", + status->endp_src_idx, + status->endp_dest_idx, + status->pkt_len); + ipa_assert(); + } else { + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(len + + IPA_PKT_STATUS_SIZE, 32) * + unused / used_align); + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + skb_pull(skb, len + + IPA_PKT_STATUS_SIZE); + } + } else { + IPAERR("fail to alloc skb\n"); + if (skb->len < len) { + sys->prev_skb = NULL; + sys->len_rem = len - skb->len + + IPA_PKT_STATUS_SIZE; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, len + + IPA_PKT_STATUS_SIZE); + } + } + /* TX comp */ + ipa_wq_write_done_status(src_pipe); + IPADBG_LOW("tx comp imp for %d\n", src_pipe); + } else { + /* TX comp */ + ipa_wq_write_done_status(status->endp_src_idx); + IPADBG_LOW + ("tx comp exp for %d\n", status->endp_src_idx); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl); + IPA_STATS_DEC_CNT( + ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]); + } + } + +out: + return 0; +} + +static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb, + struct sk_buff *skb, unsigned int len) +{ + struct sk_buff *skb2; + + skb2 = 
skb_copy_expand(prev_skb, 0, + len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, len), + skb->data, len); + } else { + IPAERR("copy expand failed\n"); + skb2 = NULL; + } + dev_kfree_skb_any(prev_skb); + + return skb2; +} + +static void wan_rx_handle_splt_pyld(struct sk_buff *skb, + struct ipa_sys_context *sys) +{ + struct sk_buff *skb2; + + IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = join_prev_skb(sys->prev_skb, skb, + sys->len_rem); + if (likely(skb2)) { + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, IPA_PKT_STATUS_SIZE); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + } else { + if (sys->prev_skb) { + skb2 = join_prev_skb(sys->prev_skb, skb, + skb->len); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + skb_pull(skb, skb->len); + } +} + +static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa_sys_context *sys) +{ + struct ipa_hw_pkt_status *status; + struct sk_buff *skb2; + __be16 pkt_len_with_pad; + u32 qmap_hdr; + int checksum_trailer_exists; + int frame_len; + int ep_idx; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + if (skb->len == 0) { + IPAERR("ZLT\n"); + goto bail; + } + + if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) { + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb)); + return 0; + } + if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) { + IPAERR("Recycle should enable only with GRO Aggr\n"); + ipa_assert(); + } + /* + * payload splits across 2 buff or more, + * take the start of the payload from prev_skb + */ + if (sys->len_rem) + wan_rx_handle_splt_pyld(skb, sys); + + + while (skb->len) { + IPADBG_LOW("LEN_REM %d\n", skb->len); + if (skb->len < IPA_PKT_STATUS_SIZE) { + IPAERR("status straddles buffer\n"); + WARN_ON(1); + goto bail; + } + status = (struct ipa_hw_pkt_status *)skb->data; + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); + + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + *status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if (status->status_opcode != + IPA_HW_STATUS_OPCODE_DROPPED_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_XLAT_PACKET) { + IPAERR("unsupported opcode\n"); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + continue; + } + IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts); + if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes || + status->endp_src_idx >= ipa_ctx->ipa_num_pipes || + status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) { + IPAERR("status fields invalid\n"); + WARN_ON(1); + goto bail; + } + if (status->pkt_len == 0) { + IPADBG_LOW("Skip aggr close status\n"); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts); + IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close); + continue; + } + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (status->endp_dest_idx != ep_idx) { + IPAERR("expected endp_dest_idx %d received %d\n", + ep_idx, 
status->endp_dest_idx); + WARN_ON(1); + goto bail; + } + /* RX data */ + if (skb->len == IPA_PKT_STATUS_SIZE) { + IPAERR("Ins header in next buffer\n"); + WARN_ON(1); + goto bail; + } + qmap_hdr = *(u32 *)(status+1); + /* + * Take the pkt_len_with_pad from the last 2 bytes of the QMAP + * header + */ + + /*QMAP is BE: convert the pkt_len field from BE to LE*/ + pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff); + IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad); + /*get the CHECKSUM_PROCESS bit*/ + checksum_trailer_exists = status->status_mask & + IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS; + IPADBG_LOW("checksum_trailer_exists %d\n", + checksum_trailer_exists); + + frame_len = IPA_PKT_STATUS_SIZE + + IPA_QMAP_HEADER_LENGTH + + pkt_len_with_pad; + if (checksum_trailer_exists) + frame_len += IPA_DL_CHECKSUM_LENGTH; + IPADBG_LOW("frame_len %d\n", frame_len); + + skb2 = skb_clone(skb, GFP_KERNEL); + if (likely(skb2)) { + /* + * the len of actual data is smaller than expected + * payload split across 2 buff + */ + if (skb->len < frame_len) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, frame_len); + sys->prev_skb = skb2; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, frame_len); + IPADBG_LOW("rx avail for %d\n", + status->endp_dest_idx); + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, IPA_PKT_STATUS_SIZE); + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(frame_len, 32) * + unused / used_align); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb2)); + skb_pull(skb, frame_len); + } + } else { + IPAERR("fail to clone\n"); + if (skb->len < frame_len) { + sys->prev_skb = NULL; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, frame_len); + } + } + } +bail: + sys->free_skb(skb); + return 0; +} + +static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys) +{ + struct ipa_a5_mux_hdr *mux_hdr; + unsigned int pull_len; + unsigned int padding; + struct ipa_ep_context *ep; + unsigned int src_pipe; + + mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data; + + src_pipe = mux_hdr->src_pipe_index; + + IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len); + + IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts); + IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts); + + /* + * Any packets arriving over AMPDU_TX should be dispatched + * to the regular WLAN RX data-path. 
+ */ + if (unlikely(src_pipe == WLAN_AMPDU_TX_EP)) + src_pipe = WLAN_PROD_TX_EP; + + ep = &ipa_ctx->ep[src_pipe]; + spin_lock(&ipa_ctx->disconnect_lock); + if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes || + !ep->valid || !ep->client_notify)) { + IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n", + src_pipe, ep->valid, ep->client_notify); + dev_kfree_skb_any(rx_skb); + spin_unlock(&ipa_ctx->disconnect_lock); + return 0; + } + + pull_len = sizeof(struct ipa_a5_mux_hdr); + + /* + * IP packet starts on word boundary + * remove the MUX header and any padding and pass the frame to + * the client which registered a rx callback on the "src pipe" + */ + padding = ep->cfg.hdr.hdr_len & 0x3; + if (padding) + pull_len += 4 - padding; + + IPADBG("pulling %d bytes from skb\n", pull_len); + skb_pull(rx_skb, pull_len); + ep->client_notify(ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + spin_unlock(&ipa_ctx->disconnect_lock); + return 0; +} + +static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags) +{ + return __dev_alloc_skb(len, flags); +} + +static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len, + gfp_t flags) +{ + struct sk_buff *skb; + + skb = __dev_alloc_skb(len + IPA_HEADROOM, flags); + if (skb) + skb_reserve(skb, IPA_HEADROOM); + + return skb; +} + +static void ipa_free_skb_rx(struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + struct sk_buff *rx_skb = (struct sk_buff *)data; + struct ipa_hw_pkt_status *status; + struct ipa_ep_context *ep; + unsigned int src_pipe; + u32 metadata; + u8 ucp; + + status = (struct ipa_hw_pkt_status *)rx_skb->data; + src_pipe = status->endp_src_idx; + metadata = status->metadata; + ucp = status->ucp; + ep = &ipa_ctx->ep[src_pipe]; + if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes || + !ep->valid || + !ep->client_notify)) { + IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n", + src_pipe, ep->valid, ep->client_notify); + dev_kfree_skb_any(rx_skb); + return; + } + if (!status->exception) + skb_pull(rx_skb, IPA_PKT_STATUS_SIZE + + IPA_LAN_RX_HEADER_LENGTH); + else + skb_pull(rx_skb, IPA_PKT_STATUS_SIZE); + + /* + * Metadata Info + * ------------------------------------------ + * | 3 | 2 | 1 | 0 | + * | fw_desc | vdev_id | qmap mux id | Resv | + * ------------------------------------------ + */ + *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF); + *(u8 *)(rx_skb->cb + 4) = ucp; + IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n", + metadata, *(u32 *)rx_skb->cb); + IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4)); + + ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb)); +} + +void ipa2_recycle_wan_skb(struct sk_buff *skb) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + int ep_idx = ipa2_get_ep_mapping( + IPA_CLIENT_APPS_WAN_CONS); + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN | + (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + if (unlikely(ep_idx == -1)) { + IPAERR("dest EP does not exist\n"); + ipa_assert(); + } + + rx_pkt = kmem_cache_zalloc( + ipa_ctx->rx_pkt_wrapper_cache, flag); + if (!rx_pkt) + ipa_assert(); + + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + rx_pkt->sys = ipa_ctx->ep[ep_idx].sys; + + rx_pkt->data.skb = skb; + rx_pkt->data.dma_addr = 0; + ipa_skb_recycle(rx_pkt->data.skb); + skb_reserve(rx_pkt->data.skb, IPA_HEADROOM); + INIT_LIST_HEAD(&rx_pkt->link); + spin_lock_bh(&rx_pkt->sys->spinlock); + list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list); + spin_unlock_bh(&rx_pkt->sys->spinlock); +} + +static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size) +{ + struct ipa_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + spin_unlock_bh(&sys->spinlock); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + if (size) + rx_pkt_expected->len = size; + rx_skb = rx_pkt_expected->data.skb; + dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + *(unsigned int *)rx_skb->cb = rx_skb->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->pyld_hdlr(rx_skb, sys); + sys->repl_hdlr(sys); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected); + +} + +static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size) +{ + struct ipa_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + spin_unlock_bh(&sys->spinlock); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + + if (size) + rx_pkt_expected->len = size; + + rx_skb = rx_pkt_expected->data.skb; + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->ep->wstats.tx_pkts_rcvd++; + if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) { + ipa2_free_skb(&rx_pkt_expected->data); + sys->ep->wstats.tx_pkts_dropped++; + } else { + sys->ep->wstats.tx_pkts_sent++; + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(&rx_pkt_expected->data)); + } + ipa_replenish_wlan_rx_cache(sys); +} + +static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys, + struct sps_iovec *iovec) +{ + IPADBG_LOW("ENTER.\n"); + if (unlikely(list_empty(&sys->head_desc_list))) { + IPAERR("descriptor list is empty!\n"); + WARN_ON(1); + return; + } + if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) { + IPAERR("received unexpected event. 
sps flag is 0x%x\n" + , iovec->flags); + WARN_ON(1); + return; + } + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(iovec)); + IPADBG("EXIT\n"); +} + +static void ipa_wq_rx_avail(struct work_struct *work) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + struct ipa_sys_context *sys; + + rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work); + if (unlikely(rx_pkt == NULL)) + WARN_ON(1); + sys = rx_pkt->sys; + ipa_wq_rx_common(sys, 0); +} + +static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb, + struct ipa_sys_context *sys) +{ + if (sys->ep->client_notify) { + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + } else { + dev_kfree_skb_any(rx_skb); + WARN_ON(1); + } + + return 0; +} + +static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in, + struct ipa_sys_context *sys) +{ + unsigned long aggr_byte_limit; + + sys->ep->status.status_en = true; + sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX; + if (IPA_CLIENT_IS_PROD(in->client)) { + if (!sys->ep->skip_ep_cfg) { + sys->policy = IPA_POLICY_NOINTR_MODE; + sys->sps_option = SPS_O_AUTO_ENABLE; + sys->sps_callback = NULL; + sys->ep->status.status_ep = ipa2_get_ep_mapping( + IPA_CLIENT_APPS_LAN_CONS); + if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) + sys->ep->status.status_en = false; + } else { + sys->policy = IPA_POLICY_INTR_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | + SPS_O_EOT); + sys->sps_callback = + ipa_sps_irq_tx_no_aggr_notify; + } + return 0; + } + + aggr_byte_limit = + (unsigned long)IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + + if (in->client == IPA_CLIENT_APPS_LAN_CONS || + in->client == IPA_CLIENT_APPS_WAN_CONS) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + INIT_WORK(&sys->repl_work, ipa_wq_repl_rx); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ( + IPA_GENERIC_RX_BUFF_BASE_SZ) - + IPA_HEADROOM; + sys->get_skb = ipa_get_skb_ipa_rx_headroom; + sys->free_skb = ipa_free_skb_rx; + in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + in->ipa_ep_cfg.aggr.aggr_time_limit = + IPA_GENERIC_AGGR_TIME_LIMIT; + if (in->client == IPA_CLIENT_APPS_LAN_CONS) { + sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr; + sys->rx_pool_sz = + ipa_ctx->lan_rx_ring_size; + if (nr_cpu_ids > 1) { + sys->repl_hdlr = + ipa_fast_replenish_rx_cache; + sys->repl_trig_thresh = + sys->rx_pool_sz / 8; + } else { + sys->repl_hdlr = + ipa_replenish_rx_cache; + } + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_LAN_RX; + } else if (in->client == + IPA_CLIENT_APPS_WAN_CONS) { + sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr; + sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size; + if (nr_cpu_ids > 1) { + sys->repl_hdlr = + ipa_fast_replenish_rx_cache; + sys->repl_trig_thresh = + sys->rx_pool_sz / 8; + } else { + sys->repl_hdlr = + ipa_replenish_rx_cache; + } + if (in->napi_enabled && in->recycle_enabled) + sys->repl_hdlr = + ipa_replenish_rx_cache_recycle; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_WAN_RX; + 
in->ipa_ep_cfg.aggr.aggr_sw_eof_active + = true; + if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) { + IPAERR("get close-by %u\n", + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit); + /* disable ipa_status */ + sys->ep->status.status_en = false; + sys->rx_buff_sz = + IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit - IPA_HEADROOM)); + in->ipa_ep_cfg.aggr.aggr_byte_limit = + sys->rx_buff_sz < in->ipa_ep_cfg.aggr.aggr_byte_limit ? + IPA_ADJUST_AGGR_BYTE_LIMIT( + sys->rx_buff_sz) : + IPA_ADJUST_AGGR_BYTE_LIMIT( + in->ipa_ep_cfg.aggr.aggr_byte_limit); + IPAERR("set aggr_limit %lu\n", + (unsigned long) + in->ipa_ep_cfg.aggr.aggr_byte_limit); + } else { + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + } + } + } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ; + sys->rx_pool_sz = in->desc_fifo_sz / + sizeof(struct sps_iovec) - 1; + if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ) + sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ; + sys->pyld_hdlr = NULL; + sys->repl_hdlr = ipa_replenish_wlan_rx_cache; + sys->get_skb = ipa_get_skb_ipa_rx; + sys->free_skb = ipa_free_skb_rx; + in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_WLAN_RX; + } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ; + sys->rx_pool_sz = in->desc_fifo_sz / + sizeof(struct sps_iovec) - 1; + if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ) + sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ; + sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr; + sys->get_skb = ipa_get_skb_ipa_rx; + sys->free_skb = ipa_free_skb_rx; + sys->repl_hdlr = ipa_replenish_rx_cache; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_ODU_RX; + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_NOINTR_MODE; + sys->sps_option = 
SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + } else { + IPAERR("Need to install a RX pipe hdlr\n"); + WARN_ON(1); + return -EINVAL; + } + return 0; +} + +static int ipa_assign_policy(struct ipa_sys_connect_params *in, + struct ipa_sys_context *sys) +{ + if (in->client == IPA_CLIENT_APPS_CMD_PROD) { + sys->policy = IPA_POLICY_INTR_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT); + sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify; + return 0; + } + + if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { + if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT | + SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_tx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_tx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_tx_work_func); + atomic_set(&sys->curr_polling_state, 0); + } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT | + SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_RX_SKB_SIZE; + sys->rx_pool_sz = IPA_RX_POOL_CEIL; + sys->pyld_hdlr = ipa_rx_pyld_hdlr; + sys->get_skb = ipa_get_skb_ipa_rx; + sys->free_skb = ipa_free_skb_rx; + sys->repl_hdlr = ipa_replenish_rx_cache; + } else if (IPA_CLIENT_IS_PROD(in->client)) { + sys->policy = IPA_POLICY_INTR_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT); + sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify; + } else { + IPAERR("Need to install a RX pipe hdlr\n"); + WARN_ON(1); + return -EINVAL; + } + + return 0; + } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) + return ipa_assign_policy_v2(in, sys); + + IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type); + WARN_ON(1); + return -EINVAL; +} + +/** + * ipa_tx_client_rx_notify_release() - Callback function + * which will call the user supplied callback function to + * release the skb, or release it on its own if no callback + * function was supplied + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa_tx_dp_mul + */ +static void ipa_tx_client_rx_notify_release(void *user1, int user2) +{ + struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1; + int ep_idx = user2; + + IPADBG_LOW("Received data desc anchor:%p\n", dd); + + atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc); + ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; + + /* wlan host driver waits till tx complete before unload */ + IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n", + ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc)); + IPADBG_LOW("calling client notify callback with priv:%p\n", + ipa_ctx->ep[ep_idx].priv); + + if (ipa_ctx->ep[ep_idx].client_notify) { + ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)user1); + ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++; + } +} +/** + * ipa_tx_client_rx_pkt_status() - Callback function + * which will call the user supplied callback function to + * increase the available fifo descriptor + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This 
function is supplied in ipa_tx_dp_mul
+ */
+static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
+{
+	int ep_idx = user2;
+
+	atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+
+/**
+ * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @data_desc: [in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received
+ * from the WLAN1_PROD pipe to the IPA HW.
+ *
+ * The function will send data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one. The EOT flag will be set for the
+ * last descriptor. Once this send is done from the SPS point-of-view,
+ * the IPA driver will get notified by the supplied callback -
+ * ipa_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa_sps_irq_tx_no_aggr_notify() will call the user supplied
+ * callback (supplied in ipa_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_tx_dp_mul(enum ipa_client_type src,
+		struct ipa_tx_data_desc *data_desc)
+{
+	/* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+	struct ipa_tx_data_desc *entry;
+	struct ipa_sys_context *sys;
+	struct ipa_desc desc = { 0 };
+	u32 num_desc, cnt;
+	int ep_idx;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPADBG_LOW("Received data desc anchor:%p\n", data_desc);
+
+	spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+	ep_idx = ipa2_get_ep_mapping(src);
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_send;
+	}
+	IPADBG_LOW("ep idx:%d\n", ep_idx);
+	sys = ipa_ctx->ep[ep_idx].sys;
+
+	if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_send;
+	}
+	sys->ep->wstats.rx_hd_rcvd++;
+
+	/* Calculate the number of descriptors */
+	num_desc = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		num_desc++;
+	}
+	IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
+
+	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+		IPAERR("Insufficient data descriptors available\n");
+		goto fail_send;
+	}
+
+	/* Assign callback only for last data descriptor */
+	cnt = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		IPADBG_LOW("Parsing data desc :%d\n", cnt);
+		cnt++;
+		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+			(u8)sys->ep->cfg.meta.qmap_id;
+		desc.pyld = entry->pyld_buffer;
+		desc.len = entry->pyld_len;
+		desc.type = IPA_DATA_DESC_SKB;
+		desc.user1 = data_desc;
+		desc.user2 = ep_idx;
+		IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+			entry->priv, desc.pyld, desc.len);
+
+		/* In case of last descriptor populate callback */
+		if (cnt == num_desc) {
+			IPADBG_LOW("data desc:%p\n", data_desc);
+			desc.callback = ipa_tx_client_rx_notify_release;
+		} else {
+			desc.callback = ipa_tx_client_rx_pkt_status;
+		}
+
+		IPADBG_LOW("calling ipa_send_one()\n");
+		if (ipa_send_one(sys, &desc, true)) {
+			IPAERR("fail to send skb\n");
+			sys->ep->wstats.rx_pkt_leak += (cnt-1);
+			sys->ep->wstats.rx_dp_fail++;
+			goto fail_send;
+		}
+
+		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+			atomic_dec(&sys->ep->avail_fifo_desc);
+
+		sys->ep->wstats.rx_pkts_rcvd++;
+		IPADBG_LOW("ep=%d fifo desc=%d\n",
+			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+	}
+
+	sys->ep->wstats.rx_hd_processed++;
+	spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return 0;
+
+fail_send:
+	spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return -EFAULT;
+
+}
+
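+/*
+ * Illustrative sketch (not part of this patch's logic): one way a WLAN
+ * client could chain ipa_tx_data_desc entries on an anchor descriptor and
+ * hand the burst to ipa2_tx_dp_mul(), per the kernel-doc above.  The buffer
+ * names and descriptor count are hypothetical, and IPA_CLIENT_WLAN1_PROD is
+ * assumed to be the source client.
+ *
+ *	struct ipa_tx_data_desc anchor, d[2];
+ *	int i;
+ *
+ *	INIT_LIST_HEAD(&anchor.link);
+ *	for (i = 0; i < 2; i++) {
+ *		d[i].pyld_buffer = wlan_tx_buf[i];	// hypothetical buffers
+ *		d[i].pyld_len = wlan_tx_len[i];
+ *		list_add_tail(&d[i].link, &anchor.link);
+ *	}
+ *	if (ipa2_tx_dp_mul(IPA_CLIENT_WLAN1_PROD, &anchor))
+ *		pr_err("tx burst failed\n");	// negative return on failure
+ */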
+void ipa2_free_skb(struct ipa_rx_data *data) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return; + } + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + ipa_ctx->wc_memb.total_tx_pkts_freed++; + rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data); + + ipa_skb_recycle(rx_pkt->data.skb); + (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + + list_add_tail(&rx_pkt->link, + &ipa_ctx->wc_memb.wlan_comm_desc_list); + ipa_ctx->wc_memb.wlan_comm_free_cnt++; + + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); +} + + +/* Functions added to support kernel tests */ + +int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + if (sys_in->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm client:%d\n", sys_in->client); + goto fail_gen; + } + + ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client :%d\n", sys_in->client); + goto fail_gen; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + + if (ep->valid == 1) { + if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) { + IPAERR("EP %d already allocated\n", ipa_ep_idx); + goto fail_and_disable_clocks; + } else { + if (ipa2_cfg_ep_hdr(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr)) { + IPAERR("fail to configure hdr prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa2_cfg_ep_cfg(ipa_ep_idx, + &sys_in->ipa_ep_cfg.cfg)) { + IPAERR("fail to configure cfg prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + IPAERR("client %d (ep: %d) overlay ok sys=%p\n", + sys_in->client, ipa_ep_idx, ep->sys); + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + *clnt_hdl = ipa_ep_idx; + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + return 0; + } + } + + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = true; + + result = ipa_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", + result, ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + *ipa_pipe_num = ipa_ep_idx; + *ipa_bam_hdl = ipa_ctx->bam_handle; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_gen2: +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +int ipa2_sys_teardown(u32 clnt_hdl) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + 
ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm(Either endpoint or client hdl invalid)\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_disable_data_path(clnt_hdl); + ep->valid = 0; + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl) +{ + IPAERR("GSI not supported in IPAv2"); + return -EFAULT; +} + + +/** + * ipa_adjust_ra_buff_base_sz() + * + * Return value: the largest power of two which is smaller + * than the input value + */ +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit) +{ + aggr_byte_limit += IPA_MTU; + aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT; + aggr_byte_limit--; + aggr_byte_limit |= aggr_byte_limit >> 1; + aggr_byte_limit |= aggr_byte_limit >> 2; + aggr_byte_limit |= aggr_byte_limit >> 4; + aggr_byte_limit |= aggr_byte_limit >> 8; + aggr_byte_limit |= aggr_byte_limit >> 16; + aggr_byte_limit++; + return aggr_byte_limit >> 1; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c new file mode 100644 index 0000000000000000000000000000000000000000..7de00414e3db39f7bd544f7e41f015e7da0355a9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -0,0 +1,1549 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include "ipa_i.h" + +#define IPA_FLT_TABLE_WORD_SIZE (4) +#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3) +#define IPA_FLT_BIT_MASK (0x1) +#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1) +#define IPA_FLT_STATUS_OF_ADD_FAILED (-1) +#define IPA_FLT_STATUS_OF_DEL_FAILED (-1) +#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1) + +static int ipa_generate_hw_rule_from_eq( + const struct ipa_ipfltri_rule_eq *attrib, u8 **buf) +{ + uint8_t num_offset_meq_32 = attrib->num_offset_meq_32; + uint8_t num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16; + uint8_t num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32; + uint8_t num_offset_meq_128 = attrib->num_offset_meq_128; + int i; + + if (attrib->tos_eq_present) { + *buf = ipa_write_8(attrib->tos_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->protocol_eq_present) { + *buf = ipa_write_8(attrib->protocol_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (num_offset_meq_32) { + *buf = ipa_write_8(attrib->offset_meq_32[0].offset, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[0].mask, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[0].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_32--; + } + + if (num_offset_meq_32) { + *buf = ipa_write_8(attrib->offset_meq_32[1].offset, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[1].mask, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[1].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_32--; + } + + if (num_ihl_offset_range_16) { + *buf = ipa_write_8(attrib->ihl_offset_range_16[0].offset, *buf); + *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_high, + *buf); + *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_low, + *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_range_16--; + } + + if (num_ihl_offset_range_16) { + *buf = ipa_write_8(attrib->ihl_offset_range_16[1].offset, *buf); + *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_high, + *buf); + *buf = 
ipa_write_16(attrib->ihl_offset_range_16[1].range_low, + *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_range_16--; + } + + if (attrib->ihl_offset_eq_16_present) { + *buf = ipa_write_8(attrib->ihl_offset_eq_16.offset, *buf); + *buf = ipa_write_16(attrib->ihl_offset_eq_16.value, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->ihl_offset_eq_32_present) { + *buf = ipa_write_8(attrib->ihl_offset_eq_32.offset, *buf); + *buf = ipa_write_32(attrib->ihl_offset_eq_32.value, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (num_ihl_offset_meq_32) { + *buf = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_meq_32--; + } + + /* TODO check layout of 16 byte mask and value */ + if (num_offset_meq_128) { + *buf = ipa_write_8(attrib->offset_meq_128[0].offset, *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[0].mask[i], + *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[0].value[i], + *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_128--; + } + + if (num_offset_meq_128) { + *buf = ipa_write_8(attrib->offset_meq_128[1].offset, *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[1].mask[i], + *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[1].value[i], + *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_128--; + } + + if (attrib->tc_eq_present) { + *buf = ipa_write_8(attrib->tc_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->fl_eq_present) { + *buf = ipa_write_32(attrib->fl_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (num_ihl_offset_meq_32) { + *buf = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_meq_32--; + } + + if (attrib->metadata_meq32_present) { + *buf = ipa_write_8(attrib->metadata_meq32.offset, *buf); + *buf = ipa_write_32(attrib->metadata_meq32.mask, *buf); + *buf = ipa_write_32(attrib->metadata_meq32.value, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->ipv4_frag_eq_present) + *buf = ipa_pad_to_32(*buf); + + return 0; +} + +/** + * ipa_generate_flt_hw_rule() - generates the filtering hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. 
+ * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip, + struct ipa_flt_entry *entry, u8 *buf) +{ + struct ipa_flt_rule_hw_hdr *hdr; + const struct ipa_flt_rule *rule = + (const struct ipa_flt_rule *)&entry->rule; + u16 en_rule = 0; + u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; + u8 *start; + + if (buf == NULL) { + memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE); + buf = (u8 *)tmp; + } + + start = buf; + hdr = (struct ipa_flt_rule_hw_hdr *)buf; + hdr->u.hdr.action = entry->rule.action; + hdr->u.hdr.retain_hdr = entry->rule.retain_hdr; + hdr->u.hdr.to_uc = entry->rule.to_uc; + if (entry->rt_tbl) + hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx; + else + hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx; + hdr->u.hdr.rsvd = 0; + buf += sizeof(struct ipa_flt_rule_hw_hdr); + + if (rule->eq_attrib_type) { + if (ipa_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + en_rule = rule->eq_attrib.rule_eq_bitmap; + } else { + if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + } + + IPADBG_LOW("en_rule 0x%x, action=%d, rt_idx=%d, uc=%d, retain_hdr=%d\n", + en_rule, + hdr->u.hdr.action, + hdr->u.hdr.rt_tbl_idx, + hdr->u.hdr.to_uc, + hdr->u.hdr.retain_hdr); + + hdr->u.hdr.en_rule = en_rule; + ipa_write_32(hdr->u.word, (u8 *)hdr); + + if (entry->hw_len == 0) { + entry->hw_len = buf - start; + } else if (entry->hw_len != (buf - start)) { + IPAERR("hw_len differs b/w passes passed=%x calc=%td\n", + entry->hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +/** + * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table + * @ip: the ip address family type + * @hdr_sz: header size + * + * Returns: size on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz) +{ + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + u32 total_sz = 0; + u32 rule_set_sz; + int i; + + *hdr_sz = 0; + tbl = &ipa_ctx->glob_flt_tbl[ip]; + rule_set_sz = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (ipa_generate_flt_hw_rule(ip, entry, NULL)) { + IPAERR("failed to find HW FLT rule size\n"); + return -EPERM; + } + IPADBG("glob ip %d len %d\n", ip, entry->hw_len); + rule_set_sz += entry->hw_len; + } + + if (rule_set_sz) { + tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE; + /* this rule-set uses a word in header block */ + *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; + if (!tbl->in_sys) { + /* add the terminator */ + total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE); + total_sz = (total_sz + + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) & + ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + rule_set_sz = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (ipa_generate_flt_hw_rule(ip, entry, NULL)) { + IPAERR("failed to find HW FLT rule size\n"); + return -EPERM; + } + IPADBG("pipe %d len %d\n", i, entry->hw_len); + rule_set_sz += entry->hw_len; + } + + if (rule_set_sz) { + tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE; + /* this rule-set 
uses a word in header block */ + *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; + if (!tbl->in_sys) { + /* add the terminator */ + total_sz += (rule_set_sz + + IPA_FLT_TABLE_WORD_SIZE); + total_sz = (total_sz + + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) & + ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + } + } + } + + *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; + total_sz += *hdr_sz; + IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip); + + return total_sz; +} + +static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, + u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top) +{ + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + int i; + u32 offset; + u8 *body; + struct ipa_mem_buffer flt_tbl_mem; + u8 *ftbl_membody; + + *hdr_top = 0; + body = base; + +#define IPA_WRITE_FLT_HDR(idx, val) { \ + if (idx <= 5) { \ + *((u32 *)hdr + 1 + idx) = val; \ + } else if (idx >= 6 && idx <= 10) { \ + WARN_ON(1); \ + } else if (idx >= 11 && idx <= 19) { \ + *((u32 *)hdr2 + idx - 11) = val; \ + } else { \ + WARN_ON(1); \ + } \ +} + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + + if (!list_empty(&tbl->head_flt_rule_list)) { + *hdr_top |= IPA_FLT_BIT_MASK; + + if (!tbl->in_sys) { + offset = body - base + body_start_offset; + if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) { + IPAERR("offset is not word multiple %d\n", + offset); + goto proc_err; + } + + offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + /* rule is at an offset from base */ + offset |= IPA_FLT_BIT_MASK; + + if (hdr2) + *(u32 *)hdr = offset; + else + hdr = ipa_write_32(offset, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, body)) { + IPAERR("failed to gen HW FLT rule\n"); + goto proc_err; + } + body += entry->hw_len; + } + + /* write the rule-set terminator */ + body = ipa_write_32(0, body); + if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) + /* advance body to next word boundary */ + body = body + (IPA_FLT_TABLE_WORD_SIZE - + ((long)body & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)); + } else { + if (tbl->sz == 0) { + IPAERR("tbl size is 0\n"); + WARN_ON(1); + goto proc_err; + } + + /* allocate memory for the flt tbl */ + flt_tbl_mem.size = tbl->sz; + flt_tbl_mem.base = + dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size, + &flt_tbl_mem.phys_base, GFP_KERNEL); + if (!flt_tbl_mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", + flt_tbl_mem.size); + WARN_ON(1); + goto proc_err; + } + + WARN_ON(flt_tbl_mem.phys_base & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT); + ftbl_membody = flt_tbl_mem.base; + memset(flt_tbl_mem.base, 0, flt_tbl_mem.size); + + if (hdr2) + *(u32 *)hdr = flt_tbl_mem.phys_base; + else + hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, + ftbl_membody)) { + IPAERR("failed to gen HW FLT rule\n"); + WARN_ON(1); + } + ftbl_membody += entry->hw_len; + } + + /* write the rule-set terminator */ + ftbl_membody = ipa_write_32(0, ftbl_membody); + if (tbl->curr_mem.phys_base) { + WARN_ON(tbl->prev_mem.phys_base); + tbl->prev_mem = tbl->curr_mem; + } + tbl->curr_mem = flt_tbl_mem; + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + if (!list_empty(&tbl->head_flt_rule_list)) { + /* pipe "i" is at bit "i+1" */ + *hdr_top |= (1 << (i + 1)); + + if (!tbl->in_sys) { + offset = body - base + body_start_offset; + if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) { + IPAERR("ofst is not word 
multiple %d\n", + offset); + goto proc_err; + } + offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + /* rule is at an offset from base */ + offset |= IPA_FLT_BIT_MASK; + + if (hdr2) + IPA_WRITE_FLT_HDR(i, offset) + else + hdr = ipa_write_32(offset, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, + &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, + body)) { + IPAERR("fail gen FLT rule\n"); + goto proc_err; + } + body += entry->hw_len; + } + + /* write the rule-set terminator */ + body = ipa_write_32(0, body); + if ((long)body & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) + /* advance body to next word boundary */ + body = body + (IPA_FLT_TABLE_WORD_SIZE - + ((long)body & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)); + } else { + if (tbl->sz == 0) { + IPAERR("tbl size is 0\n"); + WARN_ON(1); + goto proc_err; + } + + /* allocate memory for the flt tbl */ + flt_tbl_mem.size = tbl->sz; + flt_tbl_mem.base = + dma_alloc_coherent(ipa_ctx->pdev, + flt_tbl_mem.size, + &flt_tbl_mem.phys_base, + GFP_KERNEL); + if (!flt_tbl_mem.base) { + IPAERR("fail alloc DMA buff size %d\n", + flt_tbl_mem.size); + WARN_ON(1); + goto proc_err; + } + + WARN_ON(flt_tbl_mem.phys_base & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT); + + ftbl_membody = flt_tbl_mem.base; + memset(flt_tbl_mem.base, 0, flt_tbl_mem.size); + + if (hdr2) + IPA_WRITE_FLT_HDR(i, + flt_tbl_mem.phys_base) + else + hdr = ipa_write_32( + flt_tbl_mem.phys_base, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, + &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, + ftbl_membody)) { + IPAERR("fail gen FLT rule\n"); + WARN_ON(1); + } + ftbl_membody += entry->hw_len; + } + + /* write the rule-set terminator */ + ftbl_membody = + ipa_write_32(0, ftbl_membody); + if (tbl->curr_mem.phys_base) { + WARN_ON(tbl->prev_mem.phys_base); + tbl->prev_mem = tbl->curr_mem; + } + tbl->curr_mem = flt_tbl_mem; + } + } + } + + return 0; + +proc_err: + return -EPERM; +} + + +/** + * ipa_generate_flt_hw_tbl() - generates the filtering hardware table + * @ip: [in] the ip address family type + * @mem: [out] buffer to put the filtering table + * + * Returns: 0 on success, negative on failure + */ +static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem) +{ + u32 hdr_top = 0; + u32 hdr_sz; + u8 *hdr; + u8 *body; + u8 *base; + int res; + + res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz); + if (res < 0) { + IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res); + return res; + } + + mem->size = res; + mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size); + + if (mem->size == 0) { + IPAERR("flt tbl empty ip=%d\n", ip); + goto error; + } + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + goto error; + } + + /* build the flt tbl in the DMA buffer to submit to IPA HW */ + base = hdr = (u8 *)mem->base; + body = base + hdr_sz; + + /* write a dummy header to move cursor */ + hdr = ipa_write_32(hdr_top, hdr); + + if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, NULL, + &hdr_top)) { + IPAERR("fail to generate FLT HW table\n"); + goto proc_err; + } + + /* now write the hdr_top */ + ipa_write_32(hdr_top, base); + + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + return 0; + +proc_err: + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); +error: + return -EPERM; +} + +static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip) +{ + struct ipa_flt_tbl *tbl; + int 
i; + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + if (tbl->prev_mem.phys_base) { + IPADBG_LOW("reaping glob flt tbl (prev) ip=%d\n", ip); + dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, + tbl->prev_mem.base, tbl->prev_mem.phys_base); + memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); + } + + if (list_empty(&tbl->head_flt_rule_list)) { + if (tbl->curr_mem.phys_base) { + IPADBG_LOW("reaping glob flt tbl (curr) ip=%d\n", ip); + dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size, + tbl->curr_mem.base, + tbl->curr_mem.phys_base); + memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem)); + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + if (tbl->prev_mem.phys_base) { + IPADBG_LOW("reaping flt tbl"); + IPADBG_LOW("(prev) pipe=%d ip=%d\n", i, ip); + dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, + tbl->prev_mem.base, + tbl->prev_mem.phys_base); + memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); + } + + if (list_empty(&tbl->head_flt_rule_list)) { + if (tbl->curr_mem.phys_base) { + IPADBG_LOW("reaping flt tbl"); + IPADBG_LOW("(curr) pipe=%d ip=%d\n", + i, ip); + dma_free_coherent(ipa_ctx->pdev, + tbl->curr_mem.size, + tbl->curr_mem.base, + tbl->curr_mem.phys_base); + memset(&tbl->curr_mem, 0, + sizeof(tbl->curr_mem)); + } + } + } +} + +int __ipa_commit_flt_v1_1(enum ipa_ip_type ip) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer *mem; + void *cmd; + struct ipa_ip_v4_filter_init *v4; + struct ipa_ip_v6_filter_init *v6; + u16 avail; + u16 size; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); + if (!mem) { + IPAERR("failed to alloc memory object\n"); + goto fail_alloc_mem; + } + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_v1_RAM_V4_FLT_SIZE : + IPA_MEM_PART(v4_flt_size_ddr); + size = sizeof(struct ipa_ip_v4_filter_init); + } else { + avail = ipa_ctx->ip6_flt_tbl_lcl ? 
IPA_MEM_v1_RAM_V6_FLT_SIZE : + IPA_MEM_PART(v6_flt_size_ddr); + size = sizeof(struct ipa_ip_v6_filter_init); + } + cmd = kmalloc(size, flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_alloc_cmd; + } + + if (ipa_generate_flt_hw_tbl_v1_1(ip, mem)) { + IPAERR("fail to generate FLT HW TBL ip %d\n", ip); + goto fail_hw_tbl_gen; + } + + if (mem->size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail); + goto fail_send_cmd; + } + + if (ip == IPA_IP_v4) { + v4 = (struct ipa_ip_v4_filter_init *)cmd; + desc.opcode = IPA_IP_V4_FILTER_INIT; + v4->ipv4_rules_addr = mem->phys_base; + v4->size_ipv4_rules = mem->size; + v4->ipv4_addr = IPA_MEM_v1_RAM_V4_FLT_OFST; + } else { + v6 = (struct ipa_ip_v6_filter_init *)cmd; + desc.opcode = IPA_IP_V6_FILTER_INIT; + v6->ipv6_rules_addr = mem->phys_base; + v6->size_ipv6_rules = mem->size; + v6->ipv6_addr = IPA_MEM_v1_RAM_V6_FLT_OFST; + } + + desc.pyld = cmd; + desc.len = size; + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + goto fail_send_cmd; + } + + __ipa_reap_sys_flt_tbls(ip); + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); + kfree(cmd); + kfree(mem); + + return 0; + +fail_send_cmd: + if (mem->phys_base) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +fail_hw_tbl_gen: + kfree(cmd); +fail_alloc_cmd: + kfree(mem); +fail_alloc_mem: + + return -EPERM; +} + +static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1, + struct ipa_mem_buffer *head2) +{ + int i; + u32 hdr_sz; + int num_words; + u32 *entr; + u32 body_start_offset; + u32 hdr_top; + int res; + + if (ip == IPA_IP_v4) + body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) - + IPA_MEM_PART(v4_flt_ofst); + else + body_start_offset = IPA_MEM_PART(apps_v6_flt_ofst) - + IPA_MEM_PART(v6_flt_ofst); + + num_words = 7; + head1->size = num_words * 4; + head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size, + &head1->phys_base, GFP_KERNEL); + if (!head1->base) { + IPAERR("fail to alloc DMA buff of size %d\n", head1->size); + goto err; + } + entr = (u32 *)head1->base; + for (i = 0; i < num_words; i++) { + *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; + entr++; + } + + num_words = 9; + head2->size = num_words * 4; + head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size, + &head2->phys_base, GFP_KERNEL); + if (!head2->base) { + IPAERR("fail to alloc DMA buff of size %d\n", head2->size); + goto head_err; + } + entr = (u32 *)head2->base; + for (i = 0; i < num_words; i++) { + *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; + entr++; + } + + res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz); + if (res < 0) { + IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res); + goto body_err; + } + + mem->size = res; + mem->size -= hdr_sz; + mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size); + + if (mem->size) { + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", + mem->size); + goto body_err; + } + } + + if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base, + body_start_offset, head2->base, &hdr_top)) { + IPAERR("fail to generate FLT HW table\n"); + goto proc_err; + } + + IPADBG("HEAD1\n"); + IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size); + IPADBG("HEAD2\n"); + IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size); + if 
(mem->size) { + IPADBG("BODY\n"); + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + } + + return 0; + +proc_err: + if (mem->size) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +body_err: + dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base, + head2->phys_base); +head_err: + dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base, + head1->phys_base); +err: + return -EPERM; +} + +int __ipa_commit_flt_v2(enum ipa_ip_type ip) +{ + struct ipa_desc *desc; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd; + struct ipa_mem_buffer body; + struct ipa_mem_buffer head1; + struct ipa_mem_buffer head2; + int rc = 0; + u32 local_addrb; + u32 local_addrh; + bool lcl; + int num_desc = 0; + int i; + u16 avail; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC); + if (desc == NULL) { + IPAERR("fail to alloc desc blob ip %d\n", ip); + rc = -ENOMEM; + goto fail_desc; + } + + cmd = kzalloc(16 * sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("fail to alloc cmd blob ip %d\n", ip); + rc = -ENOMEM; + goto fail_imm; + } + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_flt_tbl_lcl ? + IPA_MEM_PART(apps_v4_flt_size) : + IPA_MEM_PART(v4_flt_size_ddr); + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + 4; + local_addrb = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_flt_ofst); + lcl = ipa_ctx->ip4_flt_tbl_lcl; + } else { + avail = ipa_ctx->ip6_flt_tbl_lcl ? + IPA_MEM_PART(apps_v6_flt_size) : + IPA_MEM_PART(v6_flt_size_ddr); + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + 4; + local_addrb = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_flt_ofst); + lcl = ipa_ctx->ip6_flt_tbl_lcl; + } + + if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) { + IPAERR("fail to generate FLT HW TBL ip %d\n", ip); + rc = -EFAULT; + goto fail_gen; + } + + if (body.size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", body.size, avail); + goto fail_send_cmd; + } + + cmd[num_desc].size = 4; + cmd[num_desc].system_addr = head1.phys_base; + cmd[num_desc].local_addr = local_addrh; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + + for (i = 0; i < 6; i++) { + if (ipa_ctx->skip_ep_cfg_shadow[i]) { + IPADBG_LOW("skip %d\n", i); + continue; + } + + if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) == i || + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) == i || + ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD) == i || + (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i + && ipa_ctx->modem_cfg_emb_pipe_flt)) { + IPADBG_LOW("skip %d\n", i); + continue; + } + + if (ip == IPA_IP_v4) { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + + 8 + i * 4; + } else { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + + 8 + i * 4; + } + cmd[num_desc].size = 4; + cmd[num_desc].system_addr = head1.phys_base + 4 + i * 4; + cmd[num_desc].local_addr = local_addrh; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + } + + for (i = 11; i < ipa_ctx->ipa_num_pipes; i++) { + if (ipa_ctx->skip_ep_cfg_shadow[i]) { + IPADBG_LOW("skip %d\n", i); + continue; + } + if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) 
== i && + ipa_ctx->modem_cfg_emb_pipe_flt) { + IPADBG_LOW("skip %d\n", i); + continue; + } + if (ip == IPA_IP_v4) { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + + 13 * 4 + (i - 11) * 4; + } else { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + + 13 * 4 + (i - 11) * 4; + } + cmd[num_desc].size = 4; + cmd[num_desc].system_addr = head2.phys_base + (i - 11) * 4; + cmd[num_desc].local_addr = local_addrh; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + } + + if (lcl) { + cmd[num_desc].size = body.size; + cmd[num_desc].system_addr = body.phys_base; + cmd[num_desc].local_addr = local_addrb; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + + if (ipa_send_cmd(num_desc, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd; + } + } else { + if (ipa_send_cmd(num_desc, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd; + } + } + + __ipa_reap_sys_flt_tbls(ip); + +fail_send_cmd: + if (body.size) + dma_free_coherent(ipa_ctx->pdev, body.size, body.base, + body.phys_base); + dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base, + head1.phys_base); + dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base, + head2.phys_base); +fail_gen: + kfree(cmd); +fail_imm: + kfree(desc); +fail_desc: + return rc; +} + +static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl, bool user) +{ + struct ipa_flt_entry *entry; + struct ipa_rt_tbl *rt_tbl = NULL; + int id; + + if (rule->action != IPA_PASS_TO_EXCEPTION) { + if (!rule->eq_attrib_type) { + if (!rule->rt_tbl_hdl) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + + rt_tbl = ipa_id_find(rule->rt_tbl_hdl); + if (rt_tbl == NULL) { + IPAERR_RL("RT tbl not found\n"); + goto error; + } + + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { + IPAERR_RL("RT table cookie is invalid\n"); + goto error; + } + } else { + if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ? 
+ IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + } else { + if (rule->rt_tbl_idx > 0) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + + entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc FLT rule object\n"); + goto error; + } + INIT_LIST_HEAD(&entry->link); + entry->rule = *rule; + entry->cookie = IPA_FLT_COOKIE; + entry->rt_tbl = rt_tbl; + entry->tbl = tbl; + if (add_rear) { + if (tbl->sticky_rear) + list_add_tail(&entry->link, + tbl->head_flt_rule_list.prev); + else + list_add_tail(&entry->link, &tbl->head_flt_rule_list); + } else { + list_add(&entry->link, &tbl->head_flt_rule_list); + } + tbl->rule_cnt++; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + *rule_hdl = id; + entry->id = id; + entry->ipacm_installed = user; + IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); + + return 0; +ipa_insert_failed: + tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + list_del(&entry->link); + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); +error: + return -EPERM; +} + +static int __ipa_del_flt_rule(u32 rule_hdl) +{ + struct ipa_flt_entry *entry; + int id; + + entry = ipa_id_find(rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + id = entry->id; + + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + IPADBG_LOW("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt); + entry->cookie = 0; + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + + return 0; +} + +static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, + enum ipa_ip_type ip) +{ + struct ipa_flt_entry *entry; + struct ipa_rt_tbl *rt_tbl = NULL; + + entry = ipa_id_find(frule->rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + + if (frule->rule.action != IPA_PASS_TO_EXCEPTION) { + if (!frule->rule.eq_attrib_type) { + if (!frule->rule.rt_tbl_hdl) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + + rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl); + if (rt_tbl == NULL) { + IPAERR_RL("RT tbl not found\n"); + goto error; + } + + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { + IPAERR_RL("RT table cookie is invalid\n"); + goto error; + } + } else { + if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ? 
+ IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + } else { + if (frule->rule.rt_tbl_idx > 0) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + + entry->rule = frule->rule; + entry->rt_tbl = rt_tbl; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + entry->hw_len = 0; + + return 0; + +error: + return -EPERM; +} + +static int __ipa_add_global_flt_rule(enum ipa_ip_type ip, + const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl) +{ + struct ipa_flt_tbl *tbl; + + if (rule == NULL || rule_hdl == NULL) { + IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl); + + return -EINVAL; + } + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + IPADBG_LOW("add global flt rule ip=%d\n", ip); + + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, false); +} + +static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl, bool user) +{ + struct ipa_flt_tbl *tbl; + int ipa_ep_idx; + + if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule, + rule_hdl, ep); + + return -EINVAL; + } + ipa_ep_idx = ipa2_get_ep_mapping(ep); + if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) { + IPAERR_RL("ep not valid ep=%d\n", ep); + return -EINVAL; + } + if (ipa_ctx->ep[ipa_ep_idx].valid == 0) + IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx); + + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip]; + IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); + + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user); +} + +/** + * ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of filtering rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + return ipa2_add_flt_rule_usr(rules, false); +} + +/** + * ipa2_add_flt_rule_usr() - Add the specified filtering rules + * to SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ + int i; + int result; + + if (rules == NULL || rules->num_rules == 0 || + rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + if (rules->global) + result = __ipa_add_global_flt_rule(rules->ip, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].flt_rule_hdl); + else + result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].flt_rule_hdl, + user_only); + if (result) { + IPAERR_RL("failed to add flt rule %d\n", i); + rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_del_flt_rule() - Remove the specified filtering rules from SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called 
from atomic context + */ +int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del rt rule %i\n", i); + hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_mdfy_flt_rule() - Modify the specified filtering rules in SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) { + IPAERR_RL("failed to mdfy rt rule %i\n", i); + hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + + +/** + * ipa2_commit_flt() - Commit the current SW filtering table of specified type + * to IPA HW + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_commit_flt(enum ipa_ip_type ip) +{ + int result; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + + if (ipa_ctx->ctrl->ipa_commit_flt(ip)) { + result = -EPERM; + goto bail; + } + result = 0; + +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_reset_flt() - Reset the current SW filtering table of specified type + * (does not commit to HW) + * @ip: [in] the family of routing tables + * @user_only: [in] indicate rules deleted by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only) +{ + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + struct ipa_flt_entry *next; + int i; + int id; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + mutex_lock(&ipa_ctx->lock); + IPADBG("reset flt ip=%d\n", ip); + list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) { + if (ipa_id_find(entry->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + if ((ip == IPA_IP_v4 && + entry->rule.attrib.attrib_mask == IPA_FLT_PROTOCOL && + entry->rule.attrib.u.v4.protocol == + IPA_INVALID_L4_PROTOCOL) || + (ip == IPA_IP_v6 && + entry->rule.attrib.attrib_mask == IPA_FLT_NEXT_HDR && + entry->rule.attrib.u.v6.next_hdr == + IPA_INVALID_L4_PROTOCOL)) + continue; + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->flt_rule_cache, 
entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, + link) { + if (ipa_id_find(entry->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->flt_rule_cache, + entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + } + + /* commit the change to IPA-HW */ + if (ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4) || + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6)) { + IPAERR_RL("fail to commit flt-rule\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa_ctx->lock); + return -EPERM; + } + mutex_unlock(&ipa_ctx->lock); + return 0; +} + +void ipa_install_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa_flt_tbl *tbl; + struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx]; + struct ipa_flt_rule rule; + + memset(&rule, 0, sizeof(rule)); + + mutex_lock(&ipa_ctx->lock); + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, + &ep->dflt_flt4_rule_hdl, false); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); + tbl->sticky_rear = true; + + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, + &ep->dflt_flt6_rule_hdl, false); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); + tbl->sticky_rear = true; + mutex_unlock(&ipa_ctx->lock); +} + +void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa_flt_tbl *tbl; + struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx]; + + mutex_lock(&ipa_ctx->lock); + if (ep->dflt_flt4_rule_hdl) { + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt4_rule_hdl = 0; + } + if (ep->dflt_flt6_rule_hdl) { + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt6_rule_hdl = 0; + } + mutex_unlock(&ipa_ctx->lock); +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c new file mode 100644 index 0000000000000000000000000000000000000000..d67b8744a65db9dfffbf543044f62a606b27c0f2 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -0,0 +1,1580 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include "ipa_i.h" + +static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60}; +static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64}; + +#define HDR_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_L2_MAX) + +#define HDR_PROC_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_PROC_MAX) + +/* uCP command numbers */ +#define IPA_HDR_UCP_802_3_TO_802_3 6 +#define IPA_HDR_UCP_802_3_TO_ETHII 7 +#define IPA_HDR_UCP_ETHII_TO_802_3 8 +#define IPA_HDR_UCP_ETHII_TO_ETHII 9 + +/** + * ipa_generate_hdr_hw_tbl() - generates the headers table + * @mem: [out] buffer to put the header table + * + * Returns: 0 on success, negative on failure + */ +static int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem) +{ + struct ipa_hdr_entry *entry; + + mem->size = ipa_ctx->hdr_tbl.end; + + if (mem->size == 0) { + IPAERR("hdr tbl empty\n"); + return -EPERM; + } + IPADBG_LOW("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end); + + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (entry->is_hdr_proc_ctx) + continue; + IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len, + entry->offset_entry->offset); + memcpy(mem->base + entry->offset_entry->offset, entry->hdr, + entry->hdr_len); + } + + return 0; +} + +static void ipa_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, + u32 hdr_base_addr) +{ + struct ipa_hdr_proc_ctx_entry *entry; + + list_for_each_entry(entry, + &ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + IPADBG_LOW("processing type %d ofst=%d\n", + entry->type, entry->offset_entry->offset); + if (entry->type == IPA_HDR_PROC_NONE) { + struct ipa_hdr_proc_ctx_add_hdr_seq *ctx; + + ctx = (struct ipa_hdr_proc_ctx_add_hdr_seq *) + (mem->base + entry->offset_entry->offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = entry->hdr->hdr_len; + ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ? + entry->hdr->phys_base : + hdr_base_addr + + entry->hdr->offset_entry->offset; + IPADBG_LOW("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else { + struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *) + (mem->base + entry->offset_entry->offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = entry->hdr->hdr_len; + ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ? 
+ entry->hdr->phys_base : + hdr_base_addr + + entry->hdr->offset_entry->offset; + IPADBG_LOW("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->cmd.length = 0; + if (entry->type == IPA_HDR_PROC_ETHII_TO_ETHII) + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII; + else if (entry->type == IPA_HDR_PROC_ETHII_TO_802_3) + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3; + else if (entry->type == IPA_HDR_PROC_802_3_TO_ETHII) + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII; + else if (entry->type == IPA_HDR_PROC_802_3_TO_802_3) + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3; + IPADBG_LOW("command id %d\n", ctx->cmd.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } + } +} + +/** + * ipa_generate_hdr_proc_ctx_hw_tbl() - + * generates the headers processing context table. + * @mem: [out] buffer to put the processing context table + * @aligned_mem: [out] actual processing context table (with alignment). + * Processing context table needs to be 8 Bytes aligned. + * + * Returns: 0 on success, negative on failure + */ +static int ipa_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem) +{ + u32 hdr_base_addr; + + mem->size = (ipa_ctx->hdr_proc_ctx_tbl.end) ? : 4; + + /* make sure table is aligned */ + mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + + IPADBG_LOW("tbl_sz=%d\n", ipa_ctx->hdr_proc_ctx_tbl.end); + + mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + aligned_mem->phys_base = + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base); + aligned_mem->base = mem->base + + (aligned_mem->phys_base - mem->phys_base); + aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + memset(aligned_mem->base, 0, aligned_mem->size); + hdr_base_addr = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) : + hdr_sys_addr; + ipa_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr); + + return 0; +} + +/* + * __ipa_commit_hdr() commits hdr to hardware + * This function needs to be called with a locked mutex. + */ +int __ipa_commit_hdr_v1_1(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer *mem; + struct ipa_hdr_init_local *cmd; + u16 len; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); + if (!mem) { + IPAERR("failed to alloc memory object\n"); + goto fail_alloc_mem; + } + + /* the immediate command param size is same for both local and system */ + len = sizeof(struct ipa_hdr_init_local); + + /* + * we can use init_local ptr for init_system due to layout of the + * struct + */ + cmd = kmalloc(len, flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_alloc_cmd; + } + + if (ipa_generate_hdr_hw_tbl(mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto fail_hw_tbl_gen; + } + + if (ipa_ctx->hdr_tbl_lcl) { + if (mem->size > IPA_MEM_v1_RAM_HDR_SIZE) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, + IPA_MEM_v1_RAM_HDR_SIZE); + goto fail_send_cmd; + } + } else { + if (mem->size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto fail_send_cmd; + } + } + + cmd->hdr_table_src_addr = mem->phys_base; + if (ipa_ctx->hdr_tbl_lcl) { + cmd->size_hdr_table = mem->size; + cmd->hdr_table_dst_addr = IPA_MEM_v1_RAM_HDR_OFST; + desc.opcode = IPA_HDR_INIT_LOCAL; + } else { + desc.opcode = IPA_HDR_INIT_SYSTEM; + } + desc.pyld = cmd; + desc.len = sizeof(struct ipa_hdr_init_local); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + goto fail_send_cmd; + } + + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); + } else { + if (ipa_ctx->hdr_mem.phys_base) { + dma_free_coherent(ipa_ctx->pdev, ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + } + ipa_ctx->hdr_mem = *mem; + } + kfree(cmd); + kfree(mem); + + return 0; + +fail_send_cmd: + if (mem->base) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +fail_hw_tbl_gen: + kfree(cmd); +fail_alloc_cmd: + kfree(mem); +fail_alloc_mem: + + return -EPERM; +} + +int __ipa_commit_hdr_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_hdr_init_system *cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + int rc = -EFAULT; + + if (ipa_generate_hdr_hw_tbl(&mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto end; + } + + if (ipa_ctx->hdr_tbl_lcl) { + if (mem.size > IPA_MEM_PART(apps_hdr_size)) { + IPAERR("tbl too big, needed %d avail %d\n", mem.size, + IPA_MEM_PART(apps_hdr_size)); + goto fail_send_cmd; + } else { + dma_cmd = kzalloc(sizeof(*dma_cmd), flag); + if (dma_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + dma_cmd->system_addr = mem.phys_base; + dma_cmd->size = mem.size; + dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_hdr_ofst); + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = (void *)dma_cmd; + desc.len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + } + } else { + if (mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big, needed %d avail %d\n", mem.size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto fail_send_cmd; + } else { + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("fail to alloc hdr init cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + cmd->hdr_table_addr = mem.phys_base; + desc.opcode = IPA_HDR_INIT_SYSTEM; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hdr_init_system); + } + } + + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) + IPAERR("fail to send immediate command\n"); + else + rc = 0; + + kfree(dma_cmd); + kfree(cmd); + +fail_send_cmd: + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_mem.phys_base) + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + ipa_ctx->hdr_mem = mem; + } else { + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + } + } + +end: + return rc; +} + +int __ipa_commit_hdr_v2_5(void) +{ + struct ipa_desc desc[2]; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer ctx_mem; + struct ipa_mem_buffer aligned_ctx_mem; + struct ipa_hdr_init_system *hdr_init_cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL; + struct ipa_register_write *reg_write_cmd = NULL; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + int rc = -EFAULT; + u32 proc_ctx_size; + u32 proc_ctx_ofst; + u32 proc_ctx_size_ddr; + + memset(desc, 0, 2 * sizeof(struct ipa_desc)); + + if (ipa_generate_hdr_hw_tbl(&hdr_mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto end; + } + + if (ipa_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem, + &aligned_ctx_mem)) { + IPAERR("fail to generate HDR PROC CTX HW TBL\n"); + goto end; + } + + if (ipa_ctx->hdr_tbl_lcl) { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size)); + goto fail_send_cmd1; + } else { + dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag); + if (dma_cmd_hdr == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + dma_cmd_hdr->system_addr = hdr_mem.phys_base; + dma_cmd_hdr->size = hdr_mem.size; + dma_cmd_hdr->local_addr = + ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_hdr_ofst); + desc[0].opcode = IPA_DMA_SHARED_MEM; + desc[0].pyld = (void *)dma_cmd_hdr; + desc[0].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + } + } else { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto fail_send_cmd1; + } else { + hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd), + flag); + if (hdr_init_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + hdr_init_cmd->hdr_table_addr = hdr_mem.phys_base; + desc[0].opcode = IPA_HDR_INIT_SYSTEM; + desc[0].pyld = (void *)hdr_init_cmd; + desc[0].len = sizeof(struct ipa_hdr_init_system); + } + } + desc[0].type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size); + + proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size); + proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst); + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { + if (aligned_ctx_mem.size > proc_ctx_size) { + IPAERR("tbl too big needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size); + goto fail_send_cmd1; + } else { + dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx), + flag); + if (dma_cmd_ctx == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + dma_cmd_ctx->system_addr = aligned_ctx_mem.phys_base; + dma_cmd_ctx->size = aligned_ctx_mem.size; + dma_cmd_ctx->local_addr = + ipa_ctx->smem_restricted_bytes + + proc_ctx_ofst; + desc[1].opcode = IPA_DMA_SHARED_MEM; + desc[1].pyld = (void *)dma_cmd_ctx; + desc[1].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + } + } else { + proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (aligned_ctx_mem.size > proc_ctx_size_ddr) { + IPAERR("tbl too big, needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size_ddr); + goto fail_send_cmd1; + } else { + reg_write_cmd = kzalloc(sizeof(*reg_write_cmd), + flag); + if (reg_write_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + reg_write_cmd->offset = + IPA_SYS_PKT_PROC_CNTXT_BASE_OFST; + reg_write_cmd->value = aligned_ctx_mem.phys_base; + reg_write_cmd->value_mask = + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1); + desc[1].pyld = (void *)reg_write_cmd; + desc[1].opcode = IPA_REGISTER_WRITE; + desc[1].len = sizeof(*reg_write_cmd); + } + } + desc[1].type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size); + + if (ipa_send_cmd(2, desc)) + IPAERR("fail to send immediate command\n"); + else + rc = 0; + +fail_send_cmd1: + + 
kfree(dma_cmd_hdr); + kfree(hdr_init_cmd); + kfree(dma_cmd_ctx); + kfree(reg_write_cmd); + + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, + ctx_mem.base, + ctx_mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_proc_ctx_mem.phys_base) + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_proc_ctx_mem.size, + ipa_ctx->hdr_proc_ctx_mem.base, + ipa_ctx->hdr_proc_ctx_mem.phys_base); + ipa_ctx->hdr_proc_ctx_mem = ctx_mem; + } else { + dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, + ctx_mem.base, + ctx_mem.phys_base); + } + } + + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, + hdr_mem.base, + hdr_mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_mem.phys_base) + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + ipa_ctx->hdr_mem = hdr_mem; + } else { + dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, + hdr_mem.base, + hdr_mem.phys_base); + } + } +end: + return rc; +} + +/** + * __ipa_commit_hdr_v2_6L() - Commits a header to the IPA HW. + * + * This function needs to be called with a locked mutex. + */ +int __ipa_commit_hdr_v2_6L(void) +{ + /* Same implementation as IPAv2 */ + return __ipa_commit_hdr_v2(); +} + +static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, + bool add_ref_hdr, bool user_only) +{ + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *entry; + struct ipa_hdr_proc_ctx_offset_entry *offset = NULL; + u32 bin; + struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; + int id; + int needed_len; + int mem_size; + + IPADBG_LOW("processing type %d hdr_hdl %d\n", + proc_ctx->type, proc_ctx->hdr_hdl); + + if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) { + IPAERR_RL("invalid processing type %d\n", proc_ctx->type); + return -EINVAL; + } + + hdr_entry = ipa_id_find(proc_ctx->hdr_hdl); + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("hdr_hdl is invalid\n"); + return -EINVAL; + } + + entry = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc proc_ctx object\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&entry->link); + + entry->type = proc_ctx->type; + entry->hdr = hdr_entry; + if (add_ref_hdr) + hdr_entry->ref_cnt++; + entry->cookie = IPA_PROC_HDR_COOKIE; + entry->ipacm_installed = user_only; + + needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ? + sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) : + sizeof(struct ipa_hdr_proc_ctx_add_hdr_cmd_seq); + + if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) { + bin = IPA_HDR_PROC_CTX_BIN0; + } else if (needed_len <= + ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) { + bin = IPA_HDR_PROC_CTX_BIN1; + } else { + IPAERR_RL("unexpected needed len %d\n", needed_len); + WARN_ON(1); + goto bad_len; + } + + mem_size = (ipa_ctx->hdr_proc_ctx_tbl_lcl) ? 
+ IPA_MEM_PART(apps_hdr_proc_ctx_size) : + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (list_empty(&htbl->head_free_offset_list[bin])) { + if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) { + IPAERR_RL("hdr proc ctx table overflow\n"); + goto bad_len; + } + + offset = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc offset object\n"); + goto bad_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which are set + * in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + offset->ipacm_installed = user_only; + htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + } else { + /* get the first free slot */ + offset = + list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa_hdr_proc_ctx_offset_entry, link); + offset->ipacm_installed = user_only; + list_move(&offset->link, &htbl->head_offset_list[bin]); + } + + entry->offset_entry = offset; + list_add(&entry->link, &htbl->head_proc_ctx_entry_list); + htbl->proc_ctx_cnt++; + IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len, + htbl->proc_ctx_cnt, offset->offset); + + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to alloc id\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + proc_ctx->proc_ctx_hdl = id; + entry->ref_cnt++; + + return 0; + +ipa_insert_failed: + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + +bad_len: + if (add_ref_hdr) + hdr_entry->ref_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry); + return -EPERM; +} + + +static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user) +{ + struct ipa_hdr_entry *entry; + struct ipa_hdr_offset_entry *offset = NULL; + u32 bin; + struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; + int id; + int mem_size; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) { + IPAERR_RL("bad parm\n"); + goto error; + } + + if (!HDR_TYPE_IS_VALID(hdr->type)) { + IPAERR_RL("invalid hdr type %d\n", hdr->type); + goto error; + } + + entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, flag); + if (!entry) { + IPAERR("failed to alloc hdr object\n"); + goto error; + } + + INIT_LIST_HEAD(&entry->link); + + memcpy(entry->hdr, hdr->hdr, hdr->hdr_len); + entry->hdr_len = hdr->hdr_len; + strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX); + entry->is_partial = hdr->is_partial; + entry->type = hdr->type; + entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; + entry->eth2_ofst = hdr->eth2_ofst; + entry->cookie = IPA_HDR_COOKIE; + entry->ipacm_installed = user; + + if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) + bin = IPA_HDR_BIN0; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1]) + bin = IPA_HDR_BIN1; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2]) + bin = IPA_HDR_BIN2; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3]) + bin = IPA_HDR_BIN3; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4]) + bin = IPA_HDR_BIN4; + else { + IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len); + goto bad_hdr_len; + } + + mem_size = (ipa_ctx->hdr_tbl_lcl) ? 
IPA_MEM_PART(apps_hdr_size) : + IPA_MEM_PART(apps_hdr_size_ddr); + + if (list_empty(&htbl->head_free_offset_list[bin])) { + /* + * if header does not fit to table, place it in DDR + * This is valid for IPA 2.5 and on, + * with the exception of IPA2.6L. + */ + if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) { + if (ipa_ctx->ipa_hw_type != IPA_HW_v2_5) { + IPAERR("not enough room for header\n"); + goto bad_hdr_len; + } else { + entry->is_hdr_proc_ctx = true; + entry->phys_base = dma_map_single(ipa_ctx->pdev, + entry->hdr, + entry->hdr_len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + entry->phys_base)) { + IPAERR("dma_map_single failureed\n"); + goto fail_dma_mapping; + } + } + } else { + entry->is_hdr_proc_ctx = false; + offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc hdr offset object\n"); + goto bad_hdr_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which + * are set in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + htbl->end += ipa_hdr_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + offset->ipacm_installed = user; + } + } else { + entry->is_hdr_proc_ctx = false; + /* get the first free slot */ + offset = + list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa_hdr_offset_entry, link); + list_move(&offset->link, &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + offset->ipacm_installed = user; + } + + list_add(&entry->link, &htbl->head_hdr_entry_list); + htbl->hdr_cnt++; + if (entry->is_hdr_proc_ctx) + IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n", + hdr->hdr_len, + htbl->hdr_cnt, + &entry->phys_base); + else + IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", + hdr->hdr_len, + htbl->hdr_cnt, + entry->offset_entry->offset); + + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to alloc id\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + hdr->hdr_hdl = id; + entry->ref_cnt++; + + if (entry->is_hdr_proc_ctx) { + struct ipa_hdr_proc_ctx_add proc_ctx; + + IPADBG("adding processing context for header %s\n", hdr->name); + proc_ctx.type = IPA_HDR_PROC_NONE; + proc_ctx.hdr_hdl = id; + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) { + IPAERR("failed to add hdr proc ctx\n"); + goto fail_add_proc_ctx; + } + entry->proc_ctx = ipa_id_find(proc_ctx.proc_ctx_hdl); + } + + return 0; + +fail_add_proc_ctx: + entry->ref_cnt--; + hdr->hdr_hdl = 0; + ipa_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } + htbl->hdr_cnt--; + list_del(&entry->link); + +fail_dma_mapping: + entry->is_hdr_proc_ctx = false; +bad_hdr_len: + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_cache, entry); +error: + return -EPERM; +} + +static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, + bool release_hdr, bool by_user) +{ + struct ipa_hdr_proc_ctx_entry *entry; + struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; + + entry = ipa_id_find(proc_ctx_hdl); + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + IPADBG("del ctx proc cnt=%d ofst=%d\n", + htbl->proc_ctx_cnt, entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("proc_ctx 
already deleted by user\n"); + return -EINVAL; + } + + if (by_user) + entry->user_deleted = true; + + if (--entry->ref_cnt) { + IPADBG("proc_ctx_hdl %x ref_cnt %d\n", + proc_ctx_hdl, entry->ref_cnt); + return 0; + } + + if (release_hdr) + __ipa_del_hdr(entry->hdr->id, false); + + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + list_del(&entry->link); + htbl->proc_ctx_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(proc_ctx_hdl); + + return 0; +} + + +int __ipa_del_hdr(u32 hdr_hdl, bool by_user) +{ + struct ipa_hdr_entry *entry; + struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; + + entry = ipa_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (entry->is_hdr_proc_ctx) + IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n", + entry->hdr_len, htbl->hdr_cnt, &entry->phys_base); + else + IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len, + htbl->hdr_cnt, entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("hdr already deleted by user\n"); + return -EINVAL; + } + + if (by_user) { + if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) { + IPADBG("Trying to delete hdr %s offset=%u\n", + entry->name, entry->offset_entry->offset); + if (!entry->offset_entry->offset) { + IPAERR("User cannot delete default header\n"); + return -EPERM; + } + } + entry->user_deleted = true; + } + + if (--entry->ref_cnt) { + IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt); + return 0; + } + + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + __ipa_del_hdr_proc_ctx(entry->proc_ctx->id, false, false); + } else { + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(hdr_hdl); + + return 0; +} + +/** + * ipa2_add_hdr() - add the specified headers to SW and optionally commit them + * to IPA HW + * @hdrs: [inout] set of headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return ipa2_add_hdr_usr(hdrs, false); +} + +/** + * ipa2_add_hdr_usr() - add the specified headers to SW + * and optionally commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate installed from user + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ + int i; + int result = -EFAULT; + + if (unlikely(!ipa_ctx)) { + IPAERR_RL("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (hdrs == NULL || hdrs->num_hdrs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + IPADBG("adding %d headers to IPA driver internal data struct\n", + hdrs->num_hdrs); + for (i = 0; i < hdrs->num_hdrs; i++) { + if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) { + IPAERR_RL("failed to add hdr %d\n", i); + 
hdrs->hdr[i].status = -1;
+ } else {
+ hdrs->hdr[i].status = 0;
+ }
+ }
+
+ if (hdrs->commit) {
+ IPADBG("committing all headers to IPA core\n");
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+/**
+ * ipa2_del_hdr_by_user() - Remove the specified headers
+ * from SW and optionally commit them to IPA HW
+ * @hdls: [inout] set of headers to delete
+ * @by_user: Operation requested by user?
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR_RL("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
+ IPAERR_RL("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_del_hdr() - Remove the specified headers from SW
+ * and optionally commit them to IPA HW
+ * @hdls: [inout] set of headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ return ipa2_del_hdr_by_user(hdls, false);
+}
+
+/**
+ * ipa2_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs: [inout] set of processing context headers to add
+ * @user_only: [in] indicate installed by user-space module
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+ IPAERR_RL("Processing context not supported on IPA HW %d\n",
+ ipa_ctx->ipa_hw_type);
+ return -EFAULT;
+ }
+
+ if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+ IPAERR_RL("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("adding %d header processing contexts to IPA driver\n",
+ proc_ctxs->num_proc_ctxs);
+ for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
+ true, user_only)) {
+ IPAERR_RL("failed to add hdr proc ctx %d\n", i);
+ proc_ctxs->proc_ctx[i].status = -1;
+ } else {
+ proc_ctxs->proc_ctx[i].status = 0;
+ }
+ }
+
+ if (proc_ctxs->commit) {
+ IPADBG("committing all headers to IPA core\n");
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
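+/*
+ * Usage sketch (illustrative only, not part of this patch): attaching a
+ * processing context to a header handle previously returned by
+ * ipa2_add_hdr(). Only struct and field names already referenced in this
+ * file are used; the allocation sizing assumes proc_ctx[] is a trailing
+ * array of struct ipa_hdr_proc_ctx_add, as the loop above indexes it, and
+ * hdr_hdl, ret and new_hdl are hypothetical caller locals.
+ *
+ *	struct ipa_ioc_add_hdr_proc_ctx *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_hdr_proc_ctx_add),
+ *		      GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->num_proc_ctxs = 1;
+ *	req->proc_ctx[0].type = IPA_HDR_PROC_NONE;
+ *	req->proc_ctx[0].hdr_hdl = hdr_hdl;
+ *	ret = ipa2_add_hdr_proc_ctx(req, false);
+ *	if (!ret && !req->proc_ctx[0].status)
+ *		new_hdl = req->proc_ctx[0].proc_ctx_hdl;
+ *	kfree(req);
+ */
+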
+/**
+ * ipa2_del_hdr_proc_ctx_by_user() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls: [inout] set of processing context headers to delete
+ * @by_user: Operation requested by user?
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
+ bool by_user)
+{
+ int i;
+ int result;
+
+ if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+ IPAERR("Processing context not supported on IPA HW %d\n",
+ ipa_ctx->ipa_hw_type);
+ return -EFAULT;
+ }
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR_RL("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
+ IPAERR_RL("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls: [inout] set of processing context headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+ return ipa2_del_hdr_proc_ctx_by_user(hdls, false);
+}
+
+/**
+ * ipa2_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_commit_hdr(void)
+{
+ int result = -EFAULT;
+
+ /*
+ * issue a commit on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa2_commit_rt(IPA_IP_v4))
+ return -EPERM;
+ if (ipa2_commit_rt(IPA_IP_v6))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+
+/**
+ * ipa2_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * @user_only: [in] indicate delete rules installed by userspace
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_reset_hdr(bool user_only)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_entry *next;
+ struct ipa_hdr_proc_ctx_entry *ctx_entry;
+ struct ipa_hdr_proc_ctx_entry *ctx_next;
+ struct ipa_hdr_offset_entry *off_entry;
+ struct ipa_hdr_offset_entry *off_next;
+ struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry;
+ struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+ struct ipa_hdr_proc_ctx_tbl *htbl_proc = &ipa_ctx->hdr_proc_ctx_tbl;
+ int i;
+
+ /*
+ * issue a reset on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa2_reset_rt(IPA_IP_v4, user_only))
+ IPAERR("fail to reset v4 rt\n");
+ if (ipa2_reset_rt(IPA_IP_v6, user_only))
+ IPAERR("fail to reset v6 rt\n");
+
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset hdr\n");
+ list_for_each_entry_safe(entry, next,
+ &ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+ /* do not remove the default header */
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ IPADBG("Trying to remove hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ if (entry->is_hdr_proc_ctx) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ IPAERR("default header is proc ctx\n");
+ return -EFAULT; + } + IPADBG("skip default header\n"); + continue; + } + } + + if (ipa_id_find(entry->id) == NULL) { + mutex_unlock(&ipa_ctx->lock); + WARN_ON(1); + return -EFAULT; + } + + if (!user_only || entry->ipacm_installed) { + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } else { + /* move the offset entry to free list */ + entry->offset_entry->ipacm_installed = 0; + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[ + entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->ref_cnt = 0; + entry->cookie = 0; + + /* remove the handle from the database */ + ipa_id_remove(entry->id); + kmem_cache_free(ipa_ctx->hdr_cache, entry); + } + } + + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + list_for_each_entry_safe(off_entry, off_next, + &ipa_ctx->hdr_tbl.head_offset_list[i], + link) { + /** + * do not remove the default exception + * header which is at offset 0 + */ + if (off_entry->offset == 0) + continue; + list_del(&off_entry->link); + kmem_cache_free(ipa_ctx->hdr_offset_cache, + off_entry); + } + list_for_each_entry_safe(off_entry, off_next, + &ipa_ctx->hdr_tbl.head_free_offset_list[i], + link) { + list_del(&off_entry->link); + kmem_cache_free(ipa_ctx->hdr_offset_cache, + off_entry); + } + } + /* there is one header of size 8 */ + ipa_ctx->hdr_tbl.end = 8; + ipa_ctx->hdr_tbl.hdr_cnt = 1; + } + + IPADBG("reset hdr proc ctx\n"); + list_for_each_entry_safe( + ctx_entry, + ctx_next, + &ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + + if (ipa_id_find(ctx_entry->id) == NULL) { + mutex_unlock(&ipa_ctx->lock); + WARN_ON_RATELIMIT_IPA(1); + return -EFAULT; + } + + if (!user_only || + ctx_entry->ipacm_installed) { + /* move the offset entry to appropriate free list */ + list_move(&ctx_entry->offset_entry->link, + &htbl_proc->head_free_offset_list[ + ctx_entry->offset_entry->bin]); + list_del(&ctx_entry->link); + htbl_proc->proc_ctx_cnt--; + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + + /* remove the handle from the database */ + ipa_id_remove(ctx_entry->id); + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, + ctx_entry); + } + } + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i], + link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i], + link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + } + ipa_ctx->hdr_proc_ctx_tbl.end = 0; + ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0; + } + + /* commit the change to IPA-HW */ + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + IPAERR_RL("fail to commit hdr\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + mutex_unlock(&ipa_ctx->lock); + return 0; +} + +static struct ipa_hdr_entry *__ipa_find_hdr(const char *name) +{ + struct ipa_hdr_entry *entry; + + list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa2_get_hdr() - Lookup 
the specified header resource + * @lookup: [inout] header to lookup and its handle + * + * lookup the specified header resource and return handle if it exists + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_hdr later if this function succeeds + */ +int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + struct ipa_hdr_entry *entry; + int result = -1; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (lookup == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_hdr(lookup->name); + if (entry) { + lookup->hdl = entry->id; + result = 0; + } + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * __ipa_release_hdr() - drop reference to header and cause + * deletion if reference count permits + * @hdr_hdl: [in] handle of header to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa_release_hdr(u32 hdr_hdl) +{ + int result = 0; + + if (__ipa_del_hdr(hdr_hdl, false)) { + IPADBG("fail to del hdr %x\n", hdr_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * __ipa_release_hdr_proc_ctx() - drop reference to processing context + * and cause deletion if reference count permits + * @proc_ctx_hdl: [in] handle of processing context to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl) +{ + int result = 0; + + if (__ipa_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) { + IPADBG("fail to del hdr %x\n", proc_ctx_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * ipa2_put_hdr() - Release the specified header handle + * @hdr_hdl: [in] the header handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_put_hdr(u32 hdr_hdl) +{ + struct ipa_hdr_entry *entry; + int result = -EFAULT; + + mutex_lock(&ipa_ctx->lock); + + entry = ipa_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto bail; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("invalid header entry\n"); + result = -EINVAL; + goto bail; + } + + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa2_copy_hdr() - Lookup the specified header resource and return a copy of + * it + * @copy: [inout] header to lookup and its copy + * + * lookup the specified header resource and return a copy of it (along with its + * attributes) if it exists, this would be called for partial headers + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + struct ipa_hdr_entry *entry; + int result = -EFAULT; + + if (copy == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa_ctx->lock); + copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_hdr(copy->name); + if (entry) { + memcpy(copy->hdr, entry->hdr, entry->hdr_len); + copy->hdr_len = entry->hdr_len; + copy->type = entry->type; + copy->is_partial 
= entry->is_partial; + copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid; + copy->eth2_ofst = entry->eth2_ofst; + result = 0; + } + mutex_unlock(&ipa_ctx->lock); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..1b1ebea08e92163a6c9d4d8c68e753fac66d397d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2015, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_HW_DEFS_H +#define _IPA_HW_DEFS_H +#include + +/* This header defines various HW related data types */ + +/* immediate command op-codes */ +#define IPA_DECIPH_INIT (1) +#define IPA_PPP_FRM_INIT (2) +#define IPA_IP_V4_FILTER_INIT (3) +#define IPA_IP_V6_FILTER_INIT (4) +#define IPA_IP_V4_NAT_INIT (5) +#define IPA_IP_V6_NAT_INIT (6) +#define IPA_IP_V4_ROUTING_INIT (7) +#define IPA_IP_V6_ROUTING_INIT (8) +#define IPA_HDR_INIT_LOCAL (9) +#define IPA_HDR_INIT_SYSTEM (10) +#define IPA_DECIPH_SETUP (11) +#define IPA_REGISTER_WRITE (12) +#define IPA_NAT_DMA (14) +#define IPA_IP_PACKET_TAG (15) +#define IPA_IP_PACKET_INIT (16) +#define IPA_DMA_SHARED_MEM (19) +#define IPA_IP_PACKET_TAG_STATUS (20) + +/* Processing context TLV type */ +#define IPA_PROC_CTX_TLV_TYPE_END 0 +#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1 +#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3 + + +/** + * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule + * @word: filtering rule properties + * @en_rule: enable rule + * @action: post routing action + * @rt_tbl_idx: index in routing table + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. + * @to_uc: direct IPA to sent the packet to uc instead of + * the intended destination. 
This will be performed just after + * routing block processing, so routing will have determined + * destination end point and uc will receive this information + * together with the packet as part of the HW packet TX commands + * @rsvd: reserved bits + */ +struct ipa_flt_rule_hw_hdr { + union { + u32 word; + struct { + u32 en_rule:16; + u32 action:5; + u32 rt_tbl_idx:5; + u32 retain_hdr:1; + u32 to_uc:1; + u32 rsvd:4; + } hdr; + } u; +}; + +/** + * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule + * @word: filtering rule properties + * @en_rule: enable rule + * @pipe_dest_idx: destination pipe index + * @system: changed from local to system due to HW change + * @hdr_offset: header offset + * @proc_ctx: whether hdr_offset points to header table or to + * header processing context table + */ +struct ipa_rt_rule_hw_hdr { + union { + u32 word; + struct { + u32 en_rule:16; + u32 pipe_dest_idx:5; + u32 system:1; + u32 hdr_offset:10; + } hdr; + struct { + u32 en_rule:16; + u32 pipe_dest_idx:5; + u32 system:1; + u32 hdr_offset:9; + u32 proc_ctx:1; + } hdr_v2_5; + } u; +}; + +/** + * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload + * @ipv4_rules_addr: address of ipv4 rules + * @size_ipv4_rules: size of the above + * @ipv4_addr: ipv4 address + * @rsvd: reserved + */ +struct ipa_ip_v4_filter_init { + u64 ipv4_rules_addr:32; + u64 size_ipv4_rules:12; + u64 ipv4_addr:16; + u64 rsvd:4; +}; + +/** + * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload + * @ipv6_rules_addr: address of ipv6 rules + * @size_ipv6_rules: size of the above + * @ipv6_addr: ipv6 address + */ +struct ipa_ip_v6_filter_init { + u64 ipv6_rules_addr:32; + u64 size_ipv6_rules:16; + u64 ipv6_addr:16; +}; + +/** + * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload + * @ipv4_rules_addr: address of ipv4 rules + * @size_ipv4_rules: size of the above + * @ipv4_addr: ipv4 address + * @rsvd: reserved + */ +struct ipa_ip_v4_routing_init { + u64 ipv4_rules_addr:32; + u64 size_ipv4_rules:12; + u64 ipv4_addr:16; + u64 rsvd:4; +}; + +/** + * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload + * @ipv6_rules_addr: address of ipv6 rules + * @size_ipv6_rules: size of the above + * @ipv6_addr: ipv6 address + */ +struct ipa_ip_v6_routing_init { + u64 ipv6_rules_addr:32; + u64 size_ipv6_rules:16; + u64 ipv6_addr:16; +}; + +/** + * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload + * @hdr_table_src_addr: word address of header table in system memory where the + * table starts (use as source for memory copying) + * @size_hdr_table: size of the above (in bytes) + * @hdr_table_dst_addr: header address in IPA sram (used as dst for memory copy) + * @rsvd: reserved + */ +struct ipa_hdr_init_local { + u64 hdr_table_src_addr:32; + u64 size_hdr_table:12; + u64 hdr_table_dst_addr:16; + u64 rsvd:4; +}; + +/** + * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload + * @hdr_table_addr: word address of header table in system memory where the + * table starts (use as source for memory copying) + * @rsvd: reserved + */ +struct ipa_hdr_init_system { + u64 hdr_table_addr:32; + u64 rsvd:32; +}; + +/** + * struct ipa_hdr_proc_ctx_tlv - + * HW structure of IPA processing context header - TLV part + * @type: 0 - end type + * 1 - header addition type + * 3 - processing command type + * @length: number of bytes after tlv + * for type: + * 0 - needs to be 0 + * 1 - header addition length + * 3 - number of 32B including type and length. 
+ * @value: specific value for type + * for type: + * 0 - needs to be 0 + * 1 - header length + * 3 - command ID (see IPA_HDR_UCP_* definitions) + */ +struct ipa_hdr_proc_ctx_tlv { + u32 type:8; + u32 length:8; + u32 value:16; +}; + +/** + * struct ipa_hdr_proc_ctx_hdr_add - + * HW structure of IPA processing context - add header tlv + * @tlv: IPA processing context TLV + * @hdr_addr: processing context header address + */ +struct ipa_hdr_proc_ctx_hdr_add { + struct ipa_hdr_proc_ctx_tlv tlv; + u32 hdr_addr; +}; + +#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7) +#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6) +#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(5) +#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(4) +#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(3) +#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(2) + +/** + * struct ipa_a5_mux_hdr - A5 MUX header definition + * @interface_id: interface ID + * @src_pipe_index: source pipe index + * @flags: flags + * @metadata: metadata + * + * A5 MUX header is in BE, A5 runs in LE. This struct definition + * allows A5 SW to correctly parse the header + */ +struct ipa_a5_mux_hdr { + u16 interface_id; + u8 src_pipe_index; + u8 flags; + u32 metadata; +}; + +/** + * struct ipa_register_write - IPA_REGISTER_WRITE command payload + * @rsvd: reserved + * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear + * @offset: offset from IPA base address + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + */ +struct ipa_register_write { + u32 rsvd:15; + u32 skip_pipeline_clear:1; + u32 offset:16; + u32 value:32; + u32 value_mask:32; +}; + +/** + * struct ipa_nat_dma - IPA_NAT_DMA command payload + * @table_index: NAT table index + * @rsvd1: reserved + * @base_addr: base address + * @rsvd2: reserved + * @offset: offset + * @data: metadata + * @rsvd3: reserved + */ +struct ipa_nat_dma { + u64 table_index:3; + u64 rsvd1:1; + u64 base_addr:2; + u64 rsvd2:2; + u64 offset:32; + u64 data:16; + u64 rsvd3:8; +}; + +/** + * struct ipa_nat_dma - IPA_IP_PACKET_INIT command payload + * @destination_pipe_index: destination pipe index + * @rsvd1: reserved + * @metadata: metadata + * @rsvd2: reserved + */ +struct ipa_ip_packet_init { + u64 destination_pipe_index:5; + u64 rsvd1:3; + u64 metadata:32; + u64 rsvd2:24; +}; + +/** + * struct ipa_nat_dma - IPA_IP_V4_NAT_INIT command payload + * @ipv4_rules_addr: ipv4 rules address + * @ipv4_expansion_rules_addr: ipv4 expansion rules address + * @index_table_addr: index tables address + * @index_table_expansion_addr: index expansion table address + * @table_index: index in table + * @ipv4_rules_addr_type: ipv4 address type + * @ipv4_expansion_rules_addr_type: ipv4 expansion address type + * @index_table_addr_type: index table address type + * @index_table_expansion_addr_type: index expansion table type + * @size_base_tables: size of base tables + * @size_expansion_tables: size of expansion tables + * @rsvd2: reserved + * @public_ip_addr: public IP address + */ +struct ipa_ip_v4_nat_init { + u64 ipv4_rules_addr:32; + u64 ipv4_expansion_rules_addr:32; + u64 index_table_addr:32; + u64 index_table_expansion_addr:32; + u64 table_index:3; + u64 rsvd1:1; + u64 ipv4_rules_addr_type:1; + u64 ipv4_expansion_rules_addr_type:1; + u64 index_table_addr_type:1; + u64 index_table_expansion_addr_type:1; + u64 size_base_tables:12; + u64 size_expansion_tables:10; + u64 rsvd2:2; + u64 public_ip_addr:32; +}; + +/** + * struct ipa_ip_packet_tag - IPA_IP_PACKET_TAG command payload + * @tag: tag value returned 
with response + */ +struct ipa_ip_packet_tag { + u32 tag; +}; + +/** + * struct ipa_ip_packet_tag_status - IPA_IP_PACKET_TAG_STATUS command payload + * @rsvd: reserved + * @tag_f_1: tag value returned within status + * @tag_f_2: tag value returned within status + */ +struct ipa_ip_packet_tag_status { + u32 rsvd:16; + u32 tag_f_1:16; + u32 tag_f_2:32; +}; + +/*! @brief Struct for the IPAv2.0 and IPAv2.5 UL packet status header */ +struct ipa_hw_pkt_status { + u32 status_opcode:8; + u32 exception:8; + u32 status_mask:16; + u32 pkt_len:16; + u32 endp_src_idx:5; + u32 reserved_1:3; + u32 endp_dest_idx:5; + u32 reserved_2:3; + u32 metadata:32; + union { + struct { + u32 filt_local:1; + u32 filt_global:1; + u32 filt_pipe_idx:5; + u32 filt_match:1; + u32 filt_rule_idx:6; + u32 ret_hdr:1; + u32 reserved_3:1; + u32 tag_f_1:16; + + } ipa_hw_v2_0_pkt_status; + struct { + u32 filt_local:1; + u32 filt_global:1; + u32 filt_pipe_idx:5; + u32 ret_hdr:1; + u32 filt_rule_idx:8; + u32 tag_f_1:16; + + } ipa_hw_v2_5_pkt_status; + }; + + u32 tag_f_2:32; + u32 time_day_ctr:32; + u32 nat_hit:1; + u32 nat_tbl_idx:13; + u32 nat_type:2; + u32 route_local:1; + u32 route_tbl_idx:5; + u32 route_match:1; + u32 ucp:1; + u32 route_rule_idx:8; + u32 hdr_local:1; + u32 hdr_offset:10; + u32 frag_hit:1; + u32 frag_rule:4; + u32 reserved_4:16; +}; + +#define IPA_PKT_STATUS_SIZE 32 + +/*! @brief Status header opcodes */ +enum ipa_hw_status_opcode { + IPA_HW_STATUS_OPCODE_MIN, + IPA_HW_STATUS_OPCODE_PACKET = IPA_HW_STATUS_OPCODE_MIN, + IPA_HW_STATUS_OPCODE_NEW_FRAG_RULE, + IPA_HW_STATUS_OPCODE_DROPPED_PACKET, + IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET, + IPA_HW_STATUS_OPCODE_XLAT_PACKET = 6, + IPA_HW_STATUS_OPCODE_MAX +}; + +/*! @brief Possible Masks received in status */ +enum ipa_hw_pkt_status_mask { + IPA_HW_PKT_STATUS_MASK_FRAG_PROCESS = 0x1, + IPA_HW_PKT_STATUS_MASK_FILT_PROCESS = 0x2, + IPA_HW_PKT_STATUS_MASK_NAT_PROCESS = 0x4, + IPA_HW_PKT_STATUS_MASK_ROUTE_PROCESS = 0x8, + IPA_HW_PKT_STATUS_MASK_TAG_VALID = 0x10, + IPA_HW_PKT_STATUS_MASK_FRAGMENT = 0x20, + IPA_HW_PKT_STATUS_MASK_FIRST_FRAGMENT = 0x40, + IPA_HW_PKT_STATUS_MASK_V4 = 0x80, + IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS = 0x100, + IPA_HW_PKT_STATUS_MASK_AGGR_PROCESS = 0x200, + IPA_HW_PKT_STATUS_MASK_DEST_EOT = 0x400, + IPA_HW_PKT_STATUS_MASK_DEAGGR_PROCESS = 0x800, + IPA_HW_PKT_STATUS_MASK_DEAGG_FIRST = 0x1000, + IPA_HW_PKT_STATUS_MASK_SRC_EOT = 0x2000 +}; + +/*! @brief Possible Exceptions received in status */ +enum ipa_hw_pkt_status_exception { + IPA_HW_PKT_STATUS_EXCEPTION_NONE = 0x0, + IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR = 0x1, + IPA_HW_PKT_STATUS_EXCEPTION_REPL = 0x2, + IPA_HW_PKT_STATUS_EXCEPTION_IPTYPE = 0x4, + IPA_HW_PKT_STATUS_EXCEPTION_IHL = 0x8, + IPA_HW_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10, + IPA_HW_PKT_STATUS_EXCEPTION_SW_FILT = 0x20, + IPA_HW_PKT_STATUS_EXCEPTION_NAT = 0x40, + IPA_HW_PKT_STATUS_EXCEPTION_ACTUAL_MAX, + IPA_HW_PKT_STATUS_EXCEPTION_MAX = 0xFF +}; + +/*! 
@brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */ +struct ipa_hw_imm_cmd_dma_shared_mem { + u32 reserved_1:16; + u32 size:16; + u32 system_addr:32; + u32 local_addr:16; + u32 direction:1; + u32 skip_pipeline_clear:1; + u32 reserved_2:14; + u32 padding:32; +}; + +#endif /* _IPA_HW_DEFS_H */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h new file mode 100644 index 0000000000000000000000000000000000000000..514c3b423351b67d7a4c09d6fb4735e2ed253814 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -0,0 +1,1988 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_I_H_ +#define _IPA_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_hw_defs.h" +#include "ipa_ram_mmap.h" +#include "ipa_reg.h" +#include "ipa_qmi_service.h" +#include "../ipa_api.h" +#include "../ipa_common_i.h" +#include "ipa_uc_offload_i.h" + +#define DRV_NAME "ipa" +#define NAT_DEV_NAME "ipaNatTable" + +#define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + + +#define MTU_BYTE 1500 + +#define IPA_MAX_NUM_PIPES 0x14 +#define IPA_SYS_DESC_FIFO_SZ 0x2000 +#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000 +#define IPA_LAN_RX_HEADER_LENGTH (2) +#define IPA_QMAP_HEADER_LENGTH (4) +#define IPA_DL_CHECKSUM_LENGTH (8) +#define IPA_NUM_DESC_PER_SW_TX (2) +#define IPA_GENERIC_RX_POOL_SZ 192 +#define IPA_UC_FINISH_MAX 6 +#define IPA_UC_WAIT_MIN_SLEEP 1000 +#define IPA_UC_WAII_MAX_SLEEP 1200 +#define IPA_BAM_STOP_MAX_RETRY 10 + +#define IPA_MAX_STATUS_STAT_NUM 30 + + +#define IPA_MAX_NUM_REQ_CACHE 10 +#define IPA_IPC_LOG_PAGES 50 + +#define IPA_WDI_RX_RING_RES 0 +#define IPA_WDI_RX_RING_RP_RES 1 +#define IPA_WDI_RX_COMP_RING_RES 2 +#define IPA_WDI_RX_COMP_RING_WP_RES 3 +#define IPA_WDI_TX_RING_RES 4 +#define IPA_WDI_CE_RING_RES 5 +#define IPA_WDI_CE_DB_RES 6 +#define IPA_WDI_TX_DB_RES 7 +#define IPA_WDI_MAX_RES 8 + +#define IPADBG(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_ctx) { \ + IPA_IPC_LOGGING(ipa_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPADBG_LOW(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_ctx) \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAERR(fmt, args...) \ + do { \ + pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_ctx) { \ + IPA_IPC_LOGGING(ipa_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPAERR_RL(fmt, args...) 
\
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \
+ __LINE__, ## args);\
+ if (ipa_ctx) { \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP 19
+#define WLAN1_CONS_RX_EP 14
+#define WLAN2_CONS_RX_EP 16
+#define WLAN3_CONS_RX_EP 17
+#define WLAN4_CONS_RX_EP 18
+
+#define MAX_NUM_EXCP 8
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
+#define IPA_STATS_EXCP_CNT(flags, base) do { \
+ int i; \
+ for (i = 0; i < MAX_NUM_EXCP; i++) \
+ if (flags & BIT(i)) \
+ ++base[i]; \
+ if (flags == 0) \
+ ++base[MAX_NUM_EXCP - 1]; \
+ } while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x)
+#define IPA_STATS_EXCP_CNT(flags, base) do { } while (0)
+#endif
+
+#define IPA_TOS_EQ BIT(0)
+#define IPA_PROTOCOL_EQ BIT(1)
+#define IPA_OFFSET_MEQ32_0 BIT(2)
+#define IPA_OFFSET_MEQ32_1 BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0 BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1 BIT(5)
+#define IPA_IHL_OFFSET_EQ_16 BIT(6)
+#define IPA_IHL_OFFSET_EQ_32 BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0 BIT(8)
+#define IPA_OFFSET_MEQ128_0 BIT(9)
+#define IPA_OFFSET_MEQ128_1 BIT(10)
+#define IPA_TC_EQ BIT(11)
+#define IPA_FL_EQ BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1 BIT(13)
+#define IPA_METADATA_COMPARE BIT(14)
+#define IPA_IS_FRAG BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+/*
+ * Due to ZLT issue with USB 3.0 core, IPA BAM threshold needs to be set
+ * to max packet size + 1.
After setting the threshold, USB core + * will not be notified on ZLTs + */ +#define IPA_USB_EVENT_THRESHOLD 0x4001 + +#define IPA_RX_POOL_CEIL 32 +#define IPA_RX_SKB_SIZE 1792 + +#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr" +#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr" +#define IPA_INVALID_L4_PROTOCOL 0xFF + +#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask)) +#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \ + (reg |= ((val) << (shift)) & (mask)) + +#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \ + (((start_ofst) + 127) & ~127) +#define IPA_RT_FLT_HW_RULE_BUF_SIZE (256) + +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8 +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \ + (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \ + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1)) + +#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX) +#define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_) + +#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120 +#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96 +#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 +#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40 + +struct ipa2_active_client_htable_entry { + struct hlist_node list; + char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN]; + int count; + enum ipa_active_client_log_type type; +}; + +struct ipa2_active_clients_log_ctx { + char *log_buffer[IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES]; + int log_head; + int log_tail; + bool log_rdy; + struct hlist_head htable[IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE]; +}; + + +struct ipa_client_names { + enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS]; + int length; +}; + +struct ipa_smmu_cb_ctx { + bool valid; + struct device *dev; + struct iommu_domain *iommu_domain; + unsigned long next_addr; + u32 va_start; + u32 va_size; + u32 va_end; +}; + + +enum ipa_smmu_cb_type { + IPA_SMMU_CB_AP, + IPA_SMMU_CB_WLAN, + IPA_SMMU_CB_UC, + IPA_SMMU_CB_MAX + +}; +#define VALID_IPA_SMMU_CB_TYPE(t) \ + ((t) >= IPA_SMMU_CB_AP && (t) < IPA_SMMU_CB_MAX) + + +/** + * struct ipa_flt_entry - IPA filtering table entry + * @link: entry's link in global filtering enrties list + * @rule: filter rule + * @cookie: cookie used for validity check + * @tbl: filter table + * @rt_tbl: routing table + * @hw_len: entry's size + * @id: rule handle - globally unique + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_flt_entry { + struct list_head link; + u32 cookie; + struct ipa_flt_rule rule; + struct ipa_flt_tbl *tbl; + struct ipa_rt_tbl *rt_tbl; + u32 hw_len; + int id; + bool ipacm_installed; +}; + +/** + * struct ipa_rt_tbl - IPA routing table + * @link: table's link in global routing tables list + * @head_rt_rule_list: head of routing rules list + * @name: routing table name + * @idx: routing table index + * @rule_cnt: number of rules in routing table + * @ref_cnt: reference counter of routing table + * @set: collection of routing tables + * @cookie: cookie used for validity check + * @in_sys: flag indicating if the table is located in system memory + * @sz: the size of the routing table + * @curr_mem: current routing tables block in sys memory + * @prev_mem: previous routing table block in sys memory + * @id: routing table id + */ +struct ipa_rt_tbl { + struct list_head link; + u32 cookie; + struct list_head head_rt_rule_list; + char name[IPA_RESOURCE_NAME_MAX]; + u32 idx; + u32 rule_cnt; + u32 ref_cnt; + struct ipa_rt_tbl_set *set; + bool in_sys; + u32 sz; + struct ipa_mem_buffer curr_mem; + struct ipa_mem_buffer prev_mem; + int id; +}; + +/** + * struct 
ipa_hdr_entry - IPA header table entry + * @link: entry's link in global header table entries list + * @hdr: the header + * @hdr_len: header length + * @name: name of header table entry + * @type: l2 header type + * @is_partial: flag indicating if header table entry is partial + * @is_hdr_proc_ctx: false - hdr entry resides in hdr table, + * true - hdr entry resides in DDR and pointed to by proc ctx + * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true, + * else 0 + * @proc_ctx: processing context header + * @offset_entry: entry's offset + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: header entry id + * @is_eth2_ofst_valid: is eth2_ofst field valid? + * @eth2_ofst: offset to start of Ethernet-II/802.3 header + * @user_deleted: is the header deleted by the user? + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_entry { + struct list_head link; + u32 cookie; + u8 hdr[IPA_HDR_MAX_SIZE]; + u32 hdr_len; + char name[IPA_RESOURCE_NAME_MAX]; + enum ipa_hdr_l2_type type; + u8 is_partial; + bool is_hdr_proc_ctx; + dma_addr_t phys_base; + struct ipa_hdr_proc_ctx_entry *proc_ctx; + struct ipa_hdr_offset_entry *offset_entry; + u32 ref_cnt; + int id; + u8 is_eth2_ofst_valid; + u16 eth2_ofst; + bool user_deleted; + bool ipacm_installed; +}; + +/** + * struct ipa_hdr_tbl - IPA header table + * @head_hdr_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @hdr_cnt: number of headers + * @end: the last header index + */ +struct ipa_hdr_tbl { + struct list_head head_hdr_entry_list; + struct list_head head_offset_list[IPA_HDR_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_BIN_MAX]; + u32 hdr_cnt; + u32 end; +}; + +/** + * struct ipa_hdr_offset_entry - IPA header offset entry + * @link: entry's link in global processing context header offset entries list + * @offset: the offset + * @bin: bin + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_proc_ctx_offset_entry { + struct list_head link; + u32 offset; + u32 bin; + bool ipacm_installed; +}; + +/** + * struct ipa_hdr_proc_ctx_add_hdr_seq - + * IPA processing context header - add header sequence + * @hdr_add: add header command + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hdr_proc_ctx_add_hdr_seq { + struct ipa_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hdr_proc_ctx_add_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @cmd: tlv processing command (cmd.type must be 3) + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hdr_proc_ctx_add_hdr_cmd_seq { + struct ipa_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hdr_proc_ctx_tlv cmd; + struct ipa_hdr_proc_ctx_tlv end; +}; + +/** + *struct ipa_hdr_proc_ctx_entry - IPA processing context header table entry + * @link: entry's link in global header table entries list + * @type: + * @offset_entry: entry's offset + * @hdr: the header + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: processing context header entry id + * @user_deleted: is the hdr processing context deleted by the user? 
+ * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_proc_ctx_entry { + struct list_head link; + u32 cookie; + enum ipa_hdr_proc_type type; + struct ipa_hdr_proc_ctx_offset_entry *offset_entry; + struct ipa_hdr_entry *hdr; + u32 ref_cnt; + int id; + bool user_deleted; + bool ipacm_installed; +}; + +/** + * struct ipa_hdr_proc_ctx_tbl - IPA processing context header table + * @head_proc_ctx_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @proc_ctx_cnt: number of processing context headers + * @end: the last processing context header index + * @start_offset: offset in words of processing context header table + */ +struct ipa_hdr_proc_ctx_tbl { + struct list_head head_proc_ctx_entry_list; + struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + u32 proc_ctx_cnt; + u32 end; + u32 start_offset; +}; + +/** + * struct ipa_flt_tbl - IPA filter table + * @head_flt_rule_list: filter rules list + * @rule_cnt: number of filter rules + * @in_sys: flag indicating if filter table is located in system memory + * @sz: the size of the filter table + * @end: the last header index + * @curr_mem: current filter tables block in sys memory + * @prev_mem: previous filter table block in sys memory + */ +struct ipa_flt_tbl { + struct list_head head_flt_rule_list; + u32 rule_cnt; + bool in_sys; + u32 sz; + struct ipa_mem_buffer curr_mem; + struct ipa_mem_buffer prev_mem; + bool sticky_rear; +}; + +/** + * struct ipa_rt_entry - IPA routing table entry + * @link: entry's link in global routing table entries list + * @rule: routing rule + * @cookie: cookie used for validity check + * @tbl: routing table + * @hdr: header table + * @proc_ctx: processing context table + * @hw_len: the length of the table + * @id: rule handle - globaly unique + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_rt_entry { + struct list_head link; + u32 cookie; + struct ipa_rt_rule rule; + struct ipa_rt_tbl *tbl; + struct ipa_hdr_entry *hdr; + struct ipa_hdr_proc_ctx_entry *proc_ctx; + u32 hw_len; + int id; + bool ipacm_installed; +}; + +/** + * struct ipa_rt_tbl_set - collection of routing tables + * @head_rt_tbl_list: collection of routing tables + * @tbl_cnt: number of routing tables + */ +struct ipa_rt_tbl_set { + struct list_head head_rt_tbl_list; + u32 tbl_cnt; +}; + +/** + * struct ipa_ep_cfg_status - status configuration in IPA end-point + * @status_en: Determines if end point supports Status Indications. SW should + * set this bit in order to enable Statuses. Output Pipe - send + * Status indications only if bit is set. Input Pipe - forward Status + * indication to STATUS_ENDP only if bit is set. Valid for Input + * and Output Pipes (IPA Consumer and Producer) + * @status_ep: Statuses generated for this endpoint will be forwarded to the + * specified Status End Point. 
Status endpoint needs to be
+ * configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ * Consumer)
+ */
+struct ipa_ep_cfg_status {
+ bool status_en;
+ u8 status_ep;
+};
+
+/**
+ * struct ipa_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Packet count that are not recycled
+ * @rx_dp_fail: Packets failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa_wlan_stats {
+ u32 rx_pkts_rcvd;
+ u32 rx_pkts_status_rcvd;
+ u32 rx_hd_processed;
+ u32 rx_hd_reply;
+ u32 rx_hd_rcvd;
+ u32 rx_pkt_leak;
+ u32 rx_dp_fail;
+ u32 tx_pkts_rcvd;
+ u32 tx_pkts_sent;
+ u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ */
+struct ipa_wlan_comm_memb {
+ spinlock_t wlan_spinlock;
+ spinlock_t ipa_tx_mul_spinlock;
+ u32 wlan_comm_total_cnt;
+ u32 wlan_comm_free_cnt;
+ u32 total_tx_pkts_freed;
+ struct list_head wlan_comm_desc_list;
+ atomic_t active_clnt_cnt;
+};
+
+struct ipa_status_stats {
+ struct ipa_hw_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+ int curr;
+};
+
+enum ipa_wakelock_ref_client {
+ IPA_WAKELOCK_REF_CLIENT_TX = 0,
+ IPA_WAKELOCK_REF_CLIENT_LAN_RX = 1,
+ IPA_WAKELOCK_REF_CLIENT_WAN_RX = 2,
+ IPA_WAKELOCK_REF_CLIENT_WLAN_RX = 3,
+ IPA_WAKELOCK_REF_CLIENT_ODU_RX = 4,
+ IPA_WAKELOCK_REF_CLIENT_SPS = 5,
+ IPA_WAKELOCK_REF_CLIENT_MAX
+};
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information which will be forwarded once the user is
+ * notified for new data avail
+ * @client_notify: user provided CB for EP events notification, the event is
+ * data received.
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @rx_replenish_threshold: Indicates the WM value which requires the RX
+ * descriptors replenish function to be called to
+ * avoid the RX pipe running out of descriptors
+ * and causing HOLB.
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether QMI request to enable clear data path
+ * request is sent or not.
+ * @napi_enabled: when true, IPA call client callback to start polling + */ +struct ipa_ep_context { + int valid; + enum ipa_client_type client; + struct sps_pipe *ep_hdl; + struct ipa_ep_cfg cfg; + struct ipa_ep_cfg_holb holb; + struct ipa_ep_cfg_status status; + u32 dst_pipe_index; + u32 rt_tbl_idx; + struct sps_connect connect; + void *priv; + void (*client_notify)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + bool desc_fifo_in_pipe_mem; + bool data_fifo_in_pipe_mem; + u32 desc_fifo_pipe_mem_ofst; + u32 data_fifo_pipe_mem_ofst; + bool desc_fifo_client_allocated; + bool data_fifo_client_allocated; + atomic_t avail_fifo_desc; + u32 dflt_flt4_rule_hdl; + u32 dflt_flt6_rule_hdl; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct ipa_wlan_stats wstats; + u32 uc_offload_state; + u32 rx_replenish_threshold; + bool disconnect_in_progress; + u32 qmi_request_sent; + enum ipa_wakelock_ref_client wakelock_client; + bool napi_enabled; + bool switch_to_intr; + int inactive_cycles; + u32 eot_in_poll_err; + bool ep_disabled; + + /* sys MUST be the last element of this struct */ + struct ipa_sys_context *sys; +}; + +enum ipa_sys_pipe_policy { + IPA_POLICY_INTR_MODE, + IPA_POLICY_NOINTR_MODE, + IPA_POLICY_INTR_POLL_MODE, +}; + +struct ipa_repl_ctx { + struct ipa_rx_pkt_wrapper **cache; + atomic_t head_idx; + atomic_t tail_idx; + u32 capacity; +}; + +/** + * struct ipa_sys_context - IPA endpoint context for system to BAM pipes + * @head_desc_list: header descriptors list + * @len: the size of the above list + * @spinlock: protects the list and its size + * @event: used to request CALLBACK mode from SPS driver + * @ep: IPA EP context + * + * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN + */ +struct ipa_sys_context { + u32 len; + struct sps_register_event event; + atomic_t curr_polling_state; + struct delayed_work switch_to_intr_work; + enum ipa_sys_pipe_policy policy; + int (*pyld_hdlr)(struct sk_buff *skb, struct ipa_sys_context *sys); + struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags); + void (*free_skb)(struct sk_buff *skb); + u32 rx_buff_sz; + u32 rx_pool_sz; + struct sk_buff *prev_skb; + unsigned int len_rem; + unsigned int len_pad; + unsigned int len_partial; + bool drop_packet; + struct work_struct work; + void (*sps_callback)(struct sps_event_notify *notify); + enum sps_option sps_option; + struct delayed_work replenish_rx_work; + struct work_struct repl_work; + void (*repl_hdlr)(struct ipa_sys_context *sys); + struct ipa_repl_ctx repl; + unsigned int repl_trig_cnt; + unsigned int repl_trig_thresh; + + /* ordering is important - mutable fields go above */ + struct ipa_ep_context *ep; + struct list_head head_desc_list; + struct list_head rcycl_list; + spinlock_t spinlock; + struct workqueue_struct *wq; + struct workqueue_struct *repl_wq; + struct ipa_status_stats *status_stat; + /* ordering is important - other immutable fields go below */ +}; + +/** + * enum ipa_desc_type - IPA decriptors type + * + * IPA decriptors type, IPA supports DD and ICD but no CD + */ +enum ipa_desc_type { + IPA_DATA_DESC, + IPA_DATA_DESC_SKB, + IPA_DATA_DESC_SKB_PAGED, + IPA_IMM_CMD_DESC +}; + +/** + * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper + * @type: specify if this packet is for the skb or immediate command + * @mem: memory buffer used by this Tx packet + * @work: work struct for current Tx packet + * @link: linked to the wrappers on that pipe + * @callback: IPA client provided callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above 
callback + * @sys: corresponding IPA sys context + * @mult: valid only for first of a "multiple" transfer, + * holds info for the "sps_transfer" buffer + * @cnt: 1 for single transfers, + * >1 and <0xFFFF for first of a "multiple" transfer, + * 0xFFFF for last desc, 0 for rest of "multiple' transfer + * @bounce: va of bounce buffer + * @unmap_dma: in case this is true, the buffer will not be dma unmapped + * + * This struct can wrap both data packet and immediate command packet. + */ +struct ipa_tx_pkt_wrapper { + enum ipa_desc_type type; + struct ipa_mem_buffer mem; + struct work_struct work; + struct list_head link; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct ipa_sys_context *sys; + struct ipa_mem_buffer mult; + u32 cnt; + void *bounce; + bool no_unmap_dma; +}; + +/** + * struct ipa_desc - IPA descriptor + * @type: skb or immediate command or plain old data + * @pyld: points to skb + * @frag: points to paged fragment + * or kmalloc'ed immediate command parameters/plain old data + * @dma_address: dma mapped address of pyld + * @dma_address_valid: valid field for dma_address + * @len: length of the pyld + * @opcode: for immediate commands + * @callback: IPA client provided completion callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @xfer_done: completion object for sync completion + */ +struct ipa_desc { + enum ipa_desc_type type; + void *pyld; + skb_frag_t *frag; + dma_addr_t dma_address; + bool dma_address_valid; + u16 len; + u16 opcode; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct completion xfer_done; +}; + +/** + * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper + * @skb: skb + * @dma_address: DMA address of this Rx packet + * @link: linked to the Rx packets on that pipe + * @len: how many bytes are copied into skb's flat buffer + */ +struct ipa_rx_pkt_wrapper { + struct list_head link; + struct ipa_rx_data data; + u32 len; + struct work_struct work; + struct ipa_sys_context *sys; +}; + +/** + * struct ipa_nat_mem - IPA NAT memory description + * @class: pointer to the struct class + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @dev_num: device number + * @vaddr: virtual address + * @dma_handle: DMA handle + * @size: NAT memory size + * @is_mapped: flag indicating if NAT memory is mapped + * @is_sys_mem: flag indicating if NAT memory is sys memory + * @is_dev_init: flag indicating if NAT device is initialized + * @lock: NAT memory mutex + * @nat_base_address: nat table virutal address + * @ipv4_rules_addr: base nat table address + * @ipv4_expansion_rules_addr: expansion table address + * @index_table_addr: index table address + * @index_table_expansion_addr: index expansion table address + * @size_base_tables: base table size + * @size_expansion_tables: expansion table size + * @public_ip_addr: ip address of nat table + */ +struct ipa_nat_mem { + struct class *class; + struct device *dev; + struct cdev cdev; + dev_t dev_num; + void *vaddr; + dma_addr_t dma_handle; + size_t size; + bool is_mapped; + bool is_sys_mem; + bool is_dev_init; + bool is_dev; + struct mutex lock; + void *nat_base_address; + char *ipv4_rules_addr; + char *ipv4_expansion_rules_addr; + char *index_table_addr; + char *index_table_expansion_addr; + u32 size_base_tables; + u32 size_expansion_tables; + u32 public_ip_addr; + void *tmp_vaddr; + dma_addr_t tmp_dma_handle; + bool is_tmp_mem; +}; + +/** + * enum ipa_hw_mode - IPA hardware mode + * @IPA_HW_Normal: Regular IPA hardware + * 
@IPA_HW_Virtual: IPA hardware supporting virtual memory allocation + * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge + */ +enum ipa_hw_mode { + IPA_HW_MODE_NORMAL = 0, + IPA_HW_MODE_VIRTUAL = 1, + IPA_HW_MODE_PCIE = 2 +}; + +enum ipa_config_this_ep { + IPA_CONFIGURE_THIS_EP, + IPA_DO_NOT_CONFIGURE_THIS_EP, +}; + +struct ipa_stats { + u32 tx_sw_pkts; + u32 tx_hw_pkts; + u32 rx_pkts; + u32 rx_excp_pkts[MAX_NUM_EXCP]; + u32 rx_repl_repost; + u32 tx_pkts_compl; + u32 rx_q_len; + u32 msg_w[IPA_EVENT_MAX_NUM]; + u32 msg_r[IPA_EVENT_MAX_NUM]; + u32 stat_compl; + u32 aggr_close; + u32 wan_aggr_close; + u32 wan_rx_empty; + u32 wan_repl_rx_empty; + u32 lan_rx_empty; + u32 lan_repl_rx_empty; + u32 flow_enable; + u32 flow_disable; + u32 tx_non_linear; +}; + +struct ipa_active_clients { + struct mutex mutex; + spinlock_t spinlock; + bool mutex_locked; + int cnt; +}; + +struct ipa_wakelock_ref_cnt { + spinlock_t spinlock; + u32 cnt; +}; + +struct ipa_tag_completion { + struct completion comp; + atomic_t cnt; +}; + +struct ipa_controller; + +/** + * struct ipa_uc_hdlrs - IPA uC callback functions + * @ipa_uc_loaded_hdlr: Function handler when uC is loaded + * @ipa_uc_event_hdlr: Event handler function + * @ipa_uc_response_hdlr: Response handler function + * @ipa_uc_event_log_info_hdlr: Log event handler function + */ +struct ipa_uc_hdlrs { + void (*ipa_uc_loaded_hdlr)(void); + + void (*ipa_uc_event_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio); + int (*ipa_uc_response_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio, + u32 *uc_status); + void (*ipa_uc_event_log_info_hdlr) + (struct IpaHwEventLogInfoData_t *uc_event_top_mmio); +}; + +/** + * enum ipa_hw_flags - flags which defines the behavior of HW + * + * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert + * failure. + * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported + * in the event ring only. No event to CPU. 
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event + * IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST + * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by + * QMB (avoid memcpy) + * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in + * IN Channel + * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is + * entering a mode where it expects a doorbell to be rung for OUT Channel + * @IPA_HW_FLAG_NO_START_OOB_TIMER + */ +enum ipa_hw_flags { + IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE = 0x01, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR = 0x02, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP = 0x04, + IPA_HW_FLAG_WORK_OVER_DDR = 0x08, + IPA_HW_FLAG_NO_REPORT_OOB = 0x10, + IPA_HW_FLAG_NO_REPORT_DB_MODE = 0x20, + IPA_HW_FLAG_NO_START_OOB_TIMER = 0x40 +}; + +/** + * struct ipa_uc_ctx - IPA uC context + * @uc_inited: Indicates if uC interface has been initialized + * @uc_loaded: Indicates if uC has loaded + * @uc_failed: Indicates if uC has failed / returned an error + * @uc_lock: uC interface lock to allow only one uC interaction at a time + * @uc_completation: Completion mechanism to wait for uC commands + * @uc_sram_mmio: Pointer to uC mapped memory + * @pending_cmd: The last command sent waiting to be ACKed + * @uc_status: The last status provided by the uC + * @uc_zip_error: uC has notified the APPS upon a ZIP engine error + * @uc_error_type: error type from uC error event + */ +struct ipa_uc_ctx { + bool uc_inited; + bool uc_loaded; + bool uc_failed; + struct mutex uc_lock; + struct completion uc_completion; + struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio; + struct IpaHwEventLogInfoData_t *uc_event_top_mmio; + u32 uc_event_top_ofst; + u32 pending_cmd; + u32 uc_status; + bool uc_zip_error; + u32 uc_error_type; + phys_addr_t rdy_ring_base_pa; + phys_addr_t rdy_ring_rp_pa; + u32 rdy_ring_size; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa_uc_wdi_ctx + * @wdi_uc_top_ofst: + * @wdi_uc_top_mmio: + * @wdi_uc_stats_ofst: + * @wdi_uc_stats_mmio: + */ +struct ipa_uc_wdi_ctx { + /* WDI specific fields */ + u32 wdi_uc_stats_ofst; + struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; + /* for AP+STA stats update */ +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb stats_notify; +#endif +}; + +/** + * struct ipa_sps_pm - SPS power management related members + * @dec_clients: true if need to decrease active clients count + * @eot_activity: represent EOT interrupt activity to determine to reset + * the inactivity timer + * @sps_pm_lock: Lock to protect the sps_pm functionality. 
+ */ +struct ipa_sps_pm { + atomic_t dec_clients; + atomic_t eot_activity; + struct mutex sps_pm_lock; +}; + +/** + * struct ipacm_client_info - the client-info indicated from IPACM + * @ipacm_client_enum: the enum to indicate tether-client + * @ipacm_client_uplink: the bool to indicate pipe for uplink + */ +struct ipacm_client_info { + enum ipacm_client_enum client_enum; + bool uplink; +}; + +struct ipa_cne_evt { + struct ipa_wan_msg wan_msg; + struct ipa_msg_meta msg_meta; +}; + +/** + * struct ipa_context - IPA context + * @class: pointer to the struct class + * @dev_num: device number + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @bam_handle: IPA driver's BAM handle + * @ep: list of all end points + * @skip_ep_cfg_shadow: state to update filter table correctly across + power-save + * @resume_on_connect: resume ep on ipa_connect + * @flt_tbl: list of all IPA filter tables + * @mode: IPA operating mode + * @mmio: iomem + * @ipa_wrapper_base: IPA wrapper base address + * @glob_flt_tbl: global filter table + * @hdr_tbl: IPA header table + * @hdr_proc_ctx_tbl: IPA processing context table + * @rt_tbl_set: list of routing tables each of which is a list of rules + * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped + * @flt_rule_cache: filter rule cache + * @rt_rule_cache: routing rule cache + * @hdr_cache: header cache + * @hdr_offset_cache: header offset cache + * @hdr_proc_ctx_cache: processing context cache + * @hdr_proc_ctx_offset_cache: processing context offset cache + * @rt_tbl_cache: routing table cache + * @tx_pkt_wrapper_cache: Tx packets cache + * @rx_pkt_wrapper_cache: Rx packets cache + * @rt_idx_bitmap: routing table index bitmap + * @lock: this does NOT protect the linked lists within ipa_sys_context + * @smem_sz: shared memory size available for SW use starting + * from non-restricted bytes + * @smem_restricted_bytes: the bytes that SW should not use in the shared mem + * @nat_mem: NAT memory + * @excp_hdr_hdl: exception header handle + * @dflt_v4_rt_rule_hdl: default v4 routing rule handle + * @dflt_v6_rt_rule_hdl: default v6 routing rule handle + * @aggregation_type: aggregation type used on USB client endpoint + * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint + * @aggregation_time_limit: aggregation time limit used on USB client endpoint + * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system + * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system + * @hdr_mem: header memory + * @hdr_proc_ctx_mem: processing context memory + * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system + * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system + * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system + * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system + * @empty_rt_tbl_mem: empty routing tables memory + * @power_mgmt_wq: workqueue for power management + * @sps_power_mgmt_wq: workqueue SPS related power management + * @tag_process_before_gating: indicates whether to start tag process before + * gating IPA clocks + * @sps_pm: sps power management related information + * @disconnect_lock: protects LAN_CONS packet receive notification CB + * @pipe_mem_pool: pipe memory pool + * @dma_pool: special purpose DMA pool + * @ipa_active_clients: structure for reference counting connected IPA clients + * @ipa_hw_type: type of IPA HW type (e.g. IPA 1.0, IPA 1.1 etc') + * @ipa_hw_mode: mode of IPA HW mode (e.g. 
Normal, Virtual or over PCIe) + * @use_ipa_teth_bridge: use tethering bridge driver + * @ipa_bam_remote_mode: ipa bam is in remote mode + * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules + * @logbuf: ipc log buffer for high priority messages + * @logbuf_low: ipc log buffer for low priority messages + * @ipa_wdi2: using wdi-2.0 + * @ipa_bus_hdl: msm driver handle for the data path bus + * @ctrl: holds the core specific operations based on + * core version (vtable like) + * @enable_clock_scaling: clock scaling is enabled ? + * @curr_ipa_clk_rate: ipa_clk current rate + * @wcstats: wlan common buffer stats + * @uc_ctx: uC interface context + * @uc_wdi_ctx: WDI specific fields for uC interface + * @ipa_num_pipes: The number of pipes used by IPA HW + * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided + * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA + * @w_lock: Indicates the wakeup source. + * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired + + * IPA context - holds all relevant info about IPA driver and its state + */ +struct ipa_context { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; + unsigned long bam_handle; + struct ipa_ep_context ep[IPA_MAX_NUM_PIPES]; + bool skip_ep_cfg_shadow[IPA_MAX_NUM_PIPES]; + bool resume_on_connect[IPA_CLIENT_MAX]; + struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX]; + void __iomem *mmio; + u32 ipa_wrapper_base; + u32 ipa_wrapper_size; + struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX]; + struct ipa_hdr_tbl hdr_tbl; + struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl; + struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX]; + struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX]; + struct kmem_cache *flt_rule_cache; + struct kmem_cache *rt_rule_cache; + struct kmem_cache *hdr_cache; + struct kmem_cache *hdr_offset_cache; + struct kmem_cache *hdr_proc_ctx_cache; + struct kmem_cache *hdr_proc_ctx_offset_cache; + struct kmem_cache *rt_tbl_cache; + struct kmem_cache *tx_pkt_wrapper_cache; + struct kmem_cache *rx_pkt_wrapper_cache; + unsigned long rt_idx_bitmap[IPA_IP_MAX]; + struct mutex lock; + u16 smem_sz; + u16 smem_restricted_bytes; + u16 smem_reqd_sz; + struct ipa_nat_mem nat_mem; + u32 excp_hdr_hdl; + u32 dflt_v4_rt_rule_hdl; + u32 dflt_v6_rt_rule_hdl; + uint aggregation_type; + uint aggregation_byte_limit; + uint aggregation_time_limit; + bool hdr_tbl_lcl; + bool hdr_proc_ctx_tbl_lcl; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer hdr_proc_ctx_mem; + bool ip4_rt_tbl_lcl; + bool ip6_rt_tbl_lcl; + bool ip4_flt_tbl_lcl; + bool ip6_flt_tbl_lcl; + struct ipa_mem_buffer empty_rt_tbl_mem; + struct gen_pool *pipe_mem_pool; + struct dma_pool *dma_pool; + struct ipa_active_clients ipa_active_clients; + struct ipa2_active_clients_log_ctx ipa2_active_clients_logging; + struct workqueue_struct *power_mgmt_wq; + struct workqueue_struct *sps_power_mgmt_wq; + bool tag_process_before_gating; + struct ipa_sps_pm sps_pm; + u32 clnt_hdl_cmd; + u32 clnt_hdl_data_in; + u32 clnt_hdl_data_out; + spinlock_t disconnect_lock; + u8 a5_pipe_index; + struct list_head intf_list; + struct list_head msg_list; + struct list_head pull_msg_list; + struct mutex msg_lock; + struct list_head msg_wlan_client_list; + struct mutex msg_wlan_client_lock; + wait_queue_head_t msg_waitq; + enum ipa_hw_type ipa_hw_type; + enum ipa_hw_mode ipa_hw_mode; + bool use_ipa_teth_bridge; + bool ipa_bam_remote_mode; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + /* featurize if memory 
footprint becomes a concern */ + struct ipa_stats stats; + void *smem_pipe_mem; + void *logbuf; + void *logbuf_low; + u32 ipa_bus_hdl; + struct ipa_controller *ctrl; + struct idr ipa_idr; + struct device *pdev; + struct device *uc_pdev; + spinlock_t idr_lock; + u32 enable_clock_scaling; + u32 curr_ipa_clk_rate; + bool q6_proxy_clk_vote_valid; + u32 ipa_num_pipes; + + struct ipa_wlan_comm_memb wc_memb; + + struct ipa_uc_ctx uc_ctx; + + struct ipa_uc_wdi_ctx uc_wdi_ctx; + struct ipa_uc_ntn_ctx uc_ntn_ctx; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + bool smmu_present; + bool smmu_s1_bypass; + unsigned long peer_bam_iova; + phys_addr_t peer_bam_pa; + u32 peer_bam_map_size; + unsigned long peer_bam_dev; + u32 peer_bam_map_cnt; + u32 wdi_map_cnt; + bool use_dma_zone; + struct wakeup_source *w_lock; + struct ipa_wakelock_ref_cnt wakelock_ref_cnt; + + /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */ + bool ipa_client_apps_wan_cons_agg_gro; + /* M-release support to know client pipes */ + struct ipacm_client_info ipacm_client[IPA_MAX_NUM_PIPES]; + bool tethered_flow_control; + u32 ipa_rx_min_timeout_usec; + u32 ipa_rx_max_timeout_usec; + u32 ipa_polling_iteration; + struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE]; + int num_ipa_cne_evt_req; + struct mutex ipa_cne_evt_lock; + bool ipa_uc_monitor_holb; +}; + +/** + * struct ipa_route - IPA route + * @route_dis: route disable + * @route_def_pipe: route default pipe + * @route_def_hdr_table: route default header table + * @route_def_hdr_ofst: route default header offset table + * @route_frag_def_pipe: Default pipe to route fragmented exception + * packets and frag new rule statues, if source pipe does not have + * a notification status pipe defined. + */ +struct ipa_route { + u32 route_dis; + u32 route_def_pipe; + u32 route_def_hdr_table; + u32 route_def_hdr_ofst; + u8 route_frag_def_pipe; +}; + +/** + * enum ipa_pipe_mem_type - IPA pipe memory type + * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory + * @IPA_PRIVATE_MEM: IPA's private memory + * @IPA_SYSTEM_MEM: System RAM, requires allocation + */ +enum ipa_pipe_mem_type { + IPA_SPS_PIPE_MEM = 0, + IPA_PRIVATE_MEM = 1, + IPA_SYSTEM_MEM = 2, +}; + +struct ipa_plat_drv_res { + bool use_ipa_teth_bridge; + u32 ipa_mem_base; + u32 ipa_mem_size; + u32 bam_mem_base; + u32 bam_mem_size; + u32 ipa_irq; + u32 bam_irq; + u32 ipa_pipe_mem_start_ofst; + u32 ipa_pipe_mem_size; + enum ipa_hw_type ipa_hw_type; + enum ipa_hw_mode ipa_hw_mode; + u32 ee; + bool ipa_bam_remote_mode; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + bool use_dma_zone; + bool tethered_flow_control; + u32 ipa_rx_polling_sleep_msec; + u32 ipa_polling_iteration; + bool ipa_uc_monitor_holb; +}; + +struct ipa_mem_partition { + u16 ofst_start; + u16 nat_ofst; + u16 nat_size; + u16 v4_flt_ofst; + u16 v4_flt_size; + u16 v4_flt_size_ddr; + u16 v6_flt_ofst; + u16 v6_flt_size; + u16 v6_flt_size_ddr; + u16 v4_rt_ofst; + u16 v4_num_index; + u16 v4_modem_rt_index_lo; + u16 v4_modem_rt_index_hi; + u16 v4_apps_rt_index_lo; + u16 v4_apps_rt_index_hi; + u16 v4_rt_size; + u16 v4_rt_size_ddr; + u16 v6_rt_ofst; + u16 v6_num_index; + u16 v6_modem_rt_index_lo; + u16 v6_modem_rt_index_hi; + u16 v6_apps_rt_index_lo; + u16 v6_apps_rt_index_hi; + u16 v6_rt_size; + u16 v6_rt_size_ddr; + u16 modem_hdr_ofst; + u16 modem_hdr_size; + u16 apps_hdr_ofst; + u16 apps_hdr_size; + u16 apps_hdr_size_ddr; + u16 modem_hdr_proc_ctx_ofst; + u16 
modem_hdr_proc_ctx_size; + u16 apps_hdr_proc_ctx_ofst; + u16 apps_hdr_proc_ctx_size; + u16 apps_hdr_proc_ctx_size_ddr; + u16 modem_comp_decomp_ofst; + u16 modem_comp_decomp_size; + u16 modem_ofst; + u16 modem_size; + u16 apps_v4_flt_ofst; + u16 apps_v4_flt_size; + u16 apps_v6_flt_ofst; + u16 apps_v6_flt_size; + u16 uc_info_ofst; + u16 uc_info_size; + u16 end_ofst; + u16 apps_v4_rt_ofst; + u16 apps_v4_rt_size; + u16 apps_v6_rt_ofst; + u16 apps_v6_rt_size; +}; + +struct ipa_controller { + struct ipa_mem_partition mem_partition; + u32 ipa_clk_rate_turbo; + u32 ipa_clk_rate_nominal; + u32 ipa_clk_rate_svs; + u32 clock_scaling_bw_threshold_turbo; + u32 clock_scaling_bw_threshold_nominal; + u32 ipa_reg_base_ofst; + u32 max_holb_tmr_val; + void (*ipa_sram_read_settings)(void); + int (*ipa_init_sram)(void); + int (*ipa_init_hdr)(void); + int (*ipa_init_rt4)(void); + int (*ipa_init_rt6)(void); + int (*ipa_init_flt4)(void); + int (*ipa_init_flt6)(void); + void (*ipa_cfg_ep_hdr)(u32 pipe_number, + const struct ipa_ep_cfg_hdr *ipa_ep_hdr_cfg); + int (*ipa_cfg_ep_hdr_ext)(u32 pipe_number, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_hdr_ext_cfg); + void (*ipa_cfg_ep_aggr)(u32 pipe_number, + const struct ipa_ep_cfg_aggr *ipa_ep_agrr_cfg); + int (*ipa_cfg_ep_deaggr)(u32 pipe_index, + const struct ipa_ep_cfg_deaggr *ep_deaggr); + void (*ipa_cfg_ep_nat)(u32 pipe_number, + const struct ipa_ep_cfg_nat *ipa_ep_nat_cfg); + void (*ipa_cfg_ep_mode)(u32 pipe_number, u32 dst_pipe_number, + const struct ipa_ep_cfg_mode *ep_mode); + void (*ipa_cfg_ep_route)(u32 pipe_index, u32 rt_tbl_index); + void (*ipa_cfg_ep_holb)(u32 pipe_index, + const struct ipa_ep_cfg_holb *ep_holb); + void (*ipa_cfg_route)(struct ipa_route *route); + int (*ipa_read_gen_reg)(char *buff, int max_len); + int (*ipa_read_ep_reg)(char *buff, int max_len, int pipe); + void (*ipa_write_dbg_cnt)(int option); + int (*ipa_read_dbg_cnt)(char *buf, int max_len); + void (*ipa_cfg_ep_status)(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ep_status); + int (*ipa_commit_flt)(enum ipa_ip_type ip); + int (*ipa_commit_rt)(enum ipa_ip_type ip); + int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); + int (*ipa_commit_hdr)(void); + void (*ipa_cfg_ep_cfg)(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *cfg); + void (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask); + void (*ipa_enable_clks)(void); + void (*ipa_disable_clks)(void); + struct msm_bus_scale_pdata *msm_bus_data_ptr; + + void (*ipa_cfg_ep_metadata)(u32 pipe_number, + const struct ipa_ep_cfg_metadata *metadata); +}; + +extern struct ipa_context *ipa_ctx; + +/* public APIs */ +/* + * Connect / Disconnect + */ +int ipa2_connect(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl); +int ipa2_disconnect(u32 clnt_hdl); + +/* + * Resume / Suspend + */ +int ipa2_reset_endpoint(u32 clnt_hdl); + +/* + * Remove ep delay + */ +int ipa2_clear_endpoint_delay(u32 clnt_hdl); + +/* + * Disable ep + */ +int ipa2_disable_endpoint(u32 clnt_hdl); + +/* + * Configuration + */ +int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + +int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg); + +int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + +int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + +int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg); + +int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct 
ipa_ep_cfg_aggr *ipa_ep_cfg); + +int ipa2_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + +int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); + +int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + +int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg); + +int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); + +/* + * Header removal / addition + */ +int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs); + +int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user); + +int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls); + +int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); + +int ipa2_commit_hdr(void); + +int ipa2_reset_hdr(bool user_only); + +int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup); + +int ipa2_put_hdr(u32 hdr_hdl); + +int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy); + +/* + * Header Processing Context + */ +int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); + +int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); + +int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user); + +/* + * Routing + */ +int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); + +int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + +int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); + +int ipa2_commit_rt(enum ipa_ip_type ip); + +int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only); + +int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); + +int ipa2_put_rt_tbl(u32 rt_tbl_hdl); + +int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in); + +int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); + +/* + * Filtering + */ +int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); + +int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + +int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); + +int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); + +int ipa2_commit_flt(enum ipa_ip_type ip); + +int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only); + +/* + * NAT + */ +int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); + +int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); + +int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); + +int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); + +/* + * Messaging + */ +int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); +int ipa2_resend_wlan_msg(void); +int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); +int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta); + +/* + * Interface + */ +int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); +int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); +int ipa2_deregister_intf(const char *name); + +/* + * Aggregation + */ +int ipa2_set_aggr_mode(enum ipa_aggr_mode mode); + +int ipa2_set_qcncm_ndp_sig(char sig[3]); + +int ipa2_set_single_ndp_per_mbim(bool enable); + +/* + * Data path + */ +int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +/* + * To 
transfer multiple data packets + * While passing the data descriptor list, the anchor node + * should be of type struct ipa_tx_data_desc not list_head + */ +int ipa2_tx_dp_mul(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + +void ipa2_free_skb(struct ipa_rx_data *data); + +/* + * System pipes + */ +int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl); + +int ipa2_teardown_sys_pipe(u32 clnt_hdl); + +int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status); + +int ipa2_sys_teardown(u32 clnt_hdl); + +int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl); + +int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); +int ipa2_disconnect_wdi_pipe(u32 clnt_hdl); +int ipa2_enable_wdi_pipe(u32 clnt_hdl); +int ipa2_disable_wdi_pipe(u32 clnt_hdl); +int ipa2_resume_wdi_pipe(u32 clnt_hdl); +int ipa2_suspend_wdi_pipe(u32 clnt_hdl); +int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); +u16 ipa2_get_smem_restr_bytes(void); +int ipa2_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes); +int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); +int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); +int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv); +void ipa2_ntn_uc_dereg_rdyCB(void); + +int ipa2_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify); +int ipa2_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); +int ipa2_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); +int ipa2_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); + +/* + * To retrieve doorbell physical address of + * wlan pipes + */ +int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); +/* + * To de-register uC ready callback + */ +int ipa2_uc_dereg_rdyCB(void); + +int ipa2_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova); +/* + * Tethering bridge (Rmnet / MBIM) + */ +int ipa2_teth_bridge_init(struct teth_bridge_init_params *params); + +int ipa2_teth_bridge_disconnect(enum ipa_client_type client); + +int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params); + +/* + * Tethering client info + */ +void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink); + +enum ipacm_client_enum ipa2_get_client(int pipe_idx); + +bool ipa2_get_client_uplink(int pipe_idx); + +int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats); + +int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota); + +/* + * IPADMA + */ +int ipa2_dma_init(void); + +int ipa2_dma_enable(void); + +int ipa2_dma_disable(void); + +int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len); + +int ipa2_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + +int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); + +void ipa2_dma_destroy(void); + +/* + * MHI APIs for IPA MHI client driver + */ +int 
ipa2_init_mhi(struct ipa_mhi_init_params *params); + +int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params); + +int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); + +int ipa2_disconnect_mhi_pipe(u32 clnt_hdl); + +bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client); + +int ipa2_disable_sps_pipe(enum ipa_client_type client); + +int ipa2_mhi_reset_channel_internal(enum ipa_client_type client); + +int ipa2_mhi_start_channel_internal(enum ipa_client_type client); + +int ipa2_mhi_suspend_ul_channels(void); + +int ipa2_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index); + +/* + * mux id + */ +int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in); + +/* + * interrupts + */ +int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + +int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt); + +/* + * Miscellaneous + */ +void ipa2_bam_reg_dump(void); + +int ipa2_get_ep_mapping(enum ipa_client_type client); + +bool ipa2_is_ready(void); + +void ipa2_proxy_clk_vote(void); +void ipa2_proxy_clk_unvote(void); + +bool ipa2_is_client_handle_valid(u32 clnt_hdl); + +enum ipa_client_type ipa2_get_client_mapping(int pipe_idx); + +enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx); + +bool ipa2_get_modem_cfg_emb_pipe_flt(void); + +/* internal functions */ + +int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl); + +int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc, + bool in_atomic); +int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc, + bool in_atomic); +int ipa2_get_ep_mapping(enum ipa_client_type client); + +int ipa_generate_hw_rule(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + u8 **buf, + u16 *en_rule); +int ipa_init_hw(void); +struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name); +int ipa_set_single_ndp_per_mbim(bool enable); +int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable); +void ipa_debugfs_init(void); +void ipa_debugfs_remove(void); + +void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size); + +void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time); + +#ifdef IPA_DEBUG +#define IPA_DUMP_BUFF(base, phy_base, size) \ + ipa_dump_buff_internal(base, phy_base, size) +#else +#define IPA_DUMP_BUFF(base, phy_base, size) +#endif +int ipa_controller_static_bind(struct ipa_controller *controller, + enum ipa_hw_type ipa_hw_type); +int ipa_cfg_route(struct ipa_route *route); +int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr); +int ipa_cfg_filter(u32 disable); +int ipa_pipe_mem_init(u32 start_ofst, u32 size); +int ipa_pipe_mem_alloc(u32 *ofst, u32 size); +int ipa_pipe_mem_free(u32 ofst, u32 size); +int ipa_straddle_boundary(u32 start, u32 end, u32 boundary); +struct ipa_context *ipa_get_ctx(void); +void ipa_enable_clks(void); +void ipa_disable_clks(void); +void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id); +void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx); +void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx); +int 
ipa2_active_clients_log_print_buffer(char *buf, int size); +int ipa2_active_clients_log_print_table(char *buf, int size); +void ipa2_active_clients_log_clear(void); +int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev); +int __ipa_del_rt_rule(u32 rule_hdl); +int __ipa_del_hdr(u32 hdr_hdl, bool by_user); +int __ipa_release_hdr(u32 hdr_hdl); +int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl); +int _ipa_read_gen_reg_v1_1(char *buff, int max_len); +int _ipa_read_gen_reg_v2_0(char *buff, int max_len); +int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe); +int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe); +void _ipa_write_dbg_cnt_v1_1(int option); +void _ipa_write_dbg_cnt_v2_0(int option); +int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len); +int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len); +void _ipa_enable_clks_v1_1(void); +void _ipa_enable_clks_v2_0(void); +void _ipa_disable_clks_v1_1(void); +void _ipa_disable_clks_v2_0(void); +void ipa_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data); + +static inline u32 ipa_read_reg(void __iomem *base, u32 offset) +{ + return ioread32(base + offset); +} + +static inline u32 ipa_read_reg_field(void __iomem *base, u32 offset, + u32 mask, u32 shift) +{ + return (ipa_read_reg(base, offset) & mask) >> shift; +} + +static inline void ipa_write_reg(void __iomem *base, u32 offset, u32 val) +{ + iowrite32(val, base + offset); +} + +ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos); +int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count); +int ipa_query_intf(struct ipa_ioc_query_intf *lookup); +int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx); +int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx); +int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext); + +void wwan_cleanup(void); + +int teth_bridge_driver_init(void); +void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data); + +int _ipa_init_sram_v2(void); +int _ipa_init_sram_v2_5(void); +int _ipa_init_sram_v2_6L(void); +int _ipa_init_hdr_v2(void); +int _ipa_init_hdr_v2_5(void); +int _ipa_init_hdr_v2_6L(void); +int _ipa_init_rt4_v2(void); +int _ipa_init_rt6_v2(void); +int _ipa_init_flt4_v2(void); +int _ipa_init_flt6_v2(void); + +int __ipa_commit_flt_v1_1(enum ipa_ip_type ip); +int __ipa_commit_flt_v2(enum ipa_ip_type ip); +int __ipa_commit_rt_v1_1(enum ipa_ip_type ip); +int __ipa_commit_rt_v2(enum ipa_ip_type ip); +int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); +int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); +int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); + +int __ipa_commit_hdr_v1_1(void); +int __ipa_commit_hdr_v2(void); +int __ipa_commit_hdr_v2_5(void); +int __ipa_commit_hdr_v2_6L(void); +int ipa_generate_flt_eq(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_attrib); +void ipa_skb_recycle(struct sk_buff *skb); +void ipa_install_dflt_flt_rules(u32 ipa_ep_idx); +void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx); + +int ipa_enable_data_path(u32 clnt_hdl); +int ipa_disable_data_path(u32 clnt_hdl); +int ipa2_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask); +int ipa2_disable_force_clear(u32 request_id); +int ipa_id_alloc(void *ptr); +void *ipa_id_find(u32 id); +void ipa_id_remove(u32 id); + +int 
ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); + +int ipa2_cfg_ep_status(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ipa_ep_cfg); +int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity); +int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity); + +int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name name); +int ipa2_suspend_resource_sync(enum ipa_rm_resource_name name); +int ipa2_resume_resource(enum ipa_rm_resource_name name); +bool ipa_should_pipe_be_suspended(enum ipa_client_type client); +int ipa_tag_aggr_force_close(int pipe_num); + +void ipa_active_clients_lock(void); +int ipa_active_clients_trylock(unsigned long *flags); +void ipa_active_clients_unlock(void); +void ipa_active_clients_trylock_unlock(unsigned long *flags); +int ipa2_wdi_init(void); +int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id); +int ipa_tag_process(struct ipa_desc *desc, int num_descs, + unsigned long timeout); + +int ipa_q6_pre_shutdown_cleanup(void); +int ipa_q6_post_shutdown_cleanup(void); +int ipa_init_q6_smem(void); +int ipa_q6_monitor_holb_mitigation(bool enable); + +int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect, + enum ipa_client_type ipa_client); + +int ipa_uc_interface_init(void); +int ipa_uc_reset_pipe(enum ipa_client_type ipa_client); +int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable); +int ipa2_uc_state_check(void); +int ipa_uc_loaded_check(void); +int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies); +void ipa_register_panic_hdlr(void); +void ipa_uc_register_handlers(enum ipa_hw_features feature, + struct ipa_uc_hdlrs *hdlrs); +int create_nat_device(void); +int ipa_uc_notify_clk_state(bool enabled); +void ipa_dma_async_memcpy_notify_cb(void *priv, + enum ipa_dp_evt_type evt, unsigned long data); + +int ipa_uc_update_hw_flags(u32 flags); + +int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)); +void ipa2_uc_mhi_cleanup(void); +int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd); +int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx); +int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection); +int ipa2_uc_mhi_reset_channel(int channelHandle); +int ipa2_uc_mhi_suspend_channel(int channelHandle); +int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected); +int ipa2_uc_mhi_stop_event_update_channel(int channelHandle); +int ipa2_uc_mhi_print_stats(char *dbg_buff, int size); +int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); +u32 ipa_get_num_pipes(void); +u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys); +struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(enum ipa_smmu_cb_type cb_type); +struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void); +struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void); +struct iommu_domain *ipa_get_uc_smmu_domain(void); +struct iommu_domain *ipa2_get_wlan_smmu_domain(void); +int ipa2_ap_suspend(struct device *dev); +int ipa2_ap_resume(struct device *dev); +struct iommu_domain *ipa2_get_smmu_domain(void); +struct iommu_domain *ipa2_get_uc_smmu_domain(void); +struct device *ipa2_get_dma_dev(void); +int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +void 
ipa_suspend_apps_pipes(bool suspend); +void ipa_update_repl_threshold(enum ipa_client_type ipa_client); +void ipa_flow_control(enum ipa_client_type ipa_client, bool enable, + uint32_t qmap_id); +int ipa2_restore_suspend_handler(void); +void ipa_sps_irq_control_all(bool enable); +void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client); +void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client); +int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +int ipa2_rx_poll(u32 clnt_hdl, int budget); +void ipa2_recycle_wan_skb(struct sk_buff *skb); +int ipa_ntn_init(void); +int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats); +int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), + void *user_data); +struct device *ipa2_get_pdev(void); +#endif /* _IPA_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c new file mode 100644 index 0000000000000000000000000000000000000000..ad4ffe81cdd17b496c9a3e1a6e78d1e9ca68b1d2 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2016, 2020, The Linux Foundation. All rights reserved. + */ +#include +#include "ipa_i.h" + +#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq" +#define IPA_IRQ_NUM_MAX 32 + +struct ipa_interrupt_info { + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + bool deferred_flag; +}; + +struct ipa_interrupt_work_wrap { + struct work_struct interrupt_work; + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + void *interrupt_data; +}; + +static struct ipa_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX]; +static struct workqueue_struct *ipa_interrupt_wq; +static u32 ipa_ee; + +static void ipa_interrupt_defer(struct work_struct *work); +static DECLARE_WORK(ipa_interrupt_defer_work, ipa_interrupt_defer); + +static int ipa2_irq_mapping[IPA_IRQ_MAX] = { + [IPA_BAD_SNOC_ACCESS_IRQ] = 0, + [IPA_EOT_COAL_IRQ] = 1, + [IPA_UC_IRQ_0] = 2, + [IPA_UC_IRQ_1] = 3, + [IPA_UC_IRQ_2] = 4, + [IPA_UC_IRQ_3] = 5, + [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6, + [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7, + [IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = 8, + [IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = 9, + [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 10, + [IPA_RX_ERR_IRQ] = 11, + [IPA_DEAGGR_ERR_IRQ] = 12, + [IPA_TX_ERR_IRQ] = 13, + [IPA_STEP_MODE_IRQ] = 14, + [IPA_PROC_ERR_IRQ] = 15, + [IPA_TX_SUSPEND_IRQ] = 16, + [IPA_TX_HOLB_DROP_IRQ] = 17, + [IPA_BAM_IDLE_IRQ] = 18, +}; + +static void deferred_interrupt_work(struct work_struct *work) +{ + struct ipa_interrupt_work_wrap *work_data = + container_of(work, + struct ipa_interrupt_work_wrap, + interrupt_work); + IPADBG("call handler from workq...\n"); + work_data->handler(work_data->interrupt, work_data->private_data, + work_data->interrupt_data); + kfree(work_data->interrupt_data); + kfree(work_data); +} + +static bool is_valid_ep(u32 ep_suspend_data) +{ + u32 bmsk = 1; + u32 i = 0; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if ((ep_suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) + return true; + bmsk = bmsk << 1; + } + return false; +} + +static int handle_interrupt(int irq_num, bool isr_context) +{ + struct ipa_interrupt_info interrupt_info; + struct ipa_interrupt_work_wrap *work_data; + u32 suspend_data; + void *interrupt_data = NULL; + struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL; + int res; + + 
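+	/*
+	 * Look up the callback registered for this IRQ bit via
+	 * ipa2_add_interrupt_handler(). For TX_SUSPEND interrupts the
+	 * suspended-endpoint bitmap is read from HW here and passed to the
+	 * handler as interrupt_data; processing is then done inline or
+	 * deferred to the interrupt workqueue.
+	 */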
interrupt_info = ipa_interrupt_to_cb[irq_num]; + if (interrupt_info.handler == NULL) { + IPAERR("A callback function wasn't set for interrupt num %d\n", + irq_num); + return -EINVAL; + } + + switch (interrupt_info.interrupt) { + case IPA_TX_SUSPEND_IRQ: + IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n"); + suspend_data = ipa_read_reg(ipa_ctx->mmio, + IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(ipa_ee)); + if (!is_valid_ep(suspend_data)) + return 0; + IPADBG_LOW("get interrupt %d\n", suspend_data); + suspend_interrupt_data = + kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC); + if (!suspend_interrupt_data) { + IPAERR("failed allocating suspend_interrupt_data\n"); + return -ENOMEM; + } + suspend_interrupt_data->endpoints = suspend_data; + interrupt_data = suspend_interrupt_data; + break; + default: + break; + } + + /* Force defer processing if in ISR context. */ + if (interrupt_info.deferred_flag || isr_context) { + work_data = kzalloc(sizeof(struct ipa_interrupt_work_wrap), + GFP_ATOMIC); + if (!work_data) { + IPAERR("failed allocating ipa_interrupt_work_wrap\n"); + res = -ENOMEM; + goto fail_alloc_work; + } + INIT_WORK(&work_data->interrupt_work, deferred_interrupt_work); + work_data->handler = interrupt_info.handler; + work_data->interrupt = interrupt_info.interrupt; + work_data->private_data = interrupt_info.private_data; + work_data->interrupt_data = interrupt_data; + queue_work(ipa_interrupt_wq, &work_data->interrupt_work); + + } else { + interrupt_info.handler(interrupt_info.interrupt, + interrupt_info.private_data, + interrupt_data); + kfree(interrupt_data); + } + + return 0; + +fail_alloc_work: + kfree(interrupt_data); + return res; +} + +static inline bool is_uc_irq(int irq_num) +{ + if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 && + ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3) + return true; + else + return false; +} + +static void ipa_process_interrupts(bool isr_context) +{ + u32 reg; + u32 bmsk; + u32 i = 0; + u32 en; + bool uc_irq; + + en = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee)); + reg = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_STTS_EE_n_ADDR(ipa_ee)); + IPADBG_LOW( + "ISR enter\n isr_ctx = %d EN reg = 0x%x STTS reg = 0x%x\n", + isr_context, en, reg); + while (en & reg) { + bmsk = 1; + for (i = 0; i < IPA_IRQ_NUM_MAX; i++) { + if (!(en & reg & bmsk)) { + bmsk = bmsk << 1; + continue; + } + uc_irq = is_uc_irq(i); + /* + * Clear uC interrupt before processing to avoid + * clearing unhandled interrupts + */ + if (uc_irq) + ipa_write_reg(ipa_ctx->mmio, + IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk); + + /* Process the interrupts */ + handle_interrupt(i, isr_context); + + /* + * Clear non uC interrupt after processing + * to avoid clearing interrupt data + */ + if (!uc_irq) + ipa_write_reg(ipa_ctx->mmio, + IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk); + + bmsk = bmsk << 1; + } + /* + * Check pending interrupts that may have + * been raised since last read + */ + reg = ipa_read_reg(ipa_ctx->mmio, + IPA_IRQ_STTS_EE_n_ADDR(ipa_ee)); + } + IPADBG_LOW("Exit\n"); +} + +static void ipa_interrupt_defer(struct work_struct *work) +{ + IPADBG_LOW("processing interrupts in wq\n"); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_process_interrupts(false); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG_LOW("Done\n"); +} + +static irqreturn_t ipa_isr(int irq, void *ctxt) +{ + unsigned long flags; + + IPADBG_LOW("Enter\n"); + /* defer interrupt handling in case IPA is not clocked on */ + if (ipa_active_clients_trylock(&flags) == 0) { + IPADBG("defer interrupt processing\n"); + 
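+		/*
+		 * ipa_interrupt_defer_work runs ipa_interrupt_defer(), which
+		 * takes an IPA clock vote before processing the interrupts.
+		 */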
queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work);
+		return IRQ_HANDLED;
+	}
+
+	if (ipa_ctx->ipa_active_clients.cnt == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work);
+		goto bail;
+	}
+
+	ipa_process_interrupts(true);
+	IPADBG_LOW("Exit\n");
+bail:
+	ipa_active_clients_trylock_unlock(&flags);
+	return IRQ_HANDLED;
+}
+/**
+ * ipa2_add_interrupt_handler() - Adds a handler for an interrupt type
+ * @interrupt: Interrupt type
+ * @handler: The handler to be added
+ * @deferred_flag: whether the handler processing should be deferred to
+ * a workqueue
+ * @private_data: the client's private data
+ *
+ * Adds a handler for an interrupt type and enables the corresponding bit
+ * in the IRQ_EN register; the associated interrupt will then be reported
+ * in the IRQ_STTS register.
+ */
+int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+
+	IPADBG_LOW("interrupt_enum(%d)\n", interrupt);
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa2_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+	ipa_interrupt_to_cb[irq_num].handler = handler;
+	ipa_interrupt_to_cb[irq_num].private_data = private_data;
+	ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+	val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+	IPADBG("read IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+	bmsk = 1 << irq_num;
+	val |= bmsk;
+	ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+	IPADBG_LOW("wrote IPA_IRQ_EN_EE_n_ADDR register. 
reg = %d\n", val); + return 0; +} + +/** + * ipa2_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ +int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + u32 val; + u32 bmsk; + int irq_num; + + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number %d\n", interrupt); + return -EINVAL; + } + + irq_num = ipa2_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt %d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + + kfree(ipa_interrupt_to_cb[irq_num].private_data); + ipa_interrupt_to_cb[irq_num].deferred_flag = false; + ipa_interrupt_to_cb[irq_num].handler = NULL; + ipa_interrupt_to_cb[irq_num].private_data = NULL; + ipa_interrupt_to_cb[irq_num].interrupt = -1; + + val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee)); + bmsk = 1 << irq_num; + val &= ~bmsk; + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val); + + return 0; +} + +/** + * ipa_interrupts_init() - Initialize the IPA interrupts framework + * @ipa_irq: The interrupt number to allocate + * @ee: Execution environment + * @ipa_dev: The basic device structure representing the IPA driver + * + * - Initialize the ipa_interrupt_to_cb array + * - Clear interrupts status + * - Register the ipa interrupt handler - ipa_isr + * - Enable apps processor wakeup by IPA interrupts + */ +int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) +{ + int idx; + u32 reg = 0xFFFFFFFF; + int res = 0; + + ipa_ee = ee; + for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) { + ipa_interrupt_to_cb[idx].deferred_flag = false; + ipa_interrupt_to_cb[idx].handler = NULL; + ipa_interrupt_to_cb[idx].private_data = NULL; + ipa_interrupt_to_cb[idx].interrupt = -1; + } + + ipa_interrupt_wq = create_singlethread_workqueue( + INTERRUPT_WORKQUEUE_NAME); + if (!ipa_interrupt_wq) { + IPAERR("workqueue creation failed\n"); + return -ENOMEM; + } + + /*Clearing interrupts status*/ + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg); + + res = request_irq(ipa_irq, (irq_handler_t) ipa_isr, + IRQF_TRIGGER_RISING, "ipa", ipa_dev); + if (res) { + IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq); + return -ENODEV; + } + IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); + + res = enable_irq_wake(ipa_irq); + if (res) + IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n", + ipa_irq, res); + else + IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq); + + return 0; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c new file mode 100644 index 0000000000000000000000000000000000000000..437e212f102f51bd2c31b2072b352d6e20a2f53e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2019, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include "ipa_i.h" +#include + +struct ipa_intf { + char name[IPA_RESOURCE_NAME_MAX]; + struct list_head link; + u32 num_tx_props; + u32 num_rx_props; + u32 num_ext_props; + struct ipa_ioc_tx_intf_prop *tx; + struct ipa_ioc_rx_intf_prop *rx; + struct ipa_ioc_ext_intf_prop *ext; + enum ipa_client_type excp_pipe; +}; + +struct ipa_push_msg { + struct ipa_msg_meta meta; + ipa_msg_free_fn callback; + void *buff; + struct list_head link; +}; + +struct ipa_pull_msg { + struct ipa_msg_meta meta; + ipa_msg_pull_fn callback; + struct list_head link; +}; + +/** + * ipa2_register_intf() - register "logical" interface + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * + * Register an interface and its tx and rx properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + return ipa2_register_intf_ext(name, tx, rx, NULL); +} + +/** + * ipa2_register_intf_ext() - register "logical" interface which has only + * extended properties + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * @ext: [in] EXT properties of the interface + * + * Register an interface and its tx, rx and ext properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + struct ipa_intf *intf; + u32 len; + + if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) { + IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name, + tx, rx, ext); + return -EINVAL; + } + + if (tx && tx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (rx && rx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (ext && ext->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + len = sizeof(struct ipa_intf); + intf = kzalloc(len, GFP_KERNEL); + if (intf == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + return -ENOMEM; + } + + strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX); + + if (tx) { + intf->num_tx_props = tx->num_props; + len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop); + intf->tx = kmemdup(tx->prop, len, GFP_KERNEL); + if (intf->tx == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->tx, tx->prop, len); + } + + if (rx) { + intf->num_rx_props = rx->num_props; + len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop); + intf->rx = kmemdup(rx->prop, len, GFP_KERNEL); + if (intf->rx == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->rx, rx->prop, len); + } + + if (ext) { + intf->num_ext_props = ext->num_props; + len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop); + 
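+		/* kmemdup() keeps a private copy of the caller's ext properties */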
intf->ext = kmemdup(ext->prop, len, GFP_KERNEL); + if (intf->ext == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + kfree(intf->rx); + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->ext, ext->prop, len); + } + + if (ext && ext->excp_pipe_valid) + intf->excp_pipe = ext->excp_pipe; + else + intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS; + + mutex_lock(&ipa_ctx->lock); + list_add_tail(&intf->link, &ipa_ctx->intf_list); + mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +/** + * ipa2_deregister_intf() - de-register previously registered logical interface + * @name: [in] interface name + * + * De-register a previously registered interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_deregister_intf(const char *name) +{ + struct ipa_intf *entry; + struct ipa_intf *next; + int result = -EINVAL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (name == NULL) { + IPAERR("invalid param name=%p\n", name); + return result; + } + + mutex_lock(&ipa_ctx->lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, name)) { + list_del(&entry->link); + kfree(entry->ext); + kfree(entry->rx); + kfree(entry->tx); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf() - query logical interface properties + * @lookup: [inout] interface name and number of properties + * + * Obtain the handle and number of tx and rx properties for the named + * interface, used as part of querying the tx and rx properties for + * configuration of various rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf(struct ipa_ioc_query_intf *lookup) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (lookup == NULL) { + IPAERR("invalid param lookup=%p\n", lookup); + return result; + } + + mutex_lock(&ipa_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, lookup->name)) { + lookup->num_tx_props = entry->num_tx_props; + lookup->num_rx_props = entry->num_rx_props; + lookup->num_ext_props = entry->num_ext_props; + lookup->excp_pipe = entry->excp_pipe; + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf_tx_props() - qeury TX props of an interface + * @tx: [inout] interface tx attributes + * + * Obtain the tx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (tx == NULL) { + IPAERR("invalid param tx=%p\n", tx); + return result; + } + + mutex_lock(&ipa_ctx->lock); + tx->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, tx->name)) { + /* add the entry check */ + if (entry->num_tx_props != tx->num_tx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_tx_props, + tx->num_tx_props); + mutex_unlock(&ipa_ctx->lock); + return result; + } + memcpy(tx->tx, entry->tx, entry->num_tx_props * + sizeof(struct ipa_ioc_tx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * 
ipa_query_intf_rx_props() - qeury RX props of an interface + * @rx: [inout] interface rx attributes + * + * Obtain the rx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (rx == NULL) { + IPAERR("invalid param rx=%p\n", rx); + return result; + } + + mutex_lock(&ipa_ctx->lock); + rx->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, rx->name)) { + /* add the entry check */ + if (entry->num_rx_props != rx->num_rx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_rx_props, + rx->num_rx_props); + mutex_unlock(&ipa_ctx->lock); + return result; + } + memcpy(rx->rx, entry->rx, entry->num_rx_props * + sizeof(struct ipa_ioc_rx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf_ext_props() - qeury EXT props of an interface + * @ext: [inout] interface ext attributes + * + * Obtain the ext properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (ext == NULL) { + IPAERR("invalid param ext=%p\n", ext); + return result; + } + + mutex_lock(&ipa_ctx->lock); + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, ext->name)) { + /* add the entry check */ + if (entry->num_ext_props != ext->num_ext_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_ext_props, + ext->num_ext_props); + mutex_unlock(&ipa_ctx->lock); + return result; + } + memcpy(ext->ext, entry->ext, entry->num_ext_props * + sizeof(struct ipa_ioc_ext_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +static void ipa2_send_msg_free(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff) +{ + struct ipa_push_msg *msg_dup; + struct ipa_wlan_msg_ex *event_ex_cur_con = NULL; + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_wlan_msg *event_ex_cur_discon = NULL; + void *data_dup = NULL; + struct ipa_push_msg *entry; + struct ipa_push_msg *next; + int cnt = 0, total = 0, max = 0; + uint8_t mac[IPA_MAC_ADDR_SIZE]; + uint8_t mac2[IPA_MAC_ADDR_SIZE]; + + if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) { + /* debug print */ + event_ex_cur_con = buff; + for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) { + if (event_ex_cur_con->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n", + event_ex_cur_con->attribs[cnt].u.mac_addr[0], + event_ex_cur_con->attribs[cnt].u.mac_addr[1], + event_ex_cur_con->attribs[cnt].u.mac_addr[2], + event_ex_cur_con->attribs[cnt].u.mac_addr[3], + event_ex_cur_con->attribs[cnt].u.mac_addr[4], + event_ex_cur_con->attribs[cnt].u.mac_addr[5], + meta->msg_type); + } + } + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + msg_dup = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg_dup == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg_dup->meta = *meta; + if (meta->msg_len > 0 && buff) { + data_dup = kmemdup(buff, 
meta->msg_len, GFP_KERNEL); + if (data_dup == NULL) { + IPAERR("fail to alloc data_dup container\n"); + kfree(msg_dup); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data_dup, buff, meta->msg_len); + msg_dup->buff = data_dup; + msg_dup->callback = ipa2_send_msg_free; + } + list_add_tail(&msg_dup->link, &ipa_ctx->msg_wlan_client_list); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + } + + /* remove the cache */ + if (meta->msg_type == WLAN_CLIENT_DISCONNECT) { + /* debug print */ + event_ex_cur_discon = buff; + IPADBG("Mac %02x:%02x:%02x:%02x:%02x:%02x,msg %d\n", + event_ex_cur_discon->mac_addr[0], + event_ex_cur_discon->mac_addr[1], + event_ex_cur_discon->mac_addr[2], + event_ex_cur_discon->mac_addr[3], + event_ex_cur_discon->mac_addr[4], + event_ex_cur_discon->mac_addr[5], + meta->msg_type); + memcpy(mac2, + event_ex_cur_discon->mac_addr, + sizeof(mac2)); + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, + &ipa_ctx->msg_wlan_client_list, + link) { + event_ex_list = entry->buff; + max = event_ex_list->num_of_attribs; + for (cnt = 0; cnt < max; cnt++) { + memcpy(mac, + event_ex_list->attribs[cnt].u.mac_addr, + sizeof(mac)); + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + pr_debug("%02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5]); + + /* compare to delete one*/ + if (memcmp(mac2, + mac, + sizeof(mac)) == 0) { + IPADBG("clean %d\n", total); + list_del(&entry->link); + kfree(entry); + break; + } + } + } + total++; + } + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + } + return 0; +} + +/** + * ipa2_send_msg() - Send "message" from kernel client to IPA driver + * @meta: [in] message meta-data + * @buff: [in] the payload for message + * @callback: [in] free callback + * + * Client supplies the message meta-data and payload which IPA driver buffers + * till read by user-space. After read from user space IPA driver invokes the + * callback supplied to free the message payload. Client must not touch/free + * the message payload after calling this API. 
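+ *
+ * A minimal usage sketch (illustrative only; the free callback and the
+ * client_mac buffer are hypothetical and not part of this driver):
+ *
+ *	struct ipa_msg_meta meta = { 0 };
+ *	struct ipa_wlan_msg *wlan_msg;
+ *
+ *	wlan_msg = kzalloc(sizeof(*wlan_msg), GFP_KERNEL);
+ *	if (!wlan_msg)
+ *		return -ENOMEM;
+ *	memcpy(wlan_msg->mac_addr, client_mac, IPA_MAC_ADDR_SIZE);
+ *	meta.msg_type = WLAN_CLIENT_DISCONNECT;
+ *	meta.msg_len = sizeof(*wlan_msg);
+ *	if (ipa2_send_msg(&meta, wlan_msg, wlan_msg_free_cb))
+ *		kfree(wlan_msg);	/* on failure the caller still owns it */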
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + struct ipa_push_msg *msg; + void *data = NULL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (meta == NULL || (buff == NULL && callback != NULL) || + (buff != NULL && callback == NULL)) { + IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n", + meta, buff, callback); + return -EINVAL; + } + + if (meta->msg_type >= IPA_EVENT_MAX_NUM) { + IPAERR_RL("unsupported message type %d\n", meta->msg_type); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + return -ENOMEM; + } + + msg->meta = *meta; + if (meta->msg_len > 0 && buff) { + data = kmemdup(buff, meta->msg_len, GFP_KERNEL); + if (data == NULL) { + IPAERR("fail to alloc data container\n"); + kfree(msg); + return -ENOMEM; + } + msg->buff = data; + msg->callback = ipa2_send_msg_free; + } + + mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, &ipa_ctx->msg_list); + /* support for softap client event cache */ + if (wlan_msg_process(meta, buff)) + IPAERR("wlan_msg_process failed\n"); + + /* unlock only after process */ + mutex_unlock(&ipa_ctx->msg_lock); + IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]); + + wake_up(&ipa_ctx->msg_waitq); + if (buff) + callback(buff, meta->msg_len, meta->msg_type); + + return 0; +} + +/** + * ipa2_resend_wlan_msg() - Resend cached "message" to IPACM + * + * resend wlan client connect events to user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_resend_wlan_msg(void) +{ + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_push_msg *entry; + struct ipa_push_msg *next; + int cnt = 0, total = 0; + struct ipa_push_msg *msg; + void *data = NULL; + + IPADBG("\n"); + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->msg_wlan_client_list, + link) { + + event_ex_list = entry->buff; + for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) { + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%d-Mac %02x:%02x:%02x:%02x:%02x:%02x\n", + total, + event_ex_list->attribs[cnt].u.mac_addr[0], + event_ex_list->attribs[cnt].u.mac_addr[1], + event_ex_list->attribs[cnt].u.mac_addr[2], + event_ex_list->attribs[cnt].u.mac_addr[3], + event_ex_list->attribs[cnt].u.mac_addr[4], + event_ex_list->attribs[cnt].u.mac_addr[5]); + } + } + + msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->meta = entry->meta; + data = kmemdup(entry->buff, entry->meta.msg_len, GFP_KERNEL); + if (data == NULL) { + IPAERR("fail to alloc data container\n"); + kfree(msg); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->buff = data; + msg->callback = ipa2_send_msg_free; + mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, &ipa_ctx->msg_list); + mutex_unlock(&ipa_ctx->msg_lock); + wake_up(&ipa_ctx->msg_waitq); + + total++; + } + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return 0; +} + +/** + * ipa2_register_pull_msg() - register pull message type + * @meta: [in] message meta-data + * @callback: [in] pull callback + * + * Register 
message callback by kernel client with IPA driver for IPA driver to + * pull message on-demand. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback) +{ + struct ipa_pull_msg *msg; + + if (meta == NULL || callback == NULL) { + IPAERR("invalid param meta=%p callback=%p\n", meta, callback); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa_pull_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + return -ENOMEM; + } + + msg->meta = *meta; + msg->callback = callback; + + mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, &ipa_ctx->pull_msg_list); + mutex_unlock(&ipa_ctx->msg_lock); + + return 0; +} + +/** + * ipa2_deregister_pull_msg() - De-register pull message type + * @meta: [in] message meta-data + * + * De-register "message" by kernel client from IPA driver + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + struct ipa_pull_msg *entry; + struct ipa_pull_msg *next; + int result = -EINVAL; + + if (meta == NULL) { + IPAERR("invalid param name=%p\n", meta); + return result; + } + + mutex_lock(&ipa_ctx->msg_lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + list_del(&entry->link); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->msg_lock); + return result; +} + +/** + * ipa_read() - read message from IPA device + * @filp: [in] file pointer + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * @f_pos: [inout] file position + * + * Uer-space should continually read from /dev/ipa, read wll block when there + * are no messages to read. Upon return, user-space should read the ipa_msg_meta + * from the start of the buffer to know what type of message was read and its + * length in the remainder of the buffer. 
Buffer supplied must be big enough to + * hold the message meta-data and the largest defined message type + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + char __user *start; + struct ipa_push_msg *msg = NULL; + int ret; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int locked; + + start = buf; + + add_wait_queue(&ipa_ctx->msg_waitq, &wait); + while (1) { + mutex_lock(&ipa_ctx->msg_lock); + locked = 1; + if (!list_empty(&ipa_ctx->msg_list)) { + msg = list_first_entry(&ipa_ctx->msg_list, + struct ipa_push_msg, link); + list_del(&msg->link); + } + + if (msg) { + IPADBG("msg=%pK\n", msg); + locked = 0; + mutex_unlock(&ipa_ctx->msg_lock); + if (count < sizeof(struct ipa_msg_meta)) { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + if (copy_to_user(buf, &msg->meta, + sizeof(struct ipa_msg_meta))) { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + buf += sizeof(struct ipa_msg_meta); + count -= sizeof(struct ipa_msg_meta); + if (msg->buff) { + if (count >= msg->meta.msg_len) { + if (copy_to_user(buf, msg->buff, + msg->meta.msg_len)) { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + } else { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + buf += msg->meta.msg_len; + count -= msg->meta.msg_len; + msg->callback(msg->buff, msg->meta.msg_len, + msg->meta.msg_type); + } + IPA_STATS_INC_CNT( + ipa_ctx->stats.msg_r[msg->meta.msg_type]); + kfree(msg); + msg = NULL; + } + + ret = -EAGAIN; + if (filp->f_flags & O_NONBLOCK) + break; + + ret = -EINTR; + if (signal_pending(current)) + break; + + if (start != buf) + break; + + locked = 0; + mutex_unlock(&ipa_ctx->msg_lock); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + + remove_wait_queue(&ipa_ctx->msg_waitq, &wait); + if (start != buf && ret != -EFAULT) + ret = buf - start; + + if (locked) + mutex_unlock(&ipa_ctx->msg_lock); + + return ret; +} + +/** + * ipa_pull_msg() - pull the specified message from client + * @meta: [in] message meta-data + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * + * Populate the supplied buffer with the pull message which is fetched + * from client, the message must have previously been registered with + * the IPA driver + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count) +{ + struct ipa_pull_msg *entry; + int result = -EINVAL; + + if (meta == NULL || buff == NULL || !count) { + IPAERR_RL("invalid param name=%p buff=%p count=%zu\n", + meta, buff, count); + return result; + } + + mutex_lock(&ipa_ctx->msg_lock); + list_for_each_entry(entry, &ipa_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + result = entry->callback(buff, count, meta->msg_type); + break; + } + } + mutex_unlock(&ipa_ctx->msg_lock); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c new file mode 100644 index 0000000000000000000000000000000000000000..72f4b20069f49b535e0a71357bb76876d8ed2e23 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_qmi_service.h" + +#define IPA_MHI_DRV_NAME "ipa_mhi" +#define IPA_MHI_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_ERR(fmt, args...) \ + do { \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_FUNC_ENTRY() \ + IPA_MHI_DBG_LOW("ENTRY\n") +#define IPA_MHI_FUNC_EXIT() \ + IPA_MHI_DBG_LOW("EXIT\n") + + +bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client) +{ + u32 pipe_idx; + bool pending; + + pipe_idx = ipa2_get_ep_mapping(client); + if (sps_pipe_pending_desc(ipa_ctx->bam_handle, + pipe_idx, &pending)) { + IPA_MHI_ERR("sps_pipe_pending_desc failed\n"); + WARN_ON(1); + return false; + } + + return !pending; +} + +int ipa2_disable_sps_pipe(enum ipa_client_type client) +{ + int ipa_ep_index; + int res; + + ipa_ep_index = ipa2_get_ep_mapping(client); + + res = sps_pipe_disable(ipa_ctx->bam_handle, ipa_ep_index); + if (res) { + IPA_MHI_ERR("sps_pipe_disable fail %d\n", res); + return res; + } + + return 0; +} + +int ipa2_mhi_reset_channel_internal(enum ipa_client_type client) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_disable_data_path(ipa2_get_ep_mapping(client)); + if (res) { + IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +int ipa2_mhi_start_channel_internal(enum ipa_client_type client) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_enable_data_path(ipa2_get_ep_mapping(client)); + if (res) { + IPA_MHI_ERR("ipa_enable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + if (ipa2_uc_state_check()) { + IPA_MHI_ERR("IPA uc is not loaded\n"); + return -EAGAIN; + } + + /* Initialize IPA MHI engine */ + res = ipa_uc_mhi_init_engine(params->uC.msi, params->uC.mmio_addr, + params->uC.host_ctrl_addr, params->uC.host_data_addr, + params->uC.first_ch_idx, params->uC.first_er_idx); + if (res) { + IPA_MHI_ERR("failed to start MHI engine %d\n", res); + goto fail_init_engine; + } + + /* Update UL/DL sync if valid */ + res = ipa2_uc_mhi_send_dl_ul_sync_info( + params->uC.ipa_cached_dl_ul_sync_info); + if (res) { + IPA_MHI_ERR("failed to update ul/dl sync %d\n", res); + goto fail_init_engine; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_init_engine: + return res; +} + +/** + * ipa2_connect_mhi_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel start. 
+ * This function is called after MHI engine was started. + * This function is doing the following: + * - Send command to uC to start corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + int res; + + IPA_MHI_FUNC_ENTRY(); + + if (!in || !clnt_hdl) { + IPA_MHI_ERR("NULL args\n"); + return -EINVAL; + } + + if (in->sys->client >= IPA_CLIENT_MAX) { + IPA_MHI_ERR("bad parm client:%d\n", in->sys->client); + return -EINVAL; + } + + ipa_ep_idx = ipa2_get_ep_mapping(in->sys->client); + if (ipa_ep_idx == -1) { + IPA_MHI_ERR("Invalid client.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n", + in->sys->client, in->start.uC.index, in->start.uC.id); + + if (ep->valid == 1) { + IPA_MHI_ERR("EP already allocated.\n"); + goto fail_ep_exists; + } + + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); + ep->valid = 1; + ep->skip_ep_cfg = in->sys->skip_ep_cfg; + ep->client = in->sys->client; + ep->client_notify = in->sys->notify; + ep->priv = in->sys->priv; + ep->keep_ipa_awake = in->sys->keep_ipa_awake; + + /* start channel in uC */ + if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_INVALID) { + IPA_MHI_DBG("Initializing channel\n"); + res = ipa_uc_mhi_init_channel(ipa_ep_idx, in->start.uC.index, + in->start.uC.id, + (IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2)); + if (res) { + IPA_MHI_ERR("init_channel failed %d\n", res); + goto fail_init_channel; + } + } else if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_MHI_DBG("Starting channel\n"); + res = ipa_uc_mhi_resume_channel(in->start.uC.index, false); + if (res) { + IPA_MHI_ERR("init_channel failed %d\n", res); + goto fail_init_channel; + } + } else { + IPA_MHI_ERR("Invalid channel state %d\n", in->start.uC.state); + goto fail_init_channel; + } + + res = ipa_enable_data_path(ipa_ep_idx); + if (res) { + IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res, + ipa_ep_idx); + goto fail_enable_dp; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_ep_cfg; + } + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_ep_cfg; + } + IPA_MHI_DBG("ep configuration successful\n"); + } else { + IPA_MHI_DBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys->client)) + ipa_install_dflt_flt_rules(ipa_ep_idx); + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys->client, + ipa_ep_idx); + + IPA_MHI_FUNC_EXIT(); + + return 0; + +fail_ep_cfg: + ipa_disable_data_path(ipa_ep_idx); +fail_enable_dp: + ipa_uc_mhi_reset_channel(in->start.uC.index); +fail_init_channel: + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); +fail_ep_exists: + return -EPERM; +} + +/** + * ipa2_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. 
+ * This function is doing the following: + * - Send command to uC to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa2_disconnect_mhi_pipe(u32 clnt_hdl) +{ + IPA_MHI_FUNC_ENTRY(); + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes) { + IPAERR("invalid handle %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("pipe was not connected %d\n", clnt_hdl); + return -EINVAL; + } + + ipa_ctx->ep[clnt_hdl].valid = 0; + + ipa_delete_dflt_flt_rules(clnt_hdl); + + IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); + IPA_MHI_FUNC_EXIT(); + return 0; +} + +int ipa2_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected); + if (res) { + IPA_MHI_ERR("failed to suspend channel %u error %d\n", + index, res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA MHI driver"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c new file mode 100644 index 0000000000000000000000000000000000000000..eafec59782436d798c5bae1e7a5efda155a2fbd9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c @@ -0,0 +1,877 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define IPA_NAT_PHYS_MEM_OFFSET 0 +#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE + +#define IPA_NAT_SYSTEM_MEMORY 0 +#define IPA_NAT_SHARED_MEMORY 1 +#define IPA_NAT_TEMP_MEM_SIZE 128 + +enum nat_table_type { + IPA_NAT_BASE_TBL = 0, + IPA_NAT_EXPN_TBL = 1, + IPA_NAT_INDX_TBL = 2, + IPA_NAT_INDEX_EXPN_TBL = 3, +}; + +#define NAT_TABLE_ENTRY_SIZE_BYTE 32 +#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4 + +/* + * Max NAT table entries is limited 1000 entries. 
+ * Limit the memory size required by user to prevent kernel memory starvation + */ +#define IPA_TABLE_MAX_ENTRIES 1000 +#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE) + +static int ipa_nat_vma_fault_remap(struct vm_fault *vmf) +{ + IPADBG("\n"); + vmf->page = NULL; + + return VM_FAULT_SIGBUS; +} + +/* VMA related file operations functions */ +static const struct vm_operations_struct ipa_nat_remap_vm_ops = { + .fault = ipa_nat_vma_fault_remap, +}; + +static int ipa_nat_open(struct inode *inode, struct file *filp) +{ + struct ipa_nat_mem *nat_ctx; + + IPADBG("\n"); + nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev); + filp->private_data = nat_ctx; + IPADBG("return\n"); + + return 0; +} + +static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data; + unsigned long phys_addr; + int result; + + mutex_lock(&nat_ctx->lock); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (nat_ctx->is_sys_mem) { + IPADBG("Mapping system memory\n"); + if (nat_ctx->is_mapped) { + IPAERR("mapping already exists, only 1 supported\n"); + result = -EINVAL; + goto bail; + } + IPADBG("map sz=0x%zx\n", nat_ctx->size); + result = + dma_mmap_coherent( + ipa_ctx->pdev, vma, + nat_ctx->vaddr, nat_ctx->dma_handle, + nat_ctx->size); + + if (result) { + IPAERR("unable to map memory. Err:%d\n", result); + goto bail; + } + ipa_ctx->nat_mem.nat_base_address = nat_ctx->vaddr; + } else { + IPADBG("Mapping shared(local) memory\n"); + IPADBG("map sz=0x%lx\n", vsize); + + if ((IPA_NAT_PHYS_MEM_SIZE == 0) || + (vsize > IPA_NAT_PHYS_MEM_SIZE)) { + result = -EINVAL; + goto bail; + } + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST(IPA_NAT_PHYS_MEM_OFFSET); + + if (remap_pfn_range( + vma, vma->vm_start, + phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) { + IPAERR("remap failed\n"); + result = -EAGAIN; + goto bail; + } + ipa_ctx->nat_mem.nat_base_address = (void *)vma->vm_start; + } + nat_ctx->is_mapped = true; + vma->vm_ops = &ipa_nat_remap_vm_ops; + IPADBG("return\n"); + result = 0; +bail: + mutex_unlock(&nat_ctx->lock); + return result; +} + +static const struct file_operations ipa_nat_fops = { + .owner = THIS_MODULE, + .open = ipa_nat_open, + .mmap = ipa_nat_mmap +}; + +/** + * allocate_temp_nat_memory() - Allocates temp nat memory + * + * Called during nat table delete + */ +static void allocate_temp_nat_memory(void) +{ + struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + + nat_ctx->tmp_vaddr = + dma_alloc_coherent(ipa_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE, + &nat_ctx->tmp_dma_handle, gfp_flags); + + if (nat_ctx->tmp_vaddr == NULL) { + IPAERR("Temp Memory alloc failed\n"); + nat_ctx->is_tmp_mem = false; + return; + } + + nat_ctx->is_tmp_mem = true; + IPADBG("IPA NAT allocated temp memory successfully\n"); +} + +/** + * create_nat_device() - Create the NAT device + * + * Called during ipa init to create nat device + * + * Returns: 0 on success, negative on failure + */ +int create_nat_device(void) +{ + struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); + int result; + + IPADBG("\n"); + + mutex_lock(&nat_ctx->lock); + nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME); + if (IS_ERR(nat_ctx->class)) { + IPAERR("unable to create the class\n"); + result = -ENODEV; + goto vaddr_alloc_fail; + } + result = 
alloc_chrdev_region(&nat_ctx->dev_num, + 0, + 1, + NAT_DEV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err.\n"); + result = -ENODEV; + goto alloc_chrdev_region_fail; + } + + nat_ctx->dev = + device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx, + "%s", NAT_DEV_NAME); + + if (IS_ERR(nat_ctx->dev)) { + IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev)); + result = -ENODEV; + goto device_create_fail; + } + + cdev_init(&nat_ctx->cdev, &ipa_nat_fops); + nat_ctx->cdev.owner = THIS_MODULE; + nat_ctx->cdev.ops = &ipa_nat_fops; + + result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1); + if (result) { + IPAERR("cdev_add err=%d\n", -result); + goto cdev_add_fail; + } + IPADBG("ipa nat dev added successful. major:%d minor:%d\n", + MAJOR(nat_ctx->dev_num), + MINOR(nat_ctx->dev_num)); + + nat_ctx->is_dev = true; + allocate_temp_nat_memory(); + IPADBG("IPA NAT device created successfully\n"); + result = 0; + goto bail; + +cdev_add_fail: + device_destroy(nat_ctx->class, nat_ctx->dev_num); +device_create_fail: + unregister_chrdev_region(nat_ctx->dev_num, 1); +alloc_chrdev_region_fail: + class_destroy(nat_ctx->class); +vaddr_alloc_fail: + if (nat_ctx->vaddr) { + IPADBG("Releasing system memory\n"); + dma_free_coherent( + ipa_ctx->pdev, nat_ctx->size, + nat_ctx->vaddr, nat_ctx->dma_handle); + nat_ctx->vaddr = NULL; + nat_ctx->dma_handle = 0; + nat_ctx->size = 0; + } + +bail: + mutex_unlock(&nat_ctx->lock); + + return result; +} + +/** + * ipa2_allocate_nat_device() - Allocates memory for the NAT device + * @mem: [in/out] memory parameters + * + * Called by NAT client driver to allocate memory for the NAT entries. Based on + * the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + int result; + + IPADBG("passed memory size %zu\n", mem->size); + + mutex_lock(&nat_ctx->lock); + if (strcmp(mem->dev_name, NAT_DEV_NAME)) { + IPAERR_RL("Nat device name mismatch\n"); + IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name); + result = -EPERM; + goto bail; + } + + if (!nat_ctx->is_dev) { + IPAERR("Nat device not created successfully during boot up\n"); + result = -EPERM; + goto bail; + } + + if (nat_ctx->is_dev_init) { + IPAERR("Device already init\n"); + result = 0; + goto bail; + } + + if (mem->size > MAX_ALLOC_NAT_SIZE) { + IPAERR("Trying allocate more size = %zu, Max allowed = %d\n", + mem->size, MAX_ALLOC_NAT_SIZE); + result = -EPERM; + goto bail; + } + + if (mem->size <= 0 || + nat_ctx->is_dev_init) { + IPAERR_RL("Invalid Parameters or device is already init\n"); + result = -EPERM; + goto bail; + } + + if (mem->size > IPA_NAT_PHYS_MEM_SIZE) { + IPADBG("Allocating system memory\n"); + nat_ctx->is_sys_mem = true; + nat_ctx->vaddr = + dma_alloc_coherent(ipa_ctx->pdev, mem->size, + &nat_ctx->dma_handle, gfp_flags); + if (nat_ctx->vaddr == NULL) { + IPAERR("memory alloc failed\n"); + result = -ENOMEM; + goto bail; + } + nat_ctx->size = mem->size; + } else { + IPADBG("using shared(local) memory\n"); + nat_ctx->is_sys_mem = false; + } + + nat_ctx->is_dev_init = true; + IPADBG("IPA NAT dev init successfully\n"); + result = 0; + +bail: + mutex_unlock(&nat_ctx->lock); + + return result; +} + +/* IOCTL function handlers */ +/** + * ipa2_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW + * @init: [in] initialization command attributes + * + * Called by NAT 
client driver to post IP_V4_NAT_INIT command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) +{ +#define TBL_ENTRY_SIZE 32 +#define INDX_TBL_ENTRY_SIZE 4 + + struct ipa_register_write *reg_write_nop; + struct ipa_desc desc[2]; + struct ipa_ip_v4_nat_init *cmd; + u16 size = sizeof(struct ipa_ip_v4_nat_init); + int result; + u32 offset = 0; + size_t tmp; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mutex_lock(&ipa_ctx->nat_mem.lock); + + if (!ipa_ctx->nat_mem.is_dev_init) { + IPAERR_RL("Nat table not initialized\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + IPADBG("\n"); + if (init->table_entries == 0) { + IPADBG("Table entries is zero\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->ipv4_rules_offset > + (UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Table Entry offset is not + * beyond allocated size + */ + tmp = init->ipv4_rules_offset + + (TBL_ENTRY_SIZE * (init->table_entries + 1)); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->ipv4_rules_offset, (init->table_entries + 1), + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->expn_rules_offset > + UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Expn Table Entry offset is not + * beyond allocated size + */ + tmp = init->expn_rules_offset + + (TBL_ENTRY_SIZE * init->expn_table_entries); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Expn Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->expn_rules_offset, init->expn_table_entries, + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->index_offset > + UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Indx Table Entry offset is not + * beyond allocated size + */ + tmp = init->index_offset + + (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1)); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Indx Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->index_offset, (init->table_entries + 1), + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->index_expn_offset > + (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Expn Table entry offset is not + * beyond allocated size + */ + tmp = init->index_expn_offset + + (INDX_TBL_ENTRY_SIZE * init->expn_table_entries); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Indx Expn Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->index_expn_offset, init->expn_table_entries, + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + 
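+	/*
+	 * Illustrative arithmetic for the checks above (the numbers are
+	 * hypothetical, not taken from any particular target): with
+	 * table_entries = 100 and ipv4_rules_offset = 0 the base table
+	 * spans TBL_ENTRY_SIZE * (100 + 1) = 3232 bytes, so a 4096 byte
+	 * nat_mem.size passes; an offset of 1024 gives 1024 + 3232 = 4256,
+	 * which exceeds 4096 and is rejected with -EPERM. The expansion
+	 * and index tables are validated the same way with their own
+	 * entry sizes (TBL_ENTRY_SIZE and INDX_TBL_ENTRY_SIZE).
+	 */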
memset(&desc, 0, sizeof(desc)); + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + desc[0].opcode = IPA_REGISTER_WRITE; + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].pyld = (void *)reg_write_nop; + desc[0].len = sizeof(*reg_write_nop); + + cmd = kmalloc(size, flag); + if (!cmd) { + IPAERR("Failed to alloc immediate command object\n"); + result = -ENOMEM; + goto free_nop; + } + if (ipa_ctx->nat_mem.vaddr) { + IPADBG("using system memory for nat table\n"); + cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY; + cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY; + cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY; + cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY; + + offset = UINT_MAX - ipa_ctx->nat_mem.dma_handle; + + if ((init->ipv4_rules_offset > offset) || + (init->expn_rules_offset > offset) || + (init->index_offset > offset) || + (init->index_expn_offset > offset)) { + IPAERR_RL("Failed due to integer overflow\n"); + IPAERR_RL("nat.mem.dma_handle: 0x%pa\n", + &ipa_ctx->nat_mem.dma_handle); + IPAERR_RL("ipv4_rules_offset: 0x%x\n", + init->ipv4_rules_offset); + IPAERR_RL("expn_rules_offset: 0x%x\n", + init->expn_rules_offset); + IPAERR_RL("index_offset: 0x%x\n", + init->index_offset); + IPAERR_RL("index_expn_offset: 0x%x\n", + init->index_expn_offset); + result = -EPERM; + goto free_mem; + } + cmd->ipv4_rules_addr = + ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset; + IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset); + + cmd->ipv4_expansion_rules_addr = + ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset; + IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset); + + cmd->index_table_addr = + ipa_ctx->nat_mem.dma_handle + init->index_offset; + IPADBG("index_offset:0x%x\n", init->index_offset); + + cmd->index_table_expansion_addr = + ipa_ctx->nat_mem.dma_handle + init->index_expn_offset; + IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset); + } else { + IPADBG("using shared(local) memory for nat table\n"); + cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY; + cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY; + cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY; + cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY; + + cmd->ipv4_rules_addr = init->ipv4_rules_offset + + IPA_RAM_NAT_OFST; + + cmd->ipv4_expansion_rules_addr = init->expn_rules_offset + + IPA_RAM_NAT_OFST; + + cmd->index_table_addr = init->index_offset + + IPA_RAM_NAT_OFST; + + cmd->index_table_expansion_addr = init->index_expn_offset + + IPA_RAM_NAT_OFST; + } + cmd->table_index = init->tbl_index; + IPADBG("Table index:0x%x\n", cmd->table_index); + cmd->size_base_tables = init->table_entries; + IPADBG("Base Table size:0x%x\n", cmd->size_base_tables); + cmd->size_expansion_tables = init->expn_table_entries; + IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables); + cmd->public_ip_addr = init->ip_addr; + IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr); + desc[1].opcode = IPA_IP_V4_NAT_INIT; + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].pyld = (void *)cmd; + desc[1].len = size; + IPADBG("posting v4 init command\n"); + if (ipa_send_cmd(2, desc)) { + IPAERR_RL("Fail to 
send immediate command\n"); + result = -EPERM; + goto free_mem; + } + + ipa_ctx->nat_mem.public_ip_addr = init->ip_addr; + IPADBG("Table ip address:0x%x", ipa_ctx->nat_mem.public_ip_addr); + + ipa_ctx->nat_mem.ipv4_rules_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset; + IPADBG("ipv4_rules_addr: 0x%p\n", + ipa_ctx->nat_mem.ipv4_rules_addr); + + ipa_ctx->nat_mem.ipv4_expansion_rules_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->expn_rules_offset; + IPADBG("ipv4_expansion_rules_addr: 0x%p\n", + ipa_ctx->nat_mem.ipv4_expansion_rules_addr); + + ipa_ctx->nat_mem.index_table_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->index_offset; + IPADBG("index_table_addr: 0x%p\n", + ipa_ctx->nat_mem.index_table_addr); + + ipa_ctx->nat_mem.index_table_expansion_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->index_expn_offset; + IPADBG("index_table_expansion_addr: 0x%p\n", + ipa_ctx->nat_mem.index_table_expansion_addr); + + IPADBG("size_base_tables: %d\n", init->table_entries); + ipa_ctx->nat_mem.size_base_tables = init->table_entries; + + IPADBG("size_expansion_tables: %d\n", init->expn_table_entries); + ipa_ctx->nat_mem.size_expansion_tables = init->expn_table_entries; + + IPADBG("return\n"); + result = 0; +free_mem: + kfree(cmd); +free_nop: + kfree(reg_write_nop); +bail: + mutex_unlock(&ipa_ctx->nat_mem.lock); + return result; +} + +/** + * ipa2_nat_dma_cmd() - Post NAT_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT client driver to post NAT_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ +#define NUM_OF_DESC 2 + + struct ipa_register_write *reg_write_nop = NULL; + struct ipa_nat_dma *cmd = NULL; + struct ipa_desc *desc = NULL; + u16 size = 0, cnt = 0; + int ret = 0; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + if (!ipa_ctx->nat_mem.is_dev_init) { + IPAERR_RL("Nat table not initialized\n"); + return -EPERM; + } + + IPADBG("\n"); + if (dma->entries <= 0) { + IPAERR_RL("Invalid number of commands %d\n", + dma->entries); + ret = -EPERM; + goto bail; + } + + for (cnt = 0; cnt < dma->entries; cnt++) { + if (dma->dma[cnt].table_index >= 1) { + IPAERR_RL("Invalid table index %d\n", + dma->dma[cnt].table_index); + ret = -EPERM; + goto bail; + } + + switch (dma->dma[cnt].base_addr) { + case IPA_NAT_BASE_TBL: + if (dma->dma[cnt].offset >= + (ipa_ctx->nat_mem.size_base_tables + 1) * + NAT_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_EXPN_TBL: + if (dma->dma[cnt].offset >= + ipa_ctx->nat_mem.size_expansion_tables * + NAT_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_INDX_TBL: + if (dma->dma[cnt].offset >= + (ipa_ctx->nat_mem.size_base_tables + 1) * + NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_INDEX_EXPN_TBL: + if (dma->dma[cnt].offset >= + ipa_ctx->nat_mem.size_expansion_tables * + NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + default: + IPAERR_RL("Invalid base_addr %d\n", + dma->dma[cnt].base_addr); + ret = -EPERM; + goto bail; + } + } + + size = sizeof(struct ipa_desc) * NUM_OF_DESC; + desc = kzalloc(size, GFP_KERNEL); + if (desc == NULL) { + IPAERR("Failed to alloc memory\n"); + ret = -ENOMEM; + goto bail; + } + + size = sizeof(struct ipa_nat_dma); + cmd = kzalloc(size, flag); + if (cmd == NULL) { + IPAERR("Failed to alloc memory\n"); + ret = -ENOMEM; + goto bail; + } + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("Failed to alloc memory\n"); + ret = -ENOMEM; + goto bail; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].opcode = IPA_REGISTER_WRITE; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].len = sizeof(*reg_write_nop); + desc[0].pyld = (void *)reg_write_nop; + + for (cnt = 0; cnt < dma->entries; cnt++) { + cmd->table_index = dma->dma[cnt].table_index; + cmd->base_addr = dma->dma[cnt].base_addr; + cmd->offset = dma->dma[cnt].offset; + cmd->data = dma->dma[cnt].data; + + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].opcode = IPA_NAT_DMA; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].len = sizeof(struct ipa_nat_dma); + desc[1].pyld = (void *)cmd; + + ret = ipa_send_cmd(NUM_OF_DESC, desc); + if (ret == -EPERM) + IPAERR("Fail to send immediate command %d\n", cnt); + } + +bail: + kfree(cmd); + + kfree(desc); + + kfree(reg_write_nop); + + return ret; +} + +/** + * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device + * @nat_ctx: [in] the IPA NAT memory to free + * + * Called by NAT client driver to free the NAT memory and remove the device + */ +static void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx) +{ + IPADBG("\n"); + mutex_lock(&nat_ctx->lock); + + if (nat_ctx->is_sys_mem) { + IPADBG("freeing the dma memory\n"); + dma_free_coherent( + ipa_ctx->pdev, nat_ctx->size, + nat_ctx->vaddr, 
nat_ctx->dma_handle); + nat_ctx->size = 0; + nat_ctx->vaddr = NULL; + } + nat_ctx->is_mapped = false; + nat_ctx->is_sys_mem = false; + nat_ctx->is_dev_init = false; + + mutex_unlock(&nat_ctx->lock); + IPADBG("return\n"); +} + +/** + * ipa2_nat_del_cmd() - Delete a NAT table + * @del: [in] delete table table table parameters + * + * Called by NAT client driver to delete the nat table + * + * Returns: 0 on success, negative on failure + */ +int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + struct ipa_register_write *reg_write_nop; + struct ipa_desc desc[2]; + struct ipa_ip_v4_nat_init *cmd; + u16 size = sizeof(struct ipa_ip_v4_nat_init); + u8 mem_type = IPA_NAT_SHARED_MEMORY; + u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET; + int result; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + if (!ipa_ctx->nat_mem.is_dev_init) { + IPAERR_RL("Nat table not initialized\n"); + return -EPERM; + } + + if (!ipa_ctx->nat_mem.public_ip_addr) { + IPAERR_RL("Public IP addr not assigned and trying to delete\n"); + return -EPERM; + } + + IPADBG("\n"); + if (ipa_ctx->nat_mem.is_tmp_mem) { + IPAERR("using temp memory during nat del\n"); + mem_type = IPA_NAT_SYSTEM_MEMORY; + base_addr = ipa_ctx->nat_mem.tmp_dma_handle; + } + + memset(&desc, 0, sizeof(desc)); + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + desc[0].opcode = IPA_REGISTER_WRITE; + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].pyld = (void *)reg_write_nop; + desc[0].len = sizeof(*reg_write_nop); + + cmd = kmalloc(size, flag); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + result = -ENOMEM; + goto free_nop; + } + cmd->table_index = del->table_index; + cmd->ipv4_rules_addr = base_addr; + cmd->ipv4_rules_addr_type = mem_type; + cmd->ipv4_expansion_rules_addr = base_addr; + cmd->ipv4_expansion_rules_addr_type = mem_type; + cmd->index_table_addr = base_addr; + cmd->index_table_addr_type = mem_type; + cmd->index_table_expansion_addr = base_addr; + cmd->index_table_expansion_addr_type = mem_type; + cmd->size_base_tables = 0; + cmd->size_expansion_tables = 0; + cmd->public_ip_addr = 0; + + desc[1].opcode = IPA_IP_V4_NAT_INIT; + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].pyld = (void *)cmd; + desc[1].len = size; + if (ipa_send_cmd(2, desc)) { + IPAERR("Fail to send immediate command\n"); + result = -EPERM; + goto free_mem; + } + + ipa_ctx->nat_mem.size_base_tables = 0; + ipa_ctx->nat_mem.size_expansion_tables = 0; + ipa_ctx->nat_mem.public_ip_addr = 0; + ipa_ctx->nat_mem.ipv4_rules_addr = NULL; + ipa_ctx->nat_mem.ipv4_expansion_rules_addr = NULL; + ipa_ctx->nat_mem.index_table_addr = NULL; + ipa_ctx->nat_mem.index_table_expansion_addr = NULL; + + ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem); + IPADBG("return\n"); + result = 0; +free_mem: + kfree(cmd); +free_nop: + kfree(reg_write_nop); +bail: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c new file mode 100644 index 0000000000000000000000000000000000000000..d322deebda4fa3246a32691a493f225c37b6f855 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c @@ -0,0 +1,1234 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipa_qmi_service.h" +#include "ipa_ram_mmap.h" +#include "../ipa_common_i.h" + +#define IPA_Q6_SVC_VERS 1 +#define IPA_A5_SVC_VERS 1 +#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ) + +#define IPA_A5_SERVICE_SVC_ID 0x31 +#define IPA_A5_SERVICE_INS_ID 1 +#define IPA_Q6_SERVICE_SVC_ID 0x31 +#define IPA_Q6_SERVICE_INS_ID 2 + +#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000 +#define QMI_SEND_REQ_TIMEOUT_MS 60000 + +#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000 + +static struct qmi_handle *ipa_svc_handle; +static struct workqueue_struct *ipa_clnt_req_workqueue; +static bool qmi_modem_init_fin, qmi_indication_fin; +static uint32_t ipa_wan_platform; +struct ipa_qmi_context *ipa_qmi_ctx; +static bool first_time_handshake; +static atomic_t workqueues_stopped; +static atomic_t ipa_qmi_initialized; +struct mutex ipa_qmi_lock; + +struct ipa_msg_desc { + uint16_t msg_id; + int max_msg_len; + struct qmi_elem_info *ei_array; +}; + +/* QMI A5 service */ + +static void handle_indication_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_indication_reg_req_msg_v01 *indication_req; + struct ipa_indication_reg_resp_msg_v01 resp; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + int rc; + + indication_req = (struct ipa_indication_reg_req_msg_v01 *)decoded_msg; + IPAWANDBG("Received INDICATION Request\n"); + + /* cache the client sq */ + memcpy(&ipa_qmi_ctx->client_sq, sq, sizeof(*sq)); + + memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INDICATION_REGISTER_RESP_V01, + QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01, + ipa_indication_reg_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) { + IPAWANERR("send response for Indication register failed\n"); + return; + } + + qmi_indication_fin = true; + /* check if need sending indication to modem */ + if (qmi_modem_init_fin) { + IPAWANDBG("send indication to modem (%d)\n", + qmi_modem_init_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_indication(qmi_handle, + &(ipa_qmi_ctx->client_sq), + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + ipa_master_driver_init_complt_ind_msg_data_v01_ei, + &ind); + + if (rc < 0) { + IPAWANERR("send indication failed\n"); + qmi_indication_fin = false; + } + } else { + IPAWANERR("not send indication\n"); + } +} + + +static void handle_install_filter_rule_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_install_fltr_rule_req_msg_v01 *rule_req; + struct ipa_install_fltr_rule_resp_msg_v01 resp; + uint32_t rule_hdl[MAX_NUM_Q6_RULE]; + int rc = 0, i; + + rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)decoded_msg; + memset(rule_hdl, 0, sizeof(rule_hdl)); + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + IPAWANDBG("Received install filter Request\n"); + + rc = copy_ul_filter_rule_to_ipa((struct + ipa_install_fltr_rule_req_msg_v01*)decoded_msg, rule_hdl); + if (rc) { + IPAWANERR("copy UL rules from modem is failed\n"); + 
return; + } + + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + if (rule_req->filter_spec_list_valid == true) { + resp.filter_handle_list_valid = true; + if (rule_req->filter_spec_list_len > MAX_NUM_Q6_RULE) { + resp.filter_handle_list_len = MAX_NUM_Q6_RULE; + IPAWANERR("installed (%d) max Q6-UL rules ", + MAX_NUM_Q6_RULE); + IPAWANERR("but modem gives total (%u)\n", + rule_req->filter_spec_list_len); + } else { + resp.filter_handle_list_len = + rule_req->filter_spec_list_len; + } + } else { + resp.filter_handle_list_valid = false; + } + + /* construct UL filter rules response to Modem*/ + for (i = 0; i < resp.filter_handle_list_len; i++) { + resp.filter_handle_list[i].filter_spec_identifier = + rule_req->filter_spec_list[i].filter_spec_identifier; + resp.filter_handle_list[i].filter_handle = rule_hdl[i]; + } + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INSTALL_FILTER_RULE_RESP_V01, + QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01, + ipa_install_fltr_rule_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("install filter rules failed\n"); + else + IPAWANDBG("Replied to install filter request\n"); +} + +static void handle_filter_installed_notify_req( + struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + int rc = 0; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + IPAWANDBG("Received filter_install_notify Request\n"); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01, + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01, + ipa_fltr_installed_notif_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("handle filter rules failed\n"); + else + IPAWANDBG("Responsed filter_install_notify Request\n"); +} + +static void handle_ipa_config_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_config_resp_msg_v01 resp; + int rc; + + memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + IPAWANDBG("Received IPA CONFIG Request\n"); + rc = ipa_mhi_handle_ipa_config_req( + (struct ipa_config_req_msg_v01 *)decoded_msg); + if (rc) { + IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc); + resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; + } + IPAWANDBG("qmi_snd_rsp: result %d, err %d\n", + resp.resp.result, resp.resp.error); + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_CONFIG_RESP_V01, + QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01, + ipa_config_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("QMI_IPA_CONFIG_RESP_V01 failed\n"); + else + IPAWANDBG("Responsed QMI_IPA_CONFIG_RESP_V01\n"); +} + +static void ipa_a5_svc_disconnect_cb(struct qmi_handle *qmi, + unsigned int node, unsigned int port) +{ + IPAWANDBG_LOW("Received QMI client disconnect\n"); +} + +/****************************************************/ +/* QMI A5 client ->Q6 */ +/****************************************************/ +static void ipa_q6_clnt_svc_arrive(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_svc_arrive, ipa_q6_clnt_svc_arrive); +static void ipa_q6_clnt_svc_exit(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_svc_exit, ipa_q6_clnt_svc_exit); +/* Test client port for IPC Router */ +static struct qmi_handle *ipa_q6_clnt; + +static int ipa_check_qmi_response(int rc, + int req_id, + enum 
ipa_qmi_result_type_v01 result, + enum ipa_qmi_error_type_v01 error, + char *resp_type) +{ + if (rc < 0) { + if (rc == -ETIMEDOUT && ipa_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Timeout for qmi request id %d\n", req_id); + return rc; + } + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR( + "SSR while waiting for qmi request id %d\n", req_id); + return rc; + } + IPAWANERR("Error sending qmi request id %d, rc = %d\n", + req_id, rc); + return rc; + } + if (result != IPA_QMI_RESULT_SUCCESS_V01 && + ipa_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Got bad response %d from request id %d (error %d)\n", + req_id, result, error); + return result; + } + IPAWANDBG_LOW("Received %s successfully\n", resp_type); + return 0; +} + +static int ipa_qmi_send_req_wait(struct qmi_handle *client_handle, + struct ipa_msg_desc *req_desc, void *req, + struct ipa_msg_desc *resp_desc, void *resp, + unsigned long timeout_ms) +{ + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(client_handle, &txn, resp_desc->ei_array, resp); + + if (ret < 0) { + IPAWANERR("QMI txn init failed, ret= %d\n", ret); + return ret; + } + + ret = qmi_send_request(client_handle, + &ipa_qmi_ctx->server_sq, + &txn, + req_desc->msg_id, + req_desc->max_msg_len, + req_desc->ei_array, + req); + + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + ret = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms)); + + return ret; +} + +static int qmi_init_modem_send_sync_msg(void) +{ + struct ipa_init_modem_driver_req_msg_v01 req; + struct ipa_init_modem_driver_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + u16 smem_restr_bytes = ipa2_get_smem_restr_bytes(); + + memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01)); + + req.platform_type_valid = true; + req.platform_type = ipa_wan_platform; + + req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0); + req.hdr_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes; + req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) + + smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1; + + req.v4_route_tbl_info_valid = true; + req.v4_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v4_rt_ofst) + + smem_restr_bytes; + req.v4_route_tbl_info.num_indices = IPA_MEM_PART(v4_modem_rt_index_hi); + req.v6_route_tbl_info_valid = true; + + req.v6_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v6_rt_ofst) + + smem_restr_bytes; + req.v6_route_tbl_info.num_indices = IPA_MEM_PART(v6_modem_rt_index_hi); + + req.v4_filter_tbl_start_addr_valid = true; + req.v4_filter_tbl_start_addr = + IPA_MEM_PART(v4_flt_ofst) + smem_restr_bytes; + + req.v6_filter_tbl_start_addr_valid = true; + req.v6_filter_tbl_start_addr = + IPA_MEM_PART(v6_flt_ofst) + smem_restr_bytes; + + req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0); + req.modem_mem_info.block_start_addr = + IPA_MEM_PART(modem_ofst) + smem_restr_bytes; + req.modem_mem_info.size = IPA_MEM_PART(modem_size); + + req.ctrl_comm_dest_end_pt_valid = true; + req.ctrl_comm_dest_end_pt = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + + req.hdr_proc_ctx_tbl_info_valid = + (IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0); + req.hdr_proc_ctx_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes; + req.hdr_proc_ctx_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + + IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1; + + req.zip_tbl_info_valid = 
(IPA_MEM_PART(modem_comp_decomp_size) != 0); + req.zip_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes; + req.zip_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_comp_decomp_ofst) + + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1; + + if (!ipa_uc_loaded_check()) { /* First time boot */ + req.is_ssr_bootup_valid = false; + req.is_ssr_bootup = 0; + } else { /* After SSR boot */ + req.is_ssr_bootup_valid = true; + req.is_ssr_bootup = 1; + } + + IPAWANDBG("platform_type %d\n", req.platform_type); + IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n", + req.hdr_tbl_info.modem_offset_start); + IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n", + req.hdr_tbl_info.modem_offset_end); + IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n", + req.v4_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v4_route_tbl_info.num_indices %d\n", + req.v4_route_tbl_info.num_indices); + IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n", + req.v6_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v6_route_tbl_info.num_indices %d\n", + req.v6_route_tbl_info.num_indices); + IPAWANDBG("v4_filter_tbl_start_addr %d\n", + req.v4_filter_tbl_start_addr); + IPAWANDBG("v6_filter_tbl_start_addr %d\n", + req.v6_filter_tbl_start_addr); + IPAWANDBG("modem_mem_info.block_start_addr %d\n", + req.modem_mem_info.block_start_addr); + IPAWANDBG("modem_mem_info.size %d\n", + req.modem_mem_info.size); + IPAWANDBG("ctrl_comm_dest_end_pt %d\n", + req.ctrl_comm_dest_end_pt); + IPAWANDBG("is_ssr_bootup %d\n", + req.is_ssr_bootup); + + req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01; + req_desc.ei_array = ipa_init_modem_driver_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01; + resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei; + + pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, + rc); + return rc; + } + + pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n"); + return ipa_check_qmi_response(rc, + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_init_modem_driver_resp_msg_v01"); +} + +/* sending filter-install-request to modem*/ +int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + struct ipa_install_fltr_rule_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + int i; + + /* check if modem up */ + if (!qmi_indication_fin || + !qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + + /* check if the filter rules from IPACM is valid */ + if (req->filter_spec_list_len == 0) { + IPAWANDBG("IPACM pass zero rules to Q6\n"); + } else { + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->filter_spec_list_len); + } + + if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) { + IPAWANDBG( + "IPACM passes the number of filtering rules exceed limit\n"); + return -EINVAL; + } else if (req->source_pipe_index_valid != 0) { + IPAWANDBG( + "IPACM passes source_pipe_index_valid not zero 0 != %d\n", + req->source_pipe_index_valid); + return -EINVAL; + } else if (req->source_pipe_index >= 
ipa_ctx->ipa_num_pipes) { + IPAWANDBG( + "IPACM passed an invalid source pipe index: %d\n", + req->source_pipe_index); + return -EINVAL; + } + for (i = 0; i < req->filter_spec_list_len; i++) { + if ((req->filter_spec_list[i].ip_type != + QMI_IPA_IP_TYPE_V4_V01) && + (req->filter_spec_list[i].ip_type != + QMI_IPA_IP_TYPE_V6_V01)) + return -EINVAL; + if (req->filter_spec_list[i].is_mux_id_valid == false) + return -EINVAL; + if (req->filter_spec_list[i].is_routing_table_index_valid + == false) + return -EINVAL; + if ((req->filter_spec_list[i].filter_action <= + QMI_IPA_FILTER_ACTION_INVALID_V01) || + (req->filter_spec_list[i].filter_action > + QMI_IPA_FILTER_ACTION_EXCEPTION_V01)) + return -EINVAL; + } + mutex_lock(&ipa_qmi_lock); + if (ipa_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[ + ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg]), + req, + sizeof(struct ipa_install_fltr_rule_req_msg_v01)); + ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg++; + ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= MAX_NUM_QMI_RULE_CACHE; + } + mutex_unlock(&ipa_qmi_lock); + + req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01; + req_desc.ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01; + resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, + req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc = %d\n", + QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + rc); + return rc; + } + + return ipa_check_qmi_response(rc, + QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_install_filter"); +} + + +int qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_enable_force_clear_datapath_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req || !req->source_pipe_bitmask) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + req_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = ipa_enable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(resp)); + resp_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa_enable_force_clear_datapath_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + &resp_desc, &resp, + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send Req %d failed, rc = %d\n", + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01, + rc); + return rc; + } + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("enable force clear datapath failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +int qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_disable_force_clear_datapath_resp_msg_v01 
resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + req_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = + ipa_disable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(resp)); + resp_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa_disable_force_clear_datapath_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + &resp_desc, &resp, + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send Req %d failed, rc = %d\n", + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01, + rc); + return rc; + } + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("disable force clear datapath failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +/* sending filter-installed-notify-request to modem */ +int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0, i = 0; + + /* check if the filter-installed notification from IPACM is valid */ + if (req->filter_index_list_len == 0) { + IPAWANDBG(" delete UL filter rule for pipe %d\n", + req->source_pipe_index); + } else if (req->filter_index_list_len > QMI_IPA_MAX_FILTERS_V01) { + IPAWANERR(" UL filter rule for pipe %d exceeds max (%u)\n", + req->source_pipe_index, + req->filter_index_list_len); + return -EINVAL; + } else if (req->filter_index_list[0].filter_index == 0 && + req->source_pipe_index != + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD)) { + IPAWANERR(" got wrong index for pipe %d\n", + req->source_pipe_index); + for (i = 0; i < req->filter_index_list_len; i++) + IPAWANERR(" [%d] handle %d index %d\n", + i, + req->filter_index_list[i].filter_handle, + req->filter_index_list[i].filter_index); + return -EINVAL; + } + + if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR(" UL filter rule for pipe %d install_status = %d\n", + req->source_pipe_index, req->install_status); + return -EINVAL; + } else if (req->source_pipe_index >= ipa_ctx->ipa_num_pipes) { + IPAWANERR("IPACM passed an invalid source pipe index: %d\n", + req->source_pipe_index); + return -EINVAL; + } else if (((req->embedded_pipe_index_valid != true) || + (req->embedded_call_mux_id_valid != true)) && + ((req->embedded_pipe_index_valid != false) || + (req->embedded_call_mux_id_valid != false))) { + IPAWANERR( + "IPACM passed inconsistent embedded pipe and mux id valid flags\n"); + return -EINVAL; + } else if (req->embedded_pipe_index >= ipa_ctx->ipa_num_pipes) { + IPAWANERR("IPACM passed an invalid embedded pipe index: %d\n", + req->embedded_pipe_index); + return -EINVAL; + } + + mutex_lock(&ipa_qmi_lock); + if (ipa_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[ + ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]), + req, + sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++; + ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= MAX_NUM_QMI_RULE_CACHE; + } + mutex_unlock(&ipa_qmi_lock); + req_desc.max_msg_len = + 
QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01; + req_desc.ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01; + resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + rc); + return rc; + } + + return ipa_check_qmi_response(rc, + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_fltr_installed_notif_resp"); +} + +static void ipa_q6_clnt_quota_reached_ind_cb(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct ipa_data_usage_quota_reached_ind_msg_v01 *qmi_ind; + + if (handle != ipa_q6_clnt) { + IPAWANERR("Wrong client\n"); + return; + } + + qmi_ind = (struct ipa_data_usage_quota_reached_ind_msg_v01 *) data; + + IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n", + qmi_ind->apn.mux_id, + (unsigned long) qmi_ind->apn.num_Mbytes); + ipa_broadcast_quota_reach_ind(qmi_ind->apn.mux_id, + IPA_UPSTEAM_MODEM); +} + +static void ipa_q6_clnt_svc_arrive(struct work_struct *work) +{ + int rc; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + + rc = kernel_connect(ipa_q6_clnt->sock, + (struct sockaddr *) &ipa_qmi_ctx->server_sq, + sizeof(ipa_qmi_ctx->server_sq), + 0); + + if (rc < 0) { + IPAWANERR("Couldnt connect Server\n"); + return; + } + + IPAWANDBG("Q6 QMI service available now\n"); + /* Initialize modem IPA-driver */ + IPAWANDBG("send qmi_init_modem_send_sync_msg to modem\n"); + rc = qmi_init_modem_send_sync_msg(); + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR("qmi_init_modem_send_sync_msg failed due to SSR!\n"); + /* Cleanup will take place when ipa_wwan_remove is called */ + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; + return; + } + if (rc != 0) { + IPAWANERR("qmi_init_modem_send_sync_msg failed\n"); + /* + * This is a very unexpected scenario, which requires a kernel + * panic in order to force dumps for QMI/Q6 side analysis. 
+ */ + ipa_assert(); + return; + } + qmi_modem_init_fin = true; + + /* In cold-bootup, first_time_handshake = false */ + ipa_q6_handshake_complete(first_time_handshake); + first_time_handshake = true; + + IPAWANDBG("complete, qmi_modem_init_fin : %d\n", + qmi_modem_init_fin); + + if (qmi_indication_fin) { + IPAWANDBG("send indication to modem (%d)\n", + qmi_indication_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_indication(ipa_svc_handle, + &ipa_qmi_ctx->client_sq, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + ipa_master_driver_init_complt_ind_msg_data_v01_ei, + &ind); + + IPAWANDBG("ipa_qmi_service_client good\n"); + } else { + IPAWANERR("not send indication (%d)\n", + qmi_indication_fin); + } +} + + +static void ipa_q6_clnt_svc_exit(struct work_struct *work) +{ + + if (ipa_qmi_ctx != NULL) { + ipa_qmi_ctx->server_sq.sq_family = 0; + ipa_qmi_ctx->server_sq.sq_node = 0; + ipa_qmi_ctx->server_sq.sq_port = 0; + } +} + +static int ipa_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, + struct qmi_service *service) +{ + IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", + service->service, service->version, service->instance, + service->node, service->port); + + if (ipa_qmi_ctx != NULL) { + ipa_qmi_ctx->server_sq.sq_family = AF_QIPCRTR; + ipa_qmi_ctx->server_sq.sq_node = service->node; + ipa_qmi_ctx->server_sq.sq_port = service->port; + } + if (!atomic_read(&workqueues_stopped)) { + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_arrive, 0); + } + return 0; +} + +static void ipa_q6_clnt_svc_event_notify_net_reset(struct qmi_handle *qmi) +{ + if (!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_exit, 0); +} + +static void ipa_q6_clnt_svc_event_notify_svc_exit(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", svc->service, + svc->version, svc->instance, svc->node, svc->port); + + if (!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_exit, 0); +} + +static struct qmi_ops server_ops = { + .del_client = ipa_a5_svc_disconnect_cb, +}; + +static struct qmi_ops client_ops = { + .new_server = ipa_q6_clnt_svc_event_notify_svc_new, + .del_server = ipa_q6_clnt_svc_event_notify_svc_exit, + .net_reset = ipa_q6_clnt_svc_event_notify_net_reset, +}; + +static struct qmi_msg_handler server_handlers[] = { + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01, + .ei = ipa_indication_reg_req_msg_data_v01_ei, + .decoded_size = sizeof(struct ipa_indication_reg_req_msg_v01), + .fn = handle_indication_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + .ei = ipa_install_fltr_rule_req_msg_data_v01_ei, + .decoded_size = sizeof( + struct ipa_install_fltr_rule_req_msg_v01), + .fn = handle_install_filter_rule_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + .ei = ipa_fltr_installed_notif_req_msg_data_v01_ei, + .decoded_size = sizeof( + struct ipa_fltr_installed_notif_req_msg_v01), + .fn = handle_filter_installed_notify_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_CONFIG_REQ_V01, + .ei = ipa_config_req_msg_data_v01_ei, + .decoded_size = sizeof(struct ipa_config_req_msg_v01), + .fn = handle_ipa_config_req, + }, +}; + +static struct qmi_msg_handler client_handlers[] = 
{ + { + .type = QMI_INDICATION, + .msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01, + .ei = ipa_data_usage_quota_reached_ind_msg_data_v01_ei, + .decoded_size = sizeof( + struct ipa_data_usage_quota_reached_ind_msg_v01), + .fn = ipa_q6_clnt_quota_reached_ind_cb, + }, +}; + +static void ipa_qmi_service_init_worker(void) +{ + int rc; + + /* Initialize QMI-service*/ + IPAWANDBG("IPA A7 QMI init OK :>>>>\n"); + + /* start the QMI msg cache */ + ipa_qmi_ctx = vzalloc(sizeof(*ipa_qmi_ctx)); + if (!ipa_qmi_ctx) { + IPAWANERR(":kzalloc err.\n"); + return; + } + ipa_qmi_ctx->modem_cfg_emb_pipe_flt = + ipa2_get_modem_cfg_emb_pipe_flt(); + + ipa_svc_handle = vzalloc(sizeof(*ipa_svc_handle)); + if (!ipa_svc_handle) + goto destroy_ipa_A7_svc_wq; + + rc = qmi_handle_init(ipa_svc_handle, + QMI_IPA_MAX_MSG_LEN, + &server_ops, + server_handlers); + + if (rc < 0) { + IPAWANERR("Initializing ipa_a5 svc failed %d\n", rc); + goto destroy_qmi_handle; + } + + rc = qmi_add_server(ipa_svc_handle, + IPA_A5_SERVICE_SVC_ID, + IPA_A5_SVC_VERS, + IPA_A5_SERVICE_INS_ID); + + if (rc < 0) { + IPAWANERR("Registering ipa_a5 svc failed %d\n", + rc); + goto deregister_qmi_srv; + } + /* Initialize QMI-client */ + + ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req"); + if (!ipa_clnt_req_workqueue) { + IPAWANERR("Creating clnt_req workqueue failed\n"); + goto deregister_qmi_srv; + } + + /* Create a Local client port for QMI communication */ + ipa_q6_clnt = vzalloc(sizeof(*ipa_q6_clnt)); + + if (!ipa_q6_clnt) + goto destroy_clnt_req_wq; + + rc = qmi_handle_init(ipa_q6_clnt, + QMI_IPA_MAX_MSG_LEN, + &client_ops, + client_handlers); + + if (rc < 0) { + IPAWANERR("Creating clnt handle failed\n"); + goto destroy_qmi_client_handle; + } + + rc = qmi_add_lookup(ipa_q6_clnt, + IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID); + + if (rc < 0) { + IPAWANERR("Adding Q6 Svc failed\n"); + goto deregister_qmi_client; + } + /* get Q6 service and start send modem-initial to Q6 */ + IPAWANDBG("wait service available\n"); + return; + +deregister_qmi_client: + qmi_handle_release(ipa_q6_clnt); +destroy_qmi_client_handle: + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; +destroy_clnt_req_wq: + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; +deregister_qmi_srv: + qmi_handle_release(ipa_svc_handle); +destroy_qmi_handle: + vfree(ipa_qmi_ctx); +destroy_ipa_A7_svc_wq: + vfree(ipa_svc_handle); + ipa_svc_handle = NULL; + ipa_qmi_ctx = NULL; +} + +int ipa_qmi_service_init(uint32_t wan_platform_type) +{ + ipa_wan_platform = wan_platform_type; + qmi_modem_init_fin = false; + qmi_indication_fin = false; + atomic_set(&workqueues_stopped, 0); + + if (atomic_read(&ipa_qmi_initialized) == 0) + ipa_qmi_service_init_worker(); + return 0; +} + +void ipa_qmi_service_exit(void) +{ + + atomic_set(&workqueues_stopped, 1); + + /* qmi-service */ + if (ipa_svc_handle != NULL) { + qmi_handle_release(ipa_svc_handle); + vfree(ipa_svc_handle); + ipa_svc_handle = NULL; + } + + /* qmi-client */ + + + /* Release client handle */ + if (ipa_q6_clnt != NULL) { + qmi_handle_release(ipa_q6_clnt); + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; + if (ipa_clnt_req_workqueue) { + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; + } + } + + /* clean the QMI msg cache */ + mutex_lock(&ipa_qmi_lock); + if (ipa_qmi_ctx != NULL) { + vfree(ipa_qmi_ctx); + ipa_qmi_ctx = NULL; + } + mutex_unlock(&ipa_qmi_lock); + qmi_modem_init_fin = false; + qmi_indication_fin = false; + atomic_set(&ipa_qmi_initialized, 0); +} + 
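+/* stop scheduling of new QMI work and cancel any pending svc arrive/exit work */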
+void ipa_qmi_stop_workqueues(void) +{ + IPAWANDBG("Stopping all QMI workqueues\n"); + + /* Stopping all workqueues so new work won't be scheduled */ + atomic_set(&workqueues_stopped, 1); + + /* Making sure that the current scheduled work won't be executed */ + cancel_delayed_work(&work_svc_arrive); + cancel_delayed_work(&work_svc_exit); +} + +/* voting for bus BW to ipa_rm*/ +int vote_for_bus_bw(uint32_t *bw_mbps) +{ + struct ipa_rm_perf_profile profile; + int ret; + + if (bw_mbps == NULL) { + IPAWANERR("Bus BW is invalid\n"); + return -EINVAL; + } + + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = *bw_mbps; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (ret) + IPAWANERR("Failed to set perf profile to BW %u\n", + profile.max_supported_bandwidth_mbps); + else + IPAWANDBG("Succeeded to set perf profile to BW %u\n", + profile.max_supported_bandwidth_mbps); + + return ret; +} + +int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa_get_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_GET_DATA_STATS_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_data_stats_resp_msg_v01"); +} + +int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa_get_apn_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01"); +} + +int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + struct ipa_set_data_usage_quota_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = 
QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa_set_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + &resp_desc, &resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01"); +} + +int ipa_qmi_stop_data_qouta(void) +{ + struct ipa_stop_data_usage_quota_req_msg_v01 req; + struct ipa_stop_data_usage_quota_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa_stop_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, + &resp_desc, &resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01"); +} + +void ipa_qmi_init(void) +{ + mutex_init(&ipa_qmi_lock); +} + +void ipa_qmi_cleanup(void) +{ + mutex_destroy(&ipa_qmi_lock); +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h new file mode 100644 index 0000000000000000000000000000000000000000..8a4c9797ecd50da6862655d7e2c4688119048d42 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef IPA_QMI_SERVICE_H +#define IPA_QMI_SERVICE_H + +#include +#include +#include +#include +#include "ipa_i.h" +#include + +/** + * name of the DL wwan default routing tables for v4 and v6 + */ +#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr" +#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt" +#define MAX_NUM_Q6_RULE 35 +#define MAX_NUM_QMI_RULE_CACHE 10 +#define DEV_NAME "ipa-wan" +#define SUBSYS_MODEM "modem" + +#define IPAWANDBG(fmt, args...) 
\ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANDBG_LOW(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR(fmt, args...) \ + do { \ + pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR_RL(fmt, args...) \ + do { \ + pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANINFO(fmt, args...) \ + do { \ + pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +extern struct ipa_qmi_context *ipa_qmi_ctx; +extern struct mutex ipa_qmi_lock; + +struct ipa_qmi_context { +struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE]; +u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE]; +int num_ipa_install_fltr_rule_req_msg; +struct ipa_install_fltr_rule_req_msg_v01 + ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +int num_ipa_fltr_installed_notif_req_msg; +struct ipa_fltr_installed_notif_req_msg_v01 + ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +bool modem_cfg_emb_pipe_flt; +struct sockaddr_qrtr client_sq; +struct sockaddr_qrtr server_sq; +}; + +struct rmnet_mux_val { + uint32_t mux_id; + int8_t vchannel_name[IFNAMSIZ]; + bool mux_channel_set; + bool ul_flt_reg; + bool mux_hdr_set; + uint32_t hdr_hdl; +}; + +extern struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_indication_reg_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_config_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_config_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info 
ipa_get_apn_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[]; + +/** + * struct ipa_rmnet_context - IPA rmnet context + * @ipa_rmnet_ssr: support modem SSR + * @polling_interval: Requested interval for polling tethered statistics + * @metered_mux_id: The mux ID on which quota has been set + */ +struct ipa_rmnet_context { + bool ipa_rmnet_ssr; + u64 polling_interval; + u32 metered_mux_id; +}; + +extern struct ipa_rmnet_context ipa_rmnet_ctx; + +#ifdef CONFIG_RMNET_IPA + +int ipa_qmi_service_init(uint32_t wan_platform_type); + +void ipa_qmi_service_exit(void); + +/* sending filter-install-request to modem*/ +int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req); + +/* sending filter-installed-notify-request to modem*/ +int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req); + +/* voting for bus BW to ipa_rm*/ +int vote_for_bus_bw(uint32_t *bw_mbps); + +int qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); + +int qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); + +int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req, uint32_t *rule_hdl); + +int wwan_update_mux_channel_prop(void); + +int wan_ioctl_init(void); + +void wan_ioctl_stop_qmi_messages(void); + +void wan_ioctl_enable_qmi_messages(void); + +void wan_ioctl_deinit(void); + +void ipa_qmi_stop_workqueues(void); + +int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data); + +int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data); + +void ipa_broadcast_quota_reach_ind(uint32_t mux_id, + enum ipa_upstream_type upstream_type); + +int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe + *data); + +int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset); + +int rmnet_ipa_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data); + +int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); + +int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp); + +int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp); + +int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req); + +int ipa_qmi_stop_data_qouta(void); + +void ipa_q6_handshake_complete(bool ssr_bootup); + +void ipa_qmi_init(void); + +void ipa_qmi_cleanup(void); + +#else /* CONFIG_RMNET_IPA */ + +static inline int ipa_qmi_service_init(uint32_t wan_platform_type) +{ + return -EPERM; +} + +static inline void ipa_qmi_service_exit(void) { } + +/* sending filter-install-request to modem*/ +static inline int qmi_filter_request_send( + struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + return -EPERM; +} + +/* sending filter-installed-notify-request to modem*/ +static inline int qmi_filter_notify_send( + struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int qmi_enable_force_clear_datapath_send( + struct 
ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int copy_ul_filter_rule_to_ipa( + struct ipa_install_fltr_rule_req_msg_v01 *rule_req, uint32_t *rule_hdl) +{ + return -EPERM; +} + +static inline int wwan_update_mux_channel_prop(void) +{ + return -EPERM; +} + +static inline int wan_ioctl_init(void) +{ + return -EPERM; +} + +static inline void wan_ioctl_stop_qmi_messages(void) { } + +static inline void wan_ioctl_enable_qmi_messages(void) { } + +static inline void wan_ioctl_deinit(void) { } + +static inline void ipa_qmi_stop_workqueues(void) { } + +static inline int vote_for_bus_bw(uint32_t *bw_mbps) +{ + return -EPERM; +} + +static inline int rmnet_ipa_poll_tethering_stats( + struct wan_ioctl_poll_tethering_stats *data) +{ + return -EPERM; +} + +static inline int rmnet_ipa_set_data_quota( + struct wan_ioctl_set_data_quota *data) +{ + return -EPERM; +} + +static inline void ipa_broadcast_quota_reach_ind +( + uint32_t mux_id, + enum ipa_upstream_type upstream_type) +{ +} + +static inline int rmnet_ipa_reset_tethering_stats +( + struct wan_ioctl_reset_tether_stats *data +) +{ + return -EPERM; + +} + +static inline int ipa_qmi_get_data_stats( + struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa_qmi_get_network_stats( + struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa_qmi_set_data_quota( + struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa_qmi_stop_data_qouta(void) +{ + return -EPERM; +} + +static inline void ipa_q6_handshake_complete(bool ssr_bootup) { } + +static inline void ipa_qmi_init(void) +{ +} + +static inline void ipa_qmi_cleanup(void) +{ +} + +#endif /* CONFIG_RMNET_IPA */ + +#endif /* IPA_QMI_SERVICE_H */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c new file mode 100644 index 0000000000000000000000000000000000000000..137458876a9e2a03af93b44d129324170c39f04d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c @@ -0,0 +1,2419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2017, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include + +#include + +#include "ipa_qmi_service.h" + +/* Type Definitions */ +static struct qmi_elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_route_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + route_tbl_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + num_indices), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_modem_mem_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + block_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct 
ipa_ipfltr_range_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_low), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_high), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = 
QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + rule_eq_bitmap), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + tos_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tos_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_range_16), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_range_eq_16_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_range_16), + .ei_array = ipa_ipfltr_range_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + offset_meq_32), + .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq_present), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16_present), + }, + { + 
.data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16), + .ei_array = ipa_ipfltr_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32), + .ei_array = ipa_ipfltr_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_meq_32), + .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_128), + }, + { + .data_type = QMI_STRUCT, + .elem_len = + QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_mask_eq_128_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + offset_meq_128), + .ei_array = ipa_ipfltr_mask_eq_128_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32), + .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ipv4_frag_eq_present), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 
QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_rule), + .ei_array = ipa_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_action), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_routing_table_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + route_table_index), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + mux_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info + ipa_filter_rule_identifier_to_handle_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_handle), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_handle), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_index), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + 
.elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info), + .ei_array = ipa_hdr_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info), + .ei_array = ipa_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info), + .ei_array = ipa_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_modem_mem_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info), + .ei_array = ipa_modem_mem_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + 
.tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info), + .ei_array = ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info), + .ei_array = ipa_zip_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + 
struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_indication_reg_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + ipa_master_driver_init_complt_ind_msg_v01, + master_driver_init_status), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_filter_spec_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list), + .ei_array = ipa_filter_spec_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + 
.elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_rule_identifier_to_handle_map_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list), + .ei_array = + ipa_filter_rule_identifier_to_handle_map_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + install_status), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_handle_to_index_map_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list), + .ei_array = ipa_filter_handle_to_index_map_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct 
ipa_fltr_installed_notif_req_msg_v01, + retain_header_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_valid), + }, + { + .data_type = 
QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + source_pipe_bitmask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { + { + .data_type = 
QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit), + }, + { + 
.data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_config_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_config_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + pipe_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + 
.tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_bytes), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + filter_rule_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + num_packets), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list), + .ei_array = ipa_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct 
ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list), + .ei_array = ipa_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list), + .ei_array = ipa_stats_type_filter_rule_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_bytes), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info 
ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_apn_data_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list), + .ei_array = ipa_apn_data_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + num_Mbytes), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list), + .ei_array = ipa_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_set_data_usage_quota_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct 
ipa_data_usage_quota_reached_ind_msg_v01, + apn), + .ei_array = ipa_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { + /* ipa_stop_data_usage_quota_req_msg is empty */ + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_stop_data_usage_quota_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h new file mode 100644 index 0000000000000000000000000000000000000000..bbc0c1619bb2d9782e1bf6283dee4853f38e10ad --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2015, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_RAM_MMAP_H_ +#define _IPA_RAM_MMAP_H_ + +/* + * This header defines the memory map of the IPA RAM (not all SRAM is + * available for SW use) + * In case of restricted bytes the actual starting address will be + * advanced by the number of needed bytes + */ + +#define IPA_RAM_NAT_OFST 0 +#define IPA_RAM_NAT_SIZE 0 + +#define IPA_MEM_v1_RAM_HDR_OFST (IPA_RAM_NAT_OFST + IPA_RAM_NAT_SIZE) +#define IPA_MEM_v1_RAM_HDR_SIZE 1664 +#define IPA_MEM_v1_RAM_V4_FLT_OFST (IPA_MEM_v1_RAM_HDR_OFST +\ + IPA_MEM_v1_RAM_HDR_SIZE) +#define IPA_MEM_v1_RAM_V4_FLT_SIZE 2176 +#define IPA_MEM_v1_RAM_V4_RT_OFST (IPA_MEM_v1_RAM_V4_FLT_OFST +\ + IPA_MEM_v1_RAM_V4_FLT_SIZE) +#define IPA_MEM_v1_RAM_V4_RT_SIZE 512 +#define IPA_MEM_v1_RAM_V6_FLT_OFST (IPA_MEM_v1_RAM_V4_RT_OFST +\ + IPA_MEM_v1_RAM_V4_RT_SIZE) +#define IPA_MEM_v1_RAM_V6_FLT_SIZE 1792 +#define IPA_MEM_v1_RAM_V6_RT_OFST (IPA_MEM_v1_RAM_V6_FLT_OFST +\ + IPA_MEM_v1_RAM_V6_FLT_SIZE) +#define IPA_MEM_v1_RAM_V6_RT_SIZE 512 +#define IPA_MEM_v1_RAM_END_OFST (IPA_MEM_v1_RAM_V6_RT_OFST +\ + IPA_MEM_v1_RAM_V6_RT_SIZE) + +#define IPA_MEM_RAM_V6_RT_SIZE_DDR 16384 +#define IPA_MEM_RAM_V4_RT_SIZE_DDR 16384 +#define IPA_MEM_RAM_V6_FLT_SIZE_DDR 16384 +#define IPA_MEM_RAM_V4_FLT_SIZE_DDR 16384 +#define IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR 0 + +#define IPA_MEM_CANARY_SIZE 4 +#define IPA_MEM_CANARY_VAL 0xdeadbeef + +#define IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE 256 +/* + * IPA v2.0 and v2.1 SRAM memory layout: + * +-------------+ + * | V4 FLT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | CANARY | + * +-------------+ + * | V6 FLT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | CANARY | + * +-------------+ + * | V4 RT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | V6 RT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | MODEM HDR | + * +-------------+ + * | APPS HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | MODEM MEM | + * +-------------+ + * | CANARY | + * +-------------+ + * | APPS V4 FLT | + * +-------------+ + * | APPS V6 FLT | + * +-------------+ + * | CANARY | + * +-------------+ + * | UC INFO | + * +-------------+ + */ +#define IPA_MEM_v2_RAM_OFST_START 128 +#define 
IPA_MEM_v2_RAM_V4_FLT_OFST IPA_MEM_v2_RAM_OFST_START +#define IPA_MEM_v2_RAM_V4_FLT_SIZE 88 + +/* V4 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V4_FLT_OFST & 7) +#error V4 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_V6_FLT_OFST (IPA_MEM_v2_RAM_V4_FLT_OFST + \ + IPA_MEM_v2_RAM_V4_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_V6_FLT_SIZE 88 + +/* V6 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V6_FLT_OFST & 7) +#error V6 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_V4_RT_OFST (IPA_MEM_v2_RAM_V6_FLT_OFST + \ + IPA_MEM_v2_RAM_V6_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_V4_NUM_INDEX 11 +#define IPA_MEM_v2_V4_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_V4_MODEM_RT_INDEX_HI 3 +#define IPA_MEM_v2_V4_APPS_RT_INDEX_LO 4 +#define IPA_MEM_v2_V4_APPS_RT_INDEX_HI 10 +#define IPA_MEM_v2_RAM_V4_RT_SIZE (IPA_MEM_v2_RAM_V4_NUM_INDEX * 4) + +/* V4 routing header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V4_RT_OFST & 7) +#error V4 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_V6_RT_OFST (IPA_MEM_v2_RAM_V4_RT_OFST + \ + IPA_MEM_v2_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_V6_NUM_INDEX 11 +#define IPA_MEM_v2_V6_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_V6_MODEM_RT_INDEX_HI 3 +#define IPA_MEM_v2_V6_APPS_RT_INDEX_LO 4 +#define IPA_MEM_v2_V6_APPS_RT_INDEX_HI 10 +#define IPA_MEM_v2_RAM_V6_RT_SIZE (IPA_MEM_v2_RAM_V6_NUM_INDEX * 4) + +/* V6 routing header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V6_RT_OFST & 7) +#error V6 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_MODEM_HDR_OFST (IPA_MEM_v2_RAM_V6_RT_OFST + \ + IPA_MEM_v2_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_MODEM_HDR_SIZE 320 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_MODEM_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_APPS_HDR_OFST (IPA_MEM_v2_RAM_MODEM_HDR_OFST + \ + IPA_MEM_v2_RAM_MODEM_HDR_SIZE) +#define IPA_MEM_v2_RAM_APPS_HDR_SIZE 72 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_APPS_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_MODEM_OFST (IPA_MEM_v2_RAM_APPS_HDR_OFST + \ + IPA_MEM_v2_RAM_APPS_HDR_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_MODEM_SIZE 3532 + +/* modem memory is 4B aligned */ +#if (IPA_MEM_v2_RAM_MODEM_OFST & 3) +#error modem memory is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_RAM_MODEM_OFST + \ + IPA_MEM_v2_RAM_MODEM_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE 1920 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST + \ + IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE) +#define IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE 1372 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_UC_INFO_OFST (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST + \ + IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_UC_INFO_SIZE 292 + +/* uC info 4B aligned */ +#if (IPA_MEM_v2_RAM_UC_INFO_OFST & 3) +#error uC info is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_END_OFST (IPA_MEM_v2_RAM_UC_INFO_OFST + \ + IPA_MEM_v2_RAM_UC_INFO_SIZE) +#define IPA_MEM_v2_RAM_APPS_V4_RT_OFST IPA_MEM_v2_RAM_END_OFST +#define 
IPA_MEM_v2_RAM_APPS_V4_RT_SIZE 0 +#define IPA_MEM_v2_RAM_APPS_V6_RT_OFST IPA_MEM_v2_RAM_END_OFST +#define IPA_MEM_v2_RAM_APPS_V6_RT_SIZE 0 +#define IPA_MEM_v2_RAM_HDR_SIZE_DDR 4096 + +/* + * IPA v2.5/v2.6 SRAM memory layout: + * +----------------+ + * | UC INFO | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM HDR | + * +----------------+ + * | APPS HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM PROC CTX | + * +----------------+ + * | APPS PROC CTX | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM MEM | + * +----------------+ + * | CANARY | + * +----------------+ + */ + +#define IPA_MEM_v2_5_RAM_UC_MEM_SIZE 128 +#define IPA_MEM_v2_5_RAM_UC_INFO_OFST IPA_MEM_v2_5_RAM_UC_MEM_SIZE +#define IPA_MEM_v2_5_RAM_UC_INFO_SIZE 512 + +/* uC info 4B aligned */ +#if (IPA_MEM_v2_5_RAM_UC_INFO_OFST & 3) +#error uC info is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_OFST_START (IPA_MEM_v2_5_RAM_UC_INFO_OFST + \ + IPA_MEM_v2_5_RAM_UC_INFO_SIZE) + +#define IPA_MEM_v2_5_RAM_V4_FLT_OFST (IPA_MEM_v2_5_RAM_OFST_START + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V4_FLT_SIZE 88 + +/* V4 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V4_FLT_OFST & 7) +#error V4 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_V6_FLT_OFST (IPA_MEM_v2_5_RAM_V4_FLT_OFST + \ + IPA_MEM_v2_5_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V6_FLT_SIZE 88 + +/* V6 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V6_FLT_OFST & 7) +#error V6 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_V4_RT_OFST (IPA_MEM_v2_5_RAM_V6_FLT_OFST + \ + IPA_MEM_v2_5_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V4_NUM_INDEX 15 +#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_5_RAM_V4_NUM_INDEX - 1) +#define IPA_MEM_v2_5_RAM_V4_RT_SIZE (IPA_MEM_v2_5_RAM_V4_NUM_INDEX * 4) + +/* V4 routing header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V4_RT_OFST & 7) +#error V4 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_V6_RT_OFST (IPA_MEM_v2_5_RAM_V4_RT_OFST + \ + IPA_MEM_v2_5_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V6_NUM_INDEX 15 +#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_5_RAM_V6_NUM_INDEX - 1) +#define IPA_MEM_v2_5_RAM_V6_RT_SIZE (IPA_MEM_v2_5_RAM_V6_NUM_INDEX * 4) + +/* V6 routing header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V6_RT_OFST & 7) +#error V6 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_MODEM_HDR_OFST (IPA_MEM_v2_5_RAM_V6_RT_OFST + \ + IPA_MEM_v2_5_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define 
IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE 320 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_HDR_OFST (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST + \ + IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_HDR_SIZE 0 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST \ + (IPA_MEM_v2_5_RAM_APPS_HDR_OFST + IPA_MEM_v2_5_RAM_APPS_HDR_SIZE + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE 512 + +/* header processing context table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST & 7) +#error header processing context table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST \ + (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST + \ + IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE 512 + +/* header processing context table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST & 7) +#error header processing context table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_MODEM_OFST (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST + \ + IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_MODEM_SIZE 5800 + +/* modem memory is 4B aligned */ +#if (IPA_MEM_v2_5_RAM_MODEM_OFST & 3) +#error modem memory is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_5_RAM_MODEM_OFST + \ + IPA_MEM_v2_5_RAM_MODEM_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST + \ + IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_END_OFST (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST + \ + IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST IPA_MEM_v2_5_RAM_END_OFST +#define IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE 0 +#define IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST IPA_MEM_v2_5_RAM_END_OFST +#define IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE 0 +#define IPA_MEM_v2_5_RAM_HDR_SIZE_DDR 2048 + +/* + * IPA v2.6Lite SRAM memory layout: + * +----------------+ + * | UC INFO | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | COMP / DECOMP | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM MEM | + * +----------------+ + * | CANARY | + * +----------------+ + */ + +#define IPA_MEM_v2_6L_RAM_UC_MEM_SIZE 128 +#define IPA_MEM_v2_6L_RAM_UC_INFO_OFST IPA_MEM_v2_6L_RAM_UC_MEM_SIZE +#define IPA_MEM_v2_6L_RAM_UC_INFO_SIZE 512 + +/* uC info 4B aligned */ +#if 
(IPA_MEM_v2_6L_RAM_UC_INFO_OFST & 3) +#error uC info is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_OFST_START (IPA_MEM_v2_6L_RAM_UC_INFO_OFST + \ + IPA_MEM_v2_6L_RAM_UC_INFO_SIZE) + +#define IPA_MEM_v2_6L_RAM_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_OFST_START + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V4_FLT_SIZE 88 + +/* V4 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V4_FLT_OFST & 7) +#error V4 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_V6_FLT_OFST (IPA_MEM_v2_6L_RAM_V4_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V6_FLT_SIZE 88 + +/* V6 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V6_FLT_OFST & 7) +#error V6 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_V4_RT_OFST (IPA_MEM_v2_6L_RAM_V6_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V4_NUM_INDEX 15 +#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX - 1) +#define IPA_MEM_v2_6L_RAM_V4_RT_SIZE (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX * 4) + +/* V4 routing header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V4_RT_OFST & 7) +#error V4 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_V6_RT_OFST (IPA_MEM_v2_6L_RAM_V4_RT_OFST + \ + IPA_MEM_v2_6L_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V6_NUM_INDEX 15 +#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX - 1) +#define IPA_MEM_v2_6L_RAM_V6_RT_SIZE (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX * 4) + +/* V6 routing header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V6_RT_OFST & 7) +#error V6 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST (IPA_MEM_v2_6L_RAM_V6_RT_OFST + \ + IPA_MEM_v2_6L_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE 320 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_APPS_HDR_OFST (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST + \ + IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE) +#define IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE 0 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST \ + (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST + IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE 512 + +/* comp/decomp memory region is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST & 7) +#error comp/decomp memory region is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_MODEM_OFST \ + (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST + \ + IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_MODEM_SIZE 6376 + +/* modem memory is 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_MODEM_OFST & 3) +#error modem memory is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_MODEM_OFST + \ + IPA_MEM_v2_6L_RAM_MODEM_SIZE) +#define 
IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST \ + (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE) +#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_END_OFST (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE) + +#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST +#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE 0 +#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST +#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE 0 +#define IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR 2048 + +#endif /* _IPA_RAM_MMAP_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..873d8e472547b4540f592503261f0ab2120022e5 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __IPA_REG_H__ +#define __IPA_REG_H__ + +/* + * IPA's BAM specific registers + * Used for IPA HW 1.0 only + */ + +#define IPA_BAM_REG_BASE_OFST 0x00004000 +#define IPA_BAM_CNFG_BITS_OFST 0x7c +#define IPA_BAM_REMAP_SIZE (0x1000) + +#define IPA_FILTER_FILTER_EN_BMSK 0x1 +#define IPA_FILTER_FILTER_EN_SHFT 0x0 +#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094 +#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16 +#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8 + +#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090 +#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094 + +#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1 +#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe + +#define IPA_AGGREGATION_MODE_MSK 0x1 +#define IPA_AGGREGATION_MODE_SHFT 31 +#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff + +#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000 + +#define IPA_FILTER_FILTER_EN_BMSK 0x1 +#define IPA_FILTER_FILTER_EN_SHFT 0x0 + +#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT 2 +#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK 0x4 + +#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044 + +/* + * End of IPA 1.0 Registers + */ + + +/* + * IPA HW 2.0 Registers + */ +#define IPA_REG_BASE 0x0 + +#define IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001008 + 0x1000 * (n)) +#define IPA_IRQ_STTS_EE_n_MAXn 3 + +#define IPA_IRQ_EN_EE_n_ADDR(n) (IPA_REG_BASE + 0x0000100c + 0x1000 * (n)) +#define IPA_IRQ_EN_EE_n_MAXn 3 + + +#define IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001010 + 0x1000 * (n)) +#define IPA_IRQ_CLR_EE_n_MAXn 3 + +#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(n) \ + (IPA_REG_BASE + 0x00001098 + 0x1000 * (n)) +#define IPA_IRQ_SUSPEND_INFO_EE_n_MAXn 3 +/* + * End of IPA 2.0 Registers + */ + +/* + * IPA HW 2.5 Registers + */ +#define IPA_BCR_OFST 0x000005B0 +#define IPA_COUNTER_CFG_OFST 0x000005E8 +#define IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK 0xF +#define IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT 0x0 +#define IPA_COUNTER_CFG_AGGR_GRAN_BMSK 0x1F0 +#define IPA_COUNTER_CFG_AGGR_GRAN_SHFT 0x4 + /* + * End of IPA 2.5 Registers + */ + +/* + * IPA HW 2.6/2.6L Registers + */ +#define IPA_ENABLED_PIPES_OFST 0x000005DC +#define IPA_YELLOW_MARKER_SYS_CFG_OFST 0x00000728 +/* + * End of IPA 2.6/2.6L 
Registers + */ + +/* + * Common Registers + */ +#define IPA_REG_BASE_OFST_v2_0 0x00020000 +#define IPA_REG_BASE_OFST_v2_5 0x00040000 +#define IPA_REG_BASE_OFST_v2_6L IPA_REG_BASE_OFST_v2_5 +#define IPA_COMP_SW_RESET_OFST 0x0000003c + +#define IPA_VERSION_OFST 0x00000034 +#define IPA_COMP_HW_VERSION_OFST 0x00000030 + +#define IPA_SHARED_MEM_SIZE_OFST_v1_1 0x00000050 +#define IPA_SHARED_MEM_SIZE_OFST_v2_0 0x00000050 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0 0xffff0000 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0 0x10 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0 0xffff +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0 0x0 + +#define IPA_ENDP_INIT_AGGR_N_OFST_v1_1(n) (0x000001c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_AGGR_N_OFST_v2_0(n) (0x00000320 + 0x4 * (n)) + +#define IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(n) (0x00000220 + 0x4 * (n)) +#define IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(n) (0x00000370 + 0x4 * (n)) +#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK 0x1f +#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT 0x0 + +#define IPA_ROUTE_OFST_v1_1 0x00000044 + +#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0 +#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11 + +#define IPA_FILTER_OFST_v1_1 0x00000048 + +#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v1_1(n) (0x00004000 + 0x4 * (n)) +#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(n) (0x00005000 + 0x4 * (n)) +#define IPA_SRAM_DIRECT_ACCESS_N_OFST(n) (0x00004000 + 0x4 * (n)) +#define IPA_SRAM_SW_FIRST_v2_5 0x00005000 +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40 +#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0 +#define IPA_COMP_CFG_OFST 0x00000038 + +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf +#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK 0x7c00 +#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT 0xa +#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK 0x3e0 +#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT 0x5 +#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK 0x1c +#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT 0x2 +#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK 0x3 +#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_MODE_N_OFST_v1_1(n) (0x00000170 + 0x4 * (n)) +#define IPA_ENDP_INIT_MODE_N_OFST_v2_0(n) (0x000002c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_MODE_N_RMSK 0x7f +#define IPA_ENDP_INIT_MODE_N_MAX 19 +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1 0x7c +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1 0x2 +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0 0x1f0 +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0 0x4 +#define IPA_ENDP_INIT_MODE_N_MODE_BMSK 0x7 +#define IPA_ENDP_INIT_MODE_N_MODE_SHFT 0x0 + +#define IPA_ENDP_INIT_HDR_N_OFST_v1_1(n) (0x00000120 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_N_OFST_v2_0(n) (0x00000170 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK 0x3f +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK 
0x7e000 +#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK 0x3f00000 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT 0x14 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13 +#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000 +#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2 0x1c +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000 +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b +#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK 0x4000000 +#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT 0x1a +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK 0x40 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT 0x6 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT 0x7 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK 0x1f80 + +#define IPA_ENDP_INIT_NAT_N_OFST_v1_1(n) (0x000000c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_NAT_N_OFST_v2_0(n) (0x00000120 + 0x4 * (n)) +#define IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK 0x3 +#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0 + + +#define IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(n) (0x000001c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0 0x1c00 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5 0x3c00 + + + +/* + * IPA HW 1.1 specific Registers + */ + +#define IPA_FILTER_FILTER_DIS_BMSK 0x1 +#define IPA_FILTER_FILTER_DIS_SHFT 0x0 +#define IPA_SINGLE_NDP_MODE_OFST 0x00000064 +#define IPA_QCNCM_OFST 0x00000060 + +#define IPA_ENDP_INIT_CTRL_N_OFST(n) (0x00000070 + 0x4 * (n)) +#define IPA_ENDP_INIT_CTRL_N_RMSK 0x1 +#define IPA_ENDP_INIT_CTRL_N_MAX 19 +#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK 0x1 +#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT 0x0 +#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK 0x2 +#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT 0x1 + +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(n) (0x00000270 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(n) (0x000003c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_RMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_MAX 19 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(n) (0x00000470 + 0x04 * (n)) +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x40 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x6 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8 +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000 +#define 
IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10 + +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(n) (0x000002c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(n) (0x00000420 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_RMSK 0x1ff +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_MAX 19 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK 0x1ff +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT 0x0 + +#define IPA_DEBUG_CNT_REG_N_OFST_v1_1(n) (0x00000340 + 0x4 * (n)) +#define IPA_DEBUG_CNT_REG_N_OFST_v2_0(n) (0x00000600 + 0x4 * (n)) +#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_MAX 15 +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0 + +#define IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(n) (0x00000380 + 0x4 * (n)) +#define IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(n) (0x00000640 + 0x4 * (n)) +#define IPA_DEBUG_CNT_CTRL_N_RMSK 0x1ff1f171 +#define IPA_DEBUG_CNT_CTRL_N_MAX 15 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_SHFT 0x14 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_SHFT 0xc +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_BMSK 0x100 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_SHFT 0x8 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_BMSK 0x70 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_SHFT 0x4 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_BMSK 0x1 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_SHFT 0x0 + +#define IPA_ENDP_STATUS_n_OFST(n) (0x000004c0 + 0x4 * (n)) +#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e +#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_CFG_n_OFST(n) (0x000000c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_CFG_n_RMSK 0x7f +#define IPA_ENDP_INIT_CFG_n_MAXn 19 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(n) (0x00000220 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 19 +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0 + +#define IPA_ENDP_INIT_HDR_METADATA_n_OFST(n) (0x00000270 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000 +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10 + +#define IPA_IRQ_EE_UC_n_OFFS(n) (0x0000101c + 0x1000 * (n)) +#define IPA_IRQ_EE_UC_n_RMSK 0x1 +#define IPA_IRQ_EE_UC_n_MAXn 3 +#define IPA_IRQ_EE_UC_n_INT_BMSK 0x1 +#define IPA_IRQ_EE_UC_n_INT_SHFT 0x0 + +#define IPA_UC_MAILBOX_m_n_OFFS(m, n) (0x0001a000 + 0x80 * (m) + 0x4 * (n)) +#define IPA_UC_MAILBOX_m_n_OFFS_v2_5(m, n) (0x00022000 + 0x80 * (m) + 0x4 * (n)) + +#define IPA_SYS_PKT_PROC_CNTXT_BASE_OFST (0x000005d8) +#define IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST (0x000005e0) + +#endif diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c new file mode 100644 index 0000000000000000000000000000000000000000..fe9e5fa451a6a404140e06719dbfe8db08984865 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -0,0 +1,1669 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_i.h" + +#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1) +#define IPA_RT_TABLE_WORD_SIZE (4) +#define IPA_RT_INDEX_BITMAP_SIZE (32) +#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127) +#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3) +#define IPA_RT_BIT_MASK (0x1) +#define IPA_RT_STATUS_OF_ADD_FAILED (-1) +#define IPA_RT_STATUS_OF_DEL_FAILED (-1) +#define IPA_RT_STATUS_OF_MDFY_FAILED (-1) + +/** + * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf) +{ + struct ipa_rt_rule_hw_hdr *rule_hdr; + const struct ipa_rt_rule *rule = + (const struct ipa_rt_rule *)&entry->rule; + u16 en_rule = 0; + u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; + u8 *start; + int pipe_idx; + struct ipa_hdr_entry *hdr_entry; + + if (buf == NULL) { + memset(tmp, 0, (IPA_RT_FLT_HW_RULE_BUF_SIZE/4)); + buf = (u8 *)tmp; + } + + start = buf; + rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf; + pipe_idx = ipa2_get_ep_mapping(entry->rule.dst); + if (pipe_idx == -1) { + IPAERR("Wrong destination pipe specified in RT rule\n"); + WARN_ON(1); + return -EPERM; + } + if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) { + IPAERR("No RT rule on IPA_client_producer pipe.\n"); + IPAERR("pipe_idx: %d dst_pipe: %d\n", + pipe_idx, entry->rule.dst); + WARN_ON(1); + return -EPERM; + } + rule_hdr->u.hdr.pipe_dest_idx = pipe_idx; + rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl; + + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } + if (entry->hdr) { + if (entry->hdr->cookie == IPA_HDR_COOKIE) { + rule_hdr->u.hdr.hdr_offset = + entry->hdr->offset_entry->offset >> 2; + } else { + IPAERR("Entry hdr deleted by user = %d cookie = %u\n", + entry->hdr->user_deleted, entry->hdr->cookie); + WARN_ON(1); + rule_hdr->u.hdr.hdr_offset = 0; + } + } else { + rule_hdr->u.hdr.hdr_offset = 0; + } + buf += sizeof(struct ipa_rt_rule_hw_hdr); + + if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + + IPADBG_LOW("en_rule 0x%x\n", en_rule); + + rule_hdr->u.hdr.en_rule = en_rule; + ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr); + + if (entry->hw_len == 0) { + entry->hw_len = buf - start; + } else if (entry->hw_len != (buf - start)) { + IPAERR( + "hw_len differs b/w passes passed=0x%x calc=0x%zxtd\n", + entry->hw_len, + (buf - start)); + return -EPERM; + } + + return 0; +} + +/** + * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to 
know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf) +{ + struct ipa_rt_rule_hw_hdr *rule_hdr; + const struct ipa_rt_rule *rule = + (const struct ipa_rt_rule *)&entry->rule; + u16 en_rule = 0; + u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; + u8 *start; + int pipe_idx; + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *hdr_proc_entry; + + if (buf == NULL) { + memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE); + buf = (u8 *)tmp; + } + + start = buf; + rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf; + pipe_idx = ipa2_get_ep_mapping(entry->rule.dst); + if (pipe_idx == -1) { + IPAERR("Wrong destination pipe specified in RT rule\n"); + WARN_ON(1); + return -EPERM; + } + if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) { + IPAERR("No RT rule on IPA_client_producer pipe.\n"); + IPAERR("pipe_idx: %d dst_pipe: %d\n", + pipe_idx, entry->rule.dst); + WARN_ON(1); + return -EPERM; + } + rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx; + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EPERM; + } + } + if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) { + struct ipa_hdr_proc_ctx_entry *proc_ctx; + + proc_ctx = (entry->proc_ctx) ? 
: entry->hdr->proc_ctx; + rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl; + ipa_assert_on(proc_ctx->offset_entry->offset & 31); + rule_hdr->u.hdr_v2_5.proc_ctx = 1; + rule_hdr->u.hdr_v2_5.hdr_offset = + (proc_ctx->offset_entry->offset + + ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5; + } else if (entry->hdr) { + rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl; + ipa_assert_on(entry->hdr->offset_entry->offset & 3); + rule_hdr->u.hdr_v2_5.proc_ctx = 0; + rule_hdr->u.hdr_v2_5.hdr_offset = + entry->hdr->offset_entry->offset >> 2; + } else { + rule_hdr->u.hdr_v2_5.proc_ctx = 0; + rule_hdr->u.hdr_v2_5.hdr_offset = 0; + } + buf += sizeof(struct ipa_rt_rule_hw_hdr); + + if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + + IPADBG("en_rule 0x%x\n", en_rule); + + rule_hdr->u.hdr_v2_5.en_rule = en_rule; + ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr); + + if (entry->hw_len == 0) { + entry->hw_len = buf - start; + } else if (entry->hw_len != (buf - start)) { + IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%zxtd\n", + entry->hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +/** + * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means that the caller wants to know the size + * of the rule as seen by HW so they did not pass a valid buffer, we will + * use a scratch buffer instead. + * With this scheme we are going to generate the rule twice, once to know + * size using scratch buffer and second to write the rule to the actual + * caller supplied buffer which is of required size. + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf) +{ + /* Same implementation as IPAv2 */ + return __ipa_generate_rt_hw_rule_v2(ip, entry, buf); +} + +/** + * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table + * @ip: the ip address family type + * @hdr_sz: header size + * @max_rt_idx: maximal index + * + * Returns: size on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + * the MSB set in rt_idx_bitmap indicates the size of hdr of routing tbl + */ +static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz, + int *max_rt_idx) +{ + struct ipa_rt_tbl_set *set; + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + u32 total_sz = 0; + u32 tbl_sz; + u32 bitmap = ipa_ctx->rt_idx_bitmap[ip]; + int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND; + int i; + int res; + + *hdr_sz = 0; + set = &ipa_ctx->rt_tbl_set[ip]; + + for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) { + if (bitmap & IPA_RT_BIT_MASK) + highest_bit_set = i; + bitmap >>= 1; + } + + *max_rt_idx = highest_bit_set; + if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) { + IPAERR("no rt tbls present\n"); + total_sz = IPA_RT_TABLE_WORD_SIZE; + *hdr_sz = IPA_RT_TABLE_WORD_SIZE; + return total_sz; + } + + *hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE; + total_sz += *hdr_sz; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + tbl_sz = 0; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule( + ip, + entry, + NULL); + if (res) { + IPAERR("failed to find HW RT rule size\n"); + return -EPERM; + } + tbl_sz += entry->hw_len; + } + + 
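+ /* Descriptive note (added): tbl_sz now holds the accumulated HW size of
+  * this table's rules; below it is recorded (plus one terminator word) in
+  * tbl->sz and, for local (non-system) tables, added to total_sz rounded
+  * up to the rule-entry word alignment.
+  */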
if (tbl_sz) + tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE; + + if (tbl->in_sys) + continue; + + if (tbl_sz) { + /* add the terminator */ + total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE); + /* every rule-set should start at word boundary */ + total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) & + ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT; + } + } + + IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip); + + return total_sz; +} + +static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr, + u32 body_ofst, u32 apps_start_idx) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + struct ipa_rt_tbl_set *set; + u32 offset; + u8 *body; + struct ipa_mem_buffer rt_tbl_mem; + u8 *rt_tbl_mem_body; + int res; + + /* build the rt tbl in the DMA buffer to submit to IPA HW */ + body = base; + + set = &ipa_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (!tbl->in_sys) { + offset = body - base + body_ofst; + if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) { + IPAERR("offset is not word multiple %d\n", + offset); + goto proc_err; + } + + /* convert offset to words from bytes */ + offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT; + /* rule is at an offset from base */ + offset |= IPA_RT_BIT_MASK; + + /* update the hdr at the right index */ + ipa_write_32(offset, hdr + + ((tbl->idx - apps_start_idx) * + IPA_RT_TABLE_WORD_SIZE)); + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule( + ip, + entry, + body); + if (res) { + IPAERR("failed to gen HW RT rule\n"); + goto proc_err; + } + body += entry->hw_len; + } + + /* write the rule-set terminator */ + body = ipa_write_32(0, body); + if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) + /* advance body to next word boundary */ + body = body + (IPA_RT_TABLE_WORD_SIZE - + ((long)body & + IPA_RT_ENTRY_MEMORY_ALLIGNMENT)); + } else { + if (tbl->sz == 0) { + IPAERR("cannot generate 0 size table\n"); + goto proc_err; + } + + /* allocate memory for the RT tbl */ + rt_tbl_mem.size = tbl->sz; + rt_tbl_mem.base = + dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size, + &rt_tbl_mem.phys_base, GFP_KERNEL); + if (!rt_tbl_mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", + rt_tbl_mem.size); + WARN_ON(1); + goto proc_err; + } + + WARN_ON(rt_tbl_mem.phys_base & + IPA_RT_ENTRY_MEMORY_ALLIGNMENT); + rt_tbl_mem_body = rt_tbl_mem.base; + memset(rt_tbl_mem.base, 0, rt_tbl_mem.size); + /* update the hdr at the right index */ + ipa_write_32(rt_tbl_mem.phys_base, + hdr + ((tbl->idx - apps_start_idx) * + IPA_RT_TABLE_WORD_SIZE)); + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule( + ip, + entry, + rt_tbl_mem_body); + if (res) { + IPAERR("failed to gen HW RT rule\n"); + WARN_ON(1); + goto rt_table_mem_alloc_failed; + } + rt_tbl_mem_body += entry->hw_len; + } + + /* write the rule-set terminator */ + rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body); + + if (tbl->curr_mem.phys_base) { + WARN_ON(tbl->prev_mem.phys_base); + tbl->prev_mem = tbl->curr_mem; + } + tbl->curr_mem = rt_tbl_mem; + } + } + + return 0; + +rt_table_mem_alloc_failed: + dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size, + rt_tbl_mem.base, rt_tbl_mem.phys_base); +proc_err: + return -EPERM; +} + + +/** + * ipa_generate_rt_hw_tbl() - generates the routing hardware table + * @ip: [in] the ip address family type + * @mem: [out] buffer to put the filtering table + * + * Returns: 0 on 
success, negative on failure + */ +static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem) +{ + u32 hdr_sz; + u8 *hdr; + u8 *body; + u8 *base; + int max_rt_idx; + int i; + int res; + + res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx); + if (res < 0) { + IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res); + goto error; + } + + mem->size = res; + mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) & + ~IPA_RT_TABLE_MEMORY_ALLIGNMENT; + + if (mem->size == 0) { + IPAERR("rt tbl empty ip=%d\n", ip); + goto error; + } + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + goto error; + } + + /* build the rt tbl in the DMA buffer to submit to IPA HW */ + base = hdr = (u8 *)mem->base; + body = base + hdr_sz; + + /* setup all indices to point to the empty sys rt tbl */ + for (i = 0; i <= max_rt_idx; i++) + ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base, + hdr + (i * IPA_RT_TABLE_WORD_SIZE)); + + if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) { + IPAERR("fail to generate RT tbl\n"); + goto proc_err; + } + + return 0; + +proc_err: + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); + mem->base = NULL; +error: + return -EPERM; +} + +static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_tbl *next; + struct ipa_rt_tbl_set *set; + + set = &ipa_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (tbl->prev_mem.phys_base) { + IPADBG_LOW("reaping rt"); + IPADBG_LOW("tbl name=%s ip=%d\n", + tbl->name, ip); + dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, + tbl->prev_mem.base, + tbl->prev_mem.phys_base); + memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); + } + } + + set = &ipa_ctx->reap_rt_tbl_set[ip]; + list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) { + list_del(&tbl->link); + WARN_ON(tbl->prev_mem.phys_base != 0); + if (tbl->curr_mem.phys_base) { + IPADBG_LOW("reaping sys"); + IPADBG_LOW("rt tbl name=%s ip=%d\n", + tbl->name, ip); + dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size, + tbl->curr_mem.base, + tbl->curr_mem.phys_base); + kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl); + } + } +} + +int __ipa_commit_rt_v1_1(enum ipa_ip_type ip) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer *mem; + void *cmd; + struct ipa_ip_v4_routing_init *v4; + struct ipa_ip_v6_routing_init *v6; + u16 avail; + u16 size; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); + if (!mem) { + IPAERR("failed to alloc memory object\n"); + goto fail_alloc_mem; + } + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE : + IPA_MEM_PART(v4_rt_size_ddr); + size = sizeof(struct ipa_ip_v4_routing_init); + } else { + avail = ipa_ctx->ip6_rt_tbl_lcl ? 
IPA_MEM_v1_RAM_V6_RT_SIZE : + IPA_MEM_PART(v6_rt_size_ddr); + size = sizeof(struct ipa_ip_v6_routing_init); + } + cmd = kmalloc(size, flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_alloc_cmd; + } + + if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) { + IPAERR("fail to generate RT HW TBL ip %d\n", ip); + goto fail_hw_tbl_gen; + } + + if (mem->size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail); + goto fail_send_cmd; + } + + if (ip == IPA_IP_v4) { + v4 = (struct ipa_ip_v4_routing_init *)cmd; + desc.opcode = IPA_IP_V4_ROUTING_INIT; + v4->ipv4_rules_addr = mem->phys_base; + v4->size_ipv4_rules = mem->size; + v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST; + IPADBG("putting Routing IPv4 rules to phys 0x%x", + v4->ipv4_addr); + } else { + v6 = (struct ipa_ip_v6_routing_init *)cmd; + desc.opcode = IPA_IP_V6_ROUTING_INIT; + v6->ipv6_rules_addr = mem->phys_base; + v6->size_ipv6_rules = mem->size; + v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST; + IPADBG("putting Routing IPv6 rules to phys 0x%x", + v6->ipv6_addr); + } + + desc.pyld = cmd; + desc.len = size; + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + goto fail_send_cmd; + } + + __ipa_reap_sys_rt_tbls(ip); + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); + kfree(cmd); + kfree(mem); + + return 0; + +fail_send_cmd: + if (mem->base) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +fail_hw_tbl_gen: + kfree(cmd); +fail_alloc_cmd: + kfree(mem); +fail_alloc_mem: + return -EPERM; +} + +static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head) +{ + u32 hdr_sz; + u8 *hdr; + u8 *body; + u8 *base; + int max_rt_idx; + int i; + u32 *entr; + int num_index; + u32 body_start_offset; + u32 apps_start_idx; + int res; + + if (ip == IPA_IP_v4) { + num_index = IPA_MEM_PART(v4_apps_rt_index_hi) - + IPA_MEM_PART(v4_apps_rt_index_lo) + 1; + body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) - + IPA_MEM_PART(v4_rt_ofst); + apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo); + } else { + num_index = IPA_MEM_PART(v6_apps_rt_index_hi) - + IPA_MEM_PART(v6_apps_rt_index_lo) + 1; + body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) - + IPA_MEM_PART(v6_rt_ofst); + apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo); + } + + head->size = num_index * 4; + head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size, + &head->phys_base, GFP_KERNEL); + if (!head->base) { + IPAERR("fail to alloc DMA buff of size %d\n", head->size); + goto err; + } + entr = (u32 *)head->base; + hdr = (u8 *)head->base; + for (i = 1; i <= num_index; i++) { + *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; + entr++; + } + + res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx); + if (res < 0) { + IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res); + goto base_err; + } + + mem->size = res; + mem->size -= hdr_sz; + mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) & + ~IPA_RT_TABLE_MEMORY_ALLIGNMENT; + + if (mem->size > 0) { + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", + mem->size); + goto base_err; + } + } + + /* build the rt tbl in the DMA buffer to submit to IPA HW */ + body = base = (u8 *)mem->base; + + if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset, + apps_start_idx)) { + IPAERR("fail to 
generate RT tbl\n"); + goto proc_err; + } + + return 0; + +proc_err: + if (mem->size) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +base_err: + dma_free_coherent(ipa_ctx->pdev, head->size, head->base, + head->phys_base); +err: + return -EPERM; +} + +int __ipa_commit_rt_v2(enum ipa_ip_type ip) +{ + struct ipa_desc desc[2]; + struct ipa_mem_buffer body; + struct ipa_mem_buffer head; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u16 avail; + u32 num_modem_rt_index; + int rc = 0; + u32 local_addr1; + u32 local_addr2; + bool lcl; + + memset(desc, 0, 2 * sizeof(struct ipa_desc)); + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_rt_tbl_lcl ? + IPA_MEM_PART(apps_v4_rt_size) : + IPA_MEM_PART(v4_rt_size_ddr); + num_modem_rt_index = + IPA_MEM_PART(v4_modem_rt_index_hi) - + IPA_MEM_PART(v4_modem_rt_index_lo) + 1; + local_addr1 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_ofst) + + num_modem_rt_index * 4; + local_addr2 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_rt_ofst); + lcl = ipa_ctx->ip4_rt_tbl_lcl; + } else { + avail = ipa_ctx->ip6_rt_tbl_lcl ? + IPA_MEM_PART(apps_v6_rt_size) : + IPA_MEM_PART(v6_rt_size_ddr); + num_modem_rt_index = + IPA_MEM_PART(v6_modem_rt_index_hi) - + IPA_MEM_PART(v6_modem_rt_index_lo) + 1; + local_addr1 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_ofst) + + num_modem_rt_index * 4; + local_addr2 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_rt_ofst); + lcl = ipa_ctx->ip6_rt_tbl_lcl; + } + + if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) { + IPAERR("fail to generate RT HW TBL ip %d\n", ip); + rc = -EFAULT; + goto fail_gen; + } + + if (body.size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", body.size, avail); + rc = -EFAULT; + goto fail_send_cmd; + } + + cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + flag); + if (cmd1 == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd1->size = head.size; + cmd1->system_addr = head.phys_base; + cmd1->local_addr = local_addr1; + desc[0].opcode = IPA_DMA_SHARED_MEM; + desc[0].pyld = (void *)cmd1; + desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[0].type = IPA_IMM_CMD_DESC; + + if (lcl) { + cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + flag); + if (cmd2 == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + + cmd2->size = body.size; + cmd2->system_addr = body.phys_base; + cmd2->local_addr = local_addr2; + + desc[1].opcode = IPA_DMA_SHARED_MEM; + desc[1].pyld = (void *)cmd2; + desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[1].type = IPA_IMM_CMD_DESC; + + if (ipa_send_cmd(2, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd2; + } + } else { + if (ipa_send_cmd(1, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd1; + } + } + + IPADBG("HEAD\n"); + IPA_DUMP_BUFF(head.base, head.phys_base, head.size); + if (body.size) { + IPADBG("BODY\n"); + IPA_DUMP_BUFF(body.base, body.phys_base, body.size); + } + __ipa_reap_sys_rt_tbls(ip); + +fail_send_cmd2: + kfree(cmd2); +fail_send_cmd1: + kfree(cmd1); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base); + if (body.size) + dma_free_coherent(ipa_ctx->pdev, body.size, body.base, + 
body.phys_base); +fail_gen: + return rc; +} + +/** + * __ipa_find_rt_tbl() - find the routing table + * which name is given as parameter + * @ip: [in] the ip address family type of the wanted routing table + * @name: [in] the name of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name) +{ + struct ipa_rt_tbl *entry; + struct ipa_rt_tbl_set *set; + + set = &ipa_ctx->rt_tbl_set[ip]; + list_for_each_entry(entry, &set->head_rt_tbl_list, link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa2_query_rt_index() - find the routing table index + * which name and ip type are given as parameters + * @in: [out] the index of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + struct ipa_rt_tbl *entry; + + if (in->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + /* check if this table exists */ + in->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_rt_tbl(in->ip, in->name); + if (!entry) { + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + in->idx = entry->idx; + mutex_unlock(&ipa_ctx->lock); + return 0; +} + +static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, + const char *name) +{ + struct ipa_rt_tbl *entry; + struct ipa_rt_tbl_set *set; + int i; + int id; + + if (ip >= IPA_IP_MAX || name == NULL) { + IPAERR("bad parm\n"); + goto error; + } + + set = &ipa_ctx->rt_tbl_set[ip]; + /* check if this table exists */ + entry = __ipa_find_rt_tbl(ip, name); + if (!entry) { + entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc RT tbl object\n"); + goto error; + } + /* find a routing tbl index */ + for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) { + if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) { + entry->idx = i; + set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]); + break; + } + } + if (i == IPA_RT_INDEX_BITMAP_SIZE) { + IPAERR("not free RT tbl indices left\n"); + goto fail_rt_idx_alloc; + } + + INIT_LIST_HEAD(&entry->head_rt_rule_list); + INIT_LIST_HEAD(&entry->link); + strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); + entry->set = set; + entry->cookie = IPA_RT_TBL_COOKIE; + entry->in_sys = (ip == IPA_IP_v4) ? 
+ !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl; + set->tbl_cnt++; + list_add(&entry->link, &set->head_rt_tbl_list); + + IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx, + set->tbl_cnt, ip); + + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + } + + return entry; + +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); +fail_rt_idx_alloc: + entry->cookie = 0; + kmem_cache_free(ipa_ctx->rt_tbl_cache, entry); +error: + return NULL; +} + +static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) +{ + enum ipa_ip_type ip = IPA_IP_MAX; + u32 id; + + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("bad parms\n"); + return -EINVAL; + } + id = entry->id; + if (ipa_id_find(id) == NULL) { + IPAERR_RL("lookup failed\n"); + return -EPERM; + } + + if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON(1); + return -EPERM; + } + + + if (!entry->in_sys) { + list_del(&entry->link); + clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG_LOW("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx, + entry->set->tbl_cnt); + kmem_cache_free(ipa_ctx->rt_tbl_cache, entry); + } else { + list_move(&entry->link, + &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list); + clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG_LOW("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx, + entry->set->tbl_cnt); + } + + /* remove the handle from the database */ + ipa_id_remove(id); + return 0; +} + +static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, + const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, + bool user) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + struct ipa_hdr_entry *hdr = NULL; + struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL; + int id; + + if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) { + IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n"); + goto error; + } + + if (rule->hdr_hdl) { + hdr = ipa_id_find(rule->hdr_hdl); + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { + IPAERR("rt rule does not point to valid hdr\n"); + goto error; + } + } else if (rule->hdr_proc_ctx_hdl) { + proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl); + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR("rt rule does not point to valid proc ctx\n"); + goto error; + } + } + + + tbl = __ipa_add_rt_tbl(ip, name); + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR("bad params\n"); + goto error; + } + /* + * do not allow any rules to be added at end of the "default" routing + * tables + */ + if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && + (tbl->rule_cnt > 0)) { + IPAERR_RL("cannot add rules to default rt table\n"); + goto error; + } + + entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc RT rule object\n"); + goto error; + } + INIT_LIST_HEAD(&entry->link); + entry->cookie = IPA_RT_RULE_COOKIE; + entry->rule = *rule; + entry->tbl = tbl; + entry->hdr = hdr; + entry->proc_ctx = proc_ctx; + if (at_rear) + list_add_tail(&entry->link, &tbl->head_rt_rule_list); + else + list_add(&entry->link, &tbl->head_rt_rule_list); + tbl->rule_cnt++; + if (entry->hdr) + entry->hdr->ref_cnt++; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt++; + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to 
tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + IPADBG_LOW("add rt rule tbl_idx=%d", tbl->idx); + IPADBG_LOW("rule_cnt=%d\n", tbl->rule_cnt); + *rule_hdl = id; + entry->id = id; + entry->ipacm_installed = user; + + return 0; + +ipa_insert_failed: + if (entry->hdr) + entry->hdr->ref_cnt--; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt--; + list_del(&entry->link); + kmem_cache_free(ipa_ctx->rt_rule_cache, entry); +error: + return -EPERM; +} + +/** + * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + return ipa2_add_rt_rule_usr(rules, false); +} + +/** + * ipa2_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate installed by userspace module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ + int i; + int ret; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].rt_rule_hdl, + user_only)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return ret; +} + +int __ipa_del_rt_rule(u32 rule_hdl) +{ + struct ipa_rt_entry *entry; + int id; + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *hdr_proc_entry; + + entry = ipa_id_find(rule_hdl); + + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPADBG("Deleting rule from default rt table idx=%u\n", + entry->tbl->idx); + if (entry->tbl->rule_cnt == 1) { + IPAERR_RL("Default tbl last rule cannot be deleted\n"); + return -EINVAL; + } + } + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EINVAL; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EINVAL; + } + } + + if (entry->hdr) + __ipa_release_hdr(entry->hdr->id); + else if (entry->proc_ctx) + __ipa_release_hdr_proc_ctx(entry->proc_ctx->id); + list_del(&entry->link); + entry->tbl->rule_cnt--; + IPADBG_LOW("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx, + entry->tbl->rule_cnt); + if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) { + if (__ipa_del_rt_tbl(entry->tbl)) + 
IPAERR_RL("fail to del RT tbl\n"); + } + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->rt_rule_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + + return 0; +} + +/** + * ipa2_del_rt_rule() - Remove the specified routing rules to SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + int i; + int ret; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del rt rule %i\n", i); + hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return ret; +} + +/** + * ipa2_commit_rt_rule() - Commit the current SW routing table of specified type + * to IPA HW + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_commit_rt(enum ipa_ip_type ip) +{ + int ret; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + /* + * issue a commit on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa2_commit_flt(ip)) + return -EPERM; + + mutex_lock(&ipa_ctx->lock); + if (ipa_ctx->ctrl->ipa_commit_rt(ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return ret; +} + +/** + * ipa2_reset_rt() - reset the current SW routing table of specified type + * (does not commit to HW) + * @ip: [in] The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_tbl *tbl_next; + struct ipa_rt_tbl_set *set; + struct ipa_rt_entry *rule; + struct ipa_rt_entry *rule_next; + struct ipa_rt_tbl_set *rset; + u32 apps_start_idx; + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *hdr_proc_entry; + int id; + bool tbl_user = false; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + if (ip == IPA_IP_v4) + apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo); + else + apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo); + } else { + apps_start_idx = 0; + } + + /* + * issue a reset on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa2_reset_flt(ip, user_only)) + IPAERR_RL("fail to reset flt ip=%d\n", ip); + + set = &ipa_ctx->rt_tbl_set[ip]; + rset = &ipa_ctx->reap_rt_tbl_set[ip]; + mutex_lock(&ipa_ctx->lock); + IPADBG("reset rt ip=%d\n", ip); + list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + tbl_user = false; + list_for_each_entry_safe(rule, rule_next, + &tbl->head_rt_rule_list, link) { + if (ipa_id_find(rule->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + /* indicate if tbl used for user-specified rules*/ + if 
(rule->ipacm_installed) { + IPADBG("tbl_user %d, tbl-index %d\n", + tbl_user, tbl->id); + tbl_user = true; + } + /* + * for the "default" routing tbl, remove all but the + * last rule + */ + if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) + continue; + if (!user_only || + rule->ipacm_installed) { + list_del(&rule->link); + if (rule->hdr) { + hdr_entry = ipa_id_find( + rule->rule.hdr_hdl); + if (!hdr_entry || + hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL( + "Header already deleted\n"); + mutex_unlock(&ipa_ctx->lock); + return -EINVAL; + } + } else if (rule->proc_ctx) { + hdr_proc_entry = + ipa_id_find( + rule->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != + IPA_PROC_HDR_COOKIE) { + IPAERR_RL( + "Proc entry already deleted\n"); + mutex_unlock(&ipa_ctx->lock); + return -EINVAL; + } + } + tbl->rule_cnt--; + if (rule->hdr) + __ipa_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa_release_hdr_proc_ctx( + rule->proc_ctx->id); + rule->cookie = 0; + id = rule->id; + kmem_cache_free(ipa_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + + if (ipa_id_find(tbl->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + id = tbl->id; + + /* do not remove the "default" routing tbl which has index 0 */ + if (tbl->idx != apps_start_idx) { + if (!user_only || tbl_user) { + if (!tbl->in_sys) { + list_del(&tbl->link); + set->tbl_cnt--; + clear_bit(tbl->idx, + &ipa_ctx->rt_idx_bitmap[ip]); + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + tbl->idx, set->tbl_cnt); + kmem_cache_free(ipa_ctx->rt_tbl_cache, + tbl); + } else { + list_move(&tbl->link, + &rset->head_rt_tbl_list); + clear_bit(tbl->idx, + &ipa_ctx->rt_idx_bitmap[ip]); + set->tbl_cnt--; + IPADBG("rst tbl_idx=%d cnt=%d\n", + tbl->idx, set->tbl_cnt); + } + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + } + + /* commit the change to IPA-HW */ + if (ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v4) || + ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v6)) { + IPAERR("fail to commit rt-rule\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa_ctx->lock); + return -EPERM; + } + mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +/** + * ipa2_get_rt_tbl() - lookup the specified routing table and return handle if + * it exists, if lookup succeeds the routing table ref cnt is increased + * @lookup: [inout] routing table to lookup and its handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_rt_tbl later if this function succeeds + */ +int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + struct ipa_rt_tbl *entry; + int result = -EFAULT; + + if (lookup == NULL || lookup->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_rt_tbl(lookup->ip, lookup->name); + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { + if (entry->ref_cnt == U32_MAX) { + IPAERR("fail: ref count crossed limit\n"); + goto ret; + } + entry->ref_cnt++; + lookup->hdl = entry->id; + + /* commit for get */ + if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip)) + IPAERR_RL("fail to commit RT tbl\n"); + + result = 0; + } + +ret: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_put_rt_tbl() - Release the specified routing table handle + * @rt_tbl_hdl: [in] the routing table handle to release + * + * Returns: 0 on success, negative on failure + * + 
* Note: Should not be called from atomic context + */ +int ipa2_put_rt_tbl(u32 rt_tbl_hdl) +{ + struct ipa_rt_tbl *entry; + enum ipa_ip_type ip = IPA_IP_MAX; + int result = 0; + + mutex_lock(&ipa_ctx->lock); + entry = ipa_id_find(rt_tbl_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto ret; + } + + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { + IPAERR_RL("bad parms\n"); + result = -EINVAL; + goto ret; + } + + if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON(1); + result = -EINVAL; + goto ret; + } + + entry->ref_cnt--; + if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { + if (__ipa_del_rt_tbl(entry)) + IPAERR_RL("fail to del RT tbl\n"); + /* commit for put */ + if (ipa_ctx->ctrl->ipa_commit_rt(ip)) + IPAERR_RL("fail to commit RT tbl\n"); + } + + result = 0; + +ret: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + + +static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) +{ + struct ipa_rt_entry *entry; + struct ipa_hdr_entry *hdr = NULL; + struct ipa_hdr_entry *hdr_entry; + + if (rtrule->rule.hdr_hdl) { + hdr = ipa_id_find(rtrule->rule.hdr_hdl); + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid hdr\n"); + goto error; + } + } + + entry = ipa_id_find(rtrule->rt_rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPAERR_RL("Default tbl rule cannot be modified\n"); + return -EINVAL; + } + + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } + if (entry->hdr) + entry->hdr->ref_cnt--; + + entry->rule = rtrule->rule; + entry->hdr = hdr; + + if (entry->hdr) + entry->hdr->ref_cnt++; + + return 0; + +error: + return -EPERM; +} + +/** + * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + if (__ipa_mdfy_rt_rule(&hdls->rules[i])) { + IPAERR_RL("failed to mdfy rt rule %i\n", i); + hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..d07bbaad0e09fd4ecee770bd2a8a13f5f026e89a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipa +#define TRACE_INCLUDE_FILE ipa_trace + +#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _IPA_TRACE_H + +#include + +TRACE_EVENT( + intr_to_poll, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + poll_to_intr, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_enter, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_exit, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + rmnet_ipa_netifni, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netifrx, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netif_rcv_skb, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); +#endif /* _IPA_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c new file mode 100644 index 0000000000000000000000000000000000000000..6a268ca091e1ca90964852766279058044d29c89 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c @@ -0,0 +1,938 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2017, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" +#include + +#define IPA_RAM_UC_SMEM_SIZE 128 +#define IPA_HW_INTERFACE_VERSION 0x0111 +#define IPA_PKT_FLUSH_TO_US 100 +#define IPA_UC_POLL_SLEEP_USEC 100 +#define IPA_UC_POLL_MAX_RETRY 10000 +#define HOLB_WORKQUEUE_NAME "ipa_holb_wq" + +static struct workqueue_struct *ipa_holb_wq; +static void ipa_start_monitor_holb(struct work_struct *work); +static DECLARE_WORK(ipa_holb_work, ipa_start_monitor_holb); + +/** + * enum ipa_cpu_2_hw_commands - Values that represent the commands from the CPU + * IPA_CPU_2_HW_CMD_NO_OP : No operation is required. + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior + * of HW. + * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW. + * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information. + * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal + * handling. 
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state. + * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state. + * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB. + * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug. + */ +enum ipa_cpu_2_hw_commands { + IPA_CPU_2_HW_CMD_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_CPU_2_HW_CMD_UPDATE_FLAGS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_CPU_2_HW_CMD_DEBUG_GET_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_CPU_2_HW_CMD_ERR_FATAL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_CPU_2_HW_CMD_CLK_GATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5), + IPA_CPU_2_HW_CMD_CLK_UNGATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6), + IPA_CPU_2_HW_CMD_MEMCPY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7), + IPA_CPU_2_HW_CMD_RESET_PIPE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8), + IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9), +}; + +/** + * enum ipa_hw_2_cpu_responses - Values that represent common HW responses + * to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once + * boot sequence is completed and HW is ready to serve commands from CPU + * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands + */ +enum ipa_hw_2_cpu_responses { + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), +}; + +/** + * enum ipa_hw_2_cpu_events - Values that represent HW event to be sent to CPU. + * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the + * device + * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information + */ +enum ipa_hw_2_cpu_events { + IPA_HW_2_CPU_EVENT_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_EVENT_LOG_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), +}; + +/** + * enum ipa_hw_errors - Common error types. + * @IPA_HW_ERROR_NONE : No error persists + * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell + * @IPA_HW_DMA_ERROR : Unexpected DMA error + * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset. + * @IPA_HW_INVALID_OPCODE : Invalid opcode sent + * @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error + */ +enum ipa_hw_errors { + IPA_HW_ERROR_NONE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_INVALID_DOORBELL_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_DMA_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_HW_FATAL_SYSTEM_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_HW_INVALID_OPCODE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_HW_ZIP_ENGINE_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5) +}; + +/** + * struct IpaHwResetPipeCmdData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_MEMCPY command. + * + * The parameters are passed as immediate params in the shared memory + */ +struct IpaHwMemCopyData_t { + u32 destination_addr; + u32 source_addr; + u32 dest_buffer_size; + u32 source_buffer_size; +}; + +/** + * union IpaHwResetPipeCmdData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_RESET_PIPE command. 
+ * @pipeNum : Pipe number to be reset + * @direction : 1 - IPA Producer, 0 - IPA Consumer + * @reserved_02_03 : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwResetPipeCmdData_t { + struct IpaHwResetPipeCmdParams_t { + u8 pipeNum; + u8 direction; + u32 reserved_02_03; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwmonitorHolbCmdData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING command. + * @monitorPipe : Indication whether to monitor the pipe. 0 – Do not Monitor + * Pipe, 1 – Monitor Pipe + * @pipeNum : Pipe to be monitored/not monitored + * @reserved_02_03 : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwmonitorHolbCmdData_t { + struct IpaHwmonitorHolbCmdParams_t { + u8 monitorPipe; + u8 pipeNum; + u32 reserved_02_03:16; + } __packed params; + u32 raw32b; +} __packed; + + +/** + * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters + * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response. + * @originalCmdOp : The original command opcode + * @status : 0 for success indication, otherwise failure + * @reserved : Reserved + * + * Parameters are sent as 32b immediate parameters. + */ +union IpaHwCpuCmdCompletedResponseData_t { + struct IpaHwCpuCmdCompletedResponseParams_t { + u32 originalCmdOp:8; + u32 status:8; + u32 reserved:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwErrorEventData_t - HW->CPU Common Events + * @errorType : Entered when a system error is detected by the HW. Type of + * error is specified by IPA_HW_ERRORS + * @reserved : Reserved + */ +union IpaHwErrorEventData_t { + struct IpaHwErrorEventParams_t { + u32 errorType:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command + * @newFlags: SW flags defined the behavior of HW. 
+ * This field is expected to be used as bitmask for enum ipa_hw_flags + */ +union IpaHwUpdateFlagsCmdData_t { + struct IpaHwUpdateFlagsCmdParams_t { + u32 newFlags; + } params; + u32 raw32b; +}; + +static struct ipa_uc_hdlrs uc_hdlrs[IPA_HW_NUM_FEATURES] = { { NULL } }; + +static inline const char *ipa_hw_error_str(enum ipa_hw_errors err_type) +{ + const char *str; + + switch (err_type) { + case IPA_HW_ERROR_NONE: + str = "IPA_HW_ERROR_NONE"; + break; + case IPA_HW_INVALID_DOORBELL_ERROR: + str = "IPA_HW_INVALID_DOORBELL_ERROR"; + break; + case IPA_HW_FATAL_SYSTEM_ERROR: + str = "IPA_HW_FATAL_SYSTEM_ERROR"; + break; + case IPA_HW_INVALID_OPCODE: + str = "IPA_HW_INVALID_OPCODE"; + break; + case IPA_HW_ZIP_ENGINE_ERROR: + str = "IPA_HW_ZIP_ENGINE_ERROR"; + break; + default: + str = "INVALID ipa_hw_errors type"; + } + + return str; +} + +static void ipa_log_evt_hdlr(void) +{ + int i; + + if (!ipa_ctx->uc_ctx.uc_event_top_ofst) { + ipa_ctx->uc_ctx.uc_event_top_ofst = + ipa_ctx->uc_ctx.uc_sram_mmio->eventParams; + if (ipa_ctx->uc_ctx.uc_event_top_ofst + + sizeof(struct IpaHwEventLogInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_top 0x%x outside SRAM\n", + ipa_ctx->uc_ctx.uc_event_top_ofst); + goto bad_uc_top_ofst; + } + + ipa_ctx->uc_ctx.uc_event_top_mmio = ioremap( + ipa_ctx->ipa_wrapper_base + + ipa_ctx->uc_ctx.uc_event_top_ofst, + sizeof(struct IpaHwEventLogInfoData_t)); + if (!ipa_ctx->uc_ctx.uc_event_top_mmio) { + IPAERR("fail to ioremap uc top\n"); + goto bad_uc_top_ofst; + } + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (uc_hdlrs[i].ipa_uc_event_log_info_hdlr) + uc_hdlrs[i].ipa_uc_event_log_info_hdlr + (ipa_ctx->uc_ctx.uc_event_top_mmio); + } + } else { + + if (ipa_ctx->uc_ctx.uc_sram_mmio->eventParams != + ipa_ctx->uc_ctx.uc_event_top_ofst) { + IPAERR("uc top ofst changed new=%u cur=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventParams, + ipa_ctx->uc_ctx.uc_event_top_ofst); + } + } + return; + +bad_uc_top_ofst: + ipa_ctx->uc_ctx.uc_event_top_ofst = 0; +} + +/** + * ipa2_uc_state_check() - Check the status of the uC interface + * + * Return value: 0 if the uC is loaded, interface is initialized + * and there was no recent failure in one of the commands. + * A negative value is returned otherwise. 
+ */ +int ipa2_uc_state_check(void) +{ + if (!ipa_ctx->uc_ctx.uc_inited) { + IPAERR("uC interface not initialized\n"); + return -EFAULT; + } + + if (!ipa_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded\n"); + return -EFAULT; + } + + if (ipa_ctx->uc_ctx.uc_failed) { + IPAERR("uC has failed its last command\n"); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(ipa2_uc_state_check); + +/** + * ipa_uc_loaded_check() - Check the uC has been loaded + * + * Return value: 1 if the uC is loaded, 0 otherwise + */ +int ipa_uc_loaded_check(void) +{ + return ipa_ctx->uc_ctx.uc_loaded; +} +EXPORT_SYMBOL(ipa_uc_loaded_check); + +static void ipa_uc_event_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwErrorEventData_t evt; + u8 feature; + + WARN_ON(private_data != ipa_ctx); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + IPADBG("uC evt opcode=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + + + feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + /* Feature specific handling */ + if (uc_hdlrs[feature].ipa_uc_event_hdlr) + uc_hdlrs[feature].ipa_uc_event_hdlr + (ipa_ctx->uc_ctx.uc_sram_mmio); + + /* General handling */ + if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_ERROR) { + evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams; + IPAERR("uC Error, evt errorType = %s\n", + ipa_hw_error_str(evt.params.errorType)); + ipa_ctx->uc_ctx.uc_failed = true; + ipa_ctx->uc_ctx.uc_error_type = evt.params.errorType; + if (evt.params.errorType == IPA_HW_ZIP_ENGINE_ERROR) { + IPAERR("IPA has encountered a ZIP engine error\n"); + ipa_ctx->uc_ctx.uc_zip_error = true; + } + ipa_assert(); + } else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_LOG_INFO) { + IPADBG("uC evt log info ofst=0x%x\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventParams); + ipa_log_evt_hdlr(); + } else { + IPADBG("unsupported uC evt opcode=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +} + +static int ipa_uc_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int result = 0; + struct ipa_active_client_logging_info log_info; + + IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr); + + result = ipa2_uc_state_check(); + if (result) + goto fail; + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + if (ipa2_inc_client_enable_clks_no_block(&log_info)) + goto fail; + + ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp = + IPA_CPU_2_HW_CMD_ERR_FATAL; + /* ensure write to shared memory is done before triggering uc */ + wmb(); + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1); + /* give uc enough time to save state */ + udelay(IPA_PKT_FLUSH_TO_US); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("err_fatal issued\n"); + +fail: + return NOTIFY_DONE; +} + +static struct notifier_block ipa_uc_panic_blk = { + .notifier_call = ipa_uc_panic_notifier, +}; + +void ipa_register_panic_hdlr(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &ipa_uc_panic_blk); +} + +static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + u8 feature; + int res; + int i; + + WARN_ON(private_data != ipa_ctx); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + IPADBG("uC rsp opcode=%u\n", + 
ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); + + feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + + /* Feature specific handling */ + if (uc_hdlrs[feature].ipa_uc_response_hdlr) { + res = uc_hdlrs[feature].ipa_uc_response_hdlr( + ipa_ctx->uc_ctx.uc_sram_mmio, + &ipa_ctx->uc_ctx.uc_status); + if (res == 0) { + IPADBG("feature %d specific response handler\n", + feature); + complete_all(&ipa_ctx->uc_ctx.uc_completion); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + } + + /* General handling */ + if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) { + ipa_ctx->uc_ctx.uc_loaded = true; + IPAERR("IPA uC loaded\n"); + /* + * The proxy vote is held until uC is loaded to ensure that + * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received. + */ + ipa2_proxy_clk_unvote(); + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (uc_hdlrs[i].ipa_uc_loaded_hdlr) + uc_hdlrs[i].ipa_uc_loaded_hdlr(); + } + /* Queue the work to enable holb monitoring on IPA-USB Producer + * pipe if valid. + */ + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) + queue_work(ipa_holb_wq, &ipa_holb_work); + } else if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->responseParams; + IPADBG("uC cmd response opcode=%u status=%u\n", + uc_rsp.params.originalCmdOp, + uc_rsp.params.status); + if (uc_rsp.params.originalCmdOp == + ipa_ctx->uc_ctx.pending_cmd) { + ipa_ctx->uc_ctx.uc_status = uc_rsp.params.status; + complete_all(&ipa_ctx->uc_ctx.uc_completion); + } else { + IPAERR("Expected cmd=%u rcvd cmd=%u\n", + ipa_ctx->uc_ctx.pending_cmd, + uc_rsp.params.originalCmdOp); + } + } else { + IPAERR("Unsupported uC rsp opcode = %u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa_uc_interface_init() - Initialize the interface with the uC + * + * Return value: 0 on success, negative value otherwise + */ +int ipa_uc_interface_init(void) +{ + int result; + unsigned long phys_addr; + + if (ipa_ctx->uc_ctx.uc_inited) { + IPADBG("uC interface already initialized\n"); + return 0; + } + + ipa_holb_wq = create_singlethread_workqueue( + HOLB_WORKQUEUE_NAME); + if (!ipa_holb_wq) { + IPAERR("HOLB workqueue creation failed\n"); + return -ENOMEM; + } + + mutex_init(&ipa_ctx->uc_ctx.uc_lock); + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_SW_FIRST_v2_5; + } else { + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0( + ipa_ctx->smem_restricted_bytes / 4); + } + ipa_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr, + IPA_RAM_UC_SMEM_SIZE); + if (!ipa_ctx->uc_ctx.uc_sram_mmio) { + IPAERR("Fail to ioremap IPA uC SRAM\n"); + result = -ENOMEM; + goto remap_fail; + } + + result = ipa2_add_interrupt_handler(IPA_UC_IRQ_0, + ipa_uc_event_handler, true, + ipa_ctx); + if (result) { + IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail0; + } + + result = ipa2_add_interrupt_handler(IPA_UC_IRQ_1, + ipa_uc_response_hdlr, true, + ipa_ctx); + if (result) { + IPAERR("fail to register for UC_IRQ1 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail1; + } + + ipa_ctx->uc_ctx.uc_inited = true; + + 
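+	/*
+	 * From this point the uC interface is usable: the shared-memory
+	 * window is mapped and both UC_IRQ handlers are registered.
+	 * Feature modules hook in with ipa_uc_register_handlers() and talk
+	 * to the uC through ipa_uc_send_cmd(); for example, the clock
+	 * notification path issues
+	 *
+	 *	ipa_uc_send_cmd(0, IPA_CPU_2_HW_CMD_CLK_UNGATE, 0, true, 0);
+	 *
+	 * (see ipa_uc_notify_clk_state() below; illustrative call only).
+	 */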
IPADBG("IPA uC interface is initialized\n"); + return 0; + +irq_fail1: + ipa2_remove_interrupt_handler(IPA_UC_IRQ_0); +irq_fail0: + iounmap(ipa_ctx->uc_ctx.uc_sram_mmio); +remap_fail: + return result; +} +EXPORT_SYMBOL(ipa_uc_interface_init); + +/** + * ipa_uc_send_cmd() - Send a command to the uC + * + * Note: In case the operation times out (No response from the uC) or + * polling maximal amount of retries has reached, the logic + * considers it as an invalid state of the uC/IPA, and + * issues a kernel panic. + * + * Returns: 0 on success. + * -EINVAL in case of invalid input. + * -EBADF in case uC interface is not initialized / + * or the uC has failed previously. + * -EFAULT in case the received status doesn't match + * the expected. + */ +int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies) +{ + int index; + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + int retries = 0; + + mutex_lock(&ipa_ctx->uc_ctx.uc_lock); + + if (ipa2_uc_state_check()) { + IPADBG("uC send command aborted\n"); + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + return -EBADF; + } + +send_cmd: + init_completion(&ipa_ctx->uc_ctx.uc_completion); + + ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd; + ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode; + ipa_ctx->uc_ctx.pending_cmd = opcode; + + ipa_ctx->uc_ctx.uc_sram_mmio->responseOp = 0; + ipa_ctx->uc_ctx.uc_sram_mmio->responseParams = 0; + + ipa_ctx->uc_ctx.uc_status = 0; + + /* ensure write to shared memory is done before triggering uc */ + wmb(); + + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1); + + if (polling_mode) { + for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) { + if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = + ipa_ctx->uc_ctx.uc_sram_mmio->responseParams; + if (uc_rsp.params.originalCmdOp == + ipa_ctx->uc_ctx.pending_cmd) { + ipa_ctx->uc_ctx.pending_cmd = -1; + break; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC); + } + + if (index == IPA_UC_POLL_MAX_RETRY) { + IPAERR("uC max polling retries reached\n"); + if (ipa_ctx->uc_ctx.uc_failed) { + IPAERR("uC reported on Error, errorType = %s\n", + ipa_hw_error_str( + ipa_ctx->uc_ctx.uc_error_type)); + } + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + ipa_assert(); + return -EFAULT; + } + } else { + if (wait_for_completion_timeout(&ipa_ctx->uc_ctx.uc_completion, + timeout_jiffies) == 0) { + IPAERR("uC timed out\n"); + if (ipa_ctx->uc_ctx.uc_failed) { + IPAERR("uC reported on Error,errorType = %s\n", + ipa_hw_error_str( + ipa_ctx->uc_ctx.uc_error_type)); + } + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + ipa_assert(); + return -EFAULT; + } + } + + if (ipa_ctx->uc_ctx.uc_status != expected_status) { + if (IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR == + ipa_ctx->uc_ctx.uc_status) { + retries++; + if (retries == IPA_BAM_STOP_MAX_RETRY) { + IPAERR("Failed after %d tries\n", retries); + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + /* + * Max retry reached, + * assert to check why cmd send failed. 
+ */ + ipa_assert(); + } else { + /* sleep for short period to flush IPA */ + usleep_range(IPA_UC_WAIT_MIN_SLEEP, + IPA_UC_WAII_MAX_SLEEP); + goto send_cmd; + } + } + + IPAERR("Recevied status %u, Expected status %u\n", + ipa_ctx->uc_ctx.uc_status, expected_status); + ipa_ctx->uc_ctx.pending_cmd = -1; + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + return -EFAULT; + } + + ipa_ctx->uc_ctx.pending_cmd = -1; + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + + IPADBG("uC cmd %u send succeeded\n", opcode); + + return 0; +} +EXPORT_SYMBOL(ipa_uc_send_cmd); + +/** + * ipa_uc_register_handlers() - Registers event, response and log event + * handlers for a specific feature.Please note + * that currently only one handler can be + * registered per feature. + * + * Return value: None + */ +void ipa_uc_register_handlers(enum ipa_hw_features feature, + struct ipa_uc_hdlrs *hdlrs) +{ + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Feature %u is invalid, not registering hdlrs\n", + feature); + return; + } + + mutex_lock(&ipa_ctx->uc_ctx.uc_lock); + uc_hdlrs[feature] = *hdlrs; + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + + IPADBG("uC handlers registered for feature %u\n", feature); +} +EXPORT_SYMBOL(ipa_uc_register_handlers); + +/** + * ipa_uc_reset_pipe() - reset a BAM pipe using the uC interface + * @ipa_client: [in] ipa client handle representing the pipe + * + * The function uses the uC interface in order to issue a BAM + * PIPE reset request. The uC makes sure there's no traffic in + * the TX command queue before issuing the reset. + * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_reset_pipe(enum ipa_client_type ipa_client) +{ + union IpaHwResetPipeCmdData_t cmd; + int ep_idx; + int ret; + + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPAERR("Invalid IPA client\n"); + return 0; + } + + /* + * If the uC interface has not been initialized yet, + * continue with the sequence without resetting the + * pipe. + */ + if (ipa2_uc_state_check()) { + IPADBG("uC interface will not be used to reset %s pipe %d\n", + IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", + ep_idx); + return 0; + } + + /* + * IPA consumer = 0, IPA producer = 1. + * IPA driver concept of PROD/CONS is the opposite of the + * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER, + * and vice-versa. + */ + cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1); + cmd.params.pipeNum = (u8)ep_idx; + + IPADBG("uC pipe reset on IPA %s pipe %d\n", + IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx); + + ret = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0, + false, 10*HZ); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_reset_pipe); + +/** + * ipa_uc_monitor_holb() - Enable/Disable holb monitoring of a producer pipe. + * @ipa_client: [in] ipa client handle representing the pipe + * + * The function uses the uC interface in order to disable/enable holb + * monitoring. + * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable) +{ + union IpaHwmonitorHolbCmdData_t cmd; + int ep_idx; + int ret; + + /* + * HOLB monitoring is applicable to 2.6L. + * And also could be enabled from dtsi node. 
+ */
+	if (ipa_ctx->ipa_hw_type != IPA_HW_v2_6L ||
+		!ipa_ctx->ipa_uc_monitor_holb) {
+		IPADBG("Not applicable on this target\n");
+		return 0;
+	}
+
+	ep_idx = ipa2_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPAERR("Invalid IPA client\n");
+		return 0;
+	}
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * continue with the sequence without monitoring the
+	 * pipe.
+	 */
+	if (ipa2_uc_state_check()) {
+		IPADBG("uC interface will not be used to monitor %s pipe %d\n",
+			IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+			ep_idx);
+		return 0;
+	}
+
+	/*
+	 * IPA consumer = 0, IPA producer = 1.
+	 * IPA driver concept of PROD/CONS is the opposite of the
+	 * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+	 * and vice-versa.
+	 */
+	cmd.params.monitorPipe = (u8)(enable ? 1 : 0);
+	cmd.params.pipeNum = (u8)ep_idx;
+
+	IPADBG("uC holb monitoring on IPA pipe %d, Enable: %d\n",
+		ep_idx, enable);
+
+	ret = ipa_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING, 0,
+		false, 10*HZ);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_monitor_holb);
+
+/**
+ * ipa_start_monitor_holb() - Send HOLB command to monitor IPA-USB
+ * producer pipe.
+ *
+ * This function is called after the uC is loaded to start monitoring
+ * the IPA pipe towards USB in case USB is already connected.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_start_monitor_holb(struct work_struct *work)
+{
+	IPADBG("starting holb monitoring on IPA_CLIENT_USB_CONS\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_uc_monitor_holb(IPA_CLIENT_USB_CONS, true);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+
+/**
+ * ipa_uc_notify_clk_state() - notify the uC of clock enable / disable
+ * @enabled: true if clocks are enabled
+ *
+ * The function uses the uC interface to notify the uC before IPA clocks
+ * are disabled, to make sure the uC is not in the middle of an operation.
+ * Also, after clocks are enabled the uC needs to be notified to start
+ * processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_notify_clk_state(bool enabled)
+{
+	u32 opcode;
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * don't notify the uC on the enable/disable
+	 */
+	if (ipa2_uc_state_check()) {
+		IPADBG("uC interface will not notify the UC on clock state\n");
+		return 0;
+	}
+
+	IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+	opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+			     IPA_CPU_2_HW_CMD_CLK_GATE;
+
+	return ipa_uc_send_cmd(0, opcode, 0, true, 0);
+}
+EXPORT_SYMBOL(ipa_uc_notify_clk_state);
+
+/**
+ * ipa_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_update_hw_flags(u32 flags)
+{
+	union IpaHwUpdateFlagsCmdData_t cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.newFlags = flags;
+	return ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+		false, HZ);
+}
+EXPORT_SYMBOL(ipa_uc_update_hw_flags);
+
+/**
+ * ipa_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMemCopyData_t *cmd; + + IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len); + mem.size = sizeof(cmd); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + cmd = (struct IpaHwMemCopyData_t *)mem.base; + memset(cmd, 0, sizeof(*cmd)); + cmd->destination_addr = dest; + cmd->dest_buffer_size = len; + cmd->source_addr = src; + cmd->source_buffer_size = len; + res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0, + true, 10 * HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto free_coherent; + } + + res = 0; +free_coherent: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c new file mode 100644 index 0000000000000000000000000000000000000000..2a7cada8cce1fe46151f45df7d926416ddcd1009 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c @@ -0,0 +1,959 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_i.h" + +/* MHI uC interface definitions */ +#define IPA_HW_INTERFACE_MHI_VERSION 0x0004 + +#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2 +#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2 +#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1) + +/** + * Values that represent the MHI commands from CPU to IPA HW. + * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing. + * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready + * to serve MHI transfers. Once initialization was completed HW shall + * respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * IPA_HW_MHI_CHANNEL_STATE_ENABLE + * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data. + * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel + * processing state following host request. Once operation was completed + * HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL syncronization. + * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing. + */ +enum ipa_cpu_2_hw_mhi_commands { + IPA_CPU_2_HW_CMD_MHI_INIT + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3), + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5) +}; + +/** + * Values that represent MHI related HW responses to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to + * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands. 
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specifying that the device
+ *	detected an error in an element from the transfer ring associated
+ *	with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specifying that a
+ *	BAM interrupt was asserted while the MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error persists.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+	IPA_HW_CHANNEL_ERROR_NONE,
+	IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ *	secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+	IPA_HW_INVALID_MMIO_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_INVALID_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_HW_INVALID_EVENT_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_HW_NO_ED_IN_RING_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_HW_LINK_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located in offset zero of SW Partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ *	The state carries information regarding the error type.
+ *	See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u16 interfaceVersionMhi;
+	u8 mhiState;
+	u8 reserved_2B;
+	u8 mhiCnl0State;
+	u8 mhiCnl1State;
+	u8 mhiCnl2State;
+	u8 mhiCnl3State;
+	u8 mhiCnl4State;
+	u8 mhiCnl5State;
+	u8 mhiCnl6State;
+	u8 mhiCnl7State;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+};
+
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ *	(MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of MMIO structure in
+ *	host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI control data structures are allocated by
+ *	the host, including channel context array, event context array,
+ *	and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI data buffers are allocated by the host.
+ *	This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ *	event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+	u32 msiAddress;
+	u32 mmioBaseAddress;
+	u32 deviceMhiCtrlBaseAddress;
+	u32 deviceMhiDataBaseAddress;
+	u32 firstChannelIndex;
+	u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ * command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ *	used as an index in channel context array structures.
+ * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ *	type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+	struct IpaHwMhiInitChannelCmdParams_t {
+		u32 channelHandle:8;
+		u32 contexArrayIndex:8;
+		u32 bamPipeId:6;
+		u32 channelDirection:2;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ *	Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ *	rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+	struct IpaHwMhiChangeChannelStateCmdParams_t {
+		u32 requestedState:8;
+		u32 channelHandle:8;
+		u32 LPTransitionRejected:8;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+	struct IpaHwMhiStopEventUpdateDataParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case the state is not as requested, this
+ *	is an error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending bam descriptors currently
+ *	queued
+ */
+union IpaHwMhiChangeChannelStateResponseData_t {
+	struct IpaHwMhiChangeChannelStateResponseParams_t {
+		u32 state:8;
+		u32 channelHandle:8;
+		u32 additonalParams:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+	struct IpaHwMhiChannelErrorEventParams_t {
+		u32 errorType:8;
+		u32 channelHandle:8;
+		u32 reserved:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+	struct IpaHwMhiChannelWakeupEventParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity triggered due to DL activity
+ * @numULTimerExpired: Number of times UL Accm Timer expired
+ * @numChEvCtxWpRead: Number of channel event context WP reads
+ * @reserved: reserved
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+	u32 numULDLSync;
+	u32 numULTimerExpired;
+	u32 numChEvCtxWpRead;
+	u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell int
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ *	Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ *	events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ *	after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ *	sending OOB and hitting OOB again
before we processed threshold + * number of packets + * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode + */ +struct IpaHwStatsMhiCnlInfoData_t { + u32 doorbellInt; + u32 reProccesed; + u32 bamFifoFull; + u32 bamFifoEmpty; + u32 bamFifoUsageHigh; + u32 bamFifoUsageLow; + u32 bamInt; + u32 ringFull; + u32 ringEmpty; + u32 ringUsageHigh; + u32 ringUsageLow; + u32 delayedMsi; + u32 immediateMsi; + u32 thresholdMsi; + u32 numSuspend; + u32 numResume; + u32 num_OOB; + u32 num_OOB_timer_expiry; + u32 num_OOB_moderation_timer_start; + u32 num_db_mode_evt; +}; + +/** + * Structure holding the MHI statistics + * @mhiCmnStats: Stats pertaining to MHI + * @mhiCnlStats: Stats pertaining to each channel + */ +struct IpaHwStatsMhiInfoData_t { + struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats; + struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; +}; + +/** + * Structure holding the MHI Common Config info + * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled + * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is + * enabled + * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events + * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events + */ +struct IpaHwConfigMhiCmnInfoData_t { + u8 isDlUlSyncEnabled; + u8 UlAccmVal; + u8 ulMsiEventThreshold; + u8 dlMsiEventThreshold; +}; + +/** + * Structure holding the parameters for MSI info data + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwConfigMhiMsiInfoData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the MHI Channel Config info + * @transferRingSize: The Transfer Ring size in terms of Ring Elements + * @transferRingIndex: The Transfer Ring channel number as defined by host + * @eventRingIndex: The Event Ring Index associated with this Transfer Ring + * @bamPipeIndex: The BAM Pipe associated with this channel + * @isOutChannel: Indication for the direction of channel + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiCnlInfoData_t { + u16 transferRingSize; + u8 transferRingIndex; + u8 eventRingIndex; + u8 bamPipeIndex; + u8 isOutChannel; + u8 reserved_0; + u8 reserved_1; +}; + +/** + * Structure holding the MHI Event Config info + * @msiVec: msi vector to invoke MSI interrupt + * @intmodtValue: Interrupt moderation timer (in milliseconds) + * @eventRingSize: The Event Ring size in terms of Ring Elements + * @eventRingIndex: The Event Ring number as defined by host + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + * @reserved_2: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiEventInfoData_t { + u32 msiVec; + u16 intmodtValue; + u16 eventRingSize; + u8 eventRingIndex; + u8 reserved_0; + u8 reserved_1; + u8 reserved_2; +}; + +/** + * Structure holding the MHI Config info + * @mhiCmnCfg: Common Config pertaining to MHI + * @mhiMsiCfg: Config pertaining to MSI config + * @mhiCnlCfg: 
Config pertaining to each channel + * @mhiEvtCfg: Config pertaining to each event Ring + */ +struct IpaHwConfigMhiInfoData_t { + struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg; + struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg; + struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; + struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[ + IPA_HW_MAX_NUMBER_OF_EVENTRINGS]; +}; + + +struct ipa_uc_mhi_ctx { + u8 expected_responseOp; + u32 expected_responseParams; + void (*ready_cb)(void); + void (*wakeup_request_cb)(void); + u32 mhi_uc_stats_ofst; + struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio; +}; + +#define PRINT_COMMON_STATS(x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x)) + +#define PRINT_CHANNEL_STATS(ch, x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x)) + +struct ipa_uc_mhi_ctx *ipa_uc_mhi_ctx; + +static int ipa_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio, u32 *uc_status) +{ + IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp); + if (uc_sram_mmio->responseOp == ipa_uc_mhi_ctx->expected_responseOp && + uc_sram_mmio->responseParams == + ipa_uc_mhi_ctx->expected_responseParams) { + *uc_status = 0; + return 0; + } + return -EINVAL; +} + +static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) +{ + if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) { + union IpaHwMhiChannelErrorEventData_t evt; + + IPAERR("Channel error\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPAERR("errorType=%d channelHandle=%d reserved=%d\n", + evt.params.errorType, evt.params.channelHandle, + evt.params.reserved); + } else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) { + union IpaHwMhiChannelWakeupEventData_t evt; + + IPADBG("WakeUp channel request\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("channelHandle=%d reserved=%d\n", + evt.params.channelHandle, evt.params.reserved); + ipa_uc_mhi_ctx->wakeup_request_cb(); + } +} + +static void ipa_uc_mhi_event_log_info_hdlr( + struct IpaHwEventLogInfoData_t *uc_event_top_mmio) + +{ + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) { + IPAERR("MHI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + +if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].params.size != + sizeof(struct IpaHwStatsMhiInfoData_t)) { + IPAERR("mhi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsMhiInfoData_t), + uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].params.size + ); + return; +} + +ipa_uc_mhi_ctx->mhi_uc_stats_ofst = +uc_event_top_mmio->statsInfo.baseAddrOffset + +uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].params.offset; +IPAERR("MHI stats ofst=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_ofst); + + if (ipa_uc_mhi_ctx->mhi_uc_stats_ofst + + sizeof(struct IpaHwStatsMhiInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_mhi_stats 0x%x outside SRAM\n", + ipa_uc_mhi_ctx->mhi_uc_stats_ofst); + return; + } + + ipa_uc_mhi_ctx->mhi_uc_stats_mmio = + ioremap(ipa_ctx->ipa_wrapper_base + + ipa_uc_mhi_ctx->mhi_uc_stats_ofst, + sizeof(struct IpaHwStatsMhiInfoData_t)); + if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("fail to ioremap uc mhi stats\n"); + return; + } +} + +int 
ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)) +{ + struct ipa_uc_hdlrs hdlrs; + + if (ipa_uc_mhi_ctx) { + IPAERR("Already initialized\n"); + return -EFAULT; + } + + ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL); + if (!ipa_uc_mhi_ctx) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + ipa_uc_mhi_ctx->ready_cb = ready_cb; + ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb; + + memset(&hdlrs, 0, sizeof(hdlrs)); + hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb; + hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr; + hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr; + hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr; + ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs); + + IPADBG("Done\n"); + return 0; +} + +void ipa2_uc_mhi_cleanup(void) +{ + struct ipa_uc_hdlrs null_hdlrs = { NULL }; + + IPADBG("Enter\n"); + + if (!ipa_uc_mhi_ctx) { + IPAERR("ipa3_uc_mhi_ctx is not initialized\n"); + return; + } + ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs); + kfree(ipa_uc_mhi_ctx); + ipa_uc_mhi_ctx = NULL; + + IPADBG("Done\n"); +} + +int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMhiInitCmdData_t *init_cmd_data; + struct IpaHwMhiMsiCmdData_t *msi_cmd; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa_uc_update_hw_flags(0); + if (res) { + IPAERR("ipa_uc_update_hw_flags failed %d\n", res); + goto disable_clks; + } + + mem.size = sizeof(*init_cmd_data); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base; + init_cmd_data->msiAddress = msi->addr_low; + init_cmd_data->mmioBaseAddress = mmio_addr; + init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr; + init_cmd_data->deviceMhiDataBaseAddress = host_data_addr; + init_cmd_data->firstChannelIndex = first_ch_idx; + init_cmd_data->firstEventRingIndex = first_evt_idx; + res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0, + false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = sizeof(*msi_cmd); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + + msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base; + msi_cmd->msiAddress_hi = msi->addr_hi; + msi_cmd->msiAddress_low = msi->addr_low; + msi_cmd->msiData = msi->data; + msi_cmd->msiMask = msi->mask; + res = ipa_uc_send_cmd((u32)mem.phys_base, + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; + +} + +int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection) 
+ +{ + int res; + union IpaHwMhiInitChannelCmdData_t init_cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa_ctx->ipa_num_pipes) { + IPAERR("Invalid ipa_ep_idx.\n"); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&init_cmd, 0, sizeof(init_cmd)); + init_cmd.params.channelHandle = channelHandle; + init_cmd.params.contexArrayIndex = contexArrayIndex; + init_cmd.params.bamPipeId = ipa_ep_idx; + init_cmd.params.channelDirection = channelDirection; + + res = ipa_uc_send_cmd(init_cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + + +int ipa2_uc_mhi_reset_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + cmd.params.channelHandle = channelHandle; + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_suspend_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + cmd.params.channelHandle = channelHandle; + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + 
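+	/*
+	 * The generic MHI response handler matches responseOp and
+	 * responseParams against these expected values, so the full
+	 * CHANGE_CHANNEL_STATE response (target state + channel handle)
+	 * is pre-computed in uc_rsp before the command is issued.
+	 */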
uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN; + cmd.params.channelHandle = channelHandle; + cmd.params.LPTransitionRejected = LPTransitionRejected; + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_stop_event_update_channel(int channelHandle) +{ + union IpaHwMhiStopEventUpdateData_t cmd; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.channelHandle = channelHandle; + + ipa_uc_mhi_ctx->expected_responseOp = + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE; + ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b; + + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd) +{ + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n", + cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal); + IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n", + cmd->params.ulMsiEventThreshold, + cmd->params.dlMsiEventThreshold); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa_uc_send_cmd(cmd->raw32b, + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_print_stats(char *dbg_buff, int size) +{ + int nBytes = 0; + int i; + + if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("MHI uc stats is not valid\n"); + return 0; + } + + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Common Stats:\n"); + PRINT_COMMON_STATS(numULDLSync); + PRINT_COMMON_STATS(numULTimerExpired); + PRINT_COMMON_STATS(numChEvCtxWpRead); + + for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) { + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Channel %d Stats:\n", i); + PRINT_CHANNEL_STATS(i, doorbellInt); + PRINT_CHANNEL_STATS(i, reProccesed); + PRINT_CHANNEL_STATS(i, bamFifoFull); + PRINT_CHANNEL_STATS(i, bamFifoEmpty); + PRINT_CHANNEL_STATS(i, bamFifoUsageHigh); + PRINT_CHANNEL_STATS(i, bamFifoUsageLow); + PRINT_CHANNEL_STATS(i, bamInt); + PRINT_CHANNEL_STATS(i, ringFull); + PRINT_CHANNEL_STATS(i, ringEmpty); + PRINT_CHANNEL_STATS(i, ringUsageHigh); + PRINT_CHANNEL_STATS(i, ringUsageLow); + PRINT_CHANNEL_STATS(i, delayedMsi); + PRINT_CHANNEL_STATS(i, immediateMsi); + PRINT_CHANNEL_STATS(i, thresholdMsi); + PRINT_CHANNEL_STATS(i, numSuspend); + PRINT_CHANNEL_STATS(i, numResume); + PRINT_CHANNEL_STATS(i, num_OOB); + PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry); + PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start); + PRINT_CHANNEL_STATS(i, num_db_mode_evt); + } + + return nBytes; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c new 
file mode 100644 index 0000000000000000000000000000000000000000..d49acc534f698328403962ab0e9b69f5bad2dbc7 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" + +#define IPA_UC_NTN_DB_PA_TX 0x79620DC +#define IPA_UC_NTN_DB_PA_RX 0x79620D8 + +static void ipa_uc_ntn_event_handler( + struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio) +{ + union IpaHwNTNErrorEventData_t ntn_evt; + + if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) { + ntn_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n", + ntn_evt.params.ntn_error_type, + ntn_evt.params.ipa_pipe_number, + ntn_evt.params.ntn_ch_err_type); + } +} + +static void ipa_uc_ntn_event_log_info_handler( + struct IpaHwEventLogInfoData_t *uc_event_top_mmio) +{ + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) { + IPAERR("NTN feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + +if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].params.size != + sizeof(struct IpaHwStatsNTNInfoData_t)) { + IPAERR("NTN stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsNTNInfoData_t), + uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].params.size + ); + return; +} + +ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = +uc_event_top_mmio->statsInfo.baseAddrOffset + +uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].params.offset; +IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst); + + if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst + + sizeof(struct IpaHwStatsNTNInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_ntn_stats 0x%x outside SRAM\n", + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst); + return; + } + + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio = + ioremap(ipa_ctx->ipa_wrapper_base + + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst, + sizeof(struct IpaHwStatsNTNInfoData_t)); + if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) { + IPAERR("fail to ioremap uc ntn stats\n"); + return; + } +} + +/** + * ipa2_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats[0].y = \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) stats->rx_ch_stats[0].y = \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) { + IPAERR("bad parms stats=%p ntn_stats=%p\n", + stats, + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + TX_STATS(tail_ptr_val); + TX_STATS(num_db_fired); + TX_STATS(tx_comp_ring_stats.ringFull); + TX_STATS(tx_comp_ring_stats.ringEmpty); + TX_STATS(tx_comp_ring_stats.ringUsageHigh); + TX_STATS(tx_comp_ring_stats.ringUsageLow); + TX_STATS(tx_comp_ring_stats.RingUtilCount); + TX_STATS(bam_stats.bamFifoFull); + TX_STATS(bam_stats.bamFifoEmpty); + TX_STATS(bam_stats.bamFifoUsageHigh); + TX_STATS(bam_stats.bamFifoUsageLow); + TX_STATS(bam_stats.bamUtilCount); + 
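+	/* Doorbell and BAM/QMB interrupt counters for the TX channel */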
TX_STATS(num_db); + TX_STATS(num_unexpected_db); + TX_STATS(num_bam_int_handled); + TX_STATS(num_bam_int_in_non_running_state); + TX_STATS(num_qmb_int_handled); + TX_STATS(num_bam_int_handled_while_wait_for_bam); + TX_STATS(num_bam_int_handled_while_not_in_bam); + + RX_STATS(max_outstanding_pkts); + RX_STATS(num_pkts_processed); + RX_STATS(rx_ring_rp_value); + RX_STATS(rx_ind_ring_stats.ringFull); + RX_STATS(rx_ind_ring_stats.ringEmpty); + RX_STATS(rx_ind_ring_stats.ringUsageHigh); + RX_STATS(rx_ind_ring_stats.ringUsageLow); + RX_STATS(rx_ind_ring_stats.RingUtilCount); + RX_STATS(bam_stats.bamFifoFull); + RX_STATS(bam_stats.bamFifoEmpty); + RX_STATS(bam_stats.bamFifoUsageHigh); + RX_STATS(bam_stats.bamFifoUsageLow); + RX_STATS(bam_stats.bamUtilCount); + RX_STATS(num_bam_int_handled); + RX_STATS(num_db); + RX_STATS(num_unexpected_db); + RX_STATS(num_pkts_in_dis_uninit_state); + RX_STATS(num_bam_int_handled_while_not_in_bam); + RX_STATS(num_bam_int_handled_while_in_bam_state); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data) +{ + int ret; + + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return -ENXIO; + } + + ret = ipa2_uc_state_check(); + if (ret) { + ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb; + ipa_ctx->uc_ntn_ctx.priv = user_data; + return 0; + } + + return -EEXIST; +} + +int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv) +{ + return ipa2_register_ipa_ready_cb(ipauc_ready_cb, priv); +} + +void ipa2_ntn_uc_dereg_rdyCB(void) +{ + ipa_ctx->uc_ntn_ctx.uc_ready_cb = NULL; + ipa_ctx->uc_ntn_ctx.priv = NULL; +} + +static void ipa_uc_ntn_loaded_handler(void) +{ + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) { + ipa_ctx->uc_ntn_ctx.uc_ready_cb( + ipa_ctx->uc_ntn_ctx.priv); + + ipa_ctx->uc_ntn_ctx.uc_ready_cb = + NULL; + ipa_ctx->uc_ntn_ctx.priv = NULL; + } +} + +int ipa_ntn_init(void) +{ + struct ipa_uc_hdlrs uc_ntn_cbs = { NULL }; + + uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler; + uc_ntn_cbs.ipa_uc_event_log_info_hdlr = + ipa_uc_ntn_event_log_info_handler; + uc_ntn_cbs.ipa_uc_loaded_hdlr = + ipa_uc_ntn_loaded_handler; + + ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs); + + return 0; +} + +static int ipa2_uc_send_ntn_setup_pipe_cmd( + struct ipa_ntn_setup_info *ntn_info, u8 dir) +{ + int ipa_ep_idx; + int result = 0; + struct ipa_mem_buffer cmd; + struct IpaHwNtnSetUpCmdData_t *Ntn_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + + if (ntn_info == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx); + + IPADBG("ring_base_pa = 0x%pa\n", + &ntn_info->ring_base_pa); + IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size); + IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa); + IPADBG("num_buffers = %d\n", ntn_info->num_buffers); + IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size); + IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa); + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + + 
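+	/*
+	 * Fill the NTN-specific section of the offload set-up command.
+	 * The command is handed to the uC by physical address, so it must
+	 * stay in the DMA-coherent buffer until ipa_uc_send_cmd() returns.
+	 */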
Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params; + Ntn_params->ring_base_pa = ntn_info->ring_base_pa; + Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa; + Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size; + Ntn_params->num_buffers = ntn_info->num_buffers; + Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa; + Ntn_params->data_buff_size = ntn_info->data_buff_size; + Ntn_params->ipa_pipe_number = ipa_ep_idx; + Ntn_params->dir = dir; + + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) + result = -EFAULT; + + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +/** + * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes + */ +int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp) +{ + int ipa_ep_idx_ul, ipa_ep_idx_dl; + struct ipa_ep_context *ep_ul, *ep_dl; + int result = 0; + + if (in == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client); + ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client); + if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + + ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->valid || ep_dl->valid) { + IPAERR("EP already allocated ul:%d dl:%d\n", + ep_ul->valid, ep_dl->valid); + return -EFAULT; + } + + memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys)); + memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* setup ul ep cfg */ + ep_ul->valid = 1; + ep_ul->client = in->ul.client; + ep_ul->client_notify = notify; + ep_ul->priv = priv; + + memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg)); + ep_ul->cfg.nat.nat_en = IPA_SRC_NAT; + ep_ul->cfg.hdr.hdr_len = hdr_len; + ep_ul->cfg.mode.mode = IPA_BASIC; + + if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) { + IPAERR("fail to setup ul pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) { + IPAERR("fail to send cmd to uc for ul pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_install_dflt_flt_rules(ipa_ep_idx_ul); + outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX; + ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + IPAERR("client %d (ep: %d) connected\n", in->ul.client, + ipa_ep_idx_ul); + + /* setup dl ep cfg */ + ep_dl->valid = 1; + ep_dl->client = in->dl.client; + memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg)); + ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT; + ep_dl->cfg.hdr.hdr_len = hdr_len; + ep_dl->cfg.mode.mode = IPA_BASIC; + + if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) { + IPAERR("fail to setup dl pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) { + IPAERR("fail to send cmd to uc for dl pipe\n"); + result = -EFAULT; + goto fail; + } + outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX; + ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + + result = ipa_enable_data_path(ipa_ep_idx_dl); + if (result) { + IPAERR("Enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_dl); + result = -EFAULT; + goto fail; + } + IPAERR("client %d (ep: %d) connected\n", in->dl.client, + ipa_ep_idx_dl); + +fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes + */ 
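+/*
+ * Counterpart of ipa2_setup_uc_ntn_pipes().  A typical offload client is
+ * expected to use the pair roughly as follows (illustrative sequence only;
+ * the endpoint indices come from the connect path):
+ *
+ *	ipa2_setup_uc_ntn_pipes(&in, notify_cb, priv, hdr_len, &out);
+ *	...
+ *	ipa2_tear_down_uc_offload_pipes(ul_ep, dl_ep, &in);
+ *
+ * The DL pipe is torn down before the UL pipe, with one
+ * IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN command sent per pipe.
+ */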
+ +int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params) +{ + struct ipa_mem_buffer cmd; + struct ipa_ep_context *ep_ul, *ep_dl; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + union IpaHwNtnCommonChCmdData_t *tear; + int result = 0; + + IPADBG("ep_ul = %d\n", ipa_ep_idx_ul); + IPADBG("ep_dl = %d\n", ipa_ep_idx_dl); + + ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED || + ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) { + IPAERR("channel bad state: ul %d dl %d\n", + ep_ul->uc_offload_state, ep_dl->uc_offload_state); + return -EFAULT; + } + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + tear = &cmd_data->CommonCh_params.NtnCommonCh_params; + + /* teardown the DL pipe */ + ipa_disable_data_path(ipa_ep_idx_dl); + /* + * Reset ep before sending cmd otherwise disconnect + * during data transfer will result into + * enormous suspend interrupts + */ + memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context)); + IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl); + tear->params.ipa_pipe_number = ipa_ep_idx_dl; + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down dl pipe\n"); + result = -EFAULT; + goto fail; + } + + /* teardown the UL pipe */ + tear->params.ipa_pipe_number = ipa_ep_idx_ul; + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down ul pipe\n"); + result = -EFAULT; + goto fail; + } + + ipa_delete_dflt_flt_rules(ipa_ep_idx_ul); + memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context)); + IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul); + +fail: + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h new file mode 100644 index 0000000000000000000000000000000000000000..9a96ad919ee8bfebd7aed2080b22b68ca74fd316 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+#define IPA_WDI3_TX_DIR 1
+#define IPA_WDI3_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * |     3 bits     |     5 bits     |
+ * +----------------+----------------+
+ * |   HW_FEATURE   |     OPCODE     |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa_hw_features - Values that represent the features supported in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+ * @IPA_HW_FEATURE_WDI3 : Feature related to WDI3 operation in IPA HW
+ */
+enum ipa_hw_features {
+        IPA_HW_FEATURE_COMMON = 0x0,
+        IPA_HW_FEATURE_MHI = 0x1,
+        IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
+        IPA_HW_FEATURE_WDI = 0x3,
+        IPA_HW_FEATURE_NTN = 0x4,
+        IPA_HW_FEATURE_OFFLOAD = 0x5,
+        IPA_HW_FEATURE_WDI3 = 0x6,
+        IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter. The parameter field can hold 32 bits
+ *              of parameters (immediate parameters) or point to a structure in
+ *              system memory (in such a case the address must be accessible
+ *              to HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ *              32 bits of parameters (immediate parameters) or point to a
+ *              structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32 bits
+ *              of parameters (immediate parameters) or point to a structure
+ *              in system memory
+ * @firstErrorAddress : Contains the address of the first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the error
+ *              type.
+ * @warningCounter : The warnings counter. The counter carries information
+ *              regarding non-fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
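
As a quick illustration (user-space C, not part of the driver code), the FEATURE_ENUM_VAL()/EXTRACT_UC_FEATURE() packing above is easiest to see with concrete numbers; the feature values come from the enum in this header and the opcodes from the command enums further down:

    #include <stdio.h>
    #include <stdint.h>

    #define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
    #define EXTRACT_UC_FEATURE(value) (value >> 5)

    int main(void)
    {
        /* IPA_HW_FEATURE_OFFLOAD = 0x5, opcode 1 = OFFLOAD_CHANNEL_SET_UP */
        uint8_t set_up = FEATURE_ENUM_VAL(0x5, 1);  /* (5 << 5) | 1 = 0xa1 */
        /* IPA_HW_FEATURE_WDI = 0x3, opcode 0 = WDI_TX_SET_UP */
        uint8_t wdi_tx = FEATURE_ENUM_VAL(0x3, 0);  /* (3 << 5) | 0 = 0x60 */

        printf("set_up=0x%02x feature=%u opcode=%u\n",
               set_up, EXTRACT_UC_FEATURE(set_up), set_up & 0x1f);
        printf("wdi_tx=0x%02x feature=%u opcode=%u\n",
               wdi_tx, EXTRACT_UC_FEATURE(wdi_tx), wdi_tx & 0x1f);
        return 0;
    }

The packed value fits in the 8-bit cmdOp/eventOp fields of the shared-memory mapping, since the feature index never exceeds 7.
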
+ */ +struct IpaHwSharedMemCommonMapping_t { + u8 cmdOp; + u8 reserved_01; + u16 reserved_03_02; + u32 cmdParams; + u8 responseOp; + u8 reserved_09; + u16 reserved_0B_0A; + u32 responseParams; + u8 eventOp; + u8 reserved_11; + u16 reserved_13_12; + u32 eventParams; + u32 reserved_1B_18; + u32 firstErrorAddress; + u8 hwState; + u8 warningCounter; + u16 reserved_23_22; + u16 interfaceVersionCommon; + u16 reserved_27_26; +} __packed; + +/** + * union IpaHwFeatureInfoData_t - parameters for stats/config blob + * + * @offset : Location of a feature within the EventInfoData + * @size : Size of the feature + */ +union IpaHwFeatureInfoData_t { + struct IpaHwFeatureInfoParams_t { + u32 offset:16; + u32 size:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct IpaHwEventInfoData_t - Structure holding the parameters for + * statistics and config info + * + * @baseAddrOffset : Base Address Offset of the statistics or config + * structure from IPA_WRAPPER_BASE + * @IpaHwFeatureInfoData_t : Location and size of each feature within + * the statistics or config structure + * + * @note Information about each feature in the featureInfo[] + * array is populated at predefined indices per the IPA_HW_FEATURES + * enum definition + */ +struct IpaHwEventInfoData_t { + u32 baseAddrOffset; + union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES]; +} __packed; + +/** + * struct IpaHwEventLogInfoData_t - Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_LOG_INFO Event + * + * @featureMask : Mask indicating the features enabled in HW. + * Refer IPA_HW_FEATURE_MASK + * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event + * Log Buffer structure + * @statsInfo : Statistics related information + * @configInfo : Configuration related information + * + * @note The offset location of this structure from IPA_WRAPPER_BASE + * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO + * Event + */ +struct IpaHwEventLogInfoData_t { + u32 featureMask; + u32 circBuffBaseAddrOffset; + struct IpaHwEventInfoData_t statsInfo; + struct IpaHwEventInfoData_t configInfo; + +} __packed; + +/** + * struct ipa_uc_ntn_ctx + * @ntn_uc_stats_ofst: Neutrino stats offset + * @ntn_uc_stats_mmio: Neutrino stats + * @priv: private data of client + * @uc_ready_cb: uc Ready cb + */ +struct ipa_uc_ntn_ctx { + u32 ntn_uc_stats_ofst; + struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; +}; + +/** + * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event + * to be sent to CPU + * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW + * detected an error in NTN + * + */ +enum ipa_hw_2_cpu_ntn_events { + IPA_HW_2_CPU_EVENT_NTN_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0), +}; + + +/** + * enum ipa_hw_ntn_errors - NTN specific error types. + * @IPA_HW_NTN_ERROR_NONE : No error persists + * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa_hw_ntn_errors { + IPA_HW_NTN_ERROR_NONE = 0, + IPA_HW_NTN_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa_hw_ntn_channel_states - Values that represent NTN + * channel state machine. + * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is + * initialized but disabled + * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running. + * Entered after SET_UP_COMMAND is processed successfully + * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. 
Shall not + * be in use in operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. + */ +enum ipa_hw_ntn_channel_states { + IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2, + IPA_HW_NTN_CHANNEL_STATE_ERROR = 3, + IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa_hw_ntn_channel_errors - List of NTN Channel error + * types. This is present in the event param + * @IPA_HW_NTN_CH_ERR_NONE: No error persists + * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating + * num RE to bring + * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update + * failed in Rx ring + * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_RX_CACHE_NON_EMPTY: + * @IPA_HW_NTN_CH_ERR_RESERVED: + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in. + */ +enum ipa_hw_ntn_channel_errors { + IPA_HW_NTN_CH_ERR_NONE = 0, + IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1, + IPA_HW_NTN_TX_FSM_ERROR = 2, + IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4, + IPA_HW_NTN_RX_FSM_ERROR = 5, + IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6, + IPA_HW_NTN_CH_ERR_RESERVED = 0xFF +}; + + +/** + * struct IpaHwNtnSetUpCmdData_t - Ntn setup command data + * @ring_base_pa: physical address of the base of the Tx/Rx NTN + * ring + * @buff_pool_base_pa: physical address of the base of the Tx/Rx + * buffer pool + * @ntn_ring_size: size of the Tx/Rx NTN ring + * @num_buffers: Rx/tx buffer pool size + * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN + * Ring's tail pointer + * @ipa_pipe_number: IPA pipe number that has to be used for the + * Tx/Rx path + * @dir: Tx/Rx Direction + * @data_buff_size: size of the each data buffer allocated in + * DDR + */ +struct IpaHwNtnSetUpCmdData_t { + u32 ring_base_pa; + u32 buff_pool_base_pa; + u16 ntn_ring_size; + u16 num_buffers; + u32 ntn_reg_base_ptr_pa; + u8 ipa_pipe_number; + u8 dir; + u16 data_buff_size; + +} __packed; + +struct IpaHwWdi3SetUpCmdData_t { + u32 transfer_ring_base_pa; + u32 transfer_ring_base_pa_hi; + + u32 transfer_ring_size; + + u32 transfer_ring_doorbell_pa; + u32 transfer_ring_doorbell_pa_hi; + + u32 event_ring_base_pa; + u32 event_ring_base_pa_hi; + + u32 event_ring_size; + + u32 event_ring_doorbell_pa; + u32 event_ring_doorbell_pa_hi; + + u16 num_pkt_buffers; + u8 ipa_pipe_number; + u8 dir; + + u16 pkt_offset; + u16 reserved0; + + u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]; +} __packed; + +/** + * struct IpaHwNtnCommonChCmdData_t - Structure holding the + * parameters for Ntn Tear down command data params + * + *@ipa_pipe_number: IPA pipe number. This could be Tx or an Rx pipe + */ +union IpaHwNtnCommonChCmdData_t { + struct IpaHwNtnCommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + uint32_t raw32b; +} __packed; + +union IpaHwWdi3CommonChCmdData_t { + struct IpaHwWdi3CommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct IpaHwNTNErrorEventData_t - Structure holding the + * IPA_HW_2_CPU_EVENT_NTN_ERROR event. 
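
As an aside (illustrative user-space C, not part of the patch), the *CommonChCmdData_t unions above overlay an 8-bit pipe number plus reserved bits on one 32-bit word, so the driver can fill params.ipa_pipe_number and hand the same value to the uC as raw32b. A stand-alone sketch of that overlay, assuming a little-endian build:

    #include <stdio.h>
    #include <stdint.h>

    union ntn_common_ch_cmd {           /* mirrors IpaHwNtnCommonChCmdData_t */
        struct {
            uint32_t ipa_pipe_number :8;
            uint32_t reserved        :24;
        } params;
        uint32_t raw32b;
    };

    int main(void)
    {
        union ntn_common_ch_cmd tear = { .raw32b = 0 };

        tear.params.ipa_pipe_number = 7;          /* hypothetical endpoint index */
        /* raw32b now carries the pipe number in its low byte */
        printf("raw32b = 0x%08x\n", tear.raw32b); /* prints 0x00000007 */
        return 0;
    }
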
The parameters are passed + * as immediate params in the shared memory + * + *@ntn_error_type: type of NTN error (IPA_HW_NTN_ERRORS) + *@ipa_pipe_number: IPA pipe number on which error has happened + * Applicable only if error type indicates channel error + *@ntn_ch_err_type: Information about the channel error (if + * available) + */ +union IpaHwNTNErrorEventData_t { + struct IpaHwNTNErrorEventParams_t { + u32 ntn_error_type :8; + u32 reserved :8; + u32 ipa_pipe_number :8; + u32 ntn_ch_err_type :8; + } __packed params; + uint32_t raw32b; +} __packed; + +/** + * struct NTNRxInfoData_t - NTN Structure holding the + * Rx pipe information + * + *@max_outstanding_pkts: Number of outstanding packets in Rx + * Ring + *@num_pkts_processed: Number of packets processed - cumulative + *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW + * + *@ntn_ch_err_type: Information about the channel error (if + * available) + *@rx_ind_ring_stats: + *@bam_stats: + *@num_bam_int_handled: Number of Bam Interrupts handled by FW + *@num_db: Number of times the doorbell was rung + *@num_unexpected_db: Number of unexpected doorbells + *@num_pkts_in_dis_uninit_state: + *@num_bam_int_handled_while_not_in_bam: Number of Bam + * Interrupts handled by FW + *@num_bam_int_handled_while_in_bam_state: Number of Bam + * Interrupts handled by FW + */ +struct NTNRxInfoData_t { + u32 max_outstanding_pkts; + u32 num_pkts_processed; + u32 rx_ring_rp_value; + struct IpaHwRingStats_t rx_ind_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_bam_int_handled; + u32 num_db; + u32 num_unexpected_db; + u32 num_pkts_in_dis_uninit_state; + u32 num_bam_int_handled_while_not_in_bam; + u32 num_bam_int_handled_while_in_bam_state; +} __packed; + + +/** + * struct NTNTxInfoData_t - Structure holding the NTN Tx channel + * Ensure that this is always word aligned + * + *@num_pkts_processed: Number of packets processed - cumulative + *@tail_ptr_val: Latest value of doorbell written to copy engine + *@num_db_fired: Number of DB from uC FW to Copy engine + * + *@tx_comp_ring_stats: + *@bam_stats: + *@num_db: Number of times the doorbell was rung + *@num_unexpected_db: Number of unexpected doorbells + *@num_bam_int_handled: Number of Bam Interrupts handled by FW + *@num_bam_int_in_non_running_state: Number of Bam interrupts + * while not in Running state + *@num_qmb_int_handled: Number of QMB interrupts handled + *@num_bam_int_handled_while_wait_for_bam: Number of times the + * Imm Cmd is injected due to fw_desc change + */ +struct NTNTxInfoData_t { + u32 num_pkts_processed; + u32 tail_ptr_val; + u32 num_db_fired; + struct IpaHwRingStats_t tx_comp_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_db; + u32 num_unexpected_db; + u32 num_bam_int_handled; + u32 num_bam_int_in_non_running_state; + u32 num_qmb_int_handled; + u32 num_bam_int_handled_while_wait_for_bam; + u32 num_bam_int_handled_while_not_in_bam; +} __packed; + + +/** + * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Tx + * channel Ensure that this is always word aligned + * + */ +struct IpaHwStatsNTNInfoData_t { + struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS]; + struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS]; +} __packed; + + +/* + * uC offload related data structures + */ +#define IPA_UC_OFFLOAD_CONNECTED BIT(0) +#define IPA_UC_OFFLOAD_ENABLED BIT(1) +#define IPA_UC_OFFLOAD_RESUMED BIT(2) + +/** + * enum ipa_cpu_2_hw_offload_commands - Values that represent + * the offload commands from CPU + * 
@IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down + * Offload protocol's Tx/ Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE : Command to enable + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE : Command to disable + * Offload protocol's Tx/ Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND : Command to suspend + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_RESUME : Command to resume + * Offload protocol's Tx/ Rx Path + */ +enum ipa_cpu_2_hw_offload_commands { + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), +}; + + +/** + * enum ipa_hw_offload_channel_states - Values that represent + * offload channel state machine. + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized + * but disabled + * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after + * SET_UP_COMMAND is processed successfully + * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use + * in operational scenario + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in + */ +enum ipa_hw_offload_channel_states { + IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2, + IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3, + IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF +}; + + +/** + * enum ipa_hw_2_cpu_offload_cmd_resp_status - Values that represent + * offload related command response status to be sent to CPU. 
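
As an illustration (user-space C, not part of the driver), the IPA_UC_OFFLOAD_CONNECTED/ENABLED/RESUMED flags defined a little earlier form a plain bitmask, and the driver only issues the commands above when the mask matches the expected stage, in the same way the WDI pipe code later in this patch checks ep->uc_offload_state. A minimal sketch of that progression, with BIT() spelled out:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)               (1u << (n))
    #define UC_OFFLOAD_CONNECTED BIT(0)
    #define UC_OFFLOAD_ENABLED   BIT(1)

    int main(void)
    {
        uint32_t state = 0;

        state |= UC_OFFLOAD_CONNECTED;        /* after CHANNEL_SET_UP succeeds */

        /* enabling is only legal on a connected-but-not-enabled channel */
        if (state != UC_OFFLOAD_CONNECTED)
            printf("bad state 0x%x, refusing enable\n", state);
        else
            state |= UC_OFFLOAD_ENABLED;      /* after OFFLOAD_ENABLE succeeds */

        /* disable expects CONNECTED | ENABLED and clears ENABLED again */
        if (state == (UC_OFFLOAD_CONNECTED | UC_OFFLOAD_ENABLED))
            state &= ~UC_OFFLOAD_ENABLED;

        printf("final state 0x%x\n", state);  /* back to CONNECTED only */
        return 0;
    }
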
+ */ +enum ipa_hw_2_cpu_offload_cmd_resp_status { + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0), + IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), + IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7), + IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8), + IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9), + IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11), + IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12), + IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13), + IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14), +}; + +/** + * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related + * command response status to be sent to CPU. + */ +enum ipa_hw_2_cpu_cmd_resp_status { + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), + IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1), + IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2), + IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3), + IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4), + IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5), + IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6), + IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7), + IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8), + IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9), + IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10), + IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11), + IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12), + IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13), + IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14), + IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15), + IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16), +}; + +/** + * struct IpaHwSetUpCmd - + * + * + */ +union IpaHwSetUpCmd { + struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params; + struct IpaHwWdi3SetUpCmdData_t Wdi3SetupCh_params; +} __packed; + +/** + * struct IpaHwOffloadSetUpCmdData_t - + * + * + */ +struct IpaHwOffloadSetUpCmdData_t { + u8 protocol; + union IpaHwSetUpCmd SetupCh_params; +} __packed; + +/** + * struct IpaHwCommonChCmd - Structure holding the 
parameters + * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN + * + * + */ +union IpaHwCommonChCmd { + union IpaHwNtnCommonChCmdData_t NtnCommonCh_params; + union IpaHwWdi3CommonChCmdData_t Wdi3CommonCh_params; +} __packed; + +struct IpaHwOffloadCommonChCmdData_t { + u8 protocol; + union IpaHwCommonChCmd CommonCh_params; +} __packed; + +#endif /* _IPA_UC_OFFLOAD_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c new file mode 100644 index 0000000000000000000000000000000000000000..02ba9d5ea0d39424ed659b335c6ed56923fba2b2 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c @@ -0,0 +1,1892 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" +#include +#include +#include "ipa_qmi_service.h" + +#define IPA_HOLB_TMR_DIS 0x0 + +#define IPA_HW_INTERFACE_WDI_VERSION 0x0001 +#define IPA_HW_WDI_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI_TX_MBOX_START_INDEX 50 +#define IPA_WDI_RING_ALIGNMENT 8 + +#define IPA_WDI_CONNECTED BIT(0) +#define IPA_WDI_ENABLED BIT(1) +#define IPA_WDI_RESUMED BIT(2) +#define IPA_UC_POLL_SLEEP_USEC 100 + +struct ipa_wdi_res { + struct ipa_wdi_buffer_info *res; + unsigned int nents; + bool valid; +}; + +static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES]; + +static void ipa_uc_wdi_loaded_handler(void); + +/** + * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to + * CPU. + * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error + * in WDI + */ +enum ipa_hw_2_cpu_wdi_events { + IPA_HW_2_CPU_EVENT_WDI_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), +}; + +/** + * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state + * machine. + * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but + * disabled + * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in + * suspended state + * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after + * SET_UP_COMMAND is processed successfully + * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in + * operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. 
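
For illustration (a stand-alone user-space mock, not the kernel code), IpaHwOffloadCommonChCmdData_t earlier in this header is what ipa2_tear_down_uc_offload_pipes() places in the DMA buffer: one protocol byte followed by the per-protocol channel word, reused first for the DL and then for the UL pipe. Here send_cmd() is a stub standing in for ipa_uc_send_cmd() and the pipe indices are made up:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct offload_common_ch_cmd {       /* mirrors the packed kernel layout */
        uint8_t  protocol;               /* e.g. 0x4 = IPA_HW_FEATURE_NTN */
        uint32_t ch_raw32b;              /* pipe number in the low byte */
    } __attribute__((packed));

    /* stub for ipa_uc_send_cmd(): pretend the uC always reports SUCCESS */
    static int send_cmd(const struct offload_common_ch_cmd *cmd, unsigned int op)
    {
        printf("opcode 0x%02x: protocol %u, pipe %u\n",
               op, cmd->protocol, cmd->ch_raw32b & 0xff);
        return 0;
    }

    int main(void)
    {
        struct offload_common_ch_cmd cmd;
        unsigned int tear_down = (0x5 << 5) | 2;   /* OFFLOAD_TEAR_DOWN */
        int dl_pipe = 8, ul_pipe = 3;              /* hypothetical indices */

        memset(&cmd, 0, sizeof(cmd));
        cmd.protocol = 0x4;

        cmd.ch_raw32b = (uint32_t)dl_pipe;         /* DL pipe torn down first */
        send_cmd(&cmd, tear_down);
        cmd.ch_raw32b = (uint32_t)ul_pipe;         /* then the UL pipe */
        send_cmd(&cmd, tear_down);
        return 0;
    }
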
+ */ +enum ipa_hw_wdi_channel_states { + IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2, + IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3, + IPA_HW_WDI_CHANNEL_STATE_ERROR = 4, + IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa_cpu_2_hw_commands - Values that represent the WDI commands from CPU + * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path + * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path + * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path + * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel + * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path + */ +enum ipa_cpu_2_hw_wdi_commands { + IPA_CPU_2_HW_CMD_WDI_TX_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1), + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2), + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3), + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4), + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5), + IPA_CPU_2_HW_CMD_WDI_CH_RESUME = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6), + IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7), +}; + +/** + * enum ipa_hw_wdi_errors - WDI specific error types. + * @IPA_HW_WDI_ERROR_NONE : No error persists + * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa_hw_wdi_errors { + IPA_HW_WDI_ERROR_NONE = 0, + IPA_HW_WDI_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa_hw_wdi_ch_errors = List of WDI Channel error types. This is present + * in the event param. + * @IPA_HW_WDI_CH_ERR_NONE : No error persists + * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx + * Completion ring + * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition + * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring + * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use + */ +enum ipa_hw_wdi_ch_errors { + IPA_HW_WDI_CH_ERR_NONE = 0, + IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1, + IPA_HW_WDI_TX_FSM_ERROR = 2, + IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_WDI_CH_ERR_RESERVED = 0xFF +}; + +/** + * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and + * WDI section of 128B shared memory located in offset zero of SW Partition in + * IPA SRAM. + * + * The shared memory is used for communication between IPA HW and CPU. + */ +struct IpaHwSharedMemWdiMapping_t { + struct IpaHwSharedMemCommonMapping_t common; + u32 reserved_2B_28; + u32 reserved_2F_2C; + u32 reserved_33_30; + u32 reserved_37_34; + u32 reserved_3B_38; + u32 reserved_3F_3C; + u16 interfaceVersionWdi; + u16 reserved_43_42; + u8 wdi_tx_ch_0_state; + u8 wdi_rx_ch_0_state; + u16 reserved_47_46; +} __packed; + +/** + * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command. 
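
As a side note (illustrative user-space C, not part of the patch), the reserved_XX_YY names in IpaHwSharedMemCommonMapping_t and IpaHwSharedMemWdiMapping_t encode the byte offsets of each field in the 128B shared-memory window, which makes the layout easy to sanity-check. A replica of the two packed structures with compile-time offset checks, substituting fixed-width types for u8/u16/u32:

    #include <stddef.h>
    #include <stdint.h>

    /* user-space replicas of the packed layouts, for offset checking only */
    struct common_mapping {
        uint8_t  cmdOp;
        uint8_t  reserved_01;
        uint16_t reserved_03_02;
        uint32_t cmdParams;
        uint8_t  responseOp;
        uint8_t  reserved_09;
        uint16_t reserved_0B_0A;
        uint32_t responseParams;
        uint8_t  eventOp;
        uint8_t  reserved_11;
        uint16_t reserved_13_12;
        uint32_t eventParams;
        uint32_t reserved_1B_18;
        uint32_t firstErrorAddress;
        uint8_t  hwState;
        uint8_t  warningCounter;
        uint16_t reserved_23_22;
        uint16_t interfaceVersionCommon;
        uint16_t reserved_27_26;
    } __attribute__((packed));

    struct wdi_mapping {
        struct common_mapping common;
        uint32_t reserved_2B_28;
        uint32_t reserved_2F_2C;
        uint32_t reserved_33_30;
        uint32_t reserved_37_34;
        uint32_t reserved_3B_38;
        uint32_t reserved_3F_3C;
        uint16_t interfaceVersionWdi;
        uint16_t reserved_43_42;
        uint8_t  wdi_tx_ch_0_state;
        uint8_t  wdi_rx_ch_0_state;
        uint16_t reserved_47_46;
    } __attribute__((packed));

    /* the reserved_XX_YY names give the byte offsets; these checks confirm it */
    _Static_assert(offsetof(struct common_mapping, cmdParams) == 0x04, "");
    _Static_assert(offsetof(struct common_mapping, eventParams) == 0x14, "");
    _Static_assert(sizeof(struct common_mapping) == 0x28, "");
    _Static_assert(offsetof(struct wdi_mapping, interfaceVersionWdi) == 0x40, "");
    _Static_assert(offsetof(struct wdi_mapping, wdi_rx_ch_0_state) == 0x45, "");

    int main(void) { return 0; }
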
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The sizes of the CE ring
+ * and the Tx completion ring have to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer and thus should reside at an address
+ * accessible to HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+        u32 comp_ring_base_pa;
+        u16 comp_ring_size;
+        u16 reserved_comp_ring;
+        u32 ce_ring_base_pa;
+        u16 ce_ring_size;
+        u16 reserved_ce_ring;
+        u32 ce_ring_doorbell_pa;
+        u16 num_tx_buffers;
+        u8 ipa_pipe_number;
+        u8 reserved;
+} __packed;
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+        u32 comp_ring_base_pa;
+        u32 comp_ring_base_pa_hi;
+        u16 comp_ring_size;
+        u16 reserved_comp_ring;
+        u32 ce_ring_base_pa;
+        u32 ce_ring_base_pa_hi;
+        u16 ce_ring_size;
+        u16 reserved_ce_ring;
+        u32 ce_ring_doorbell_pa;
+        u32 ce_ring_doorbell_pa_hi;
+        u16 num_tx_buffers;
+        u8 ipa_pipe_number;
+        u8 reserved;
+} __packed;
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * the IPA uC is expected to communicate the Read pointer into the Rx Ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer and thus should reside at an address
+ * accessible to HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+        u32 rx_ring_base_pa;
+        u32 rx_ring_size;
+        u32 rx_ring_rp_pa;
+        u8 ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+        u32 rx_ring_base_pa;
+        u32 rx_ring_base_pa_hi;
+        u32 rx_ring_size;
+        u32 rx_ring_rp_pa;
+        u32 rx_ring_rp_pa_hi;
+        u32 rx_comp_ring_base_pa;
+        u32 rx_comp_ring_base_pa_hi;
+        u32 rx_comp_ring_size;
+        u32 rx_comp_ring_wp_pa;
+        u32 rx_comp_ring_wp_pa_hi;
+        u8 ipa_pipe_number;
+} __packed;
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+        struct IpaHwWdiRxExtCfgCmdParams_t {
+                u32 ipa_pipe_number:8;
+                u32 qmap_id:8;
+                u32 reserved:16;
+        } __packed params;
+        u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number : The IPA pipe number.
This could be Tx or an Rx pipe + * @reserved : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiCommonChCmdData_t { + struct IpaHwWdiCommonChCmdParams_t { + u32 ipa_pipe_number:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR + * event. + * @wdi_error_type : The IPA pipe number to be torn down. This could be Tx or + * an Rx pipe + * @reserved : Reserved + * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable + * only if error type indicates channel error + * @wdi_ch_err_type : Information about the channel error (if available) + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiErrorEventData_t { + struct IpaHwWdiErrorEventParams_t { + u32 wdi_error_type:8; + u32 reserved:8; + u32 ipa_pipe_number:8; + u32 wdi_ch_err_type:8; + } __packed params; + u32 raw32b; +} __packed; + +static void ipa_uc_wdi_event_log_info_handler( +struct IpaHwEventLogInfoData_t *uc_event_top_mmio) + +{ + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) { + IPAERR("WDI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + +if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].params.size != +sizeof(struct IpaHwStatsWDIInfoData_t)) { + IPAERR("wdi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsWDIInfoData_t), + uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].params.size + ); + return; +} + +ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = +uc_event_top_mmio->statsInfo.baseAddrOffset + +uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].params.offset; +IPAERR("WDI stats ofst=0x%x\n", ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + + if (ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst + + sizeof(struct IpaHwStatsWDIInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_wdi_stats 0x%x outside SRAM\n", + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + return; + } + + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio = + ioremap(ipa_ctx->ipa_wrapper_base + + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst, + sizeof(struct IpaHwStatsWDIInfoData_t)); + if (!ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("fail to ioremap uc wdi stats\n"); + return; + } +} + +static void ipa_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) + +{ + union IpaHwWdiErrorEventData_t wdi_evt; + struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext; + + if (uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_WDI_ERROR) { + wdi_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n", + wdi_evt.params.wdi_error_type, + wdi_evt.params.ipa_pipe_number, + wdi_evt.params.wdi_ch_err_type); + wdi_sram_mmio_ext = + (struct IpaHwSharedMemWdiMapping_t *) + uc_sram_mmio; + IPADBG("tx_ch_state=%u rx_ch_state=%u\n", + wdi_sram_mmio_ext->wdi_tx_ch_0_state, + wdi_sram_mmio_ext->wdi_rx_ch_0_state); + } +} + +/** + * ipa2_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats.y = \ + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y +#define RX_STATS(y) stats->rx_ch_stats.y = \ + 
ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (!stats || !ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("bad parms stats=%p wdi_stats=%p\n", + stats, + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + TX_STATS(copy_engine_doorbell_value); + TX_STATS(num_db_fired); + TX_STATS(tx_comp_ring_stats.ringFull); + TX_STATS(tx_comp_ring_stats.ringEmpty); + TX_STATS(tx_comp_ring_stats.ringUsageHigh); + TX_STATS(tx_comp_ring_stats.ringUsageLow); + TX_STATS(tx_comp_ring_stats.RingUtilCount); + TX_STATS(bam_stats.bamFifoFull); + TX_STATS(bam_stats.bamFifoEmpty); + TX_STATS(bam_stats.bamFifoUsageHigh); + TX_STATS(bam_stats.bamFifoUsageLow); + TX_STATS(bam_stats.bamUtilCount); + TX_STATS(num_db); + TX_STATS(num_unexpected_db); + TX_STATS(num_bam_int_handled); + TX_STATS(num_bam_int_in_non_running_state); + TX_STATS(num_qmb_int_handled); + TX_STATS(num_bam_int_handled_while_wait_for_bam); + + RX_STATS(max_outstanding_pkts); + RX_STATS(num_pkts_processed); + RX_STATS(rx_ring_rp_value); + RX_STATS(rx_ind_ring_stats.ringFull); + RX_STATS(rx_ind_ring_stats.ringEmpty); + RX_STATS(rx_ind_ring_stats.ringUsageHigh); + RX_STATS(rx_ind_ring_stats.ringUsageLow); + RX_STATS(rx_ind_ring_stats.RingUtilCount); + RX_STATS(bam_stats.bamFifoFull); + RX_STATS(bam_stats.bamFifoEmpty); + RX_STATS(bam_stats.bamFifoUsageHigh); + RX_STATS(bam_stats.bamFifoUsageLow); + RX_STATS(bam_stats.bamUtilCount); + RX_STATS(num_bam_int_handled); + RX_STATS(num_db); + RX_STATS(num_unexpected_db); + RX_STATS(num_pkts_in_dis_uninit_state); + RX_STATS(num_ic_inj_vdev_change); + RX_STATS(num_ic_inj_fw_desc_change); + RX_STATS(num_qmb_int_handled); + RX_STATS(reserved1); + RX_STATS(reserved2); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +int ipa2_wdi_init(void) +{ + struct ipa_uc_hdlrs uc_wdi_cbs = { NULL}; + + uc_wdi_cbs.ipa_uc_event_hdlr = ipa_uc_wdi_event_handler; + uc_wdi_cbs.ipa_uc_event_log_info_hdlr = + ipa_uc_wdi_event_log_info_handler; + uc_wdi_cbs.ipa_uc_loaded_hdlr = + ipa_uc_wdi_loaded_handler; + + ipa_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs); + + return 0; +} + +static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len, + bool device, unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE), + PAGE_SIZE); + int ret; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + ret = ipa_iommu_map(cb->iommu_domain, va, rounddown(pa, PAGE_SIZE), + true_len, + device ? 
(prot | IOMMU_MMIO) : prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len); + return -EINVAL; + } + + ipa_ctx->wdi_map_cnt++; + cb->next_addr = va + true_len; + *iova = va + pa - rounddown(pa, PAGE_SIZE); + return 0; +} + +static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt, + unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + int ret; + int i; + struct scatterlist *sg; + unsigned long start_iova = va; + phys_addr_t phys; + size_t len; + int count = 0; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return -EINVAL; + } + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + phys = page_to_phys(sg_page(sg)); + len = PAGE_ALIGN(sg->offset + sg->length); + + ret = ipa_iommu_map(cb->iommu_domain, va, phys, len, prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", + &phys, len); + goto bad_mapping; + } + va += len; + ipa_ctx->wdi_map_cnt++; + count++; + } + cb->next_addr = va; + *iova = start_iova; + + return 0; + +bad_mapping: + for_each_sg(sgt->sgl, sg, count, i) + iommu_unmap(cb->iommu_domain, sg_dma_address(sg), + sg_dma_len(sg)); + return -EINVAL; +} + +static void ipa_release_uc_smmu_mappings(enum ipa_client_type client) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + int i; + int j; + int start; + int end; + + if (IPA_CLIENT_IS_CONS(client)) { + start = IPA_WDI_TX_RING_RES; + end = IPA_WDI_CE_DB_RES; + } else { + start = IPA_WDI_RX_RING_RES; + if (ipa_ctx->ipa_wdi2) + end = IPA_WDI_RX_COMP_RING_WP_RES; + else + end = IPA_WDI_RX_RING_RP_RES; + } + + for (i = start; i <= end; i++) { + if (wdi_res[i].valid) { + for (j = 0; j < wdi_res[i].nents; j++) { + iommu_unmap(cb->iommu_domain, + wdi_res[i].res[j].iova, + wdi_res[i].res[j].size); + ipa_ctx->wdi_map_cnt--; + } + kfree(wdi_res[i].res); + wdi_res[i].valid = false; + } + } + + if (ipa_ctx->wdi_map_cnt == 0) + cb->next_addr = cb->va_end; + +} + +static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa, + unsigned long iova, size_t len) +{ + IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &pa, iova, len); + wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res), + GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = 1; + wdi_res[res_idx].valid = true; + wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE); + wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE); + wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa, + PAGE_SIZE), PAGE_SIZE); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova, + wdi_res[res_idx].res->size); +} + +static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt, + unsigned long iova) +{ + int i; + struct scatterlist *sg; + unsigned long curr_iova = iova; + + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return; + } + + wdi_res[res_idx].res = kcalloc(sgt->nents, + sizeof(*wdi_res[res_idx].res), GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = sgt->nents; + wdi_res[res_idx].valid = true; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + wdi_res[res_idx].res[i].pa = page_to_phys(sg_page(sg)); + wdi_res[res_idx].res[i].iova = curr_iova; + wdi_res[res_idx].res[i].size = 
PAGE_ALIGN(sg->offset + + sg->length); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res[i].pa, + wdi_res[res_idx].res[i].iova, + wdi_res[res_idx].res[i].size); + curr_iova += wdi_res[res_idx].res[i].size; + } +} + +int ipa2_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova) +{ + /* support for SMMU on WLAN but no SMMU on IPA */ + if (wlan_smmu_en && ipa_ctx->smmu_s1_bypass) { + IPAERR("Unsupported SMMU pairing\n"); + return -EINVAL; + } + + /* legacy: no SMMUs on either end */ + if (!wlan_smmu_en && ipa_ctx->smmu_s1_bypass) { + *iova = pa; + return 0; + } + + /* no SMMU on WLAN but SMMU on IPA */ + if (!wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) { + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) { + IPAERR("Fail to create mapping res %d\n", res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + return 0; + } + + /* SMMU on WLAN and SMMU on IPA */ + if (wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) { + switch (res_idx) { + case IPA_WDI_RX_RING_RP_RES: + case IPA_WDI_CE_DB_RES: + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, + iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + break; + case IPA_WDI_RX_RING_RES: + case IPA_WDI_TX_RING_RES: + case IPA_WDI_CE_RING_RES: + if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova); + break; + default: + WARN_ON(1); + } + } + + return 0; +} + +/** + * ipa2_connect_wdi_pipe() - WDI client connect + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa_ep_context *ep; + struct ipa_mem_buffer cmd; + struct IpaHwWdiTxSetUpCmdData_t *tx; + struct IpaHwWdiRxSetUpCmdData_t *rx; + struct IpaHwWdi2TxSetUpCmdData_t *tx_2; + struct IpaHwWdi2RxSetUpCmdData_t *rx_2; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + unsigned long va; + phys_addr_t pa; + u32 len; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
in=%p out=%p\n", in, out); + if (in) + IPAERR("client = %d\n", in->sys.client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT || + in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on TX\n"); + return -EINVAL; + } + } else { + if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on RX\n"); + return -EINVAL; + } + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + ipa_ep_idx = ipa2_get_ep_mapping(in->sys.client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx); + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa_ctx->ipa_wdi2) + cmd.size = sizeof(*tx_2); + else + cmd.size = sizeof(*tx); + IPADBG("comp_ring_base_pa=0x%pa\n", + &in->u.dl.comp_ring_base_pa); + IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); + IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa); + IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + } else { + if (ipa_ctx->ipa_wdi2) { + /* WDI2.0 feature */ + cmd.size = sizeof(*rx_2); + IPADBG("rdy_ring_rp value =%d\n", + *in->u.ul.rdy_ring_rp_va); + IPADBG("rx_comp_ring_wp value=%d\n", + *in->u.ul.rdy_comp_ring_wp_va); + ipa_ctx->uc_ctx.rdy_ring_rp_va = + in->u.ul.rdy_ring_rp_va; + ipa_ctx->uc_ctx.rdy_comp_ring_wp_va = + in->u.ul.rdy_comp_ring_wp_va; + } else { + cmd.size = sizeof(*rx); + } + IPADBG("rx_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_ring_base_pa); + IPADBG("rx_ring_size=%d\n", + in->u.ul.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul.rdy_ring_rp_pa); + + IPADBG("rx_comp_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_base_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_wp_pa); + + ipa_ctx->uc_ctx.rdy_ring_base_pa = + in->u.ul.rdy_ring_base_pa; + ipa_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul.rdy_ring_rp_pa; + ipa_ctx->uc_ctx.rdy_ring_size = + in->u.ul.rdy_ring_size; + ipa_ctx->uc_ctx.rdy_comp_ring_base_pa = + in->u.ul.rdy_comp_ring_base_pa; + ipa_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul.rdy_comp_ring_wp_pa; + ipa_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul.rdy_comp_ring_size; + + /* check if the VA is empty */ + if (!in->u.ul.rdy_ring_rp_va && ipa_ctx->ipa_wdi2) { + IPAERR("rdy_ring_rp_va is empty, wdi2.0(%d)\n", + ipa_ctx->ipa_wdi2); + goto dma_alloc_fail; + } + if (!in->u.ul.rdy_comp_ring_wp_va && ipa_ctx->ipa_wdi2) { + IPAERR("comp_ring_wp_va is empty, wdi2.0(%d)\n", + ipa_ctx->ipa_wdi2); + goto dma_alloc_fail; + } + } + + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + result = -ENOMEM; + goto dma_alloc_fail; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa_ctx->ipa_wdi2) { + tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? 
in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->comp_ring_size = len; + IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->comp_ring_base_pa_hi, + tx_2->comp_ring_base_pa); + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->ce_ring_size = len; + IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_base_pa_hi, + tx_2->ce_ring_base_pa); + + pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_doorbell_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_doorbell_pa_hi, + tx_2->ce_ring_doorbell_pa); + + tx_2->num_tx_buffers = in->u.dl.num_tx_buffers; + tx_2->ipa_pipe_number = ipa_ep_idx; + } else { + tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base; + len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->comp_ring_base_pa = va; + tx->comp_ring_size = len; + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_base_pa = va; + tx->ce_ring_size = len; + pa = in->smmu_enabled ? 
in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_doorbell_pa = va; + tx->num_tx_buffers = in->u.dl.num_tx_buffers; + tx->ipa_pipe_number = ipa_ep_idx; + } + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } + } else { + if (ipa_ctx->ipa_wdi2) { + rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_ring_size = len; + IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_ring_base_pa_hi, + rx_2->rx_ring_base_pa); + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_rp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n", + rx_2->rx_ring_rp_pa_hi, + rx_2->rx_ring_rp_pa); + len = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_size : + in->u.ul.rdy_comp_ring_size; + IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_comp_ring_size, + in->u.ul.rdy_comp_ring_size); + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_comp_ring_base_pa, + &in->u.ul_smmu.rdy_comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_comp_ring_size = len; + IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_base_pa_hi, + rx_2->rx_comp_ring_base_pa); + + pa = in->smmu_enabled ? 
+ in->u.ul_smmu.rdy_comp_ring_wp_pa : + in->u.ul.rdy_comp_ring_wp_pa; + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_rng WP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_wp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_wp_pa_hi, + rx_2->rx_comp_ring_wp_pa); + rx_2->ipa_pipe_number = ipa_ep_idx; + } else { + rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping RX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_base_pa = va; + rx->rx_ring_size = len; + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc mapping RX rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_rp_pa = va; + rx->ipa_pipe_number = ipa_ep_idx; + } + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } else { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + } + + ep->valid = 1; + ep->client = in->sys.client; + ep->keep_ipa_awake = in->sys.keep_ipa_awake; + result = ipa_disable_data_path(ipa_ep_idx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto uc_timeout; + } + if (IPA_CLIENT_IS_PROD(in->sys.client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + } + + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CLIENT_IS_CONS(in->sys.client) ? 
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ep->skip_ep_cfg = in->sys.skip_ep_cfg; + ep->client_notify = in->sys.notify; + ep->priv = in->sys.priv; + + /* for AP+STA stats update */ + if (in->wdi_notify) + ipa_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify; + else + IPADBG("in->wdi_notify is null\n"); + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + out->clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client)) + ipa_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + ep->uc_offload_state |= IPA_WDI_CONNECTED; + IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx); + + return 0; + +ipa_cfg_ep_fail: + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); +uc_timeout: + ipa_release_uc_smmu_mappings(in->sys.client); + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); +dma_alloc_fail: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); +fail: + return result; +} + + +/** + * ipa2_disconnect_wdi_pipe() - WDI client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disconnect_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t tear; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + tear.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(tear.raw32b, + IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ipa_delete_dflt_flt_rules(clnt_hdl); + ipa_release_uc_smmu_mappings(ep->client); + + memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + /* for AP+STA stats update */ + if (ipa_ctx->uc_wdi_ctx.stats_notify) + ipa_ctx->uc_wdi_ctx.stats_notify = NULL; + else + IPADBG("uc_wdi_ctx.stats_notify already null\n"); + +uc_timeout: + return result; +} + +/** + * ipa2_enable_wdi_pipe() - WDI client enable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_enable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t enable; + struct ipa_ep_cfg_holb holb_cfg; + + if 
(unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + enable.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(enable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + result = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + ep->uc_offload_state |= IPA_WDI_ENABLED; + IPADBG("client (ep: %d) enabled\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa2_disable_wdi_pipe() - WDI client disable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t disable; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 prod_hdl; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + result = ipa_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + clnt_hdl); + result = -EPERM; + goto uc_timeout; + } + + /** + * To avoid data stall during continuous SAP on/off before + * setting delay to IPA Consumer pipe, remove delay and enable + * holb on IPA Producer pipe + */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + + /* remove delay on wlan-prod pipe*/ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + + prod_hdl = ipa2_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + if (ipa_ctx->ep[prod_hdl].valid == 1) { + result = ipa_disable_data_path(prod_hdl); + if (result) { + IPAERR("disable data path failed\n"); + IPAERR("res=%d clnt=%d\n", + result, prod_hdl); + result = -EPERM; + goto uc_timeout; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC); + } + + disable.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(disable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + /* Set the delay after disabling IPA Producer pipe */ + if 
(IPA_CLIENT_IS_PROD(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_ENABLED; + IPADBG("client (ep: %d) disabled\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa2_resume_wdi_pipe() - WDI client resume + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_resume_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t resume; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + resume.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(resume.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_RESUME, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) fail un-susp/delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl); + + ep->uc_offload_state |= IPA_WDI_RESUMED; + IPADBG("client (ep: %d) resumed\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa2_suspend_wdi_pipe() - WDI client suspend + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_suspend_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t suspend; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + suspend.params.ipa_pipe_number = clnt_hdl; + + if (IPA_CLIENT_IS_PROD(ep->client)) { + /* + * For WDI 2.0 need to ensure pipe will be empty before suspend + * as IPA uC will fail to suspend the pipe otherwise. 
+ */ + if (ipa_ctx->ipa_wdi2) { + source_pipe_bitmask = 1 << + ipa_get_ep_mapping(ep->client); + result = ipa2_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); + if (result) { + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", result); + IPAERR("remove delay from SCND reg\n"); + memset(&ep_cfg_ctrl, 0, + sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = false; + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } else { + disable_force_clear = true; + } + } + IPADBG("Post suspend event first for IPA Producer\n"); + IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl); + result = ipa_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + if (IPA_CLIENT_IS_CONS(ep->client)) { + ep_cfg_ctrl.ipa_ep_suspend = true; + result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed to suspend result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) suspended\n", clnt_hdl); + } else { + ep_cfg_ctrl.ipa_ep_delay = true; + result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed to delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) delayed\n", clnt_hdl); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + result = ipa_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + if (disable_force_clear) + ipa2_disable_force_clear(clnt_hdl); + + ipa_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_RESUMED; + IPADBG("client (ep: %d) suspended\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa_broadcast_wdi_quota_reach_ind() - quota reach + * @uint32_t fid: [in] input netdev ID + * @uint64_t num_bytes: [in] used bytes + * + * Returns: 0 on success, negative on failure + */ +int ipa2_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + IPAERR("Quota reached indication on fis(%d) Mbytes(%lu)\n", + fid, + (unsigned long) num_bytes); + ipa_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN); + return 0; +} + +int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiRxExtCfgCmdData_t qmap; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR_RL("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) { + IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + qmap.params.ipa_pipe_number = clnt_hdl; + qmap.params.qmap_id = qmap_id; + + result = ipa_uc_send_cmd(qmap.raw32b, + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id); + +uc_timeout: + return 
result; +} + +/** + * ipa2_uc_reg_rdyCB() - To register uC + * ready CB if uC not ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa2_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *inout) +{ + int result = 0; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (inout == NULL) { + IPAERR("bad parm. inout=%p ", inout); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) { + inout->is_uC_ready = false; + ipa_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify; + ipa_ctx->uc_wdi_ctx.priv = inout->priv; + } else { + inout->is_uC_ready = true; + } + + return 0; +} + +/** + * ipa2_uc_dereg_rdyCB() - To de-register uC ready CB + * + * Returns: 0 on success, negative on failure + * + */ +int ipa2_uc_dereg_rdyCB(void) +{ + ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL; + ipa_ctx->uc_wdi_ctx.priv = NULL; + + return 0; +} + + +/** + * ipa2_uc_wdi_get_dbpa() - To retrieve + * doorbell physical address of wlan pipes + * @param: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa2_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *param) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (param == NULL || param->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. param=%p ", param); + if (param) + IPAERR("client = %d\n", param->client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(param->client)) { + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } + } else { + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } else { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + } + + return 0; +} + +static void ipa_uc_wdi_loaded_handler(void) +{ + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa_ctx->uc_wdi_ctx.uc_ready_cb) { + ipa_ctx->uc_wdi_ctx.uc_ready_cb( + ipa_ctx->uc_wdi_ctx.priv); + + ipa_ctx->uc_wdi_ctx.uc_ready_cb = + NULL; + ipa_ctx->uc_wdi_ctx.priv = NULL; + } +} + +int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); + int i; + int ret = 0; + int prot = IOMMU_READ | IOMMU_WRITE; + + if (!info) { + IPAERR("info = %p\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = ipa_iommu_map(cb->iommu_domain, + rounddown(info[i].iova, PAGE_SIZE), + rounddown(info[i].pa, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE), + prot); + } + + return ret; +} + +int 
ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); + int i; + int ret = 0; + + if (!info) { + IPAERR("info = %p\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = iommu_unmap(cb->iommu_domain, + rounddown(info[i].iova, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE)); + } + + return ret; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..fd85997c7f825a2b586d9e736b9b9184c44083e1 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -0,0 +1,5278 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include /* gen_pool_alloc() */ +#include +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL) +#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL) +#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL) +#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) +#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL) +#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1) +#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1) +#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1) +#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL + +#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000) +#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600) + +/* Max pipes + ICs for TAG process */ +#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6) + +#define IPA_TAG_SLEEP_MIN_USEC (1000) +#define IPA_TAG_SLEEP_MAX_USEC (2000) +#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ) +#define IPA_BCR_REG_VAL (0x001FFF7F) +#define IPA_AGGR_GRAN_MIN (1) +#define IPA_AGGR_GRAN_MAX (32) +#define IPA_EOT_COAL_GRAN_MIN (1) +#define IPA_EOT_COAL_GRAN_MAX (16) +#define MSEC 1000 +#define MIN_RX_POLL_TIME 1 +#define MAX_RX_POLL_TIME 5 +#define UPPER_CUTOFF 50 +#define LOWER_CUTOFF 10 + +#define IPA_DEFAULT_SYS_YELLOW_WM 32 + +#define IPA_AGGR_BYTE_LIMIT (\ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT) +#define IPA_AGGR_PKT_LIMIT (\ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT) + +static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0, + IPA_OFFSET_MEQ32_1, -1 }; +static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0, + IPA_OFFSET_MEQ128_1, -1 }; +static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0, + IPA_IHL_OFFSET_RANGE16_1, -1 }; +static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0, + IPA_IHL_OFFSET_MEQ32_1, -1 }; +#define IPA_1_1 (0) +#define IPA_2_0 (1) +#define IPA_2_6L (2) + +#define INVALID_EP_MAPPING_INDEX (-1) + +#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \ + (ARRAY_SIZE(__eq_array) <= (__eq_index)) + +struct ipa_ep_confing { + bool valid; + int pipe_num; +}; + +static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = { + [IPA_1_1][IPA_CLIENT_HSIC1_PROD] = {true, 19}, + [IPA_1_1][IPA_CLIENT_HSIC2_PROD] = {true, 12}, + [IPA_1_1][IPA_CLIENT_USB2_PROD] = {true, 12}, + [IPA_1_1][IPA_CLIENT_HSIC3_PROD] = {true, 13}, + [IPA_1_1][IPA_CLIENT_USB3_PROD] = {true, 13}, + 
[IPA_1_1][IPA_CLIENT_HSIC4_PROD] = {true, 0}, + [IPA_1_1][IPA_CLIENT_USB4_PROD] = {true, 0}, + [IPA_1_1][IPA_CLIENT_USB_PROD] = {true, 11}, + [IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = {true, 15}, + [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD] = {true, 8}, + [IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD] = {true, 6}, + [IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 2}, + [IPA_1_1][IPA_CLIENT_APPS_CMD_PROD] = {true, 1}, + [IPA_1_1][IPA_CLIENT_Q6_LAN_PROD] = {true, 5}, + + [IPA_1_1][IPA_CLIENT_HSIC1_CONS] = {true, 14}, + [IPA_1_1][IPA_CLIENT_HSIC2_CONS] = {true, 16}, + [IPA_1_1][IPA_CLIENT_USB2_CONS] = {true, 16}, + [IPA_1_1][IPA_CLIENT_HSIC3_CONS] = {true, 17}, + [IPA_1_1][IPA_CLIENT_USB3_CONS] = {true, 17}, + [IPA_1_1][IPA_CLIENT_HSIC4_CONS] = {true, 18}, + [IPA_1_1][IPA_CLIENT_USB4_CONS] = {true, 18}, + [IPA_1_1][IPA_CLIENT_USB_CONS] = {true, 10}, + [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS] = {true, 9}, + [IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS] = {true, 7}, + [IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS] = {true, 3}, + [IPA_1_1][IPA_CLIENT_Q6_LAN_CONS] = {true, 4}, + + + [IPA_2_0][IPA_CLIENT_HSIC1_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_WLAN1_PROD] = {true, 18}, + [IPA_2_0][IPA_CLIENT_USB2_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_USB3_PROD] = {true, 13}, + [IPA_2_0][IPA_CLIENT_USB4_PROD] = {true, 0}, + [IPA_2_0][IPA_CLIENT_USB_PROD] = {true, 11}, + [IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4}, + [IPA_2_0][IPA_CLIENT_APPS_CMD_PROD] = {true, 3}, + [IPA_2_0][IPA_CLIENT_ODU_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_MHI_PROD] = {true, 18}, + [IPA_2_0][IPA_CLIENT_Q6_LAN_PROD] = {true, 6}, + [IPA_2_0][IPA_CLIENT_Q6_CMD_PROD] = {true, 7}, + + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] + = {true, 12}, + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] + = {true, 19}, + [IPA_2_0][IPA_CLIENT_ETHERNET_PROD] = {true, 12}, + /* Only for test purpose */ + [IPA_2_0][IPA_CLIENT_TEST_PROD] = {true, 19}, + [IPA_2_0][IPA_CLIENT_TEST1_PROD] = {true, 19}, + [IPA_2_0][IPA_CLIENT_TEST2_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_TEST3_PROD] = {true, 11}, + [IPA_2_0][IPA_CLIENT_TEST4_PROD] = {true, 0}, + + [IPA_2_0][IPA_CLIENT_HSIC1_CONS] = {true, 13}, + [IPA_2_0][IPA_CLIENT_WLAN1_CONS] = {true, 17}, + [IPA_2_0][IPA_CLIENT_WLAN2_CONS] = {true, 16}, + [IPA_2_0][IPA_CLIENT_WLAN3_CONS] = {true, 14}, + [IPA_2_0][IPA_CLIENT_WLAN4_CONS] = {true, 19}, + [IPA_2_0][IPA_CLIENT_USB_CONS] = {true, 15}, + [IPA_2_0][IPA_CLIENT_USB_DPL_CONS] = {true, 0}, + [IPA_2_0][IPA_CLIENT_APPS_LAN_CONS] = {true, 2}, + [IPA_2_0][IPA_CLIENT_APPS_WAN_CONS] = {true, 5}, + [IPA_2_0][IPA_CLIENT_ODU_EMB_CONS] = {true, 13}, + [IPA_2_0][IPA_CLIENT_ODU_TETH_CONS] = {true, 1}, + [IPA_2_0][IPA_CLIENT_MHI_CONS] = {true, 17}, + [IPA_2_0][IPA_CLIENT_Q6_LAN_CONS] = {true, 8}, + [IPA_2_0][IPA_CLIENT_Q6_WAN_CONS] = {true, 9}, + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] + = {true, 13}, + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] + = {true, 16}, + [IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] + = {true, 10}, + [IPA_2_0][IPA_CLIENT_ETHERNET_CONS] = {true, 1}, + + /* Only for test purpose */ + [IPA_2_0][IPA_CLIENT_TEST_CONS] = {true, 1}, + [IPA_2_0][IPA_CLIENT_TEST1_CONS] = {true, 1}, + [IPA_2_0][IPA_CLIENT_TEST2_CONS] = {true, 16}, + [IPA_2_0][IPA_CLIENT_TEST3_CONS] = {true, 13}, + [IPA_2_0][IPA_CLIENT_TEST4_CONS] = {true, 15}, + + + [IPA_2_6L][IPA_CLIENT_USB_PROD] = {true, 1}, + [IPA_2_6L][IPA_CLIENT_WLAN1_PROD] = {true, 18}, + [IPA_2_6L][IPA_CLIENT_WLAN1_CONS] = {true, 17}, + [IPA_2_6L][IPA_CLIENT_WLAN2_CONS] = {true, 16}, + [IPA_2_6L][IPA_CLIENT_WLAN3_CONS] = {true, 15}, + 
[IPA_2_6L][IPA_CLIENT_WLAN4_CONS] = {true, 19}, + [IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4}, + [IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD] = {true, 3}, + [IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD] = {true, 6}, + [IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD] = {true, 7}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD] = {true, 11}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD] = {true, 13}, + + /* Only for test purpose */ + [IPA_2_6L][IPA_CLIENT_TEST_PROD] = {true, 11}, + [IPA_2_6L][IPA_CLIENT_TEST1_PROD] = {true, 11}, + [IPA_2_6L][IPA_CLIENT_TEST2_PROD] = {true, 12}, + [IPA_2_6L][IPA_CLIENT_TEST3_PROD] = {true, 13}, + [IPA_2_6L][IPA_CLIENT_TEST4_PROD] = {true, 14}, + + [IPA_2_6L][IPA_CLIENT_USB_CONS] = {true, 0}, + [IPA_2_6L][IPA_CLIENT_USB_DPL_CONS] = {true, 10}, + [IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS] = {true, 2}, + [IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS] = {true, 5}, + [IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS] = {true, 8}, + [IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS] = {true, 9}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS] = {true, 12}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS] = {true, 14}, + + /* Only for test purpose */ + [IPA_2_6L][IPA_CLIENT_TEST_CONS] = {true, 15}, + [IPA_2_6L][IPA_CLIENT_TEST1_CONS] = {true, 15}, + [IPA_2_6L][IPA_CLIENT_TEST2_CONS] = {true, 0}, + [IPA_2_6L][IPA_CLIENT_TEST3_CONS] = {true, 1}, + [IPA_2_6L][IPA_CLIENT_TEST4_CONS] = {true, 10}, +}; + +static struct msm_bus_vectors ipa_init_vectors_v1_1[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 0, + .ib = 0, + }, +}; + +static struct msm_bus_vectors ipa_init_vectors_v2_0[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 0, + .ib = 0, + }, +}; + +static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 50000000, + .ib = 960000000, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 50000000, + .ib = 960000000, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 50000000, + .ib = 960000000, + }, +}; + +static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 100000000, + .ib = 1300000000, + }, + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 100000000, + .ib = 1300000000, + }, +}; + +static struct msm_bus_paths ipa_usecases_v1_1[] = { + { + ARRAY_SIZE(ipa_init_vectors_v1_1), + ipa_init_vectors_v1_1, + }, + { + ARRAY_SIZE(ipa_max_perf_vectors_v1_1), + ipa_max_perf_vectors_v1_1, + }, +}; + +static struct msm_bus_paths ipa_usecases_v2_0[] = { + { + ARRAY_SIZE(ipa_init_vectors_v2_0), + ipa_init_vectors_v2_0, + }, + { + ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0), + ipa_nominal_perf_vectors_v2_0, + }, +}; + +static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = { + .usecase = ipa_usecases_v1_1, + .num_usecases = ARRAY_SIZE(ipa_usecases_v1_1), + .name = "ipa", +}; + +static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = { + .usecase = ipa_usecases_v2_0, + .num_usecases = ARRAY_SIZE(ipa_usecases_v2_0), + .name = "ipa", +}; + +void ipa_active_clients_lock(void) +{ + unsigned long flags; + + mutex_lock(&ipa_ctx->ipa_active_clients.mutex); + 
spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags); + ipa_ctx->ipa_active_clients.mutex_locked = true; + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags); +} + +int ipa_active_clients_trylock(unsigned long *flags) +{ + spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags); + if (ipa_ctx->ipa_active_clients.mutex_locked) { + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, + *flags); + return 0; + } + + return 1; +} + +void ipa_active_clients_trylock_unlock(unsigned long *flags) +{ + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags); +} + +void ipa_active_clients_unlock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags); + ipa_ctx->ipa_active_clients.mutex_locked = false; + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags); + mutex_unlock(&ipa_ctx->ipa_active_clients.mutex); +} + +/** + * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an + * IPA_RM resource + * + * @resource: [IN] IPA Resource Manager resource + * @clients: [OUT] Empty array which will contain the list of clients. The + * caller must initialize this array. + * + * Return codes: 0 on success, negative on failure. + */ +static int ipa_get_clients_from_rm_resource( + enum ipa_rm_resource_name resource, + struct ipa_client_names *clients) +{ + int i = 0; + + if (resource < 0 || + resource >= IPA_RM_RESOURCE_MAX || + !clients) { + IPAERR("Bad parameters\n"); + return -EINVAL; + } + + switch (resource) { + case IPA_RM_RESOURCE_USB_CONS: + clients->names[i++] = IPA_CLIENT_USB_CONS; + break; + case IPA_RM_RESOURCE_HSIC_CONS: + clients->names[i++] = IPA_CLIENT_HSIC1_CONS; + break; + case IPA_RM_RESOURCE_WLAN_CONS: + clients->names[i++] = IPA_CLIENT_WLAN1_CONS; + clients->names[i++] = IPA_CLIENT_WLAN2_CONS; + clients->names[i++] = IPA_CLIENT_WLAN3_CONS; + clients->names[i++] = IPA_CLIENT_WLAN4_CONS; + break; + case IPA_RM_RESOURCE_MHI_CONS: + clients->names[i++] = IPA_CLIENT_MHI_CONS; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS; + clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS; + break; + case IPA_RM_RESOURCE_ETHERNET_CONS: + clients->names[i++] = IPA_CLIENT_ETHERNET_CONS; + break; + case IPA_RM_RESOURCE_USB_PROD: + clients->names[i++] = IPA_CLIENT_USB_PROD; + break; + case IPA_RM_RESOURCE_HSIC_PROD: + clients->names[i++] = IPA_CLIENT_HSIC1_PROD; + break; + case IPA_RM_RESOURCE_MHI_PROD: + clients->names[i++] = IPA_CLIENT_MHI_PROD; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + clients->names[i++] = IPA_CLIENT_ODU_PROD; + break; + case IPA_RM_RESOURCE_ETHERNET_PROD: + clients->names[i++] = IPA_CLIENT_ETHERNET_PROD; + break; + default: + break; + } + clients->length = i; + + return 0; +} + +/** + * ipa_should_pipe_be_suspended() - returns true when the client's pipe should + * be suspended during a power save scenario. False otherwise. 
+ * + * @client: [IN] IPA client + */ +bool ipa_should_pipe_be_suspended(enum ipa_client_type client) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + WARN_ON(1); + return false; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + if (ep->keep_ipa_awake) + return false; + + if (client == IPA_CLIENT_USB_CONS || + client == IPA_CLIENT_MHI_CONS || + client == IPA_CLIENT_HSIC1_CONS || + client == IPA_CLIENT_WLAN1_CONS || + client == IPA_CLIENT_WLAN2_CONS || + client == IPA_CLIENT_WLAN3_CONS || + client == IPA_CLIENT_WLAN4_CONS || + client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS || + client == IPA_CLIENT_ETHERNET_CONS) + return true; + + return false; +} + +/** + * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * resource and decrement active clients counter, which may result in clock + * gating of IPA clocks. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource) +{ + struct ipa_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + bool pipe_suspended = false; + + memset(&clients, 0, sizeof(clients)); + res = ipa_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("Bad params.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa_ctx->resume_on_connect[client] = false; + if (ipa_ctx->ep[ipa_ep_idx].client == client && + ipa_should_pipe_be_suspended(client)) { + if (ipa_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); + pipe_suspended = true; + } + } + } + /* Sleep ~1 msec */ + if (pipe_suspended) + usleep_range(1000, 2000); + + /* before gating IPA clocks do TAG process */ + ipa_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); + + return 0; +} + +/** + * ipa2_suspend_resource_no_block() - suspend client endpoints related to the + * IPA_RM resource and decrement active clients counter. This function is + * guaranteed to avoid sleeping. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource) +{ + int res; + struct ipa_client_names clients; + int index; + enum ipa_client_type client; + struct ipa_ep_cfg_ctrl suspend; + int ipa_ep_idx; + unsigned long flags; + struct ipa_active_client_logging_info log_info; + + if (ipa_active_clients_trylock(&flags) == 0) + return -EPERM; + if (ipa_ctx->ipa_active_clients.cnt == 1) { + res = -EPERM; + goto bail; + } + + memset(&clients, 0, sizeof(clients)); + res = ipa_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n" + , resource); + goto bail; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa_ctx->resume_on_connect[client] = false; + if (ipa_ctx->ep[ipa_ep_idx].client == client && + ipa_should_pipe_be_suspended(client)) { + if (ipa_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + } + } + + if (res == 0) { + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(resource)); + ipa2_active_clients_log_dec(&log_info, true); + ipa_ctx->ipa_active_clients.cnt--; + IPADBG("active clients = %d\n", + ipa_ctx->ipa_active_clients.cnt); + } +bail: + ipa_active_clients_trylock_unlock(&flags); + + return res; +} + +/** + * ipa2_resume_resource() - resume client endpoints related to the IPA_RM + * resource. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa2_resume_resource(enum ipa_rm_resource_name resource) +{ + + struct ipa_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + + memset(&clients, 0, sizeof(clients)); + res = ipa_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("ipa_get_clients_from_rm_resource() failed.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + /* + * The related ep, will be resumed on connect + * while its resource is granted + */ + ipa_ctx->resume_on_connect[client] = true; + IPADBG("%d will be resumed on connect.\n", client); + if (ipa_ctx->ep[ipa_ep_idx].client == client && + ipa_should_pipe_be_suspended(client)) { + spin_lock(&ipa_ctx->disconnect_lock); + if (ipa_ctx->ep[ipa_ep_idx].valid && + !ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) { + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + spin_unlock(&ipa_ctx->disconnect_lock); + } + } + + return res; +} + +/* read how much SRAM is available for SW use + * In case of IPAv2.0 this will also supply an offset from + * which we can start write + */ +static void _ipa_sram_settings_read_v1_1(void) +{ + ipa_ctx->smem_restricted_bytes = 0; + ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v1_1); + ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST; + ipa_ctx->hdr_tbl_lcl = true; + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = true; + ipa_ctx->ip6_flt_tbl_lcl = true; +} + +static void 
_ipa_sram_settings_read_v2_0(void) +{ + ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); + ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); + ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa_ctx->hdr_tbl_lcl = false; + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = false; + ipa_ctx->ip6_flt_tbl_lcl = false; +} + +static void _ipa_sram_settings_read_v2_5(void) +{ + ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); + ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); + ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa_ctx->hdr_tbl_lcl = false; + ipa_ctx->hdr_proc_ctx_tbl_lcl = true; + + /* + * when proc ctx table is located in internal memory, + * modem entries resides first. + */ + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { + ipa_ctx->hdr_proc_ctx_tbl.start_offset = + IPA_MEM_PART(modem_hdr_proc_ctx_size); + } + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = false; + ipa_ctx->ip6_flt_tbl_lcl = false; +} + +static void _ipa_sram_settings_read_v2_6L(void) +{ + ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); + ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); + ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa_ctx->hdr_tbl_lcl = false; + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = false; + ipa_ctx->ip6_flt_tbl_lcl = false; +} + +static void _ipa_cfg_route_v1_1(struct ipa_route *route) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, route->route_dis, + IPA_ROUTE_ROUTE_DIS_SHFT, + IPA_ROUTE_ROUTE_DIS_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe, + IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); + + ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); +} + +static void _ipa_cfg_route_v2_0(struct ipa_route *route) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, route->route_dis, + IPA_ROUTE_ROUTE_DIS_SHFT, + IPA_ROUTE_ROUTE_DIS_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe, + IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, 
route->route_frag_def_pipe, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK); + + ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); +} + +/** + * ipa_cfg_route() - configure IPA route + * @route: IPA route + * + * Return codes: + * 0: success + */ +int ipa_cfg_route(struct ipa_route *route) +{ + + IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n", + route->route_dis, + route->route_def_pipe, + route->route_def_hdr_table); + IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n", + route->route_def_hdr_ofst, + route->route_frag_def_pipe); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipa_ctx->ctrl->ipa_cfg_route(route); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa_cfg_filter() - configure filter + * @disable: disable value + * + * Return codes: + * 0: success + */ +int ipa_cfg_filter(u32 disable) +{ + u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst, + IPA_SETFIELD(!disable, + IPA_FILTER_FILTER_EN_SHFT, + IPA_FILTER_FILTER_EN_BMSK)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa_init_hw() - initialize HW + * + * Return codes: + * 0: success + */ +int ipa_init_hw(void) +{ + u32 ipa_version = 0; + + /* do soft reset of IPA */ + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1); + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0); + + /* enable IPA */ + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 0x11); + + /* Read IPA version and make sure we have access to the registers */ + ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST); + if (ipa_version == 0) + return -EFAULT; + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + /* set ipa_bcr to 0xFFFFFFFF for using new IPA behavior */ + ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL); + } + return 0; +} + +/** + * ipa2_get_ep_mapping() - provide endpoint mapping + * @client: client type + * + * Return value: endpoint mapping + */ +int ipa2_get_ep_mapping(enum ipa_client_type client) +{ + u8 hw_type_index = IPA_1_1; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return INVALID_EP_MAPPING_INDEX; + } + + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR_RL("Bad client number! client =%d\n", client); + return INVALID_EP_MAPPING_INDEX; + } + + switch (ipa_ctx->ipa_hw_type) { + case IPA_HW_v2_0: + case IPA_HW_v2_5: + hw_type_index = IPA_2_0; + break; + case IPA_HW_v2_6L: + hw_type_index = IPA_2_6L; + break; + default: + hw_type_index = IPA_1_1; + break; + } + + if (!ep_mapping[hw_type_index][client].valid) + return INVALID_EP_MAPPING_INDEX; + + return ep_mapping[hw_type_index][client].pipe_num; +} + +/* ipa2_set_client() - provide client mapping + * @client: client type + * + * Return value: none + */ + +void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink) +{ + if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { + IPAERR("Bad client number! client =%d\n", client); + } else if (index >= IPA_MAX_NUM_PIPES || index < 0) { + IPAERR("Bad pipe index! 
index =%d\n", index); + } else { + ipa_ctx->ipacm_client[index].client_enum = client; + ipa_ctx->ipacm_client[index].uplink = uplink; + } +} + +/* ipa2_get_wlan_stats() - get ipa wifi stats + * + * Return value: success or failure + */ +int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats) +{ + if (ipa_ctx->uc_wdi_ctx.stats_notify) { + ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS, + wdi_sap_stats); + } else { + IPAERR_RL("uc_wdi_ctx.stats_notify not registered\n"); + return -EFAULT; + } + return 0; +} + +int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota) +{ + if (ipa_ctx->uc_wdi_ctx.stats_notify) { + ipa_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA, + wdi_quota); + } else { + IPAERR("uc_wdi_ctx.stats_notify not registered\n"); + return -EFAULT; + } + return 0; +} + +/** + * ipa2_get_client() - provide client mapping + * @client: client type + * + * Return value: client mapping enum + */ +enum ipacm_client_enum ipa2_get_client(int pipe_idx) +{ + if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) { + IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx); + return IPACM_CLIENT_MAX; + } else { + return ipa_ctx->ipacm_client[pipe_idx].client_enum; + } +} + +/** + * ipa2_get_client_uplink() - provide client mapping + * @client: client type + * + * Return value: none + */ +bool ipa2_get_client_uplink(int pipe_idx) +{ + if (pipe_idx < 0 || pipe_idx >= IPA_MAX_NUM_PIPES) { + IPAERR("invalid pipe idx %d\n", pipe_idx); + return false; + } + + return ipa_ctx->ipacm_client[pipe_idx].uplink; +} + +/** + * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. + */ +enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx) +{ + int i; + int j; + enum ipa_client_type client; + struct ipa_client_names clients; + bool found = false; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return -EINVAL; + } + + client = ipa_ctx->ep[pipe_idx].client; + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + memset(&clients, 0, sizeof(clients)); + ipa_get_clients_from_rm_resource(i, &clients); + for (j = 0; j < clients.length; j++) { + if (clients.names[j] == client) { + found = true; + break; + } + } + if (found) + break; + } + + if (!found) + return -EFAULT; + + return i; +} + +/** + * ipa2_get_client_mapping() - provide client mapping + * @pipe_idx: IPA end-point number + * + * Return value: client mapping + */ +enum ipa_client_type ipa2_get_client_mapping(int pipe_idx) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return -EINVAL; + } + + return ipa_ctx->ep[pipe_idx].client; +} + +static void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset, + const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN]) +{ + *buf = ipa_write_8(hdr_mac_addr_offset, *buf); + + /* MAC addr mask copied as little endian each 4 bytes */ + *buf = ipa_write_8(mac_addr_mask[3], *buf); + *buf = ipa_write_8(mac_addr_mask[2], *buf); + *buf = ipa_write_8(mac_addr_mask[1], *buf); + *buf = ipa_write_8(mac_addr_mask[0], *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_8(mac_addr_mask[5], *buf); + *buf = 
ipa_write_8(mac_addr_mask[4], *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + + /* MAC addr copied as little endian each 4 bytes */ + *buf = ipa_write_8(mac_addr[3], *buf); + *buf = ipa_write_8(mac_addr[2], *buf); + *buf = ipa_write_8(mac_addr[1], *buf); + *buf = ipa_write_8(mac_addr[0], *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_8(mac_addr[5], *buf); + *buf = ipa_write_8(mac_addr[4], *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_pad_to_32(*buf); +} + +/** + * ipa_generate_hw_rule() - generate HW rule + * @ip: IP address type + * @attrib: IPA rule attribute + * @buf: output buffer + * @en_rule: rule + * + * Return codes: + * 0: success + * -EPERM: wrong input + */ +int ipa_generate_hw_rule(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + + if (ip == IPA_IP_v4) { + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR || + attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask & + IPA_FLT_FLOW_LABEL) { + IPAERR("v6 attrib's specified for v4 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_TOS) { + *en_rule |= IPA_TOS_EQ; + *buf = ipa_write_8(attrib->u.v4.tos, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* 0 => offset of TOS in v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32((attrib->tos_mask << 16), *buf); + *buf = ipa_write_32((attrib->tos_value << 16), *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_PROTOCOL_EQ; + *buf = ipa_write_8(attrib->u.v4.protocol, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* 12 => offset of src ip in v4 header */ + *buf = ipa_write_8(12, *buf); + *buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf); + *buf = ipa_write_32(attrib->u.v4.src_addr, *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* 16 => offset of dst ip in v4 header */ + *buf = ipa_write_8(16, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* -2 => offset of ether type in L2 hdr */ + *buf = ipa_write_8((u8)-2, *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR("bad src port range 
param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port_hi, *buf); + *buf = ipa_write_16(attrib->src_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v4 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port_hi, *buf); + *buf = ipa_write_16(attrib->dst_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of type after v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->type, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 1 => offset of code after v4 header */ + *buf = ipa_write_8(1, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->code, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of SPI after v4 header FIXME */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFFFFFFFF, *buf); + *buf = ipa_write_32(attrib->spi, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v4 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + 
*en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + *buf = ipa_write_8(0, *buf); /* offset, reserved */ + *buf = ipa_write_32(attrib->meta_data_mask, *buf); + *buf = ipa_write_32(attrib->meta_data, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + *buf = ipa_pad_to_32(*buf); + } + } else if (ip == IPA_IP_v6) { + + /* v6 code below assumes no extension headers TODO: fix this */ + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_TOS || + attrib->attrib_mask & IPA_FLT_PROTOCOL) { + IPAERR("v4 attrib's specified for v6 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_PROTOCOL_EQ; + *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* -2 => offset of ether type in L2 hdr */ + *buf = ipa_write_8((u8)-2, *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of type after v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->type, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 1 => offset of code after v6 header */ + *buf = ipa_write_8(1, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->code, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of SPI after v6 header FIXME */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFFFFFFFF, *buf); + *buf = ipa_write_32(attrib->spi, *buf); + *buf = ipa_pad_to_32(*buf); + 
ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 22 => offset of IP type after v6 header */ + *buf = ipa_write_8(22, *buf); + *buf = ipa_write_32(0xF0000000, *buf); + if (attrib->type == 0x40) + *buf = ipa_write_32(0x40000000, *buf); + else + *buf = ipa_write_32(0x60000000, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 38 => offset of inner IPv4 addr */ + *buf = ipa_write_8(38, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v6 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR("bad src port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port_hi, *buf); + *buf = ipa_write_16(attrib->src_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v6 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port_hi, *buf); + *buf = ipa_write_16(attrib->dst_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + /* 8 => offset of src ip in v6 header */ + *buf = ipa_write_8(8, *buf); + *buf = 
ipa_write_32(attrib->u.v6.src_addr_mask[0], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + /* 24 => offset of dst ip in v6 header */ + *buf = ipa_write_8(24, *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_FLT_TC; + *buf = ipa_write_8(attrib->u.v6.tc, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + /* 0 => offset of TOS in v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32((attrib->tos_mask << 20), *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + + *buf = ipa_write_32((attrib->tos_value << 20), *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -16, + 
attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_FLT_FLOW_LABEL; + /* FIXME FL is only 20 bits */ + *buf = ipa_write_32(attrib->u.v6.flow_label, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + *buf = ipa_write_8(0, *buf); /* offset, reserved */ + *buf = ipa_write_32(attrib->meta_data_mask, *buf); + *buf = ipa_write_32(attrib->meta_data, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + *buf = ipa_pad_to_32(*buf); + } + } else { + IPAERR("unsupported ip %d\n", ip); + return -EPERM; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + IPADBG_LOW("building default rule\n"); + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + *buf = ipa_write_8(0, *buf); /* offset */ + *buf = ipa_write_32(0, *buf); /* mask */ + *buf = ipa_write_32(0, *buf); /* val */ + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + return 0; +} + +static void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb, + u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128) +{ + eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset; + eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3]; + eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2]; + eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1]; + eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0]; + eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0; + eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0; + eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5]; + eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4]; + memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8); + eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3]; + eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2]; + eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1]; + eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0]; + eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0; + eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0; + eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5]; + eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4]; + memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8); +} + +int ipa_generate_flt_eq(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + u16 eq_bitmap = 0; + u16 *en_rule = &eq_bitmap; + + if (ip == IPA_IP_v4) { + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR || + attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask & + IPA_FLT_FLOW_LABEL) { + IPAERR_RL("v6 attrib's specified for v4 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_TOS) { + *en_rule |= IPA_TOS_EQ; + eq_atrb->tos_eq_present = 1; + eq_atrb->tos_eq = attrib->u.v4.tos; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + 
eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->tos_mask << 16; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->tos_value << 16; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_PROTOCOL_EQ; + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v4.protocol; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 12; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.src_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.src_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 16; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.dst_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR_RL("bad src port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR_RL("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + 
eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + eq_atrb->ipv4_frag_eq_present = 1; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + } else if (ip == IPA_IP_v6) { + + /* v6 code below assumes no extension headers TODO: fix this */ + + /* error check */ + if 
(attrib->attrib_mask & IPA_FLT_TOS || + attrib->attrib_mask & IPA_FLT_PROTOCOL) { + IPAERR_RL("v4 attrib's specified for v6 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_PROTOCOL_EQ; + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v6.next_hdr; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 22 => offset of inner IP type after v6 header */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xF0000000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (u32)attrib->type << 24; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 38 => offset of inner IPv4 addr */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->u.v4.dst_addr; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) 
{ + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR_RL("bad src port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR_RL("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + eq_atrb->offset_meq_128[ofst_meq128].offset = 8; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.src_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.src_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.src_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.src_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.src_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.src_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.src_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.src_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + eq_atrb->offset_meq_128[ofst_meq128].offset = 24; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.dst_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.dst_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.dst_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.dst_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.dst_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.dst_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.dst_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.dst_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_FLT_TC; + eq_atrb->tc_eq_present = 1; + eq_atrb->tc_eq = attrib->u.v6.tc; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + eq_atrb->offset_meq_128[ofst_meq128].offset = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask 
+ 0) + = attrib->tos_mask << 20; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->tos_value << 20; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = 0; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_FLT_FLOW_LABEL; + eq_atrb->fl_eq_present = 1; + eq_atrb->fl_eq = attrib->u.v6.flow_label; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + eq_atrb->ipv4_frag_eq_present = 1; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + + } else { + IPAERR_RL("unsupported ip %d\n", ip); + return -EPERM; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + 
eq_atrb->offset_meq_32[ofst_meq32].mask = 0; + eq_atrb->offset_meq_32[ofst_meq32].value = 0; + ofst_meq32++; + } + + eq_atrb->rule_eq_bitmap = *en_rule; + eq_atrb->num_offset_meq_32 = ofst_meq32; + eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16; + eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32; + eq_atrb->num_offset_meq_128 = ofst_meq128; + + return 0; +} + +/** + * ipa2_cfg_ep - IPA end-point configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * This includes nat, header, mode, aggregation and route settings and is a one + * shot API to configure the IPA end-point fully + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) +{ + int result = -EINVAL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr); + if (result) + return result; + + result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext); + if (result) + return result; + + result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr); + if (result) + return result; + + result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg); + if (result) + return result; + + if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) { + result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat); + if (result) + return result; + + result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode); + if (result) + return result; + + result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route); + if (result) + return result; + + result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr); + if (result) + return result; + } else { + result = ipa2_cfg_ep_metadata_mask(clnt_hdl, + &ipa_ep_cfg->metadata_mask); + if (result) + return result; + } + + return 0; +} + +static const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en) +{ + switch (nat_en) { + case (IPA_BYPASS_NAT): + return "NAT disabled"; + case (IPA_SRC_NAT): + return "Source NAT"; + case (IPA_DST_NAT): + return "Dst NAT"; + } + + return "undefined"; +} + +static void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ep_nat) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en, + IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT, + IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl), + reg_val); +} + +static void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ep_nat) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en, + IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT, + IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_nat() - IPA end-point NAT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return 
-EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d, nat_en=%d(%s)\n", + clnt_hdl, + ep_nat->nat_en, + ipa_get_nat_en_str(ep_nat->nat_en)); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ep_status) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ep_status) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en, + IPA_ENDP_STATUS_n_STATUS_EN_SHFT, + IPA_ENDP_STATUS_n_STATUS_EN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep, + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT, + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_STATUS_n_OFST(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_status() - IPA end-point status configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, status_en=%d status_ep=%d\n", + clnt_hdl, + ep_status->status_en, + ep_status->status_ep); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].status = *ep_status; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *cfg) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *cfg) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK); + + ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, frag_ofld_en=%d 
cs_ofld_en=%d mdata_hdr_ofst=%d\n", + clnt_hdl, + cfg->frag_offload_en, + cfg->cs_offload_en, + cfg->cs_metadata_hdr_offset); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, metadata_mask=0x%x\n", + clnt_hdl, + metadata_mask->metadata_mask); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_hdr *ep_hdr) +{ + u32 val = 0; + + val = IPA_SETFIELD(ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK); + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val); +} + +static void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_hdr *ep_hdr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid, + IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2, + IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional, + 
IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2, + IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val); +} + +/** + * ipa2_cfg_ep_hdr() - IPA end-point header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n", + clnt_hdl, + ep_hdr->hdr_remove_additional, + ep_hdr->hdr_a5_mux, + ep_hdr->hdr_ofst_pkt_size); + + IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n", + ep_hdr->hdr_ofst_pkt_size_valid, + ep_hdr->hdr_additional_const_len); + + IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x", + ep_hdr->hdr_ofst_metadata, + ep_hdr->hdr_ofst_metadata_valid, + ep_hdr->hdr_len); + + ep = &ipa_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr = *ep_hdr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr) +{ + IPADBG("Not supported for version 1.1\n"); + return 0; +} + +static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val) +{ + u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 
0 : 1; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, hdr_endianness, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val); + + return 0; +} + +static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0); + + return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); +} + +static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5); + + return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); + +} + +static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5); + + return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); + +} + +/** + * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_hdr_ext: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d hdr_pad_to_alignment=%d\n", + clnt_hdl, + ep_hdr_ext->hdr_pad_to_alignment); + + IPADBG("hdr_total_len_or_pad_offset=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_offset); + + IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n", + ep_hdr_ext->hdr_payload_len_inc_padding, + ep_hdr_ext->hdr_total_len_or_pad); + + IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_valid, + ep_hdr_ext->hdr_little_endian); + + ep = &ipa_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr_ext = *ep_hdr_ext; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + 
return 0; +} + +/** + * ipa2_cfg_ep_hdr() - IPA end-point Control configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + u32 reg_val = 0; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) { + IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n", + clnt_hdl, + ep_ctrl->ipa_ep_suspend, + ep_ctrl->ipa_ep_delay); + + IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend, + IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT, + IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val); + + return 0; + +} + +/** + * ipa_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration + * @aggr_granularity: [in] defines the granularity of AGGR timers + * number of units of 1/32msec + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity) +{ + u32 reg_val = 0; + + if (aggr_granularity <= IPA_AGGR_GRAN_MIN || + aggr_granularity > IPA_AGGR_GRAN_MAX) { + IPAERR("bad param, aggr_granularity = %d\n", + aggr_granularity); + return -EINVAL; + } + IPADBG("aggr_granularity=%d\n", aggr_granularity); + + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST); + reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1, + IPA_COUNTER_CFG_AGGR_GRAN_SHFT, + IPA_COUNTER_CFG_AGGR_GRAN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_COUNTER_CFG_OFST, reg_val); + + return 0; + +} +EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity); + +/** + * ipa_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer + * configuration + * @eot_coal_granularity: defines the granularity of EOT_COAL timers + * number of units of 1/32msec + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity) +{ + u32 reg_val = 0; + + if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN || + eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) { + IPAERR("bad parm, eot_coal_granularity = %d\n", + eot_coal_granularity); + return -EINVAL; + } + IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity); + + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST); + reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1, + IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT, + IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_COUNTER_CFG_OFST, reg_val); + + return 0; + +} +EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity); + +static const char * const ipa_get_mode_type_str(enum ipa_mode_type mode) +{ + switch (mode) { + case (IPA_BASIC): + return "Basic"; + case (IPA_ENABLE_FRAMING_HDLC): + return "HDLC framing"; + case (IPA_ENABLE_DEFRAMING_HDLC): + return "HDLC de-framing"; + case (IPA_DMA): + return "DMA"; + } + + return "undefined"; +} + +static void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number, + const struct ipa_ep_cfg_mode *ep_mode) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode, + IPA_ENDP_INIT_MODE_N_MODE_SHFT, + IPA_ENDP_INIT_MODE_N_MODE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, 
dst_pipe_number, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val); +} + +static void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number, + const struct ipa_ep_cfg_mode *ep_mode) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode, + IPA_ENDP_INIT_MODE_N_MODE_SHFT, + IPA_ENDP_INIT_MODE_N_MODE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val); +} + +/** + * ipa2_cfg_ep_mode() - IPA end-point mode configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) +{ + int ep; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + ep = ipa2_get_ep_mapping(ep_mode->dst); + if (ep == -1 && ep_mode->mode == IPA_DMA) { + IPAERR("dst %d does not exist\n", ep_mode->dst); + return -EINVAL; + } + + WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst)); + + if (!IPA_CLIENT_IS_CONS(ep_mode->dst)) + ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + + IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d", + clnt_hdl, + ep_mode->mode, + ipa_get_mode_type_str(ep_mode->mode), + ep_mode->dst); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode; + ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl, + ipa_ctx->ep[clnt_hdl].dst_pipe_index, + ep_mode); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static const char * const get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) +{ + switch (aggr_en) { + case (IPA_BYPASS_AGGR): + return "no aggregation"; + case (IPA_ENABLE_AGGR): + return "aggregation enabled"; + case (IPA_ENABLE_DEAGGR): + return "de-aggregation enabled"; + } + + return "undefined"; +} + +static const char * const get_aggr_type_str(enum ipa_aggr_type aggr_type) +{ + switch (aggr_type) { + case (IPA_MBIM_16): + return "MBIM_16"; + case (IPA_HDLC): + return "HDLC"; + case (IPA_TLP): + return "TLP"; + case (IPA_RNDIS): + return "RNDIS"; + case (IPA_GENERIC): + return "GENERIC"; + case (IPA_QCMAP): + return "QCMAP"; + case (IPA_COALESCE): + return "COALESCE"; + } + return "undefined"; +} + +static void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_aggr *ep_aggr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit, + 
IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val); +} + +static void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_aggr *ep_aggr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val); +} + +/** + * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n", + clnt_hdl, + ep_aggr->aggr_en, + get_aggr_enable_str(ep_aggr->aggr_en), + ep_aggr->aggr, + get_aggr_type_str(ep_aggr->aggr), + ep_aggr->aggr_byte_limit, + ep_aggr->aggr_time_limit); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index) +{ + int reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index), + reg_val); +} + +static void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index) +{ + int reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index), + reg_val); +} + +/** + * ipa2_cfg_ep_route() - IPA end-point routing configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_route(u32 clnt_hdl, const 
struct ipa_ep_cfg_route *ep_route) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("ROUTE does not apply to IPA out EP %d\n", + clnt_hdl); + return -EINVAL; + } + + /* + * if DMA mode was configured previously for this EP, return with + * success + */ + if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) { + IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n", + clnt_hdl); + return 0; + } + + if (ep_route->rt_tbl_hdl) + IPAERR("client specified non-zero RT TBL hdl - ignore it\n"); + + IPADBG("pipe=%d, rt_tbl_hdl=%d\n", + clnt_hdl, + ep_route->rt_tbl_hdl); + + /* always use "default" routing table when programming EP ROUTE reg */ + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) + ipa_ctx->ep[clnt_hdl].rt_tbl_idx = + IPA_MEM_PART(v4_apps_rt_index_lo); + else + ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl, + ipa_ctx->ep[clnt_hdl].rt_tbl_idx); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_holb_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number), + (u16)ep_holb->tmr_val); +} + +static void _ipa_cfg_ep_holb_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), + (u16)ep_holb->tmr_val); +} + +static void _ipa_cfg_ep_holb_v2_5(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), + ep_holb->tmr_val); +} + +static void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), + ep_holb->tmr_val); +} + +/** + * ipa2_cfg_ep_holb() - IPA end-point holb configuration + * + * If an IPA producer pipe is full, IPA HW by default will block + * indefinitely till space opens up. During this time no packets + * including those from unrelated pipes will be processed. Enabling + * HOLB means IPA HW will be allowed to drop packets as/when needed + * and indefinite blocking is avoided. 
+ * + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL || + ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val || + ep_holb->en > 1) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl); + return -EINVAL; + } + + if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) { + IPAERR("HOLB is not supported for this IPA core\n"); + return -EINVAL; + } + + ipa_ctx->ep[clnt_hdl].holb = *ep_holb; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl, + ep_holb->tmr_val); + + return 0; +} + +/** + * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration + * + * Wrapper function for ipa_cfg_ep_holb() with client name instead of + * client handle. This function is used for clients that does not have + * client handle. + * + * @client: [in] client name + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb); +} + +static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + IPADBG("Not supported for version 1.1\n"); + return 0; +} + +static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val); + + return 0; +} + +/** + * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_deaggr: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d deaggr_hdr_len=%d\n", + clnt_hdl, + ep_deaggr->deaggr_hdr_len); + + IPADBG("packet_offset_valid=%d\n", + ep_deaggr->packet_offset_valid); + + IPADBG("packet_offset_location=%d max_packet_len=%d\n", + 
ep_deaggr->packet_offset_location, + ep_deaggr->max_packet_len); + + ep = &ipa_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.deaggr = *ep_deaggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_metadata *meta) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_metadata *meta) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id, + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT, + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number), + reg_val); +} + +/** + * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +static int ipa2_cfg_ep_metadata(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata *ep_md) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md); + ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; + ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} +EXPORT_SYMBOL(ipa2_cfg_ep_metadata); + +int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + struct ipa_ep_cfg_metadata meta; + struct ipa_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (param_in->client >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parm client:%d\n", param_in->client); + goto fail; + } + + ipa_ep_idx = ipa2_get_ep_mapping(param_in->client); + if (ipa_ep_idx == -1) { + IPAERR_RL("Invalid client.\n"); + goto fail; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (!ep->valid) { + IPAERR_RL("EP not allocated.\n"); + goto fail; + } + + meta.qmap_id = param_in->qmap_id; + if (param_in->client == IPA_CLIENT_USB_PROD || + param_in->client == IPA_CLIENT_HSIC1_PROD || + param_in->client == IPA_CLIENT_ODU_PROD || + param_in->client == IPA_CLIENT_ETHERNET_PROD) { + result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta); + } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) { + ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta; + result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id); + if (result) + IPAERR_RL("qmap_id %d write failed on ep=%d\n", + meta.qmap_id, ipa_ep_idx); + result = 0; + } + +fail: + return result; +} + +/** + * ipa_dump_buff_internal() - dumps buffer for debug purposes + * @base: buffer base address + * @phy_base: buffer physical base address + * @size: size of the buffer + */ +void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size) +{ + int i; + u32 *cur = (u32 *)base; + u8 *byt; + + IPADBG("system phys 
addr=%pa len=%u\n", &phy_base, size); + for (i = 0; i < size / 4; i++) { + byt = (u8 *)(cur + i); + IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i), + byt[0], byt[1], byt[2], byt[3]); + } + IPADBG("END\n"); +} + +/** + * void ipa_rx_timeout_min_max_calc() - calc min max timeout time of rx polling + * @time: time fom dtsi entry or from debugfs file system + * @min: rx polling min timeout + * @max: rx polling max timeout + * Maximum time could be of 10Msec allowed. + */ +void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time) +{ + if ((time >= MIN_RX_POLL_TIME) && + (time <= MAX_RX_POLL_TIME)) { + *min = (time * MSEC) + LOWER_CUTOFF; + *max = (time * MSEC) + UPPER_CUTOFF; + } else { + /* Setting up the default min max time */ + IPADBG("Setting up default rx polling timeout\n"); + *min = (MIN_RX_POLL_TIME * MSEC) + + LOWER_CUTOFF; + *max = (MIN_RX_POLL_TIME * MSEC) + + UPPER_CUTOFF; + } + IPADBG("Rx polling timeout Min = %u len = %u\n", *min, *max); +} + +/** + * ipa_pipe_mem_init() - initialize the pipe memory + * @start_ofst: start offset + * @size: size + * + * Return value: + * 0: success + * -ENOMEM: no memory + */ +int ipa_pipe_mem_init(u32 start_ofst, u32 size) +{ + int res; + u32 aligned_start_ofst; + u32 aligned_size; + struct gen_pool *pool; + + if (!size) { + IPAERR("no IPA pipe memory allocated\n"); + goto fail; + } + + aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst); + aligned_size = size - (aligned_start_ofst - start_ofst); + + IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n", + start_ofst, aligned_start_ofst, size, aligned_size); + + /* allocation order of 8 i.e. 128 bytes, global pool */ + pool = gen_pool_create(8, -1); + if (!pool) { + IPAERR("Failed to create a new memory pool.\n"); + goto fail; + } + + res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1); + if (res) { + IPAERR("Failed to add memory to IPA pipe pool\n"); + goto err_pool_add; + } + + ipa_ctx->pipe_mem_pool = pool; + return 0; + +err_pool_add: + gen_pool_destroy(pool); +fail: + return -ENOMEM; +} + +/** + * ipa_pipe_mem_alloc() - allocate pipe memory + * @ofst: offset + * @size: size + * + * Return value: + * 0: success + */ +int ipa_pipe_mem_alloc(u32 *ofst, u32 size) +{ + u32 vaddr; + int res = -1; + + if (!ipa_ctx->pipe_mem_pool || !size) { + IPAERR("failed size=%u pipe_mem_pool=%p\n", size, + ipa_ctx->pipe_mem_pool); + return res; + } + + vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size); + + if (vaddr) { + *ofst = vaddr; + res = 0; + IPADBG("size=%u ofst=%u\n", size, vaddr); + } else { + IPAERR("size=%u failed\n", size); + } + + return res; +} + +/** + * ipa_pipe_mem_free() - free pipe memory + * @ofst: offset + * @size: size + * + * Return value: + * 0: success + */ +int ipa_pipe_mem_free(u32 ofst, u32 size) +{ + IPADBG("size=%u ofst=%u\n", size, ofst); + if (ipa_ctx->pipe_mem_pool && size) + gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size); + return 0; +} + +/** + * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting + * @mode: [in] the desired aggregation mode for e.g. 
straight MBIM, QCNCM,
+ * etc
+ *
+ * Returns: 0 on success
+ */
+int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	u32 reg_val;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
+			(reg_val & 0xfffffffe));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_set_qcncm_ndp_sig(char sig[3])
+{
+	u32 reg_val;
+
+	if (sig == NULL) {
+		IPAERR("bad argument for ipa_set_qcncm_ndp_sig\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
+			(sig[1] << 12) | (sig[2] << 4) |
+			(reg_val & 0xf000000f));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa2_set_single_ndp_per_mbim(bool enable)
+{
+	u32 reg_val;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
+			(enable & 0x1) | (reg_val & 0xfffffffe));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
+ * for MBIM aggregation.
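+ * The fix is toggled through the HW_TIMER_FIX_MBIM_AGGR bit in the
+ * AGGREGATION_SPARE_REG_1 register.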
+ * @enable: [in] true for enable HW fix; false otherwise + * + * Returns: 0 on success + */ +int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable) +{ + u32 reg_val; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST); + ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, + (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) | + (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return 0; +} +EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr); + +/** + * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary + * @start: start address of the memory buffer + * @end: end address of the memory buffer + * @boundary: boundary + * + * Return value: + * 1: if the interval [start, end] straddles boundary + * 0: otherwise + */ +int ipa_straddle_boundary(u32 start, u32 end, u32 boundary) +{ + u32 next_start; + u32 prev_end; + + IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary); + + next_start = (start + (boundary - 1)) & ~(boundary - 1); + prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary; + + while (next_start < prev_end) + next_start += boundary; + + if (next_start == prev_end) + return 1; + else + return 0; +} + +/** + * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM + * + * Function is rate limited to avoid flooding kernel log buffer + */ +void ipa2_bam_reg_dump(void) +{ + static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1); + + if (__ratelimit(&_rs)) { + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + pr_err("IPA BAM START\n"); + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) { + sps_get_bam_debug_info(ipa_ctx->bam_handle, 5, + 511950, 0, 0); + sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0, + 0, 0); + } else { + sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, + (SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS)) + | + SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))), + 0, 2); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + } +} + +static void ipa_init_mem_partition_v2(void) +{ + IPADBG("Memory partition IPA 2\n"); + IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST; + IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE; + IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START; + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST; + IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE; + IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR; + IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size), + IPA_MEM_PART(v4_flt_size_ddr)); + + IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST; + IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE; + IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR; + IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size), + IPA_MEM_PART(v6_flt_size_ddr)); + + IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST; + IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst)); + + IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX; + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index)); + + IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI; + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + 
IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO; + IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI; + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE; + IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR; + IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size), + IPA_MEM_PART(v4_rt_size_ddr)); + + IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST; + IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst)); + + IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX; + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index)); + + IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI; + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO; + IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI; + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE; + IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR; + IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size), + IPA_MEM_PART(v6_rt_size_ddr)); + + IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST; + IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE; + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST; + IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE; + IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR; + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST; + IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE; + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST; + IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE; + IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size)); + + IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST; + IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE; + IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size)); + + IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST; + IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE; + IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst), + IPA_MEM_PART(uc_info_size)); + + IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST; + IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST; + IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE; + IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST; + IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE; +} + +static void ipa_init_mem_partition_v2_5(void) +{ + IPADBG("Memory partition IPA 2.5\n"); + IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST; + IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE; + IPADBG("NAT OFST 
0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST; + IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE; + IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst), + IPA_MEM_PART(uc_info_size)); + + IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START; + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST; + IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE; + IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR; + IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size), + IPA_MEM_PART(v4_flt_size_ddr)); + + IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST; + IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE; + IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR; + IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size), + IPA_MEM_PART(v6_flt_size_ddr)); + + IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST; + IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst)); + + IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX; + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index)); + + IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI; + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO; + IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI; + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE; + IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR; + IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size), + IPA_MEM_PART(v4_rt_size_ddr)); + + IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST; + IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst)); + + IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX; + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index)); + + IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI; + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO; + IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI; + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE; + IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR; + IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size), + IPA_MEM_PART(v6_rt_size_ddr)); + + IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST; + IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE; + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST; + IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE; + IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR; + IPADBG("APPS HDR OFST 0x%x 
SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) = + IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST; + IPA_MEM_PART(modem_hdr_proc_ctx_size) = + IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE; + IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_proc_ctx_ofst), + IPA_MEM_PART(modem_hdr_proc_ctx_size)); + + IPA_MEM_PART(apps_hdr_proc_ctx_ofst) = + IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST; + IPA_MEM_PART(apps_hdr_proc_ctx_size) = + IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE; + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) = + IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR; + IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_proc_ctx_ofst), + IPA_MEM_PART(apps_hdr_proc_ctx_size), + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr)); + + IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST; + IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE; + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST; + IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE; + IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size)); + + IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST; + IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE; + IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size)); + + IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST; + IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST; + IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE; + IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST; + IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE; +} + +static void ipa_init_mem_partition_v2_6L(void) +{ + IPADBG("Memory partition IPA 2.6Lite\n"); + IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST; + IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE; + IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST; + IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE; + IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst), + IPA_MEM_PART(uc_info_size)); + + IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START; + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST; + IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE; + IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR; + IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size), + IPA_MEM_PART(v4_flt_size_ddr)); + + IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST; + IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE; + IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR; + IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size), + IPA_MEM_PART(v6_flt_size_ddr)); + + IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST; + IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst)); + + IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX; + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index)); + + IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO; + 
IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI; + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO; + IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI; + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE; + IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR; + IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size), + IPA_MEM_PART(v4_rt_size_ddr)); + + IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST; + IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst)); + + IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX; + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index)); + + IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI; + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO; + IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI; + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE; + IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR; + IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size), + IPA_MEM_PART(v6_rt_size_ddr)); + + IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST; + IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE; + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST; + IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE; + IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR; + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + IPA_MEM_PART(modem_comp_decomp_ofst) = + IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST; + IPA_MEM_PART(modem_comp_decomp_size) = + IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE; + IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_comp_decomp_ofst), + IPA_MEM_PART(modem_comp_decomp_size)); + + IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST; + IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE; + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST; + IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE; + IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size)); + + IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST; + IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE; + IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size)); + + IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST; + IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST; + IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE; + 
IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
+	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
+}
+
+/**
+ * ipa_controller_shared_static_bind() - set the appropriate shared methods
+ * for IPA HW version 2.0, 2.5, 2.6 and 2.6L
+ *
+ * @ctrl: data structure which holds the function pointers
+ */
+static void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
+{
+	ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
+	ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
+	ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
+	ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
+	ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
+	ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
+	ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
+	ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
+	ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
+	ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
+	ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
+	ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
+	ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
+	ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0;
+	ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
+	ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
+	ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
+	ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
+	ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
+	ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
+	ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
+	ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
+	ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
+	ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
+	ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
+	ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
+	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
+	ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
+	ctrl->clock_scaling_bw_threshold_nominal =
+		IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
+	ctrl->clock_scaling_bw_threshold_turbo =
+		IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
+}
+
+/**
+ * ipa_controller_static_bind() - set the appropriate methods for
+ * the IPA driver based on the HW version
+ *
+ * @ctrl: data structure which holds the function pointers
+ * @hw_type: the HW type in use
+ *
+ * This function can avoid the runtime assignment by using C99 special
+ * struct initialization - hard decision... 
time.vs.mem + */ +int ipa_controller_static_bind(struct ipa_controller *ctrl, + enum ipa_hw_type hw_type) +{ + switch (hw_type) { + case (IPA_HW_v1_1): + ipa_init_mem_partition_v2(); + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1; + ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1; + ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1; + ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1; + ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1; + ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1; + ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1; + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1; + ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1; + ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1; + ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1; + ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1; + ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE; + ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE; + ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE; + ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1; + ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1; + ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1; + ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1; + ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1; + ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1; + ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1; + ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1; + ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1; + ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0; + ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL; + break; + case (IPA_HW_v2_0): + ipa_init_mem_partition_v2(); + ipa_controller_shared_static_bind(ctrl); + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0; + ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0; + ctrl->ipa_init_sram = _ipa_init_sram_v2; + ctrl->ipa_init_hdr = _ipa_init_hdr_v2; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2; + ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2; + break; + case (IPA_HW_v2_5): + ipa_init_mem_partition_v2_5(); + ipa_controller_shared_static_bind(ctrl); + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5; + ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5; + ctrl->ipa_init_sram = _ipa_init_sram_v2_5; + ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5; + ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5; + break; + case (IPA_HW_v2_6L): + ipa_init_mem_partition_v2_6L(); + ipa_controller_shared_static_bind(ctrl); + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L; + ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L; + ctrl->ipa_init_sram = _ipa_init_sram_v2_6L; + ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L; + ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L; + break; + default: + return -EPERM; + } + + return 0; +} + +void ipa_skb_recycle(struct sk_buff *skb) +{ + struct skb_shared_info *shinfo; + + 
shinfo = skb_shinfo(skb); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->data = skb->head + NET_SKB_PAD; + skb_reset_tail_pointer(skb); +} + +int ipa_id_alloc(void *ptr) +{ + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&ipa_ctx->idr_lock); + id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT); + spin_unlock(&ipa_ctx->idr_lock); + idr_preload_end(); + + return id; +} + +void *ipa_id_find(u32 id) +{ + void *ptr; + + spin_lock(&ipa_ctx->idr_lock); + ptr = idr_find(&ipa_ctx->ipa_idr, id); + spin_unlock(&ipa_ctx->idr_lock); + + return ptr; +} + +void ipa_id_remove(u32 id) +{ + spin_lock(&ipa_ctx->idr_lock); + idr_remove(&ipa_ctx->ipa_idr, id); + spin_unlock(&ipa_ctx->idr_lock); +} + +static void ipa_tag_free_buf(void *user1, int user2) +{ + kfree(user1); +} + +static void ipa_tag_free_skb(void *user1, int user2) +{ + dev_kfree_skb_any((struct sk_buff *)user1); +} + +#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4 + +/* ipa_tag_process() - Initiates a tag process. Incorporates the input + * descriptors + * + * @desc: descriptors with commands for IC + * @desc_size: amount of descriptors in the above variable + * + * Note: The descriptors are copied (if there's room), the client needs to + * free his descriptors afterwards + * + * Return: 0 or negative in case of failure + */ +int ipa_tag_process(struct ipa_desc desc[], + int descs_num, + unsigned long timeout) +{ + struct ipa_sys_context *sys; + struct ipa_desc *tag_desc; + int desc_idx = 0; + struct ipa_ip_packet_init *pkt_init; + struct ipa_register_write *reg_write_nop; + struct ipa_ip_packet_tag_status *status; + int i; + struct sk_buff *dummy_skb; + int res; + struct ipa_tag_completion *comp; + int ep_idx; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + /* Not enough room for the required descriptors for the tag process */ + if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) { + IPAERR("up to %d descriptors are allowed (received %d)\n", + IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS, + descs_num); + return -ENOMEM; + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + sys = ipa_ctx->ep[ep_idx].sys; + + tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag); + if (!tag_desc) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_alloc_desc; + } + + /* IP_PACKET_INIT IC for tag status to be sent to apps */ + pkt_init = kzalloc(sizeof(*pkt_init), flag); + if (!pkt_init) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_alloc_pkt_init; + } + + pkt_init->destination_pipe_index = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + + tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT; + tag_desc[desc_idx].pyld = pkt_init; + tag_desc[desc_idx].len = sizeof(*pkt_init); + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa_tag_free_buf; + tag_desc[desc_idx].user1 = pkt_init; + desc_idx++; + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE; + tag_desc[desc_idx].pyld = reg_write_nop; + tag_desc[desc_idx].len = sizeof(*reg_write_nop); + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa_tag_free_buf; + tag_desc[desc_idx].user1 = reg_write_nop; + desc_idx++; + + /* status IC */ + status = kzalloc(sizeof(*status), flag); + if (!status) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + + status->tag_f_2 = IPA_COOKIE; + + tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS; + tag_desc[desc_idx].pyld = status; + tag_desc[desc_idx].len = sizeof(*status); + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa_tag_free_buf; + tag_desc[desc_idx].user1 = status; + desc_idx++; + + /* Copy the required descriptors from the client now */ + if (desc) { + memcpy(&(tag_desc[desc_idx]), desc, descs_num * + sizeof(struct ipa_desc)); + desc_idx += descs_num; + } + + comp = kzalloc(sizeof(*comp), GFP_KERNEL); + if (!comp) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + init_completion(&comp->comp); + + /* completion needs to be released from both here and rx handler */ + atomic_set(&comp->cnt, 2); + + /* dummy packet to send to IPA. 
packet payload is a completion object */ + dummy_skb = alloc_skb(sizeof(comp), flag); + if (!dummy_skb) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_free_skb; + } + + memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp)); + + tag_desc[desc_idx].pyld = dummy_skb->data; + tag_desc[desc_idx].len = dummy_skb->len; + tag_desc[desc_idx].type = IPA_DATA_DESC_SKB; + tag_desc[desc_idx].callback = ipa_tag_free_skb; + tag_desc[desc_idx].user1 = dummy_skb; + desc_idx++; + + /* send all descriptors to IPA with single EOT */ + res = ipa_send(sys, desc_idx, tag_desc, true); + if (res) { + IPAERR("failed to send TAG packets %d\n", res); + res = -ENOMEM; + goto fail_send; + } + kfree(tag_desc); + tag_desc = NULL; + + IPADBG("waiting for TAG response\n"); + res = wait_for_completion_timeout(&comp->comp, timeout); + if (res == 0) { + IPAERR("timeout (%lu msec) on waiting for TAG response\n", + timeout); + WARN_ON(1); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + return -ETIME; + } + + IPADBG("TAG response arrived!\n"); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + + /* sleep for short period to ensure IPA wrote all packets to BAM */ + usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC); + + return 0; + +fail_send: + dev_kfree_skb_any(dummy_skb); + desc_idx--; +fail_free_skb: + kfree(comp); +fail_free_desc: + /* + * Free only the first descriptors allocated here. + * [pkt_init, status, nop] + * The user is responsible to free his allocations + * in case of failure. + * The min is required because we may fail during + * of the initial allocations above + */ + for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++) + kfree(tag_desc[i].user1); + +fail_alloc_pkt_init: + kfree(tag_desc); +fail_alloc_desc: + return res; +} + +/** + * ipa_tag_generate_force_close_desc() - generate descriptors for force close + * immediate command + * + * @desc: descriptors for IC + * @desc_size: desc array size + * @start_pipe: first pipe to close aggregation + * @end_pipe: last (non-inclusive) pipe to close aggregation + * + * Return: number of descriptors written or negative in case of failure + */ +static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[], + int desc_size, int start_pipe, int end_pipe) +{ + int i; + u32 aggr_init; + int desc_idx = 0; + int res; + struct ipa_register_write *reg_write_agg_close; + + for (i = start_pipe; i < end_pipe; i++) { + aggr_init = ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i)); + if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >> + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR) + continue; + IPADBG("Force close ep: %d\n", i); + if (desc_idx + 1 > desc_size) { + IPAERR("Internal error - no descriptors\n"); + res = -EFAULT; + goto fail_no_desc; + } + + reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close), + GFP_KERNEL); + if (!reg_write_agg_close) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_alloc_reg_write_agg_close; + } + + reg_write_agg_close->skip_pipeline_clear = 0; + reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i); + reg_write_agg_close->value = + (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + reg_write_agg_close->value_mask = + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + + desc[desc_idx].opcode = IPA_REGISTER_WRITE; + desc[desc_idx].pyld = reg_write_agg_close; + desc[desc_idx].len = sizeof(*reg_write_agg_close); + 
desc[desc_idx].type = IPA_IMM_CMD_DESC; + desc[desc_idx].callback = ipa_tag_free_buf; + desc[desc_idx].user1 = reg_write_agg_close; + ++desc_idx; + } + + return desc_idx; + +fail_alloc_reg_write_agg_close: + for (i = 0; i < desc_idx; ++i) + kfree(desc[desc_idx].user1); +fail_no_desc: + return res; +} + +/** + * ipa_tag_aggr_force_close() - Force close aggregation + * + * @pipe_num: pipe number or -1 for all pipes + */ +int ipa_tag_aggr_force_close(int pipe_num) +{ + struct ipa_desc *desc; + int res = -1; + int start_pipe; + int end_pipe; + int num_descs; + int num_aggr_descs; + + if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) { + IPAERR("Invalid pipe number %d\n", pipe_num); + return -EINVAL; + } + + if (pipe_num == -1) { + start_pipe = 0; + end_pipe = ipa_ctx->ipa_num_pipes; + } else { + start_pipe = pipe_num; + end_pipe = pipe_num + 1; + } + + num_descs = end_pipe - start_pipe; + + desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL); + if (!desc) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + /* Force close aggregation on all valid pipes with aggregation */ + num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs, + start_pipe, end_pipe); + if (num_aggr_descs < 0) { + IPAERR("ipa_tag_generate_force_close_desc failed %d\n", + num_aggr_descs); + goto fail_free_desc; + } + + res = ipa_tag_process(desc, num_aggr_descs, + IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT); + +fail_free_desc: + kfree(desc); + + return res; +} + +/** + * ipa2_is_ready() - check if IPA module was initialized + * successfully + * + * Return value: true for yes; false for no + */ +bool ipa2_is_ready(void) +{ + return (ipa_ctx != NULL) ? true : false; +} + +/** + * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle + * + * Return value: true for yes; false for no + */ +bool ipa2_is_client_handle_valid(u32 clnt_hdl) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return false; + } + + if (clnt_hdl >= 0 && clnt_hdl < ipa_ctx->ipa_num_pipes) + return true; + return false; +} + +/** + * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote + * + * Return value: none + */ +void ipa2_proxy_clk_unvote(void) +{ + if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE"); + ipa_ctx->q6_proxy_clk_vote_valid = false; + } +} + +/** + * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote + * + * Return value: none + */ +void ipa2_proxy_clk_vote(void) +{ + if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE"); + ipa_ctx->q6_proxy_clk_vote_valid = true; + } +} + + +/** + * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes + * + * Return value: u16 - number of IPA smem restricted bytes + */ +u16 ipa2_get_smem_restr_bytes(void) +{ + if (ipa_ctx) + return ipa_ctx->smem_restricted_bytes; + + IPAERR("IPA Driver not initialized\n"); + + return 0; +} + +/** + * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt + * + * Return value: true if modem configures embedded pipe flt, false otherwise + */ +bool ipa2_get_modem_cfg_emb_pipe_flt(void) +{ + if (ipa_ctx) + return ipa_ctx->modem_cfg_emb_pipe_flt; + + IPAERR("IPA driver has not been initialized\n"); + + return false; +} +/** + * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS + * + * Return value: enum ipa_transport_type + */ +static enum ipa_transport_type ipa2_get_transport_type(void) +{ + return IPA_TRANSPORT_TYPE_SPS; +} + +u32 
ipa_get_num_pipes(void) +{ + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) + return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST); + else + return IPA_MAX_NUM_PIPES; +} +EXPORT_SYMBOL(ipa_get_num_pipes); + +/** + * ipa2_disable_apps_wan_cons_deaggr()- + * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * + * Return value: 0 or negative in case of failure + */ +static int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, + uint32_t agg_count) +{ + int res = -1; + + /* checking if IPA-HW can support */ + if ((agg_size >> 10) > + IPA_AGGR_BYTE_LIMIT) { + IPAWANERR("IPA-AGG byte limit %d\n", + IPA_AGGR_BYTE_LIMIT); + IPAWANERR("exceed aggr_byte_limit\n"); + return res; + } + if (agg_count > + IPA_AGGR_PKT_LIMIT) { + IPAWANERR("IPA-AGG pkt limit %d\n", + IPA_AGGR_PKT_LIMIT); + IPAWANERR("exceed aggr_pkt_limit\n"); + return res; + } + + if (ipa_ctx) { + ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true; + return 0; + } + return res; +} + +static const struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info + (enum ipa_client_type client) +{ + IPAERR("Not supported for IPA 2.x\n"); + return NULL; +} + +static int ipa2_stop_gsi_channel(u32 clnt_hdl) +{ + IPAERR("Not supported for IPA 2.x\n"); + return -EFAULT; +} + +static void *ipa2_get_ipc_logbuf(void) +{ + if (ipa_ctx) + return ipa_ctx->logbuf; + + return NULL; +} + +static void *ipa2_get_ipc_logbuf_low(void) +{ + if (ipa_ctx) + return ipa_ctx->logbuf_low; + + return NULL; +} + +static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb) +{ + *holb = ipa_ctx->ep[ep_idx].holb; +} + +static int ipa2_generate_tag_process(void) +{ + int res; + + res = ipa_tag_process(NULL, 0, HZ); + if (res) + IPAERR("TAG process failed\n"); + + return res; +} + +static void ipa2_set_tag_process_before_gating(bool val) +{ + ipa_ctx->tag_process_before_gating = val; +} + +static bool ipa2_pm_is_used(void) +{ + return false; +} + +int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl) +{ + if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) { + IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type); + WARN_ON(1); + return -EPERM; + } + + api_ctrl->ipa_connect = ipa2_connect; + api_ctrl->ipa_disconnect = ipa2_disconnect; + api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint; + api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay; + api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint; + api_ctrl->ipa_cfg_ep = ipa2_cfg_ep; + api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat; + api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr; + api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext; + api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode; + api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr; + api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr; + api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route; + api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb; + api_ctrl->ipa_get_holb = ipa2_get_holb; + api_ctrl->ipa_set_tag_process_before_gating = + ipa2_set_tag_process_before_gating; + api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg; + api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask; + api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client; + api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl; + api_ctrl->ipa_add_hdr = ipa2_add_hdr; + api_ctrl->ipa_add_hdr_usr = ipa2_add_hdr_usr; + api_ctrl->ipa_del_hdr = ipa2_del_hdr; + api_ctrl->ipa_commit_hdr = ipa2_commit_hdr; + api_ctrl->ipa_reset_hdr = ipa2_reset_hdr; + api_ctrl->ipa_get_hdr = ipa2_get_hdr; + api_ctrl->ipa_put_hdr = ipa2_put_hdr; + api_ctrl->ipa_copy_hdr = ipa2_copy_hdr; + 
api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx; + api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx; + api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule; + api_ctrl->ipa_add_rt_rule_usr = ipa2_add_rt_rule_usr; + api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule; + api_ctrl->ipa_commit_rt = ipa2_commit_rt; + api_ctrl->ipa_reset_rt = ipa2_reset_rt; + api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl; + api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl; + api_ctrl->ipa_query_rt_index = ipa2_query_rt_index; + api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule; + api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule; + api_ctrl->ipa_add_flt_rule_usr = ipa2_add_flt_rule_usr; + api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule; + api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule; + api_ctrl->ipa_commit_flt = ipa2_commit_flt; + api_ctrl->ipa_reset_flt = ipa2_reset_flt; + api_ctrl->ipa_allocate_nat_device = ipa2_allocate_nat_device; + api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd; + api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd; + api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd; + api_ctrl->ipa_send_msg = ipa2_send_msg; + api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg; + api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg; + api_ctrl->ipa_register_intf = ipa2_register_intf; + api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext; + api_ctrl->ipa_deregister_intf = ipa2_deregister_intf; + api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode; + api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig; + api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim; + api_ctrl->ipa_tx_dp = ipa2_tx_dp; + api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul; + api_ctrl->ipa_free_skb = ipa2_free_skb; + api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe; + api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe; + api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls; + api_ctrl->ipa_sys_setup = ipa2_sys_setup; + api_ctrl->ipa_sys_teardown = ipa2_sys_teardown; + api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe; + api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe; + api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe; + api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe; + api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe; + api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe; + api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats; + api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes; + api_ctrl->ipa_broadcast_wdi_quota_reach_ind = + ipa2_broadcast_wdi_quota_reach_ind; + api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa; + api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB; + api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB; + api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping; + api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping; + api_ctrl->teth_bridge_init = ipa2_teth_bridge_init; + api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect; + api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect; + api_ctrl->ipa_set_client = ipa2_set_client; + api_ctrl->ipa_get_client = ipa2_get_client; + api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink; + api_ctrl->ipa_dma_init = ipa2_dma_init; + api_ctrl->ipa_dma_enable = ipa2_dma_enable; + api_ctrl->ipa_dma_disable = ipa2_dma_disable; + api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy; + api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy; + api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy; + api_ctrl->ipa_dma_destroy = ipa2_dma_destroy; + api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine; + 
api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe; + api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe; + api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel; + api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty; + api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process; + api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe; + api_ctrl->ipa_qmi_enable_force_clear_datapath_send = + qmi_enable_force_clear_datapath_send; + api_ctrl->ipa_qmi_disable_force_clear_datapath_send = + qmi_disable_force_clear_datapath_send; + api_ctrl->ipa_mhi_reset_channel_internal = + ipa2_mhi_reset_channel_internal; + api_ctrl->ipa_mhi_start_channel_internal = + ipa2_mhi_start_channel_internal; + api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info = + ipa2_uc_mhi_send_dl_ul_sync_info; + api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init; + api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel; + api_ctrl->ipa_uc_mhi_stop_event_update_channel = + ipa2_uc_mhi_stop_event_update_channel; + api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup; + api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats; + api_ctrl->ipa_uc_state_check = ipa2_uc_state_check; + api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id; + api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler; + api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler; + api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler; + api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump; + api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping; + api_ctrl->ipa_is_ready = ipa2_is_ready; + api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote; + api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote; + api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid; + api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping; + api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep; + api_ctrl->ipa_get_modem_cfg_emb_pipe_flt = + ipa2_get_modem_cfg_emb_pipe_flt; + api_ctrl->ipa_get_transport_type = ipa2_get_transport_type; + api_ctrl->ipa_ap_suspend = ipa2_ap_suspend; + api_ctrl->ipa_ap_resume = ipa2_ap_resume; + api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain; + api_ctrl->ipa_disable_apps_wan_cons_deaggr = + ipa2_disable_apps_wan_cons_deaggr; + api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev; + api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info; + api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel; + api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb; + api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks; + api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks; + api_ctrl->ipa_inc_client_enable_clks_no_block = + ipa2_inc_client_enable_clks_no_block; + api_ctrl->ipa_suspend_resource_no_block = + ipa2_suspend_resource_no_block; + api_ctrl->ipa_resume_resource = ipa2_resume_resource; + api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync; + api_ctrl->ipa_set_required_perf_profile = + ipa2_set_required_perf_profile; + api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf; + api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low; + api_ctrl->ipa_rx_poll = ipa2_rx_poll; + api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb; + api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes; + api_ctrl->ipa_tear_down_uc_offload_pipes = + ipa2_tear_down_uc_offload_pipes; + api_ctrl->ipa_get_pdev = ipa2_get_pdev; + api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB; + api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB; + 
api_ctrl->ipa_conn_wdi_pipes = ipa2_conn_wdi3_pipes;
+	api_ctrl->ipa_disconn_wdi_pipes = ipa2_disconn_wdi3_pipes;
+	api_ctrl->ipa_enable_wdi_pipes = ipa2_enable_wdi3_pipes;
+	api_ctrl->ipa_disable_wdi_pipes = ipa2_disable_wdi3_pipes;
+	api_ctrl->ipa_pm_is_used = ipa2_pm_is_used;
+
+	return 0;
+}
+
+/**
+ * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes.
+ *
+ * Return value: IPA_YELLOW_MARKER_SYS_CFG_OFST register if IPA_HW_v2.6L,
+ *		IPA_DEFAULT_SYS_YELLOW_WM otherwise.
+ */
+u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys)
+{
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L &&
+		ipa_ctx->ipa_uc_monitor_holb) {
+		return ipa_read_reg(ipa_ctx->mmio,
+			IPA_YELLOW_MARKER_SYS_CFG_OFST);
+	} else {
+		if (!sys)
+			return 0;
+
+		return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz;
+	}
+}
+EXPORT_SYMBOL(ipa_get_sys_yellow_wm);
+
+void ipa_suspend_apps_pipes(bool suspend)
+{
+	struct ipa_ep_cfg_ctrl cfg;
+	int ipa_ep_idx;
+	u32 lan_empty = 0, wan_empty = 0;
+	int ret;
+	struct sps_event_notify notify;
+	struct ipa_ep_context *ep;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.ipa_ep_suspend = suspend;
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		/* Check if the pipes are empty. */
+		ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty);
+		if (ret) {
+			IPAERR("%s: sps_is_pipe_empty failed with %d\n",
+				__func__, ret);
+		}
+		if (!lan_empty) {
+			IPADBG("LAN Cons is not-empty. Enter poll mode.\n");
+			notify.user = ep->sys;
+			notify.event_id = SPS_EVENT_EOT;
+			if (ep->sys->sps_callback)
+				ep->sys->sps_callback(&notify);
+		}
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	/* Considering the case for SSR. */
+	if (ipa_ep_idx == -1) {
+		IPADBG("Invalid client.\n");
+		return;
+	}
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		/* Check if the pipes are empty. */
+		ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty);
+		if (ret) {
+			IPAERR("%s: sps_is_pipe_empty failed with %d\n",
+				__func__, ret);
+		}
+		if (!wan_empty) {
+			IPADBG("WAN Cons is not-empty. Enter poll mode.\n");
+			notify.user = ep->sys;
+			notify.event_id = SPS_EVENT_EOT;
+			if (ep->sys->sps_callback)
+				ep->sys->sps_callback(&notify);
+		}
+	}
+}
+
+/**
+ * ipa2_get_pdev() - return a pointer to IPA dev struct
+ *
+ * Return value: a pointer to IPA dev struct
+ *
+ */
+struct device *ipa2_get_pdev(void)
+{
+	if (!ipa_ctx)
+		return NULL;
+
+	return ipa_ctx->pdev;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c
new file mode 100644
index 0000000000000000000000000000000000000000..3b670c92612c5272735e6eac2f695d9e72a016a6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ +#include "ipa_i.h" +#include + +#define IPA_HW_WDI3_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI3_TX_MBOX_START_INDEX 50 + +static int ipa_send_wdi3_setup_pipe_cmd( + u8 is_smmu_enabled, struct ipa_wdi_pipe_setup_info *info, + struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir) +{ + int ipa_ep_idx; + int result = 0, len; + unsigned long va; + struct ipa_mem_buffer cmd; + struct IpaHwWdi3SetUpCmdData_t *wdi3_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + + if (info == NULL || info_smmu == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_WDI3; + + if (!is_smmu_enabled) { + ipa_ep_idx = ipa_get_ep_mapping(info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", info->client, ipa_ep_idx); + IPADBG("ring_base_pa = 0x%pad\n", &info->transfer_ring_base_pa); + IPADBG("ring_size = %hu\n", info->transfer_ring_size); + IPADBG("ring_db_pa = 0x%pad\n", + &info->transfer_ring_doorbell_pa); + IPADBG("evt_ring_base_pa = 0x%pad\n", + &info->event_ring_base_pa); + IPADBG("evt_ring_size = %hu\n", info->event_ring_size); + IPADBG("evt_ring_db_pa = 0x%pad\n", + &info->event_ring_doorbell_pa); + IPADBG("num_pkt_buffers = %hu\n", info->num_pkt_buffers); + IPADBG("pkt_offset = %d\n", info->pkt_offset); + + wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params; + wdi3_params->transfer_ring_base_pa = + (u32)info->transfer_ring_base_pa; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)info->transfer_ring_base_pa >> 32); + wdi3_params->transfer_ring_size = info->transfer_ring_size; + wdi3_params->transfer_ring_doorbell_pa = + (u32)info->transfer_ring_doorbell_pa; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)info->transfer_ring_doorbell_pa >> 32); + wdi3_params->event_ring_base_pa = (u32)info->event_ring_base_pa; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)info->event_ring_base_pa >> 32); + wdi3_params->event_ring_size = info->event_ring_size; + wdi3_params->event_ring_doorbell_pa = + (u32)info->event_ring_doorbell_pa; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)info->event_ring_doorbell_pa >> 32); + wdi3_params->num_pkt_buffers = info->num_pkt_buffers; + wdi3_params->ipa_pipe_number = ipa_ep_idx; + wdi3_params->dir = dir; + wdi3_params->pkt_offset = info->pkt_offset; + memcpy(wdi3_params->desc_format_template, + info->desc_format_template, + sizeof(wdi3_params->desc_format_template)); + } else { + ipa_ep_idx = ipa_get_ep_mapping(info_smmu->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", info_smmu->client, ipa_ep_idx); + IPADBG("ring_size = %hu\n", info_smmu->transfer_ring_size); + IPADBG("ring_db_pa = 0x%pad\n", + &info_smmu->transfer_ring_doorbell_pa); + IPADBG("evt_ring_size = %hu\n", info_smmu->event_ring_size); + IPADBG("evt_ring_db_pa = 0x%pad\n", + &info_smmu->event_ring_doorbell_pa); + IPADBG("num_pkt_buffers = %hu\n", info_smmu->num_pkt_buffers); + IPADBG("pkt_offset = %d\n", info_smmu->pkt_offset); + + wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params; + + if (dir == IPA_WDI3_TX_DIR) { + len = info_smmu->transfer_ring_size; + if 
(ipa2_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + true, info->transfer_ring_base_pa, + &info_smmu->transfer_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_base_pa = (u32)va; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->transfer_ring_size = len; + + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_DB_RES, + true, info_smmu->transfer_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_doorbell_pa = + (u32)va; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + + len = info_smmu->event_ring_size; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + true, info->event_ring_base_pa, + &info_smmu->event_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_base_pa = (u32)va; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->event_ring_size = len; + + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + true, info_smmu->event_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_doorbell_pa = + (u32)va; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + } else { + len = info_smmu->transfer_ring_size; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + true, info->transfer_ring_base_pa, + &info_smmu->transfer_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_base_pa = (u32)va; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->transfer_ring_size = len; + + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + true, info_smmu->transfer_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_doorbell_pa = + (u32)va; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + + len = info_smmu->event_ring_size; + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_RES, true, + info->event_ring_base_pa, + &info_smmu->event_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_base_pa = (u32)va; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->event_ring_size = len; + + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, true, + info_smmu->event_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_doorbell_pa = + (u32)va; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + } + wdi3_params->num_pkt_buffers = info_smmu->num_pkt_buffers; + wdi3_params->ipa_pipe_number = ipa_ep_idx; + wdi3_params->dir = dir; + wdi3_params->pkt_offset = info_smmu->pkt_offset; + memcpy(wdi3_params->desc_format_template, + info_smmu->desc_format_template, + sizeof(wdi3_params->desc_format_template)); + } + + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("uc setup channel cmd failed: %d\n", result); + result = -EFAULT; + } + + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +int ipa2_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, + 
struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify) +{ + enum ipa_client_type rx_client; + enum ipa_client_type tx_client; + struct ipa_ep_context *ep_rx; + struct ipa_ep_context *ep_tx; + int ipa_ep_idx_rx; + int ipa_ep_idx_tx; + int result = 0; + + if (in == NULL || out == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + if (in->is_smmu_enabled == false) { + rx_client = in->u_rx.rx.client; + tx_client = in->u_tx.tx.client; + } else { + rx_client = in->u_rx.rx_smmu.client; + tx_client = in->u_tx.tx_smmu.client; + } + + ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client); + ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client); + + if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + if (ipa_ep_idx_rx >= IPA_MAX_NUM_PIPES || + ipa_ep_idx_tx >= IPA_MAX_NUM_PIPES) { + IPAERR("ep out of range.\n"); + return -EFAULT; + } + + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + + if (ep_rx->valid || ep_tx->valid) { + IPAERR("EP already allocated.\n"); + return -EFAULT; + } + + memset(ep_rx, 0, offsetof(struct ipa_ep_context, sys)); + memset(ep_tx, 0, offsetof(struct ipa_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (wdi_notify) + ipa_ctx->uc_wdi_ctx.stats_notify = wdi_notify; + else + IPADBG("wdi_notify is null\n"); + + /* setup rx ep cfg */ + ep_rx->valid = 1; + ep_rx->client = rx_client; + result = ipa_disable_data_path(ipa_ep_idx_rx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_rx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + ep_rx->client_notify = in->notify; + ep_rx->priv = in->priv; + + if (in->is_smmu_enabled == false) + memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg, + sizeof(ep_rx->cfg)); + else + memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg, + sizeof(ep_rx->cfg)); + + if (ipa_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) { + IPAERR("fail to setup rx pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled, + &in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR)) { + IPAERR("fail to send cmd to uc for rx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_install_dflt_flt_rules(ipa_ep_idx_rx); + out->rx_uc_db_pa = ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI3_RX_MBOX_START_INDEX/32, + IPA_HW_WDI3_RX_MBOX_START_INDEX % 32); + + IPADBG("client %d (ep: %d) connected\n", rx_client, + ipa_ep_idx_rx); + + /* setup tx ep cfg */ + ep_tx->valid = 1; + ep_tx->client = tx_client; + result = ipa_disable_data_path(ipa_ep_idx_tx); + if (result) { + IPAERR("disable data path failed res=%d ep=%d.\n", result, + ipa_ep_idx_tx); + result = -EFAULT; + goto fail; + } + + if (in->is_smmu_enabled == false) + memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg, + sizeof(ep_tx->cfg)); + else + memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg, + sizeof(ep_tx->cfg)); + + if (ipa_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) { + IPAERR("fail to setup tx pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled, + &in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR)) { + IPAERR("fail to send cmd to uc for tx pipe\n"); + result = -EFAULT; + goto fail; + } + out->tx_uc_db_pa = ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI3_TX_MBOX_START_INDEX/32, + IPA_HW_WDI3_TX_MBOX_START_INDEX % 32); + IPADBG("client %d (ep: %d) connected\n", tx_client, + ipa_ep_idx_tx); + 
+fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +static int ipa_send_wdi3_common_ch_cmd(int ipa_ep_idx, int command) +{ + struct ipa_mem_buffer cmd; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + union IpaHwWdi3CommonChCmdData_t *wdi3; + int result = 0; + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + /* enable the TX pipe */ + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_WDI3; + + wdi3 = &cmd_data->CommonCh_params.Wdi3CommonCh_params; + wdi3->params.ipa_pipe_number = ipa_ep_idx; + result = ipa_uc_send_cmd((u32)(cmd.phys_base), command, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + result = -EFAULT; + goto fail; + } + +fail: + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +int ipa2_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA_MAX_NUM_PIPES || + ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA_MAX_NUM_PIPES) { + IPAERR("invalid ipa ep index\n"); + return -EINVAL; + } + + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + + /* tear down tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) { + IPAERR("fail to tear down tx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_disable_data_path(ipa_ep_idx_tx); + memset(ep_tx, 0, sizeof(struct ipa_ep_context)); + IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx); + + /* tear down rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) { + IPAERR("fail to tear down rx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_disable_data_path(ipa_ep_idx_rx); + ipa_delete_dflt_flt_rules(ipa_ep_idx_rx); + memset(ep_rx, 0, sizeof(struct ipa_ep_context)); + IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx); + +fail: + return result; +} + +int ipa2_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + + /* enable tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) { + IPAERR("fail to enable tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* resume tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) { + IPAERR("fail to resume tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* enable rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) { + IPAERR("fail to enable rx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* resume rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) { + IPAERR("fail to resume rx pipe\n"); + result = -EFAULT; + goto fail; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* enable data path */ + result = ipa_enable_data_path(ipa_ep_idx_rx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + 
ipa_ep_idx_rx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + + result = ipa_enable_data_path(ipa_ep_idx_tx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_tx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +fail: + return result; +} + +int ipa2_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + + /* suspend tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) { + IPAERR("fail to suspend tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* disable tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) { + IPAERR("fail to disable tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* suspend rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) { + IPAERR("fail to suspend rx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* disable rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) { + IPAERR("fail to disable rx pipe\n"); + result = -EFAULT; + goto fail; + } + +fail: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c new file mode 100644 index 0000000000000000000000000000000000000000..418a142a29113b79de8ed273e7ed2b0d9ee64b53 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -0,0 +1,3262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + */ + +/* + * WWAN Transport Network Driver. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include +#include +#include +#include + +#include "ipa_trace.h" + +#define WWAN_METADATA_SHFT 24 +#define WWAN_METADATA_MASK 0xFF000000 +#define WWAN_DATA_LEN 2000 +#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */ +#define HEADROOM_FOR_QMAP 8 /* for mux header */ +#define TAILROOM 0 /* for padding by mux layer */ +#define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */ +#define UL_FILTER_RULE_HANDLE_START 69 +#define DEFAULT_OUTSTANDING_HIGH_CTL 96 +#define DEFAULT_OUTSTANDING_HIGH 64 +#define DEFAULT_OUTSTANDING_LOW 32 + +#define IPA_WWAN_DEV_NAME "rmnet_ipa%d" +#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0" +#define IPA_UPSTEAM_WLAN1_IFACE_NAME "wlan1" + +#define IPA_WWAN_DEVICE_COUNT (1) + +#define IPA_WWAN_RX_SOFTIRQ_THRESH 16 + +#define INVALID_MUX_ID 0xFF +#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64 +#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64 +#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */ + +#define NAPI_WEIGHT 60 +#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024 + +static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT]; +static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg; +static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl; +static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL]; +static int num_q6_rule, old_num_q6_rule; +static int rmnet_index; +static bool egress_set, a7_ul_flt_set; +static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/ +static atomic_t is_initialized; +static atomic_t is_ssr; +static void *subsys_notify_handle; + +static u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */ +static struct mutex ipa_to_apps_pipe_handle_guard; +static struct mutex add_mux_channel_lock; +static int wwan_add_ul_flt_rule_to_ipa(void); +static int wwan_del_ul_flt_rule_to_ipa(void); +static void ipa_wwan_msg_free_cb(void*, u32, u32); +static void ipa_rmnet_rx_cb(void *priv); +static int ipa_rmnet_poll(struct napi_struct *napi, int budget); + +static void wake_tx_queue(struct work_struct *work); +static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue); + +static void tethering_stats_poll_queue(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work, + tethering_stats_poll_queue); + +enum wwan_device_status { + WWAN_DEVICE_INACTIVE = 0, + WWAN_DEVICE_ACTIVE = 1 +}; + +struct ipa_rmnet_plat_drv_res { + bool ipa_rmnet_ssr; + bool ipa_loaduC; + bool ipa_advertise_sg_support; + bool ipa_napi_enable; + u32 wan_rx_desc_size; +}; + +static struct ipa_rmnet_plat_drv_res ipa_rmnet_res; +/** + * struct wwan_private - WWAN private data + * @net: network interface struct implemented by this driver + * @stats: iface statistics + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * @ch_id: channel id + * @lock: spinlock for mutual exclusion + * @device_status: holds device status + * + * WWAN private - holds all relevant info about WWAN driver + */ +struct wwan_private { + struct net_device *net; + struct net_device_stats stats; + atomic_t outstanding_pkts; + int outstanding_high_ctl; + int outstanding_high; + int outstanding_low; + uint32_t ch_id; + spinlock_t lock; + struct completion resource_granted_completion; + enum 
wwan_device_status device_status; + struct napi_struct napi; +}; + +/** + * ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa_setup_a7_qmap_hdr(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + u32 pyld_sz; + int ret; + + /* install the basic exception header */ + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) { + IPAWANERR("fail to alloc exception hdr\n"); + return -ENOMEM; + } + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + + if (ipa2_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + qmap_hdr_hdl = hdr_entry->hdr_hdl; + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static void ipa_del_a7_qmap_hdr(void) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = qmap_hdr_hdl; + + ret = ipa2_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa2_del_hdr failed\n"); + else + IPAWANDBG("hdrs deletion done\n"); + + qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa_del_qmap_hdr(uint32_t hdr_hdl) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + if (hdr_hdl == 0) { + IPAWANERR("Invalid hdr_hdl provided\n"); + return; + } + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = hdr_hdl; + + ret = ipa2_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa2_del_hdr failed\n"); + else + IPAWANDBG("header deletion done\n"); + + qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa_del_mux_qmap_hdrs(void) +{ + int index; + + for (index = 0; index < rmnet_index; index++) { + ipa_del_qmap_hdr(mux_channel[index].hdr_hdl); + mux_channel[index].hdr_hdl = 0; + } +} + +static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + char hdr_name[IPA_RESOURCE_NAME_MAX]; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) { + IPAWANERR("fail to alloc exception hdr\n"); + return -ENOMEM; + } + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_id); + strlcpy(hdr_entry->name, hdr_name, + IPA_RESOURCE_NAME_MAX); + + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + hdr_entry->hdr[1] = (uint8_t) mux_id; + IPAWANDBG("header (%s) with mux-id: (%d)\n", + hdr_name, + hdr_entry->hdr[1]); + if (ipa2_add_hdr(hdr)) { + 
IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; + *hdr_hdl = hdr_entry->hdr_hdl; +bail: + kfree(hdr); + return ret; +} + +/** + * ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa_setup_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) { + IPAWANERR("fail to alloc mem\n"); + return -ENOMEM; + } + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS; + rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl; + + if (ipa2_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + + IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa2_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + kfree(rt_rule); + return 0; +} + +static void ipa_del_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_del_rt_rule *rt_rule; + struct ipa_rt_rule_del *rt_rule_entry; + int len; + + len = sizeof(struct ipa_ioc_del_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_del); + rt_rule = kzalloc(len, GFP_KERNEL); + if (!rt_rule) { + IPAWANERR("unable to allocate memory for del route rule\n"); + return; + } + + memset(rt_rule, 0, len); + rt_rule->commit = 1; + rt_rule->num_hdls = 1; + rt_rule->ip = IPA_IP_v4; + + rt_rule_entry = &rt_rule->hdl[0]; + rt_rule_entry->status = -1; + rt_rule_entry->hdl = dflt_v4_wan_rt_hdl; + + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v4); + if (ipa2_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed!\n"); + } + + rt_rule->ip = IPA_IP_v6; + rt_rule_entry->hdl = dflt_v6_wan_rt_hdl; + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v6); + if (ipa2_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed!\n"); + } + + kfree(rt_rule); +} + +int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req, uint32_t *rule_hdl) +{ + int i, j; + + /* prevent multi-threads accessing num_q6_rule */ + mutex_lock(&add_mux_channel_lock); + if (rule_req->filter_spec_list_valid == true) { + num_q6_rule = rule_req->filter_spec_list_len; + IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule); + } else { + num_q6_rule = 0; + IPAWANERR("got no UL rules from modem\n"); + mutex_unlock(&add_mux_channel_lock); + return -EINVAL; + } + + /* copy UL filter rules from Modem*/ + for (i = 0; i < num_q6_rule; i++) { + /* check if rules overside the cache*/ + if (i == MAX_NUM_Q6_RULE) { + IPAWANERR("Reaching (%d) max cache ", + 
				MAX_NUM_Q6_RULE);
+			IPAWANERR(" however total (%d)\n",
+				num_q6_rule);
+			goto failure;
+		}
+		/* construct UL_filter_rule handler QMI use-cas */
+		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
+			UL_FILTER_RULE_HANDLE_START + i;
+		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
+		switch (rule_req->filter_spec_list[i].ip_type) {
+		case QMI_IPA_IP_TYPE_V4_V01:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].ip = IPA_IP_v4;
+			break;
+
+		case QMI_IPA_IP_TYPE_V6_V01:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].ip = IPA_IP_v6;
+			break;
+
+		case QMI_IPA_IP_TYPE_V4V6_V01:
+			/* Fall through */
+		default:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].ip = IPA_IP_MAX;
+			break;
+		}
+
+		switch (rule_req->filter_spec_list[i].filter_action) {
+		case QMI_IPA_FILTER_ACTION_SRC_NAT_V01:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].action =
+				IPA_PASS_TO_SRC_NAT;
+			break;
+		case QMI_IPA_FILTER_ACTION_DST_NAT_V01:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].action =
+				IPA_PASS_TO_DST_NAT;
+			break;
+
+		case QMI_IPA_FILTER_ACTION_ROUTING_V01:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].action =
+				IPA_PASS_TO_ROUTING;
+			break;
+
+		case QMI_IPA_FILTER_ACTION_EXCEPTION_V01:
+			/* Fall through */
+		default:
+			ipa_qmi_ctx->q6_ul_filter_rule[i].action =
+				IPA_PASS_TO_EXCEPTION;
+			break;
+		}
+		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
+			== true)
+			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
+				rule_req->filter_spec_list[i].route_table_index;
+		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
+			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
+				rule_req->filter_spec_list[i].mux_id;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
+			rule_req->filter_spec_list[i].filter_rule.rule_eq_bitmap;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
+			rule_req->filter_spec_list[i].filter_rule.tos_eq_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
+			rule_req->filter_spec_list[i].filter_rule.tos_eq;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq_present =
+			rule_req->filter_spec_list[i].filter_rule.protocol_eq_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
+			rule_req->filter_spec_list[i].filter_rule.protocol_eq;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_range_16 =
+			rule_req->filter_spec_list[i].filter_rule.num_ihl_offset_range_16;
+
+for (j = 0;
+j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_range_16; j++) {
+	IPAWANDBG("copy_ul_filter_rule_to_ipa");
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_range_16[j].offset
+= rule_req->filter_spec_list[i].filter_rule.ihl_offset_range_16[j].offset;
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_range_16[j].range_low
+= rule_req->filter_spec_list[i].filter_rule.ihl_offset_range_16[j].range_low;
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_range_16[j].range_high
+= rule_req->filter_spec_list[i].filter_rule.ihl_offset_range_16[j].range_high;
+}
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
+			rule_req->filter_spec_list[i].filter_rule.num_offset_meq_32;
+
+for (j = 0;
+j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32; j++) {
+
+	ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_32[j].offset
+= rule_req->filter_spec_list[i].filter_rule.offset_meq_32[j].offset;
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_32[j].mask =
+rule_req->filter_spec_list[i].filter_rule.offset_meq_32[j].mask;
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_32[j].value =
+rule_req->filter_spec_list[i].filter_rule.offset_meq_32[j].value;
+}
+
+
ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present = + rule_req->filter_spec_list[i].filter_rule.tc_eq_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq = + rule_req->filter_spec_list[i].filter_rule.tc_eq; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present = + rule_req->filter_spec_list[i].filter_rule.flow_eq_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq = + rule_req->filter_spec_list[i].filter_rule.flow_eq; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_16_present + = rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_16_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_16.offset = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_16.offset; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_16.value = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_16.value; + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_32_present = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_32_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_32.offset = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_32.offset; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_32.value = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_32.value; + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_meq_32 = + rule_req->filter_spec_list[i].filter_rule.num_ihl_offset_meq_32; + +for (j = 0; +j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_meq_32; j++) { + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_meq_32[j].offset += rule_req->filter_spec_list[i].filter_rule.ihl_offset_meq_32[j].offset; +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_meq_32[j].mask = +rule_req->filter_spec_list[i].filter_rule.ihl_offset_meq_32[j].mask; +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_meq_32[j].value = +rule_req->filter_spec_list[i].filter_rule.ihl_offset_meq_32[j].value; +} + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 = + rule_req->filter_spec_list[i].filter_rule.num_offset_meq_128; + +for (j = 0; +j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128; j++) { + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_128[j].offset = +rule_req->filter_spec_list[i].filter_rule.offset_meq_128[j].offset; +memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_128[j].mask, +rule_req->filter_spec_list[i].filter_rule.offset_meq_128[j].mask, 16); +memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_128[j].value, +rule_req->filter_spec_list[i].filter_rule.offset_meq_128[j].value, 16); +} + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32_present = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.offset = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32.offset; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.mask = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32.mask; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.value = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32.value; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ipv4_frag_eq_present = + rule_req->filter_spec_list[i].filter_rule.ipv4_frag_eq_present; + } + + if (rule_req->xlat_filter_indices_list_valid) { + if (rule_req->xlat_filter_indices_list_len > num_q6_rule) { + IPAWANERR("Number of xlat indices is not valid: %d\n", + rule_req->xlat_filter_indices_list_len); + goto 
failure; + } + IPAWANDBG("Receive %d XLAT indices: ", + rule_req->xlat_filter_indices_list_len); + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) + IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]); + IPAWANDBG("\n"); + + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) { + if (rule_req->xlat_filter_indices_list[i] + >= num_q6_rule) { + IPAWANERR("Xlat rule idx is wrong: %d\n", + rule_req->xlat_filter_indices_list[i]); + goto failure; + } else { + ipa_qmi_ctx->q6_ul_filter_rule + [rule_req->xlat_filter_indices_list[i]] + .is_xlat_rule = 1; + IPAWANDBG("Rule %d is xlat rule\n", + rule_req->xlat_filter_indices_list[i]); + } + } + } + goto success; + +failure: + num_q6_rule = 0; + memset(ipa_qmi_ctx->q6_ul_filter_rule, 0, + sizeof(ipa_qmi_ctx->q6_ul_filter_rule)); + mutex_unlock(&add_mux_channel_lock); + return -EINVAL; + +success: + mutex_unlock(&add_mux_channel_lock); + return 0; +} + +static int wwan_add_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + int num_v4_rule = 0, num_v6_rule = 0; + struct ipa_ioc_add_flt_rule *param; + struct ipa_flt_rule_add flt_rule_entry; + struct ipa_fltr_installed_notif_req_msg_v01 *req; + + if (ipa_qmi_ctx == NULL) { + IPAWANERR("ipa_qmi_ctx is NULL!\n"); + return -EFAULT; + } + + pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) + + sizeof(struct ipa_flt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) + return -ENOMEM; + + req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01), + GFP_KERNEL); + if (!req) { + kfree(param); + return -ENOMEM; + } + + memset(req, 0, sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + + param->commit = 1; + param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD; + param->global = false; + param->num_rules = (uint8_t)1; + + mutex_lock(&ipa_qmi_lock); + for (i = 0; i < num_q6_rule; i++) { + param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add)); + flt_rule_entry.at_rear = true; + flt_rule_entry.rule.action = + ipa_qmi_ctx->q6_ul_filter_rule[i].action; + flt_rule_entry.rule.rt_tbl_idx + = ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx; + flt_rule_entry.rule.retain_hdr = true; + + /* debug rt-hdl*/ + IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n", + i, flt_rule_entry.rule.rt_tbl_idx); + flt_rule_entry.rule.eq_attrib_type = true; + memcpy(&(flt_rule_entry.rule.eq_attrib), + &ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib, + sizeof(struct ipa_ipfltri_rule_eq)); + memcpy(&(param->rules[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_add)); + if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + retval = -EFAULT; + IPAWANERR("add A7 UL filter rule(%d) failed\n", i); + } else { + /* store the rule handler */ + ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] = + param->rules[0].flt_rule_hdl; + } + } + mutex_unlock(&ipa_qmi_lock); + + /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/ + req->source_pipe_index = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); + req->install_status = QMI_RESULT_SUCCESS_V01; + req->filter_index_list_len = num_q6_rule; + mutex_lock(&ipa_qmi_lock); + for (i = 0; i < num_q6_rule; i++) { + if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) { + req->filter_index_list[i].filter_index = num_v4_rule; + num_v4_rule++; + } else { + req->filter_index_list[i].filter_index = num_v6_rule; + num_v6_rule++; + } + req->filter_index_list[i].filter_handle = + ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl; + } + mutex_unlock(&ipa_qmi_lock); + if (qmi_filter_notify_send(req)) { + IPAWANDBG("add filter 
rule index on A7-RX failed\n"); + retval = -EFAULT; + } + old_num_q6_rule = num_q6_rule; + IPAWANDBG("add (%d) filter rule index on A7-RX\n", + old_num_q6_rule); + kfree(param); + kfree(req); + return retval; +} + +static int wwan_del_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + struct ipa_ioc_del_flt_rule *param; + struct ipa_flt_rule_del flt_rule_entry; + + pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) + + sizeof(struct ipa_flt_rule_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + IPAWANERR("kzalloc failed\n"); + return -ENOMEM; + } + + param->commit = 1; + param->num_hdls = (uint8_t) 1; + + for (i = 0; i < old_num_q6_rule; i++) { + param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del)); + flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i]; + /* debug rt-hdl*/ + IPAWANDBG("delete-IPA rule index(%d)\n", i); + memcpy(&(param->hdl[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_del)); + if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + IPAWANERR("del A7 UL filter rule(%d) failed\n", i); + kfree(param); + return -EFAULT; + } + } + + /* set UL filter-rule add-indication */ + a7_ul_flt_set = false; + old_num_q6_rule = 0; + + kfree(param); + return retval; +} + +static int find_mux_channel_index(uint32_t mux_id) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (mux_id == mux_channel[i].mux_id) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static int find_vchannel_name_index(const char *vchannel_name) +{ + int i; + + for (i = 0; i < rmnet_index; i++) { + if (strcmp(mux_channel[i].vchannel_name, vchannel_name) == 0) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static enum ipa_upstream_type find_upstream_type(const char *upstreamIface) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (strcmp(mux_channel[i].vchannel_name, + upstreamIface) == 0) + return IPA_UPSTEAM_MODEM; + } + + if ((strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0) || + (strcmp(IPA_UPSTEAM_WLAN1_IFACE_NAME, upstreamIface) == 0)) + return IPA_UPSTEAM_WLAN; + else + return IPA_UPSTEAM_MAX; +} + +static int wwan_register_to_ipa(int index) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *tx_ipv4_property; + struct ipa_ioc_tx_intf_prop *tx_ipv6_property; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + struct ipa_ext_intf ext_properties = {0}; + struct ipa_ioc_ext_intf_prop *ext_ioc_properties; + u32 pyld_sz; + int ret = 0, i; + + IPAWANDBG("index(%d) device[%s]:\n", index, + mux_channel[index].vchannel_name); + if (!mux_channel[index].mux_hdr_set) { + ret = ipa_add_qmap_hdr(mux_channel[index].mux_id, + &mux_channel[index].hdr_hdl); + if (ret) { + IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index); + return ret; + } + mux_channel[index].mux_hdr_set = true; + } + tx_properties.prop = tx_ioc_properties; + tx_ipv4_property = &tx_properties.prop[0]; + tx_ipv4_property->ip = IPA_IP_v4; + tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_channel[index].mux_id); + tx_ipv6_property = &tx_properties.prop[1]; + tx_ipv6_property->ip = IPA_IP_v6; + tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + /* no 
need use A2_MUX_HDR_NAME_V6_PREF, same header */ + snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_channel[index].mux_id); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv4_property->attrib.meta_data = + mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv6_property->attrib.meta_data = + mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD; + rx_properties.num_props = 2; + + pyld_sz = num_q6_rule * + sizeof(struct ipa_ioc_ext_intf_prop); + ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL); + if (!ext_ioc_properties) { + IPAWANERR("Error allocate memory\n"); + return -ENOMEM; + } + + ext_properties.prop = ext_ioc_properties; + ext_properties.excp_pipe_valid = true; + ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS; + ext_properties.num_props = num_q6_rule; + for (i = 0; i < num_q6_rule; i++) { + memcpy(&(ext_properties.prop[i]), + &(ipa_qmi_ctx->q6_ul_filter_rule[i]), + sizeof(struct ipa_ioc_ext_intf_prop)); + ext_properties.prop[i].mux_id = mux_channel[index].mux_id; + IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i, + ext_properties.prop[i].ip, + ext_properties.prop[i].rt_tbl_idx); + IPAWANDBG("action: %d mux:%d\n", + ext_properties.prop[i].action, + ext_properties.prop[i].mux_id); + } + ret = ipa2_register_intf_ext(mux_channel[index].vchannel_name, + &tx_properties, &rx_properties, &ext_properties); + if (ret) { + IPAWANERR("[%s]:ipa2_register_intf failed %d\n", + mux_channel[index].vchannel_name, ret); + goto fail; + } + mux_channel[index].ul_flt_reg = true; +fail: + kfree(ext_ioc_properties); + return ret; +} + +static void ipa_cleanup_deregister_intf(void) +{ + int i; + int ret; + + for (i = 0; i < rmnet_index; i++) { + if (mux_channel[i].ul_flt_reg) { + ret = ipa2_deregister_intf( + mux_channel[i].vchannel_name); + if (ret < 0) { + IPAWANERR("de-register device %s(%d) failed\n", + mux_channel[i].vchannel_name, + i); + return; + } + IPAWANDBG("de-register device %s(%d) success\n", + mux_channel[i].vchannel_name, + i); + } + mux_channel[i].ul_flt_reg = false; + } +} + +int wwan_update_mux_channel_prop(void) +{ + int ret = 0, i; + /* install UL filter rules */ + if (egress_set) { + if (ipa_qmi_ctx && + !ipa_qmi_ctx->modem_cfg_emb_pipe_flt) { + IPAWANDBG("setup UL filter rules\n"); + if (a7_ul_flt_set) { + IPAWANDBG("del previous UL filter rules\n"); + /* delete rule hdlers */ + ret = wwan_del_ul_flt_rule_to_ipa(); + if (ret) { + IPAWANERR("failed to del old rules\n"); + return -EINVAL; + } + IPAWANDBG("deleted old UL rules\n"); + } + ret = wwan_add_ul_flt_rule_to_ipa(); + } + if (ret) + IPAWANERR("failed to install UL rules\n"); + else + a7_ul_flt_set = true; + } + /* update Tx/Rx/Ext property */ + IPAWANDBG("update Tx/Rx/Ext property in IPA\n"); + if (rmnet_index == 0) { + IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n"); + return ret; + } + + ipa_cleanup_deregister_intf(); + + for (i = 0; i < rmnet_index; i++) { + ret = wwan_register_to_ipa(i); + if (ret < 0) { + 
IPAWANERR("failed to re-regist %s, mux %d, index %d\n", + mux_channel[i].vchannel_name, + mux_channel[i].mux_id, + i); + return -ENODEV; + } + IPAWANERR("dev(%s) has registered to IPA\n", + mux_channel[i].vchannel_name); + mux_channel[i].ul_flt_reg = true; + } + return ret; +} + +#ifdef INIT_COMPLETION +#define reinit_completion(x) INIT_COMPLETION(*(x)) +#endif /* INIT_COMPLETION */ + +static int __ipa_wwan_open(struct net_device *dev) +{ + struct wwan_private *wwan_ptr = netdev_priv(dev); + + IPAWANDBG("[%s] __wwan_open()\n", dev->name); + if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) + reinit_completion(&wwan_ptr->resource_granted_completion); + wwan_ptr->device_status = WWAN_DEVICE_ACTIVE; + + if (ipa_rmnet_res.ipa_napi_enable) + napi_enable(&(wwan_ptr->napi)); + return 0; +} + +/** + * wwan_open() - Opens the wwan network interface. Opens logical + * channel on A2 MUX driver and starts the network stack queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa_wwan_open(struct net_device *dev) +{ + int rc = 0; + + IPAWANDBG("[%s] wwan_open()\n", dev->name); + rc = __ipa_wwan_open(dev); + if (rc == 0) + netif_start_queue(dev); + return rc; +} + +static int __ipa_wwan_close(struct net_device *dev) +{ + struct wwan_private *wwan_ptr = netdev_priv(dev); + int rc = 0; + + if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) { + wwan_ptr->device_status = WWAN_DEVICE_INACTIVE; + /* do not close wwan port once up, this causes + * remote side to hang if tried to open again + */ + reinit_completion(&wwan_ptr->resource_granted_completion); + if (ipa_rmnet_res.ipa_napi_enable) + napi_disable(&(wwan_ptr->napi)); + rc = ipa2_deregister_intf(dev->name); + if (rc) { + IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n", + dev->name, rc); + return rc; + } + return rc; + } else { + return -EBADF; + } +} + +/** + * ipa_wwan_stop() - Stops the wwan network interface. Closes + * logical channel on A2 MUX driver and stops the network stack + * queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa_wwan_stop(struct net_device *dev) +{ + IPAWANDBG("[%s]\n", dev->name); + __ipa_wwan_close(dev); + netif_stop_queue(dev); + return 0; +} + +static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu) +{ + if (0 > new_mtu || WWAN_DATA_LEN < new_mtu) + return -EINVAL; + IPAWANDBG("[%s] MTU change: old=%d new=%d\n", + dev->name, dev->mtu, new_mtu); + dev->mtu = new_mtu; + return 0; +} + +/** + * ipa_wwan_xmit() - Transmits an skb. + * + * @skb: skb to be transmitted + * @dev: network device + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. 
Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int ret = 0; + bool qmap_check; + struct wwan_private *wwan_ptr = netdev_priv(dev); + struct ipa_tx_meta meta; + + if (skb->protocol != htons(ETH_P_MAP)) { + IPAWANDBG_LOW + ("SW filtering out none QMAP packet received from %s", + current->comm); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + qmap_check = RMNET_MAP_GET_CD_BIT(skb); + if (netif_queue_stopped(dev)) { + if (qmap_check && + atomic_read(&wwan_ptr->outstanding_pkts) < + wwan_ptr->outstanding_high_ctl) { + pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name); + goto send; + } else { + pr_err("[%s]fatal: %s stopped\n", __func__, dev->name); + return NETDEV_TX_BUSY; + } + } + + /* checking High WM hit */ + if (atomic_read(&wwan_ptr->outstanding_pkts) >= + wwan_ptr->outstanding_high) { + if (!qmap_check) { + IPAWANDBG_LOW + ("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n", + atomic_read(&wwan_ptr->outstanding_pkts), + wwan_ptr->outstanding_high, + netif_queue_stopped(dev), + qmap_check); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + } + +send: + /* IPA_RM checking start */ + ret = ipa_rm_inactivity_timer_request_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret == -EINPROGRESS) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + if (ret) { + pr_err("[%s] fatal: ipa rm timer request resource failed %d\n", + dev->name, ret); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return -EFAULT; + } + /* IPA_RM checking end */ + + if (qmap_check) { + memset(&meta, 0, sizeof(meta)); + meta.pkt_init_dst_ep_valid = true; + meta.pkt_init_dst_ep_remote = true; + ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta); + } else { + ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL); + } + + if (ret) { + ret = NETDEV_TX_BUSY; + goto out; + } + + atomic_inc(&wwan_ptr->outstanding_pkts); + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + ret = NETDEV_TX_OK; +out: + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + return ret; +} + +static void ipa_wwan_tx_timeout(struct net_device *dev) +{ + IPAWANERR("[%s]:[%s] data stall in UL\n", __func__, dev->name); +} + +/** + * apps_ipa_tx_complete_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * Check that the packet is the one we sent and release it + * This function will be called in defered context in IPA wq. 
+ */ +static void apps_ipa_tx_complete_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct net_device *dev = (struct net_device *)priv; + struct wwan_private *wwan_ptr; + + if (dev != ipa_netdevs[0]) { + IPAWANDBG("Received pre-SSR packet completion\n"); + dev_kfree_skb_any(skb); + return; + } + + if (evt != IPA_WRITE_DONE) { + IPAWANERR("unsupported evt on Tx callback, Drop the packet\n"); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return; + } + + wwan_ptr = netdev_priv(dev); + atomic_dec(&wwan_ptr->outstanding_pkts); + __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0)); + if (!atomic_read(&is_ssr) && + netif_queue_stopped(wwan_ptr->net) && + atomic_read(&wwan_ptr->outstanding_pkts) < + (wwan_ptr->outstanding_low)) { + IPAWANDBG_LOW + ("Outstanding low (%d) - wake up queue\n", + wwan_ptr->outstanding_low); + netif_wake_queue(wwan_ptr->net); + } + __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0)); + dev_kfree_skb_any(skb); + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); +} + +/** + * apps_ipa_packet_receive_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data + */ +static void apps_ipa_packet_receive_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct net_device *dev = (struct net_device *)priv; + + if (evt == IPA_RECEIVE) { + struct sk_buff *skb = (struct sk_buff *)data; + int result; + unsigned int packet_len = skb->len; + + IPAWANDBG_LOW("Rx packet was received"); + skb->dev = ipa_netdevs[0]; + skb->protocol = htons(ETH_P_MAP); + + if (ipa_rmnet_res.ipa_napi_enable) { + trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets); + result = netif_receive_skb(skb); + } else { + if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH + == 0) { + trace_rmnet_ipa_netifni(dev->stats.rx_packets); + result = netif_rx_ni(skb); + } else { + trace_rmnet_ipa_netifrx(dev->stats.rx_packets); + result = netif_rx(skb); + } + } + + if (result) { + pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n", + __func__, __LINE__); + dev->stats.rx_dropped++; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_len; + } else if (evt == IPA_CLIENT_START_POLL) + ipa_rmnet_rx_cb(priv); + else if (evt == IPA_CLIENT_COMP_NAPI) { + struct wwan_private *wwan_ptr = netdev_priv(dev); + + if (ipa_rmnet_res.ipa_napi_enable) + napi_complete(&(wwan_ptr->napi)); + } else + IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt); + +} + +static int handle_ingress_format(struct net_device *dev, + struct rmnet_ioctl_extended_s *in) +{ + int ret = 0; + + IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n"); + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) + ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_DL; + + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) { + IPAWANERR("get AGG size %d count %d\n", + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + ret = ipa_disable_apps_wan_cons_deaggr( + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + if (!ret) { + ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit = + in->u.ingress_format.agg_size; + ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit = + in->u.ingress_format.agg_count; + } + } + + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + 
ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = + true; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0; + ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000; + + ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS; + ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify; + ipa_to_apps_ep_cfg.priv = dev; + + ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable; + ipa_to_apps_ep_cfg.desc_fifo_sz = + ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec); + + mutex_lock(&ipa_to_apps_pipe_handle_guard); + if (atomic_read(&is_ssr)) { + IPAWANDBG("In SSR sequence/recovery\n"); + mutex_unlock(&ipa_to_apps_pipe_handle_guard); + return -EFAULT; + } + ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl); + mutex_unlock(&ipa_to_apps_pipe_handle_guard); + + if (ret) + IPAWANERR("failed to configure ingress\n"); + + return ret; +} + +/** + * ipa_wwan_ioctl() - I/O control for wwan network driver. + * + * @dev: network device + * @ifr: ignored + * @cmd: cmd to be excecuded. can be one of the following: + * IPA_WWAN_IOCTL_OPEN - Open the network interface + * IPA_WWAN_IOCTL_CLOSE - Close the network interface + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + int mru = 1000, epid = 1, mux_index, len; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg *wan_msg = NULL; + struct rmnet_ioctl_extended_s extend_ioctl_data; + struct rmnet_ioctl_data_s ioctl_data; + + IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd); + switch (cmd) { + /* Set Ethernet protocol */ + case RMNET_IOCTL_SET_LLP_ETHERNET: + break; + /* Set RAWIP protocol */ + case RMNET_IOCTL_SET_LLP_IP: + break; + /* Get link protocol */ + case RMNET_IOCTL_GET_LLP: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Set QoS header enabled */ + case RMNET_IOCTL_SET_QOS_ENABLE: + return -EINVAL; + /* Set QoS header disabled */ + case RMNET_IOCTL_SET_QOS_DISABLE: + break; + /* Get QoS header state */ + case RMNET_IOCTL_GET_QOS: + ioctl_data.u.operation_mode = RMNET_MODE_NONE; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Get operation mode */ + case RMNET_IOCTL_GET_OPMODE: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Open transport port */ + case RMNET_IOCTL_OPEN: + break; + /* Close transport port */ + case RMNET_IOCTL_CLOSE: + break; + /* Flow enable */ + case RMNET_IOCTL_FLOW_ENABLE: + IPAWANDBG("Received flow enable\n"); + if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_data_s))) { + rc = -EFAULT; + break; + } + ipa_flow_control(IPA_CLIENT_USB_PROD, true, + ioctl_data.u.tcm_handle); + break; + /* 
Flow disable */ + case RMNET_IOCTL_FLOW_DISABLE: + IPAWANDBG("Received flow disable\n"); + if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_data_s))) { + rc = -EFAULT; + break; + } + ipa_flow_control(IPA_CLIENT_USB_PROD, false, + ioctl_data.u.tcm_handle); + break; + /* Set flow handle */ + case RMNET_IOCTL_FLOW_SET_HNDL: + break; + + /* Extended IOCTLs */ + case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); + if (copy_from_user(&extend_ioctl_data, + (const void __user *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("failed to copy extended ioctl data\n"); + rc = -EFAULT; + break; + } + switch (extend_ioctl_data.extended_ioctl) { + /* Get features */ + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n"); + extend_ioctl_data.u.data = + (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL | + RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT | + RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT); + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Set MRU */ + case RMNET_IOCTL_SET_MRU: + mru = extend_ioctl_data.u.data; + IPAWANDBG("get MRU size %d\n", + extend_ioctl_data.u.data); + break; + /* Get MRU */ + case RMNET_IOCTL_GET_MRU: + extend_ioctl_data.u.data = mru; + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* GET SG support */ + case RMNET_IOCTL_GET_SG_SUPPORT: + extend_ioctl_data.u.data = + ipa_rmnet_res.ipa_advertise_sg_support; + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Get endpoint ID */ + case RMNET_IOCTL_GET_EPID: + IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n"); + extend_ioctl_data.u.data = epid; + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&extend_ioctl_data, + (const void __user *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n", + extend_ioctl_data.u.data); + break; + /* Endpoint pair */ + case RMNET_IOCTL_GET_EP_PAIR: + IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n"); + extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); + extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&extend_ioctl_data, + (const void __user *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n", + extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num, + extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num); + break; + /* Get driver name */ + case RMNET_IOCTL_GET_DRIVER_NAME: + memcpy(&extend_ioctl_data.u.if_name, + ipa_netdevs[0]->name, IFNAMSIZ); + extend_ioctl_data.u.if_name[IFNAMSIZ - 1] = '\0'; + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, + 
&extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
+ /* Add MUX ID */
+ case RMNET_IOCTL_ADD_MUX_CHANNEL:
+ mux_index = find_mux_channel_index(
+ extend_ioctl_data.u.rmnet_mux_val.mux_id);
+ if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANDBG("already setup mux(%d)\n",
+ extend_ioctl_data.u.rmnet_mux_val.mux_id);
+ return rc;
+ }
+ mutex_lock(&add_mux_channel_lock);
+ if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("Exceed mux_channel limit(%d)\n",
+ rmnet_index);
+ mutex_unlock(&add_mux_channel_lock);
+ return -EFAULT;
+ }
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
+ IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
+ extend_ioctl_data.u.rmnet_mux_val.mux_id,
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+ /* cache the mux name and id */
+ mux_channel[rmnet_index].mux_id =
+ extend_ioctl_data.u.rmnet_mux_val.mux_id;
+ memcpy(mux_channel[rmnet_index].vchannel_name,
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
+ sizeof(mux_channel[rmnet_index].vchannel_name));
+ mux_channel[rmnet_index].vchannel_name[
+ IFNAMSIZ - 1] = '\0';
+
+ IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n",
+ mux_channel[rmnet_index].vchannel_name,
+ mux_channel[rmnet_index].mux_id,
+ rmnet_index);
+ /* check whether UL filter rules have already arrived from Q6 */
+ if (num_q6_rule != 0) {
+ IPAWANERR("registering dev(%s) to IPA\n",
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+ rc = wwan_register_to_ipa(rmnet_index);
+ if (rc < 0) {
+ IPAWANERR("device %s reg IPA failed\n",
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+ mutex_unlock(&add_mux_channel_lock);
+ return -ENODEV;
+ }
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = true;
+ } else {
+ IPAWANDBG("dev(%s) hasn't registered to IPA yet\n",
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = false;
+ }
+ rmnet_index++;
+ mutex_unlock(&add_mux_channel_lock);
+ break;
+ case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
+ IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_UL;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.cs_metadata_hdr_offset = 1;
+ } else {
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+ }
+ if ((extend_ioctl_data.u.data) &
+ RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
+ IPA_ENABLE_AGGR;
+ else
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
+ IPA_BYPASS_AGGR;
+ apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+ /* modem wants offset at 0!
*/ + apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0; + apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst = + IPA_CLIENT_APPS_LAN_WAN_PROD; + apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC; + + apps_to_ipa_ep_cfg.client = + IPA_CLIENT_APPS_LAN_WAN_PROD; + apps_to_ipa_ep_cfg.notify = + apps_ipa_tx_complete_notify; + apps_to_ipa_ep_cfg.desc_fifo_sz = + IPA_SYS_TX_DATA_DESC_FIFO_SZ; + apps_to_ipa_ep_cfg.priv = dev; + + rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg, + &apps_to_ipa_hdl); + if (rc) + IPAWANERR("failed to config egress endpoint\n"); + + if (num_q6_rule != 0) { + /* already got Q6 UL filter rules*/ + if (ipa_qmi_ctx && + !ipa_qmi_ctx->modem_cfg_emb_pipe_flt) { + /* protect num_q6_rule */ + mutex_lock(&add_mux_channel_lock); + rc = wwan_add_ul_flt_rule_to_ipa(); + mutex_unlock(&add_mux_channel_lock); + } else + rc = 0; + egress_set = true; + if (rc) + IPAWANERR("install UL rules failed\n"); + else + a7_ul_flt_set = true; + } else { + /* wait Q6 UL filter rules*/ + egress_set = true; + IPAWANDBG("no UL-rules, egress_set(%d)\n", + egress_set); + } + break; + case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */ + rc = handle_ingress_format(dev, &extend_ioctl_data); + break; + case RMNET_IOCTL_SET_XLAT_DEV_INFO: + wan_msg = kzalloc(sizeof(struct ipa_wan_msg), + GFP_KERNEL); + if (!wan_msg) { + IPAWANERR("Failed to allocate memory.\n"); + return -ENOMEM; + } + extend_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0'; + len = sizeof(wan_msg->upstream_ifname) > + sizeof(extend_ioctl_data.u.if_name) ? + sizeof(extend_ioctl_data.u.if_name) : + sizeof(wan_msg->upstream_ifname); + strlcpy(wan_msg->upstream_ifname, + extend_ioctl_data.u.if_name, len); + wan_msg->upstream_ifname[len - 1] = '\0'; + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = WAN_XLAT_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + rc = ipa2_send_msg(&msg_meta, wan_msg, + ipa_wwan_msg_free_cb); + if (rc) { + IPAWANERR("Failed to send XLAT_CONNECT msg\n"); + kfree(wan_msg); + } + break; + /* Get agg count */ + case RMNET_IOCTL_GET_AGGREGATION_COUNT: + break; + /* Set agg count */ + case RMNET_IOCTL_SET_AGGREGATION_COUNT: + break; + /* Get agg size */ + case RMNET_IOCTL_GET_AGGREGATION_SIZE: + break; + /* Set agg size */ + case RMNET_IOCTL_SET_AGGREGATION_SIZE: + break; + /* Do flow control */ + case RMNET_IOCTL_FLOW_CONTROL: + break; + /* For legacy use */ + case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL: + break; + /* Get HW/SW map */ + case RMNET_IOCTL_GET_HWSW_MAP: + break; + /* Set RX Headroom */ + case RMNET_IOCTL_SET_RX_HEADROOM: + break; + default: + IPAWANERR("[%s] unsupported extended cmd[%d]", + dev->name, + extend_ioctl_data.extended_ioctl); + rc = -EINVAL; + } + break; + default: + IPAWANERR("[%s] unsupported cmd[%d]", + dev->name, cmd); + rc = -EINVAL; + } + return rc; +} + +static const struct net_device_ops ipa_wwan_ops_ip = { + .ndo_open = ipa_wwan_open, + .ndo_stop = ipa_wwan_stop, + .ndo_start_xmit = ipa_wwan_xmit, + .ndo_tx_timeout = ipa_wwan_tx_timeout, + .ndo_do_ioctl = ipa_wwan_ioctl, + .ndo_change_mtu = ipa_wwan_change_mtu, + .ndo_set_mac_address = NULL, + .ndo_validate_addr = NULL, +}; + +/** + * wwan_setup() - Setups the wwan network driver. 
+ * + * @dev: network device + * + * Return codes: + * None + */ + +static void ipa_wwan_setup(struct net_device *dev) +{ + dev->netdev_ops = &ipa_wwan_ops_ip; + ether_setup(dev); + /* set this after calling ether_setup */ + dev->header_ops = NULL; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->mtu = WWAN_DATA_LEN; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->needed_headroom = HEADROOM_FOR_QMAP; + dev->needed_tailroom = TAILROOM; + dev->watchdog_timeo = 1000; +} + +/* IPA_RM related functions start*/ +static void q6_prod_rm_request_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource); +static void q6_prod_rm_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource); + +static void q6_prod_rm_request_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__, + ret); + return; + } +} + +static int q6_rm_request_resource(void) +{ + queue_delayed_work(ipa_rm_q6_workqueue, + &q6_con_rm_request, 0); + return 0; +} + +static void q6_prod_rm_release_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__, + ret); + return; + } +} + + +static int q6_rm_release_resource(void) +{ + queue_delayed_work(ipa_rm_q6_workqueue, + &q6_con_rm_release, 0); + return 0; +} + + +static void q6_rm_notify_cb(void *user_data, + enum ipa_rm_event event, + unsigned long data) +{ + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__); + break; + case IPA_RM_RESOURCE_RELEASED: + IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__); + break; + default: + return; + } +} +static int q6_initialize_rm(void) +{ + struct ipa_rm_create_params create_params; + struct ipa_rm_perf_profile profile; + int result; + + /* Initialize IPA_RM workqueue */ + ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req"); + if (!ipa_rm_q6_workqueue) + return -ENOMEM; + + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_PROD; + create_params.reg_params.notify_cb = &q6_rm_notify_cb; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err1; + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_CONS; + create_params.release_resource = &q6_rm_release_resource; + create_params.request_resource = &q6_rm_request_resource; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err2; + /* add dependency*/ + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 100; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (result) + goto set_perf_err; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, + &profile); + if (result) + goto set_perf_err; + return result; + +set_perf_err: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); +add_dpnd_err: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (result < 0) 
+ IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, result); +create_rsrc_err2: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, result); +create_rsrc_err1: + destroy_workqueue(ipa_rm_q6_workqueue); + return result; +} + +static void q6_deinitialize_rm(void) +{ + int ret; + + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS, + ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, ret); + + if (ipa_rm_q6_workqueue) + destroy_workqueue(ipa_rm_q6_workqueue); +} + +static void wake_tx_queue(struct work_struct *work) +{ + if (ipa_netdevs[0]) { + __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0)); + netif_wake_queue(ipa_netdevs[0]); + __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0)); + } +} + +/** + * ipa_rm_resource_granted() - Called upon + * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped. + * + * @work: work object supplied ny workqueue + * + * Return codes: + * None + */ +static void ipa_rm_resource_granted(void *dev) +{ + IPAWANDBG_LOW("Resource Granted - starting queue\n"); + schedule_work(&ipa_tx_wakequeue_work); +} + +/** + * ipa_rm_notify() - Callback function for RM events. Handles + * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events. + * IPA_RM_RESOURCE_GRANTED is handled in the context of shared + * workqueue. + * + * @dev: network device + * @event: IPA RM event + * @data: Additional data provided by IPA RM + * + * Return codes: + * None + */ +static void ipa_rm_notify(void *dev, enum ipa_rm_event event, + unsigned long data) +{ + struct wwan_private *wwan_ptr = netdev_priv(dev); + + pr_debug("%s: event %d\n", __func__, event); + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) { + complete_all(&wwan_ptr->resource_granted_completion); + break; + } + ipa_rm_resource_granted(dev); + break; + case IPA_RM_RESOURCE_RELEASED: + break; + default: + pr_err("%s: unknown event %d\n", __func__, event); + break; + } +} + +/* IPA_RM related functions end*/ + +static int ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data); + +static struct notifier_block ssr_notifier = { + .notifier_call = ssr_notifier_cb, +}; + +static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev, + struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res) +{ + int result; + + ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ; + ipa_rmnet_drv_res->ipa_rmnet_ssr = + of_property_read_bool(pdev->dev.of_node, + "qcom,rmnet-ipa-ssr"); + pr_info("IPA SSR support = %s\n", + ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False"); + ipa_rmnet_drv_res->ipa_loaduC = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-loaduC"); + pr_info("IPA ipa-loaduC = %s\n", + ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_advertise_sg_support = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-advertise-sg-support"); + pr_info("IPA SG support = %s\n", + ipa_rmnet_drv_res->ipa_advertise_sg_support ? 
"True" : "False"); + + ipa_rmnet_drv_res->ipa_napi_enable = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-napi-enable"); + pr_info("IPA Napi Enable = %s\n", + ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False"); + + /* Get IPA WAN RX desc fifo size */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-desc-size", + &ipa_rmnet_drv_res->wan_rx_desc_size); + if (result) + pr_info("using default for wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + else + IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + + return 0; +} + +struct ipa_rmnet_context ipa_rmnet_ctx; + +/** + * ipa_wwan_probe() - Initialized the module and registers as a + * network interface to the network stack + * + * Return codes: + * 0: success + * -ENOMEM: No memory available + * -EFAULT: Internal error + * -ENODEV: IPA driver not loaded + */ +static int ipa_wwan_probe(struct platform_device *pdev) +{ + int ret, i; + struct net_device *dev; + struct wwan_private *wwan_ptr; + struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */ + struct ipa_rm_perf_profile profile; /* IPA_RM */ + + pr_info("rmnet_ipa started initialization\n"); + + if (!ipa2_is_ready()) { + IPAWANERR("IPA driver not loaded\n"); + return -ENODEV; + } + + ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res); + ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr; + + ret = ipa_init_q6_smem(); + if (ret) { + IPAWANERR("ipa_init_q6_smem failed!\n"); + return ret; + } + + /* initialize tx/rx enpoint setup */ + memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params)); + memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params)); + + /* initialize ex property setup */ + num_q6_rule = 0; + old_num_q6_rule = 0; + rmnet_index = 0; + egress_set = false; + a7_ul_flt_set = false; + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) + memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val)); + + /* start A7 QMI service/client */ + if (ipa_rmnet_res.ipa_loaduC) + /* Android platform loads uC */ + ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01); + else + /* LE platform not loads uC */ + ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01); + + /* construct default WAN RT tbl for IPACM */ + ret = ipa_setup_a7_qmap_hdr(); + if (ret) + goto setup_a7_qmap_hdr_err; + ret = ipa_setup_dflt_wan_rt_tables(); + if (ret) + goto setup_dflt_wan_rt_tables_err; + + if (!atomic_read(&is_ssr)) { + /* Start transport-driver fd ioctl for ipacm for first init */ + ret = wan_ioctl_init(); + if (ret) + goto wan_ioctl_init_err; + } else { + /* Enable sending QMI messages after SSR */ + wan_ioctl_enable_qmi_messages(); + } + + /* initialize wan-driver netdev */ + dev = alloc_netdev(sizeof(struct wwan_private), + IPA_WWAN_DEV_NAME, + NET_NAME_UNKNOWN, + ipa_wwan_setup); + if (!dev) { + IPAWANERR("no memory for netdev\n"); + ret = -ENOMEM; + goto alloc_netdev_err; + } + ipa_netdevs[0] = dev; + wwan_ptr = netdev_priv(dev); + memset(wwan_ptr, 0, sizeof(*wwan_ptr)); + IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr); + wwan_ptr->net = dev; + wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL; + wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH; + wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&wwan_ptr->outstanding_pkts, 0); + spin_lock_init(&wwan_ptr->lock); + init_completion(&wwan_ptr->resource_granted_completion); + + if (!atomic_read(&is_ssr)) { + /* IPA_RM configuration starts */ + ret = q6_initialize_rm(); + if (ret) { + 
IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n", + __func__, ret); + goto q6_init_err; + } + } + + memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params)); + ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD; + ipa_rm_params.reg_params.user_data = dev; + ipa_rm_params.reg_params.notify_cb = ipa_rm_notify; + ret = ipa_rm_create_resource(&ipa_rm_params); + if (ret) { + pr_err("%s: unable to create resourse %d in IPA RM\n", + __func__, IPA_RM_RESOURCE_WWAN_0_PROD); + goto create_rsrc_err; + } + ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_INACTIVITY_TIMER); + if (ret) { + pr_err("%s: ipa rm timer init failed %d on resourse %d\n", + __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD); + goto timer_init_err; + } + /* add dependency */ + ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, + &profile); + if (ret) + goto set_perf_err; + /* IPA_RM configuration ends */ + + /* Enable SG support in netdevice. */ + if (ipa_rmnet_res.ipa_advertise_sg_support) + dev->hw_features |= NETIF_F_SG; + + /* Enable NAPI support in netdevice. */ + if (ipa_rmnet_res.ipa_napi_enable) { + netif_napi_add(dev, &(wwan_ptr->napi), + ipa_rmnet_poll, NAPI_WEIGHT); + } + + ret = register_netdev(dev); + if (ret) { + IPAWANERR("unable to register ipa_netdev %d rc=%d\n", + 0, ret); + goto set_perf_err; + } + + IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", + ipa_netdevs[0]->name); + if (ret) { + IPAWANERR("default configuration failed rc=%d\n", + ret); + goto config_err; + } + atomic_set(&is_initialized, 1); + if (!atomic_read(&is_ssr)) { + /* offline charging mode */ + ipa2_proxy_clk_unvote(); + } + atomic_set(&is_ssr, 0); + + pr_info("rmnet_ipa completed initialization\n"); + return 0; +config_err: + if (ipa_rmnet_res.ipa_napi_enable) + netif_napi_del(&(wwan_ptr->napi)); + unregister_netdev(ipa_netdevs[0]); +set_perf_err: + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, + ret); +add_dpnd_err: + ret = ipa_rm_inactivity_timer_destroy( + IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */ + if (ret) + IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +timer_init_err: + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +create_rsrc_err: + + if (!atomic_read(&is_ssr)) + q6_deinitialize_rm(); + +q6_init_err: + free_netdev(ipa_netdevs[0]); + ipa_netdevs[0] = NULL; +alloc_netdev_err: + wan_ioctl_deinit(); +wan_ioctl_init_err: + ipa_del_dflt_wan_rt_tables(); +setup_dflt_wan_rt_tables_err: + ipa_del_a7_qmap_hdr(); +setup_a7_qmap_hdr_err: + ipa_qmi_service_exit(); + atomic_set(&is_ssr, 0); + return ret; +} + +static int ipa_wwan_remove(struct platform_device *pdev) +{ + int ret; + struct wwan_private *wwan_ptr; + + wwan_ptr = netdev_priv(ipa_netdevs[0]); + + pr_info("rmnet_ipa started deinitialization\n"); + mutex_lock(&ipa_to_apps_pipe_handle_guard); + ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl); + if (ret < 0) + IPAWANERR("Failed to teardown IPA->APPS pipe\n"); + else + ipa_to_apps_hdl = -1; + if 
(ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
+ mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ unregister_netdev(ipa_netdevs[0]);
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (ret < 0)
+ IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+ ret);
+ ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret < 0)
+ IPAWANERR(
+ "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+ ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ if (ret < 0)
+ IPAWANERR("Error deleting resource %d, ret=%d\n",
+ IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+ cancel_work_sync(&ipa_tx_wakequeue_work);
+ cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+ free_netdev(ipa_netdevs[0]);
+ ipa_netdevs[0] = NULL;
+ /* No need to remove wwan_ioctl during SSR */
+ if (!atomic_read(&is_ssr))
+ wan_ioctl_deinit();
+ ipa_del_dflt_wan_rt_tables();
+ ipa_del_a7_qmap_hdr();
+ ipa_del_mux_qmap_hdrs();
+ if (ipa_qmi_ctx && !ipa_qmi_ctx->modem_cfg_emb_pipe_flt)
+ wwan_del_ul_flt_rule_to_ipa();
+ ipa_cleanup_deregister_intf();
+ atomic_set(&is_initialized, 0);
+ pr_info("rmnet_ipa completed deinitialization\n");
+ return 0;
+}
+
+/**
+ * rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, usually by pressing a suspend button.
+ *
+ * Returns -EAGAIN to the runtime_pm framework in case there are pending
+ * packets in the Tx queue. This postpones the suspend operation until all
+ * pending packets have been transmitted.
+ *
+ * In case there are no packets to send, releases the WWAN0_PROD entity.
+ * As an outcome, the number of IPA active clients should be decremented
+ * until IPA clocks can be gated.
+ */
+static int rmnet_ipa_ap_suspend(struct device *dev)
+{
+ struct net_device *netdev = ipa_netdevs[0];
+ struct wwan_private *wwan_ptr = netdev_priv(netdev);
+
+ IPAWANDBG_LOW("Enter...\n");
+ /* Do not allow A7 to suspend in case there are outstanding packets */
+ if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
+ IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
+ return -EAGAIN;
+ }
+
+ /* Make sure that there is no Tx operation ongoing */
+ netif_tx_lock_bh(netdev);
+ netif_stop_queue(netdev);
+
+ /* Stop the watchdog timer while the pipe is in suspend state */
+ if (del_timer(&netdev->watchdog_timer))
+ dev_put(netdev);
+
+ ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+ netif_tx_unlock_bh(netdev);
+ IPAWANDBG_LOW("Exit\n");
+
+ return 0;
+}
+
+/**
+ * rmnet_ipa_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Enables the network interface queue and returns success to the
+ * runtime_pm framework.
+ */ +static int rmnet_ipa_ap_resume(struct device *dev) +{ + struct net_device *netdev = ipa_netdevs[0]; + + IPAWANDBG_LOW("Enter...\n"); + if (netdev) { + netif_wake_queue(netdev); + /* Starting Watch dog timer, pipe was changes to resume state */ + if (netif_running(netdev) && netdev->watchdog_timeo <= 0) + __netdev_watchdog_up(netdev); + } + IPAWANDBG_LOW("Exit\n"); + + return 0; +} + +static void ipa_stop_polling_stats(void) +{ + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + ipa_rmnet_ctx.polling_interval = 0; +} + +static const struct of_device_id rmnet_ipa_dt_match[] = { + {.compatible = "qcom,rmnet-ipa"}, + {}, +}; +MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match); + +static const struct dev_pm_ops rmnet_ipa_pm_ops = { + .suspend_noirq = rmnet_ipa_ap_suspend, + .resume_noirq = rmnet_ipa_ap_resume, +}; + +static struct platform_driver rmnet_ipa_driver = { + .driver = { + .name = "rmnet_ipa", + .pm = &rmnet_ipa_pm_ops, + .of_match_table = rmnet_ipa_dt_match, + }, + .probe = ipa_wwan_probe, + .remove = ipa_wwan_remove, +}; + +/** + * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification + * + * This function sends the SSR notification before modem shutdown and + * after_powerup from SSR framework, to user-space module + */ +static void rmnet_ipa_send_ssr_notification(bool ssr_done) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + if (ssr_done) + msg_meta.msg_type = IPA_SSR_AFTER_POWERUP; + else + msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +static int ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data) +{ + if (ipa_rmnet_ctx.ipa_rmnet_ssr) { + if (code == SUBSYS_BEFORE_SHUTDOWN) { + pr_info("IPA received MPSS BEFORE_SHUTDOWN\n"); + /* send SSR before-shutdown notification to IPACM */ + rmnet_ipa_send_ssr_notification(false); + atomic_set(&is_ssr, 1); + ipa_q6_pre_shutdown_cleanup(); + if (ipa_netdevs[0]) + netif_stop_queue(ipa_netdevs[0]); + ipa_qmi_stop_workqueues(); + wan_ioctl_stop_qmi_messages(); + ipa_stop_polling_stats(); + if (atomic_read(&is_initialized)) + platform_driver_unregister(&rmnet_ipa_driver); + pr_info("IPA BEFORE_SHUTDOWN handling is complete\n"); + return NOTIFY_DONE; + } + if (code == SUBSYS_AFTER_SHUTDOWN) { + pr_info("IPA received MPSS AFTER_SHUTDOWN\n"); + if (atomic_read(&is_ssr)) + ipa_q6_post_shutdown_cleanup(); + pr_info("IPA AFTER_SHUTDOWN handling is complete\n"); + return NOTIFY_DONE; + } + if (code == SUBSYS_AFTER_POWERUP) { + pr_info("IPA received MPSS AFTER_POWERUP\n"); + if (!atomic_read(&is_initialized) + && atomic_read(&is_ssr)) + platform_driver_register(&rmnet_ipa_driver); + pr_info("IPA AFTER_POWERUP handling is complete\n"); + return NOTIFY_DONE; + } + if (code == SUBSYS_BEFORE_POWERUP) { + pr_info("IPA received MPSS BEFORE_POWERUP\n"); + if (atomic_read(&is_ssr)) + /* clean up cached QMI msg/handlers */ + ipa_qmi_service_exit(); + ipa2_proxy_clk_vote(); + pr_info("IPA BEFORE_POWERUP handling is complete\n"); + return NOTIFY_DONE; + } + } + IPAWANDBG_LOW("Exit\n"); + return NOTIFY_DONE; +} + +/** + * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg + * @buff: pointer to buffer containing the message + * @len: message len + * @type: message type + * + * This function is invoked when ipa2_send_msg is complete (Provided as a + * free function pointer along with the message). 
+ */ +static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAWANERR("Null buffer\n"); + return; + } + + if (type != IPA_TETHERING_STATS_UPDATE_STATS && + type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) { + IPAWANERR("Wrong type given. buff %p type %d\n", + buff, type); + } + kfree(buff); +} + +/** + * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem + * + * This function queries the IPA Modem driver for the pipe stats + * via QMI, and updates the user space IPA entity. + */ +static void rmnet_ipa_get_stats_and_update(bool reset) +{ + struct ipa_get_data_stats_req_msg_v01 req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for stats message\n"); + return; + } + + memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + if (reset) { + req.reset_stats_valid = true; + req.reset_stats = true; + IPAWANERR("Get the latest pipe-stats and reset it\n"); + } + + rc = ipa_qmi_get_data_stats(&req, resp); + if (rc) { + IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01); + rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa2_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * tethering_stats_poll_queue() - Stats polling function + * @work - Work entry + * + * This function is scheduled periodically (per the interval) in + * order to poll the IPA Modem driver for the pipe stats. + */ +static void tethering_stats_poll_queue(struct work_struct *work) +{ + rmnet_ipa_get_stats_and_update(false); + + /* Schedule again only if there's an active polling interval */ + if (ipa_rmnet_ctx.polling_interval != 0) + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, + msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000)); +} + +/** + * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem + * + * This function retrieves the data usage (used quota) from the IPA Modem driver + * via QMI, and updates IPA user space entity. 
+ */ +static void rmnet_ipa_get_network_stats_and_update(void) +{ + struct ipa_get_apn_data_stats_req_msg_v01 req; + struct ipa_get_apn_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for network stats message\n"); + return; + } + + memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01)); + + req.mux_id_list_valid = true; + req.mux_id_list_len = 1; + req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id; + + rc = ipa_qmi_get_network_stats(&req, resp); + if (rc) { + IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); + rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa2_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from + * IPA Modem + * This function sends the quota_reach indication from the IPA Modem driver + * via QMI, to user-space module + */ +static void rmnet_ipa_send_quota_reach_ind(void) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_QUOTA_REACH; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +/** + * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_POLL_TETHERING_STATS. + * In case polling interval received is 0, polling will stop + * (If there's a polling in progress, it will allow it to finish), and then will + * fetch network stats, and update the IPA user space. + * + * Return codes: + * 0: Success + */ +int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data) +{ + ipa_rmnet_ctx.polling_interval = data->polling_interval_secs; + + cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work); + + if (ipa_rmnet_ctx.polling_interval == 0) { + ipa_qmi_stop_data_qouta(); + rmnet_ipa_get_network_stats_and_update(); + rmnet_ipa_get_stats_and_update(true); + return 0; + } + + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0); + return 0; +} + +/** + * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
+{
+ u32 mux_id;
+ int index;
+ struct ipa_set_data_usage_quota_req_msg_v01 req;
+
+ /* stop quota */
+ if (!data->set_quota)
+ ipa_qmi_stop_data_qouta();
+
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
+ index = find_vchannel_name_index(data->interface_name);
+ IPAWANERR("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long) data->quota_mbytes);
+
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("%s is an invalid iface name\n",
+ data->interface_name);
+ return -ENODEV;
+ }
+
+ mux_id = mux_channel[index].mux_id;
+
+ ipa_rmnet_ctx.metered_mux_id = mux_id;
+
+ memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
+ req.apn_quota_list_valid = true;
+ req.apn_quota_list_len = 1;
+ req.apn_quota_list[0].mux_id = mux_id;
+ req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
+
+ return ipa_qmi_set_data_quota(&req);
+}
+
+static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
+{
+ struct ipa_set_wifi_quota wifi_quota;
+ int rc = 0;
+
+ memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
+ wifi_quota.set_quota = data->set_quota;
+ wifi_quota.quota_bytes = data->quota_mbytes;
+ IPAWANDBG("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long) data->quota_mbytes);
+
+ rc = ipa2_set_wlan_quota(&wifi_quota);
+ /* check whether wlan-fw accepted this quota setting */
+ if (!wifi_quota.set_valid)
+ rc = -EFAULT;
+ return rc;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->interface_name);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR("upstream iface %s not supported\n",
+ data->interface_name);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ rc = rmnet_ipa_set_data_quota_wifi(data);
+ if (rc) {
+ IPAWANERR("set quota on wifi failed\n");
+ return rc;
+ }
+ } else {
+ rc = rmnet_ipa_set_data_quota_modem(data);
+ if (rc) {
+ IPAWANERR("set quota on modem failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+ /* rmnet_ipa_set_tether_client_pipe() - Tether client pipe setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
+ * It sets or resets the tethering client mapping for the given lists
+ * of UL source and DL destination pipes.
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid src/dst pipes provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa_set_tether_client_pipe( + struct wan_ioctl_set_tether_client_pipe *data) +{ + int number, i; + + /* error checking if ul_src_pipe_len valid or not*/ + if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->ul_src_pipe_len < 0) { + IPAWANERR("UL src pipes %d exceeding max %d\n", + data->ul_src_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + /* error checking if dl_dst_pipe_len valid or not*/ + if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->dl_dst_pipe_len < 0) { + IPAWANERR("DL dst pipes %d exceeding max %d\n", + data->dl_dst_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + + IPAWANDBG("client %d, UL %d, DL %d, reset %d\n", + data->ipa_client, + data->ul_src_pipe_len, + data->dl_dst_pipe_len, + data->reset_client); + number = data->ul_src_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("UL index-%d pipe %d\n", i, + data->ul_src_pipe_list[i]); + if (data->reset_client) + ipa_set_client(data->ul_src_pipe_list[i], + 0, false); + else + ipa_set_client(data->ul_src_pipe_list[i], + data->ipa_client, true); + } + number = data->dl_dst_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("DL index-%d pipe %d\n", i, + data->dl_dst_pipe_list[i]); + if (data->reset_client) + ipa_set_client(data->dl_dst_pipe_list[i], + 0, false); + else + ipa_set_client(data->dl_dst_pipe_list[i], + data->ipa_client, false); + } + return 0; +} + +static int rmnet_ipa_query_tethering_stats_wifi( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + struct ipa_get_wdi_sap_stats *sap_stats; + int rc; + + sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats), + GFP_KERNEL); + if (!sap_stats) + return -ENOMEM; + + sap_stats->reset_stats = reset; + IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats); + + rc = ipa2_get_wlan_stats(sap_stats); + if (rc) { + kfree(sap_stats); + return rc; + } else if (data == NULL) { + IPAWANDBG("only reset wlan stats\n"); + kfree(sap_stats); + return 0; + } + + if (sap_stats->stats_valid) { + data->ipv4_tx_packets = sap_stats->ipv4_tx_packets; + data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes; + data->ipv4_rx_packets = sap_stats->ipv4_rx_packets; + data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes; + data->ipv6_tx_packets = sap_stats->ipv6_tx_packets; + data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes; + data->ipv6_rx_packets = sap_stats->ipv6_rx_packets; + data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes; + } + + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + + kfree(sap_stats); + return rc; +} + +static int rmnet_ipa_query_tethering_stats_modem( + struct wan_ioctl_query_tether_stats *data, + bool reset +) +{ + struct ipa_get_data_stats_req_msg_v01 *req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + int pipe_len, rc; + + req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAWANERR("failed to allocate memory for stats message\n"); + return -ENOMEM; + } + resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + 
IPAWANERR("failed to allocate memory for stats message\n"); + kfree(req); + return -ENOMEM; + } + memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + if (reset) { + req->reset_stats_valid = true; + req->reset_stats = true; + IPAWANDBG("reset the pipe stats\n"); + } else { + /* print tethered-client enum */ + IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client); + } + + rc = ipa_qmi_get_data_stats(req, resp); + if (rc) { + IPAWANERR("can't get ipa_qmi_get_data_stats\n"); + kfree(req); + kfree(resp); + return rc; + } else if (data == NULL) { + IPAWANDBG("only reset modem stats\n"); + kfree(req); + kfree(resp); + return 0; + } + + if (resp->dl_dst_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len; + pipe_len++) { + IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n", + pipe_len, + resp->dl_dst_pipe_stats_list[pipe_len].pipe_index); + IPAWANDBG_LOW + ("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n", + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_packets, + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_packets, + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_bytes, + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_bytes); + if (ipa_get_client_uplink( + resp->dl_dst_pipe_stats_list[pipe_len].pipe_index) + == false) { + if (data->ipa_client == ipa_get_client( + resp->dl_dst_pipe_stats_list[pipe_len].pipe_index)) { + /* update the DL stats */ + data->ipv4_rx_packets += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_packets; + data->ipv6_rx_packets += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_packets; + data->ipv4_rx_bytes += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_bytes; + data->ipv6_rx_bytes += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_bytes; + } + } + } + } + IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + + if (resp->ul_src_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len; + pipe_len++) { + IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n", + pipe_len, + resp->ul_src_pipe_stats_list[pipe_len].pipe_index); + IPAWANDBG_LOW + ("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n", + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_packets, + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_packets, + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_bytes, + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_bytes); + if (ipa_get_client_uplink( + resp->ul_src_pipe_stats_list[pipe_len].pipe_index) + == true) { + if (data->ipa_client == ipa_get_client( + resp->ul_src_pipe_stats_list[pipe_len].pipe_index)) { + /* update the DL stats */ + data->ipv4_tx_packets += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_packets; + data->ipv6_tx_packets += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_packets; + data->ipv4_tx_bytes += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_bytes; + data->ipv6_tx_bytes += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_bytes; + } + } + } + } + IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) 
data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + kfree(req); + kfree(resp); + return 0; +} + +int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstreamIface %s not supported\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + data, false); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_modem( + data, false); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } + return rc; +} + +int rmnet_ipa_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data) +{ + struct wan_ioctl_query_tether_stats tether_stats; + enum ipa_upstream_type upstream_type; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR_RL( + "wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + tether_stats.ipa_client = data->ipa_client; + rc = rmnet_ipa_query_tethering_stats_modem( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } + return rc; +} + +int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstream iface %s not supported\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG(" reset wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + NULL, true); + if (rc) { + IPAWANERR("reset WLAN stats failed\n"); + return rc; + } + } else { + IPAWANDBG(" reset modem-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_modem( + NULL, true); + if (rc) { + IPAWANERR("reset MODEM stats failed\n"); + return rc; + } + } + return rc; +} + + +/** + * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota + * @mux_id - The MUX ID on which the quota has been reached + * + * This function broadcasts a 
Netlink event using the kobject of the + * rmnet_ipa interface in order to alert the user space that the quota + * on the specific interface which matches the mux_id has been reached. + * + */ +void ipa_broadcast_quota_reach_ind(u32 mux_id, + enum ipa_upstream_type upstream_type) +{ + char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE]; + char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char *envp[IPA_UEVENT_NUM_EVNP] = { + alert_msg, iface_name_l, iface_name_m, NULL }; + int res; + int index; + + /* check upstream_type*/ + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstreamIface type %d not supported\n", + upstream_type); + return; + } else if (upstream_type == IPA_UPSTEAM_MODEM) { + index = find_mux_channel_index(mux_id); + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%u is an mux ID\n", mux_id); + return; + } + } + + res = scnprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE, + "ALERT_NAME=%s", "quotaReachedAlert"); + if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + /* posting msg for L-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = scnprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", mux_channel[index].vchannel_name); + } else { + res = scnprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + /* posting msg for M-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = scnprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", mux_channel[index].vchannel_name); + } else { + res = scnprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n", + alert_msg, iface_name_l, iface_name_m); + kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp); + + rmnet_ipa_send_quota_reach_ind(); +} + +/** + * ipa_q6_handshake_complete() - Perform operations once Q6 is up + * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR. + * + * This function is invoked once the handshake between the IPA AP driver + * and IPA Q6 driver is complete. At this point, it is possible to perform + * operations which can't be performed until IPA Q6 driver is up. + * + */ +void ipa_q6_handshake_complete(bool ssr_bootup) +{ + /* It is required to recover the network stats after SSR recovery */ + if (ssr_bootup) { + /* + * In case the uC is required to be loaded by the Modem, + * the proxy vote will be removed only when uC loading is + * complete and indication is received by the AP. After SSR, + * uC is already loaded. Therefore, proxy vote can be removed + * once Modem init is complete. + */ + ipa2_proxy_clk_unvote(); + + /* send SSR power-up notification to IPACM */ + rmnet_ipa_send_ssr_notification(true); + + /* + * It is required to recover the network stats after + * SSR recovery + */ + rmnet_ipa_get_network_stats_and_update(); + + /* Enable holb monitoring on Q6 pipes. 
*/ + ipa_q6_monitor_holb_mitigation(true); + } +} + +static int __init ipa_wwan_init(void) +{ + atomic_set(&is_initialized, 0); + atomic_set(&is_ssr, 0); + + mutex_init(&ipa_to_apps_pipe_handle_guard); + mutex_init(&add_mux_channel_lock); + ipa_to_apps_hdl = -1; + + ipa_qmi_init(); + + /* Register for Modem SSR */ + subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM, + &ssr_notifier); + if (!IS_ERR(subsys_notify_handle)) + return platform_driver_register(&rmnet_ipa_driver); + else + return (int)PTR_ERR(subsys_notify_handle); +} + +static void __exit ipa_wwan_cleanup(void) +{ + int ret; + + ipa_qmi_cleanup(); + mutex_destroy(&ipa_to_apps_pipe_handle_guard); + mutex_destroy(&add_mux_channel_lock); + ret = subsys_notif_unregister_notifier(subsys_notify_handle, + &ssr_notifier); + if (ret) + IPAWANERR( + "Error subsys_notif_unregister_notifier system %s, ret=%d\n", + SUBSYS_MODEM, ret); + platform_driver_unregister(&rmnet_ipa_driver); +} + +static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) + IPAWANERR("Null buffer.\n"); + kfree(buff); +} + +static void ipa_rmnet_rx_cb(void *priv) +{ + struct net_device *dev = priv; + struct wwan_private *wwan_ptr; + + IPAWANDBG("\n"); + + if (dev != ipa_netdevs[0]) { + IPAWANERR("Not matching with netdev\n"); + return; + } + + wwan_ptr = netdev_priv(dev); + napi_schedule(&(wwan_ptr->napi)); +} + +static int ipa_rmnet_poll(struct napi_struct *napi, int budget) +{ + int rcvd_pkts = 0; + + rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT); + IPAWANDBG("rcvd packets: %d\n", rcvd_pkts); + return rcvd_pkts; +} + +late_initcall(ipa_wwan_init); +module_exit(ipa_wwan_cleanup); +MODULE_DESCRIPTION("WWAN Network Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c new file mode 100644 index 0000000000000000000000000000000000000000..91064e5ea9c4312f3e2dc05ef7c75b6c2f525e2a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2015, 2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" + +#define DRIVER_NAME "wwan_ioctl" + +#ifdef CONFIG_COMPAT +#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_INDEX, \ + compat_uptr_t) +#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_POLL_TETHERING_STATS, \ + compat_uptr_t) +#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_DATA_QUOTA, \ + compat_uptr_t) +#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_RESET_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_DL_FILTER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + compat_uptr_t) + +#endif + +static unsigned int dev_num = 1; +static struct cdev wan_ioctl_cdev; +static unsigned int process_ioctl = 1; +static struct class *class; +static dev_t device; +#ifdef CONFIG_COMPAT +long compat_wan_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +#endif + +static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int retval = 0, rc = 0; + u32 pyld_sz; + u8 *param = NULL; + + IPAWANDBG("device %s got ioctl events :>>>\n", + DRIVER_NAME); + + if (!process_ioctl) { + IPAWANDBG("modem is in SSR, ignoring ioctl\n"); + return -EAGAIN; + } + + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (qmi_filter_request_send( + (struct ipa_install_fltr_rule_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 add filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_FLT_RULE_INDEX: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (qmi_filter_notify_send( + (struct ipa_fltr_installed_notif_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 rule index fail\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_VOTE_FOR_BW_MBPS: + IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(uint32_t); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (vote_for_bus_bw((uint32_t *)param)) { + IPAWANERR("Failed to vote for bus BW\n"); + retval = -EFAULT; + 
break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_POLL_TETHERING_STATS: + IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa_poll_tethering_stats( + (struct wan_ioctl_poll_tethering_stats *)param)) { + IPAWANERR_RL("WAN_IOCTL_POLL_TETHERING_STATS failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_DATA_QUOTA: + IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_data_quota); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + rc = rmnet_ipa_set_data_quota( + (struct wan_ioctl_set_data_quota *)param); + if (rc != 0) { + IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n"); + if (rc == -ENODEV) + retval = -ENODEV; + else + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_TETHER_CLIENT_PIPE: + IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa_set_tether_client_pipe( + (struct wan_ioctl_set_tether_client_pipe *)param)) { + IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_query_tethering_stats( + (struct wan_ioctl_query_tether_stats *)param, false)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS_ALL: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_query_tethering_stats_all( + (struct wan_ioctl_query_tether_stats_all *)param)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_RESET_TETHER_STATS: + IPAWANDBG_LOW("got WAN_IOC_RESET_TETHER_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + 
break; + } + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_reset_tethering_stats( + (struct wan_ioctl_reset_tether_stats *)param)) { + IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + break; + + default: + retval = -ENOTTY; + } + kfree(param); + return retval; +} + +#ifdef CONFIG_COMPAT +long compat_wan_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE32: + cmd = WAN_IOC_ADD_FLT_RULE; + break; + case WAN_IOC_ADD_FLT_RULE_INDEX32: + cmd = WAN_IOC_ADD_FLT_RULE_INDEX; + break; + case WAN_IOC_POLL_TETHERING_STATS32: + cmd = WAN_IOC_POLL_TETHERING_STATS; + break; + case WAN_IOC_SET_DATA_QUOTA32: + cmd = WAN_IOC_SET_DATA_QUOTA; + break; + case WAN_IOC_SET_TETHER_CLIENT_PIPE32: + cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE; + break; + case WAN_IOC_QUERY_TETHER_STATS32: + cmd = WAN_IOC_QUERY_TETHER_STATS; + break; + case WAN_IOC_RESET_TETHER_STATS32: + cmd = WAN_IOC_RESET_TETHER_STATS; + break; + case WAN_IOC_QUERY_DL_FILTER_STATS32: + cmd = WAN_IOC_QUERY_DL_FILTER_STATS; + break; + default: + return -ENOIOCTLCMD; + } + return wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static int wan_ioctl_open(struct inode *inode, struct file *filp) +{ + IPAWANDBG("\n IPA A7 wan_ioctl open OK :>>>> "); + return 0; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = wan_ioctl_open, + .read = NULL, + .unlocked_ioctl = wan_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_wan_ioctl, +#endif +}; + +int wan_ioctl_init(void) +{ + unsigned int wan_ioctl_major = 0; + int ret; + struct device *dev; + + device = MKDEV(wan_ioctl_major, 0); + + ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME); + if (ret) { + IPAWANERR(":device_alloc err.\n"); + goto dev_alloc_err; + } + wan_ioctl_major = MAJOR(device); + + class = class_create(THIS_MODULE, DRIVER_NAME); + if (IS_ERR(class)) { + IPAWANERR(":class_create err.\n"); + goto class_err; + } + + dev = device_create(class, NULL, device, + NULL, DRIVER_NAME); + if (IS_ERR(dev)) { + IPAWANERR(":device_create err.\n"); + goto device_err; + } + + cdev_init(&wan_ioctl_cdev, &fops); + ret = cdev_add(&wan_ioctl_cdev, device, dev_num); + if (ret) { + IPAWANERR(":cdev_add err.\n"); + goto cdev_add_err; + } + + process_ioctl = 1; + + IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n", + DRIVER_NAME, wan_ioctl_major); + return 0; + +cdev_add_err: + device_destroy(class, device); +device_err: + class_destroy(class); +class_err: + unregister_chrdev_region(device, dev_num); +dev_alloc_err: + return -ENODEV; +} + +void wan_ioctl_stop_qmi_messages(void) +{ + process_ioctl = 0; +} + +void wan_ioctl_enable_qmi_messages(void) +{ + process_ioctl = 1; +} + +void wan_ioctl_deinit(void) +{ + cdev_del(&wan_ioctl_cdev); + device_destroy(class, device); + class_destroy(class); + unregister_chrdev_region(device, dev_num); +} diff --git a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c new file mode 100644 index 0000000000000000000000000000000000000000..8bd1acb4215c7421db3613e2fd27715c9b9054c1 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2016, 2020, The Linux Foundation. All rights reserved. 
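+ *
+ * Tethering bridge driver: registers a stub character device and, on init,
+ * sets up the IPA resource manager dependencies between the USB and Q6
+ * resources used for tethered Rmnet / MBIM calls.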
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge" + +#define TETH_DBG(fmt, args...) \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args) +#define TETH_DBG_FUNC_ENTRY() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__) +#define TETH_DBG_FUNC_EXIT() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__) +#define TETH_ERR(fmt, args...) \ + pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +/** + * struct teth_bridge_ctx - Tethering bridge driver context information + * @class: kernel class pointer + * @dev_num: kernel device number + * @dev: kernel device struct pointer + * @cdev: kernel character device struct + */ +struct teth_bridge_ctx { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; +}; +static struct teth_bridge_ctx *teth_ctx; + +/** + * teth_bridge_ipa_cb() - Callback to handle IPA data path events + * @priv - private data + * @evt - event type + * @data - event specific data (usually skb) + * + * This callback is called by IPA driver for exception packets from USB. + * All exception packets are handled by Q6 and should not reach this function. + * Packets will arrive to AP exception pipe only in case where packets are + * sent from USB before Q6 has setup the call. + */ +static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + + TETH_DBG_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + TETH_ERR("unexpected event %d\n", evt); + WARN_ON(1); + return; + } + + TETH_ERR("Unexpected exception packet from USB, dropping packet\n"); + dev_kfree_skb_any(skb); + TETH_DBG_FUNC_EXIT(); +} + +/** + * ipa2_teth_bridge_init() - Initialize the Tethering bridge driver + * @params - in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. USB driver installs this callback function in the call to + * ipa_connect(). + * + * Builds IPA resource manager dependency graph. 
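+ * (USB_PROD -> Q6_CONS and Q6_PROD -> USB_CONS, as added below; both are
+ * removed again in ipa2_teth_bridge_disconnect().)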
+ * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int ipa2_teth_bridge_init(struct teth_bridge_init_params *params) +{ + int res = 0; + + TETH_DBG_FUNC_ENTRY(); + + if (!params) { + TETH_ERR("Bad parameter\n"); + TETH_DBG_FUNC_EXIT(); + return -EINVAL; + } + + params->usb_notify_cb = teth_bridge_ipa_cb; + params->private_data = NULL; + params->skip_ep_cfg = true; + + /* Build dependency graph */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res < 0 && res != -EINPROGRESS) { + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + if (res < 0 && res != -EINPROGRESS) { + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + + res = 0; + goto bail; + +bail: + TETH_DBG_FUNC_EXIT(); + return res; +} + +/** + * ipa2_teth_bridge_disconnect() - Disconnect tethering bridge module + */ +int ipa2_teth_bridge_disconnect(enum ipa_client_type client) +{ + TETH_DBG_FUNC_ENTRY(); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + TETH_DBG_FUNC_EXIT(); + + return 0; +} + +/** + * ipa2_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params) +{ + return 0; +} + +static long teth_bridge_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + IPAERR("No ioctls are supported !\n"); + return -ENOIOCTLCMD; +} + +static const struct file_operations teth_bridge_drv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = teth_bridge_ioctl, +}; + +/** + * teth_bridge_driver_init() - Initialize tethering bridge driver + * + */ +int teth_bridge_driver_init(void) +{ + int res; + + TETH_DBG("Tethering bridge driver init\n"); + teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL); + if (!teth_ctx) { + TETH_ERR("kzalloc err.\n"); + return -ENOMEM; + } + + teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME); + + res = alloc_chrdev_region(&teth_ctx->dev_num, 0, 1, + TETH_BRIDGE_DRV_NAME); + if (res) { + TETH_ERR("alloc_chrdev_region err.\n"); + res = -ENODEV; + goto fail_alloc_chrdev_region; + } + + teth_ctx->dev = device_create(teth_ctx->class, NULL, teth_ctx->dev_num, + teth_ctx, TETH_BRIDGE_DRV_NAME); + if (IS_ERR(teth_ctx->dev)) { + TETH_ERR(":device_create err.\n"); + res = -ENODEV; + goto fail_device_create; + } + + cdev_init(&teth_ctx->cdev, &teth_bridge_drv_fops); + teth_ctx->cdev.owner = THIS_MODULE; + teth_ctx->cdev.ops = &teth_bridge_drv_fops; + + res = cdev_add(&teth_ctx->cdev, teth_ctx->dev_num, 1); + if (res) { + TETH_ERR(":cdev_add err=%d\n", -res); + res = -ENODEV; + goto fail_cdev_add; + } + TETH_DBG("Tethering bridge driver init OK\n"); + + return 0; +fail_cdev_add: + device_destroy(teth_ctx->class, teth_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(teth_ctx->dev_num, 1); +fail_alloc_chrdev_region: + kfree(teth_ctx); + teth_ctx = NULL; + + return res; +} +EXPORT_SYMBOL(teth_bridge_driver_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Tethering bridge driver"); diff --git 
a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 9e87612219c543e89ea3383fa0e281ef83884fda..77654e7f0b48b59ecde4def92464e2b7ede43f4b 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -3059,16 +3059,16 @@ static void ipa3_q6_avoid_holb(void) * setting HOLB on Q6 pipes, and from APPS perspective * they are not valid, therefore, the above function * will fail. + * Also don't reset the HOLB timer to 0 for Q6 pipes. */ - ipahal_write_reg_n_fields( - IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, - ep_idx, &ep_holb); ipahal_write_reg_n_fields( IPA_ENDP_INIT_HOL_BLOCK_EN_n, ep_idx, &ep_holb); - /* IPA4.5 issue requires HOLB_EN to be written twice */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + /* For targets > IPA_4.0 issue requires HOLB_EN to be + * written twice. + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) ipahal_write_reg_n_fields( IPA_ENDP_INIT_HOL_BLOCK_EN_n, ep_idx, &ep_holb); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 5aa0dc4136685881dc96155db5fdbe8f3cde1034..4d771cb56b6a55e4d5717ad366c12eb3f8fd2dbc 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1341,8 +1341,10 @@ int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset, IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe_idx, &ep_holb); - /* IPA4.5 issue requires HOLB_EN to be written twice */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + /* For targets > IPA_4.0 issue requires HOLB_EN to be + * written twice. + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) ipahal_write_reg_n_fields( IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe_idx, &ep_holb); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h index 45fe93157665515eb31c23f6e0e7faded3ebc9e6..bcfdfd722e73c5e9cfc124ca6541144438e28926 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h @@ -179,12 +179,12 @@ static inline int ipa_pm_exceptions_stat(char *buf, int size) return -EPERM; } -static inline int ipa_pm_add_dummy_clients(s8 power_plan); +static inline int ipa_pm_add_dummy_clients(s8 power_plan) { return -EPERM; } -static inline int ipa_pm_remove_dummy_clients(void); +static inline int ipa_pm_remove_dummy_clients(void) { return -EPERM; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index c1e3735827fc40ab7ec4c70ad727938e92a87d5b..8ee40de2c36a539f04b7d8864411caa9c5a66dff 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -1580,6 +1580,7 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) IPAWANERR( "ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n"); /* Cleanup when ipa3_wwan_remove is called */ + qmi_handle_release(ipa_q6_clnt); vfree(ipa_q6_clnt); ipa_q6_clnt = NULL; return; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index 37406c8ebf306118d3703acd2fc558813500f98d..4abef20c9629e1c0ef08122cc7170488fe570f20 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -371,7 +371,7 @@ static inline int ipa3_qmi_add_offload_request_send( } static inline int ipa3_qmi_rmv_offload_request_send( - struct ipa_rmv_offload_connection_req_msg_v01 *req) + struct ipa_remove_offload_connection_req_msg_v01 *req) { return -EPERM; } @@ 
-484,11 +484,13 @@ static inline int ipa3_qmi_send_mhi_ready_indication( return -EPERM; } +#ifdef CONFIG_RMNET_IPA3 static int ipa3_qmi_send_rsc_pipe_indication( struct ipa_endp_desc_indication_msg_v01 *req) { return -EPERM; } +#endif /* CONFIG_RMNET_IPA3 */ static inline int ipa3_qmi_send_mhi_cleanup_request( struct ipa_mhi_cleanup_req_msg_v01 *req) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 1afda31add22e303fa2faceb06e8e766427a9b3f..e40f17098115f2021cb1139a12682bd53e06b886 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -5294,8 +5294,8 @@ int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) ipa3_ctx->ep[clnt_hdl].holb.en = IPA_HOLB_TMR_EN; ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, ep_holb); - /* IPA4.5 issue requires HOLB_EN to be written twice */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + /* For targets > IPA_4.0 issue requires HOLB_EN to be written twice */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, ep_holb); diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index b1fc78e17e8cc012f6c925ce4c2d112f4e9b3885..42e9714157a29e624bfb91a850d6bc14270080db 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -2569,8 +2569,9 @@ static int ipa3_wwan_remove(struct platform_device *pdev) if (ipa3_rmnet_res.ipa_napi_enable) netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); - IPAWANINFO("rmnet_ipa unregister_netdev\n"); + IPAWANDBG("rmnet_ipa unregister_netdev started\n"); unregister_netdev(IPA_NETDEV()); + IPAWANDBG("rmnet_ipa unregister_netdev completed\n"); ipa3_wwan_deregister_netdev_pm_client(); cancel_work_sync(&ipa3_tx_wakequeue_work); cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index d31e9bd4a781ecd42dca6efb1ca7873437f8b590..16e799ba93674840edd335c14e95963d2d9d5d3d 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -1576,7 +1576,8 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, geni_se_dev->bus_bw_noc))) return; if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list))) { - GENI_SE_DBG(ipc, false, NULL, "%s: Clocks not on\n", __func__); + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "%s: Clocks not on\n", + __func__); return; } m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0); @@ -1596,16 +1597,16 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, se_dma_tx_len = geni_read_reg(base, SE_DMA_TX_LEN); se_dma_tx_len_in = geni_read_reg(base, SE_DMA_TX_LEN_IN); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "%s: m_cmd0:0x%x, m_irq_status:0x%x, s_irq_status:0x%x, geni_status:0x%x, geni_ios:0x%x\n", __func__, m_cmd0, m_irq_status, s_irq_status, geni_status, geni_ios); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "dma_rx_irq:0x%x, dma_tx_irq:0x%x, rx_fifo_sts:0x%x, tx_fifo_sts:0x%x\n" , dma_rx_irq, dma_tx_irq, rx_fifo_status, tx_fifo_status); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "se_dma_dbg:0x%x, m_cmd_ctrl:0x%x, dma_rxlen:0x%x, dma_rxlen_in:0x%x\n", se_dma_dbg, m_cmd_ctrl, se_dma_rx_len, se_dma_rx_len_in); - GENI_SE_DBG(ipc, 
false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "dma_txlen:0x%x, dma_txlen_in:0x%x\n", se_dma_tx_len, se_dma_tx_len_in); } EXPORT_SYMBOL(geni_se_dump_dbg_regs); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 1e2524de6a63c6e806262b70f2a08ce5b7994728..a13bb4ddd0cf158bceaf19105684bb8b119adf29 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -235,6 +235,7 @@ config FUJITSU_LAPTOP depends on BACKLIGHT_CLASS_DEVICE depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP + select NEW_LEDS select LEDS_CLASS ---help--- This is a driver for laptops built by Fujitsu: diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index c514cb73bb5004d31163583f2cded50a657306fe..d7d69eadb9bba61239e323a21fa5377a4e1a3176 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -564,7 +564,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-hid: created platform device\n"); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index d122f33d43acbde48fbf2cdca38cc2e541ed7c69..1e6b4661c764539e677000ffde04c4e6b779aa82 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -15,9 +15,13 @@ #include #include +/* Returned when NOT in tablet mode on some HP Stream x360 11 models */ +#define VGBS_TABLET_MODE_FLAG_ALT 0x10 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */ -#define TABLET_MODE_FLAG 0x40 -#define DOCK_MODE_FLAG 0x80 +#define VGBS_TABLET_MODE_FLAG 0x40 +#define VGBS_DOCK_MODE_FLAG 0x80 + +#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT) MODULE_LICENSE("GPL"); MODULE_AUTHOR("AceLan Kao"); @@ -148,26 +152,60 @@ static void detect_tablet_mode(struct platform_device *device) if (ACPI_FAILURE(status)) return; - m = !(vgbs & TABLET_MODE_FLAG); + m = !(vgbs & VGBS_TABLET_MODE_FLAGS); input_report_switch(priv->input_dev, SW_TABLET_MODE, m); - m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0; + m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0; input_report_switch(priv->input_dev, SW_DOCK, m); } +/* + * There are several laptops (non 2-in-1) models out there which support VGBS, + * but simply always return 0, which we translate to SW_TABLET_MODE=1. This in + * turn causes userspace (libinput) to suppress events from the builtin + * keyboard and touchpad, making the laptop essentially unusable. + * + * Since the problem of wrongly reporting SW_TABLET_MODE=1 in combination + * with libinput, leads to a non-usable system. Where as OTOH many people will + * not even notice when SW_TABLET_MODE is not being reported, a DMI based allow + * list is used here. This list mainly matches on the chassis-type of 2-in-1s. + * + * There are also some 2-in-1s which use the intel-vbtn ACPI interface to report + * SW_TABLET_MODE with a chassis-type of 8 ("Portable") or 10 ("Notebook"), + * these are matched on a per model basis, since many normal laptops with a + * possible broken VGBS ACPI-method also use these chassis-types. 
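+ *
+ * Chassis-types 31 ("Convertible") and 32 ("Detachable") are matched
+ * generically below; the Dell Venue 11 Pro 7130 and HP Stream x360 11
+ * entries are the per-model exceptions described above.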
+ */ +static const struct dmi_system_id dmi_switches_allow_list[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */), + }, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"), + }, + }, + {} /* Array terminator */ +}; + static bool intel_vbtn_has_switches(acpi_handle handle) { - const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); unsigned long long vgbs; acpi_status status; - /* - * Some normal laptops have a VGBS method despite being non-convertible - * and their VGBS method always returns 0, causing detect_tablet_mode() - * to report SW_TABLET_MODE=1 to userspace, which causes issues. - * These laptops have a DMI chassis_type of 9 ("Laptop"), do not report - * switches on any devices with a DMI chassis_type of 9. - */ - if (chassis_type && strcmp(chassis_type, "9") == 0) + if (!dmi_check_system(dmi_switches_allow_list)) return false; status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); @@ -272,7 +310,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-vbtn: created platform device\n"); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 8f85bb4fe7844112dd4cc3a11377b1aa18defdc2..98bd8213b03783cad2b9d8129458db6ee7214cf8 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2597,7 +2597,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, */ static int hotkey_kthread(void *data) { - struct tp_nvram_state s[2]; + struct tp_nvram_state s[2] = { 0 }; u32 poll_mask, event_mask; unsigned int si, so; unsigned long t; @@ -6879,8 +6879,10 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) list_for_each_entry(child, &device->children, node) { acpi_status status = acpi_evaluate_object(child->handle, "_BCL", NULL, &buffer); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + buffer.length = ACPI_ALLOCATE_BUFFER; continue; + } obj = (union acpi_object *)buffer.pointer; if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 63c57dc82ac1d5845c4f256376cac5fdee26ef04..4eda5065b5bbc90dca0bb2cb2793dc0417b19980 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -436,7 +436,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) int ret; int data; int bat_remove; - int soc; + int soc = 0; /* measure enable on GPADC1 */ data = MEAS1_GP1; @@ -499,7 +499,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) } mutex_unlock(&info->lock); - calc_soc(info, OCV_MODE_ACTIVE, &soc); + ret = calc_soc(info, OCV_MODE_ACTIVE, &soc); + if (ret < 0) + goto out; data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG); bat_remove = data & BAT_WU_LOG; diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 33c40f79d23d5e016e69ecde3d0f5785ba4048c0..2c35c13ad546f8e902eb13a1742c4c6304379c65 100644 --- 
a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client) vcell = max17040_read_reg(client, MAX17040_VCELL); - chip->vcell = vcell; + chip->vcell = (vcell >> 4) * 1250; } static void max17040_get_soc(struct i2c_client *client) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index ae656f16e83efdededdc25eadd108fe4586adbda..bceda8a67bebcbfbf536314963a5ec4d5393e8b0 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -480,6 +480,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(cp_ilim), POWER_SUPPLY_ATTR(irq_status), POWER_SUPPLY_ATTR(parallel_output_mode), + POWER_SUPPLY_ATTR(cc_toggle_enable), POWER_SUPPLY_ATTR(fg_type), POWER_SUPPLY_ATTR(charger_status), /* Local extensions of type int64_t */ diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index d76d6c98569cf15e8151020bc55c4e535f035f61..b566a62b4cb7a581cd6ac348da78ab3b884e1bcd 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -2149,16 +2149,13 @@ static int smb5_configure_typec(struct smb_charger *chg) } /* - * Across reboot, standard typeC cables get detected as legacy cables - * due to VBUS attachment prior to CC attach/dettach. To handle this, - * "early_usb_attach" flag is used, which assumes that across reboot, - * the cable connected can be standard typeC. However, its jurisdiction - * is limited to PD capable designs only. Hence, for non-PD type designs - * reset legacy cable detection by disabling/enabling typeC mode. + * Across reboot, standard typeC cables get detected as legacy + * cables due to VBUS attachment prior to CC attach/detach. Reset + * the legacy detection logic by enabling/disabling the typeC mode. */ - if (chg->pd_not_supported && (val & TYPEC_LEGACY_CABLE_STATUS_BIT)) { + if (val & TYPEC_LEGACY_CABLE_STATUS_BIT) { pval.intval = POWER_SUPPLY_TYPEC_PR_NONE; - smblib_set_prop_typec_power_role(chg, &pval); + rc = smblib_set_prop_typec_power_role(chg, &pval); if (rc < 0) { dev_err(chg->dev, "Couldn't disable TYPEC rc=%d\n", rc); return rc; @@ -2168,7 +2165,7 @@ static int smb5_configure_typec(struct smb_charger *chg) msleep(50); pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL; - smblib_set_prop_typec_power_role(chg, &pval); + rc = smblib_set_prop_typec_power_role(chg, &pval); if (rc < 0) { dev_err(chg->dev, "Couldn't enable TYPEC rc=%d\n", rc); return rc; diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 4fb61100240de61f2f5322e644687fc400beb6c3..03c78b2d9aad6943667ce04df6765e4ec08fff94 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -4447,7 +4447,12 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg, smblib_dbg(chg, PR_MISC, "power role change: %d --> %d!", chg->power_role, val->intval); - if (chg->power_role == val->intval) { + /* + * Force the power-role if the initial value is NONE, for the + * legacy cable detection WA. 
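+ * (smb5_configure_typec() toggles the power role PR_NONE -> PR_DUAL to
+ * reset legacy cable detection, so a request that matches the current
+ * PR_NONE state must still be applied rather than ignored.)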
+ */ + if (chg->power_role == val->intval && + chg->power_role != POWER_SUPPLY_TYPEC_PR_NONE) { smblib_dbg(chg, PR_MISC, "power role already in %d, ignore!", chg->power_role); return 0; diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index 31b01035d0ab32e833226726369e2cd7d45aa2aa..8cfba3614e601be4a0bba0b0f303536f1a9a15e0 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, u64 tmp, multi, rate; u32 value, prescale; - rate = clk_get_rate(ip->clk); - value = readl(ip->base + IPROC_PWM_CTRL_OFFSET); if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm))) @@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, else state->polarity = PWM_POLARITY_INVERSED; + rate = clk_get_rate(ip->clk); + if (rate == 0) { + state->period = 0; + state->duty_cycle = 0; + return; + } + value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET); prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm); prescale &= IPROC_PWM_PRESCALE_MAX; diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index d6d2f20c45977aecf8f13c2306ab97384dd8e257..21df2816def76b599a9f7655dd1c2a153f612760 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -25,7 +25,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS config RAPIDIO_DMA_ENGINE bool "DMA Engine support for RapidIO" depends on RAPIDIO - select DMADEVICES + depends on DMADEVICES select DMA_ENGINE help Say Y here if you want to use DMA Engine frameork for RapidIO data diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 5940780648e0f305416f1574f1f6345bd8661213..f36a8a5261a13539e7f23b8ac7eba81b2ba75e4b 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2385,13 +2385,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) cdev_init(&md->cdev, &mport_fops); md->cdev.owner = THIS_MODULE; - ret = cdev_device_add(&md->cdev, &md->dev); - if (ret) { - rmcd_error("Failed to register mport %d (err=%d)", - mport->id, ret); - goto err_cdev; - } - INIT_LIST_HEAD(&md->doorbells); spin_lock_init(&md->db_lock); INIT_LIST_HEAD(&md->portwrites); @@ -2411,6 +2404,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; #endif + + ret = cdev_device_add(&md->cdev, &md->dev); + if (ret) { + rmcd_error("Failed to register mport %d (err=%d)", + mport->id, ret); + goto err_cdev; + } ret = rio_query_mport(mport, &attr); if (!ret) { md->properties.flags = attr.flags; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b9cae335a04cbf9746fdaeb4c24b1791aeea6158..09d3e2fb2a70e927c9fbf7d57a218b04123ffb9a 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1320,7 +1320,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, const char *supply) { - struct regulator_map *node; + struct regulator_map *node, *new_node; int has_dev; if (supply == NULL) @@ -1331,6 +1331,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, else has_dev = 0; + new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); + if (new_node == NULL) + return -ENOMEM; + + new_node->regulator = rdev; + new_node->supply = supply; + + if (has_dev) { + new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); + if (new_node->dev_name == NULL) { + 
kfree(new_node); + return -ENOMEM; + } + } + + mutex_lock(®ulator_list_mutex); list_for_each_entry(node, ®ulator_map_list, list) { if (node->dev_name && consumer_dev_name) { if (strcmp(node->dev_name, consumer_dev_name) != 0) @@ -1348,26 +1364,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, node->regulator->desc->name, supply, dev_name(&rdev->dev), rdev_get_name(rdev)); - return -EBUSY; + goto fail; } - node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); - if (node == NULL) - return -ENOMEM; - - node->regulator = rdev; - node->supply = supply; - - if (has_dev) { - node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); - if (node->dev_name == NULL) { - kfree(node); - return -ENOMEM; - } - } + list_add(&new_node->list, ®ulator_map_list); + mutex_unlock(®ulator_list_mutex); - list_add(&node->list, ®ulator_map_list); return 0; + +fail: + mutex_unlock(®ulator_list_mutex); + kfree(new_node->dev_name); + kfree(new_node); + return -EBUSY; } static void unset_regulator_supplies(struct regulator_dev *rdev) @@ -4797,19 +4806,16 @@ regulator_register(const struct regulator_desc *regulator_desc, /* add consumers devices */ if (init_data) { - mutex_lock(®ulator_list_mutex); for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { - mutex_unlock(®ulator_list_mutex); dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } - mutex_unlock(®ulator_list_mutex); } if (!rdev->desc->ops->get_voltage && diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index a2fd140eff81a16272ac7844a8e31fc03fb0a7f0..34f3b9778ffa1166697c2a820876cb1508c241fd 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -285,7 +285,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, return ret; } - drvdata->state = -EINVAL; + drvdata->state = -ENOTRECOVERABLE; drvdata->duty_cycle_table = duty_cycle_table; memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops, sizeof(drvdata->ops)); diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index 0d33e3079f0dc2abb0cab3bca2a8bf46a885d58f..ef61cb709acd434c97b61a84c921799de856613d 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) { int ret; + q6v5->running = false; + qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 38a2e9e684df43898086ebaa119cb7752d981794..77a106e90124b3d855cd6fff997e7d7b77c766ac 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client, if (!ds1374) return -ENOMEM; + ds1374->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(ds1374->rtc)) + return PTR_ERR(ds1374->rtc); + ds1374->client = client; i2c_set_clientdata(client, ds1374); @@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client, device_set_wakeup_capable(&client->dev, 1); } - ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, - &ds1374_rtc_ops, THIS_MODULE); - if (IS_ERR(ds1374->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); - return PTR_ERR(ds1374->rtc); - } + ds1374->rtc->ops = &ds1374_rtc_ops; + + ret = rtc_register_device(ds1374->rtc); + if 
(ret) + return ret; #ifdef CONFIG_RTC_DRV_DS1374_WDT save_client = client; diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index a1c44d0c855780f8ba109e79c6bd62cb96dbb40d..30cbe22c57a8e2bb57ce80b769dc9716b601d1bf 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev, rtc_alarm64 = rtc_alarm * NSEC_PER_SEC; writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH); writel(rtc_alarm64, base + TIMER_ALARM_LOW); + writel(1, base + TIMER_IRQ_ENABLED); } else { /* * if this function was called with enabled=0 diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 304d905cb23fd20240cdb8b4953310cb5d1a1609..56f625371735f2e2dd91e686d978ae25bc33a0f5 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -186,7 +186,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = { int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) { - struct rtc_device *rtc; int ret; spin_lock_init(&info->lock); @@ -215,15 +214,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) writel_relaxed(0, info->rcnr); } - rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { + info->rtc->ops = &sa1100_rtc_ops; + info->rtc->max_user_freq = RTC_FREQ; + + ret = rtc_register_device(info->rtc); + if (ret) { clk_disable_unprepare(info->clk); - return PTR_ERR(rtc); + return ret; } - info->rtc = rtc; - - rtc->max_user_freq = RTC_FREQ; /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). @@ -272,6 +270,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev) info->irq_1hz = irq_1hz; info->irq_alarm = irq_alarm; + info->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", &pdev->dev); if (ret) { diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 56007a3e7f110358e27ad74563f24e428cbae473..fab09455ba9440571df9656b46a2cb73809097d3 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -40,6 +40,7 @@ MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; +static void *dasd_fba_zero_page; struct dasd_fba_private { struct dasd_fba_characteristics rdc_data; @@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count) ccw->cmd_code = DASD_FBA_CCW_WRITE; ccw->flags |= CCW_FLAG_SLI; ccw->count = count; - ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0)); + ccw->cda = (__u32) (addr_t) dasd_fba_zero_page; } /* @@ -811,6 +812,11 @@ dasd_fba_init(void) int ret; ASCEBC(dasd_fba_discipline.ebcname, 4); + + dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!dasd_fba_zero_page) + return -ENOMEM; + ret = ccw_driver_register(&dasd_fba_driver); if (!ret) wait_for_device_probe(); @@ -822,6 +828,7 @@ static void __exit dasd_fba_cleanup(void) { ccw_driver_unregister(&dasd_fba_driver); + free_page((unsigned long)dasd_fba_zero_page); } module_init(dasd_fba_init); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index df09ed53ab45909a0ba3aebcc3c245325bcf38f3..825a8f2703b4fc98e6e8e8590e116f1c24b86e4e 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -615,6 +615,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data) rc = css_evaluate_known_subchannel(sch, 1); if (rc == 
-EAGAIN) css_schedule_eval(sch->schid); + /* + * The loop might take long time for platforms with lots of + * known devices. Allow scheduling here. + */ + cond_resched(); } return 0; } diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 23c24a699cefe50f9b2d1970c0d2bcb312bcfbba..b7cb897cd83e04d87564467c7ae8b716eff83b70 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -915,7 +915,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + if (copy_to_user((int __user *) arg, reqcnt, + sizeof(u32) * AP_DEVICES)) rc = -EFAULT; kfree(reqcnt); return rc; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index eb917e93fa72ff742df95aad911fd8e2e41200de..8d30f9ac3e9d53f627c441442e969d7d23338d89 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1463,6 +1463,10 @@ static void qeth_bridge_state_change(struct qeth_card *card, int extrasize; QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 91aa4bfcf8d612041ce5ffad18681f02fc01c62f..5bb278a604ed29cf4a753ad8b3ad52d0ae4e5f25 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -403,7 +403,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) return; } - del_timer(&req->timer); + del_timer_sync(&req->timer); zfcp_fsf_protstatus_eval(req); zfcp_fsf_fsfstatus_eval(req); req->handler(req); @@ -758,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { - del_timer(&req->timer); + del_timer_sync(&req->timer); /* lookup request again, list might have changed */ zfcp_reqlist_find_rm(adapter->req_list, req_id); zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 6e356325d8d98a120aa93ceb5c12d5898880ba14..54717fb84a54c553f299701784c0a18cf37e2fe9 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2481,13 +2481,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; } dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", @@ -2573,13 +2573,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 
SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; } dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index b7588de4484e58ea0001dd983564efc28e8cc93e..4cb6ee6e1212e1bf5459bada9e5be61e10b50d95 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -743,7 +743,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; - } else if (command != HBA_IU_TYPE_SCSI_TM_REQ) + } else return -EINVAL; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 1046947064a0b683853e7abe1b1a524424479abd..eecffc03084c0fab18e327dfc3b72d44f9df00b6 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, (fib_callback) aac_hba_callback, (void *) cmd); - + if (status != -EINPROGRESS) { + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 secs for completion */ for (count = 0; count < 15; ++count) { if (cmd->SCp.sent_command) { @@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) info = &aac->hba_map[bus][cid]; - if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED; - pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host device reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); @@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) status = aac_hba_send(command, fib, (fib_callback) aac_tmf_callback, (void *) info); - + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state == 0) { @@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) info = &aac->hba_map[bus][cid]; - if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED; - pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host target reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); @@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) (fib_callback) aac_tmf_callback, (void *) info); + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state <= 0) { @@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) } } - pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); + pr_err("%s: Host bus reset request. 
SCSI hang ?\n", AAC_DRIVERNAME); /* * Check the health of the controller @@ -1604,7 +1620,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost; struct aac_dev *aac; struct list_head *insert = &aac_devices; - int error = -ENODEV; + int error; int unique_id = 0; u64 dmamask; int mask_bits = 0; @@ -1629,7 +1645,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) error = pci_enable_device(pdev); if (error) goto out; - error = -ENODEV; if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); @@ -1661,8 +1676,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); - if (!shost) + if (!shost) { + error = -ENOMEM; goto out_disable_pdev; + } shost->irq = pdev->irq; shost->unique_id = unique_id; @@ -1687,8 +1704,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, sizeof(struct fib), GFP_KERNEL); - if (!aac->fibs) + if (!aac->fibs) { + error = -ENOMEM; goto out_free_host; + } + spin_lock_init(&aac->fib_lock); mutex_init(&aac->ioctl_mutex); diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index edce5f3cfdba041f53480050bd3c3b9002147c06..93ba83e3148eb5e195cf74ea11d26963124ec07b 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -454,7 +454,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index e93e047f431651f255f9bddd14ed39ab3312209f..65bb34ce93b94d50660971b7bab107a07a324298 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -575,7 +575,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_remove: fas216_remove(host); diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index 79aa88911b7f3472b7f5aa9bae57984cfc4de425..b5e4a25ea1ef3e8cbc89d613fb6b0ec6f655e9b1 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -382,7 +382,7 @@ static int powertecscsi_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index f987c40c47a13f2f40c4eaf1183e83f95051ad46..443813feaef4701b99c570b11e390d332cddaed8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3749,6 +3749,7 @@ static int cxlflash_probe(struct pci_dev *pdev, cfg->afu_cookie = cfg->ops->create_afu(pdev); if (unlikely(!cfg->afu_cookie)) { dev_err(dev, "%s: create_afu failed\n", __func__); + rc = -ENOMEM; goto out_remove; } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 24cbd0a2cc69facf325be96b5bc150cb2e882389..658c0726581f930b6153a5b6edbd732864af480c 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -267,9 +267,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) WARN_ON(!fcf_dev); new->fcf_dev = NULL; 
fcoe_fcf_device_delete(fcf_dev); - kfree(new); mutex_unlock(&cdev->lock); } + kfree(new); } /** diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 73ffc16ec0225385f6bc6105e50594b4b1b18efe..b521fc7650cb9847a6add028dcf8830f04a08a72 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, atomic64_inc(&fnic_stats->io_stats.io_completions); - io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + io_duration_time = jiffies_to_msecs(jiffies) - + jiffies_to_msecs(start_time); if(io_duration_time <= 10) atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index f570b8c5d857cce6e8d3d53e4d9cc861b8214558..11de2198bb87d319bd15710fa86ae92f5eb3e98e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -507,6 +507,12 @@ static ssize_t host_store_rescan(struct device *dev, return count; } +static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) +{ + device->offload_enabled = 0; + device->offload_to_be_enabled = 0; +} + static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1743,8 +1749,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, __func__, h->scsi_host->host_no, logical_drive->bus, logical_drive->target, logical_drive->lun); - logical_drive->offload_enabled = 0; - logical_drive->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(logical_drive); logical_drive->queue_depth = 8; } } @@ -2496,8 +2501,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h, IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { - dev->offload_enabled = 0; - dev->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(dev); } return hpsa_retry_cmd(h, c); @@ -3676,10 +3680,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (this_device->offload_config) { - this_device->offload_to_be_enabled = + bool offload_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); - if (hpsa_get_raid_map(h, scsi3addr, this_device)) - this_device->offload_to_be_enabled = 0; + /* + * Check to see if offload can be enabled. + */ + if (offload_enabled) { + rc = hpsa_get_raid_map(h, scsi3addr, this_device); + if (rc) /* could not load raid_map */ + goto out; + this_device->offload_to_be_enabled = 1; + } } out: @@ -3998,8 +4009,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; - this_device->offload_enabled = 0; - this_device->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(this_device); this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; @@ -5213,8 +5223,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, /* Handles load balance across RAID 1 members. * (2-drive R1 and R10 with even # of drives.) * Appropriate for SSDs, not optimal for HDDs + * Ensure we have the correct raid_map. 
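+ * (If layout_map_count is not the expected value, ioaccel is turned off
+ * for the device and IO_ACCEL_INELIGIBLE is returned instead of hitting
+ * the old BUG_ON.)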
*/ - BUG_ON(le16_to_cpu(map->layout_map_count) != 2); + if (le16_to_cpu(map->layout_map_count) != 2) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } if (dev->offload_to_mirror) map_index += le16_to_cpu(map->data_disks_per_row); dev->offload_to_mirror = !dev->offload_to_mirror; @@ -5222,8 +5236,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, case HPSA_RAID_ADM: /* Handles N-way mirrors (R1-ADM) * and R10 with # of drives divisible by 3.) + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 3); + if (le16_to_cpu(map->layout_map_count) != 3) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } offload_to_mirror = dev->offload_to_mirror; raid_map_helper(map, offload_to_mirror, @@ -5248,7 +5266,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, r5or6_blocks_per_row = le16_to_cpu(map->strip_size) * le16_to_cpu(map->data_disks_per_row); - BUG_ON(r5or6_blocks_per_row == 0); + if (r5or6_blocks_per_row == 0) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } stripesize = r5or6_blocks_per_row * le16_to_cpu(map->layout_map_count); #if BITS_PER_LONG == 32 @@ -8218,7 +8239,7 @@ static int detect_controller_lockup(struct ctlr_info *h) * * Called from monitor controller worker (hpsa_event_monitor_worker) * - * A Volume (or Volumes that comprise an Array set may be undergoing a + * A Volume (or Volumes that comprise an Array set) may be undergoing a * transformation, so we will be turning off ioaccel for all volumes that * make up the Array. */ @@ -8241,6 +8262,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) * Run through current device list used during I/O requests. */ for (i = 0; i < h->ndevices; i++) { + int offload_to_be_enabled = 0; + int offload_config = 0; + device = h->dev[i]; if (!device) @@ -8258,25 +8282,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) continue; ioaccel_status = buf[IOACCEL_STATUS_BYTE]; - device->offload_config = + + /* + * Check if offload is still configured on + */ + offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); - if (device->offload_config) - device->offload_to_be_enabled = + /* + * If offload is configured on, check to see if ioaccel + * needs to be enabled. + */ + if (offload_config) + offload_to_be_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + /* + * If ioaccel is to be re-enabled, re-enable later during the + * scan operation so the driver can get a fresh raidmap + * before turning ioaccel back on. + */ + if (offload_to_be_enabled) + continue; + /* * Immediately turn off ioaccel for any volume the * controller tells us to. Some of the reasons could be: * transformation - change to the LVs of an Array. * degraded volume - component failure - * - * If ioaccel is to be re-enabled, re-enable later during the - * scan operation so the driver can get a fresh raidmap - * before turning ioaccel back on. 
- * */ - if (!device->offload_to_be_enabled) - device->offload_enabled = 0; + hpsa_turn_off_ioaccel_for_device(device); } kfree(buf); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8839f509b19ab2b8e4785a78e097c600deceac06..0b3f4538c1d4d63801d0612c8246bbbb18eeef84 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -593,8 +593,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; - if (IS_ERR(fp)) - goto redisc; + if (IS_ERR(fp)) { + mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + goto out; + } cp = fc_frame_payload_get(fp, sizeof(*cp)); if (!cp) @@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, new_rdata->disc_id = disc->disc_id; fc_rport_login(new_rdata); } - goto out; + goto free_fp; } rdata->disc_id = disc->disc_id; mutex_unlock(&rdata->rp_mutex); @@ -638,10 +642,10 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, fc_disc_restart(disc); mutex_unlock(&disc->disc_mutex); } +free_fp: + fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); - if (!IS_ERR(fp)) - fc_frame_free(fp); } /** diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 90a748551ede5d98f26ba52ff892f7bc57c2109f..2b3239765c2491170d2040f11abe10ac62e5167e 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) lockdep_assert_held(&lport->disc.disc_mutex); rdata = fc_rport_lookup(lport, port_id); - if (rdata) + if (rdata) { + kref_put(&rdata->kref, fc_rport_destroy); return rdata; + } if (lport->rport_priv_size > 0) rport_priv_size = lport->rport_priv_size; @@ -493,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, fc_rport_state_enter(rdata, RPORT_ST_DELETE); - kref_get(&rdata->kref); - if (rdata->event == RPORT_EV_NONE && - !queue_work(rport_event_queue, &rdata->event_work)) - kref_put(&rdata->kref, fc_rport_destroy); + if (rdata->event == RPORT_EV_NONE) { + kref_get(&rdata->kref); + if (!queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + } rdata->event = event; } diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 64a958a99f6a89896c16354502c34a397d18a1df..d82698b7dfe6ce97db6066626fe4028447ff080c 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -223,7 +223,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->num_scatter = si; } - task->data_dir = qc->dma_dir; + if (qc->tf.protocol == ATA_PROT_NODATA) + task->data_dir = DMA_NONE; + else + task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index fe084d47ed9e54b5ba405e8ed31d7e16b6450d9c..3447d19d4147af20dd14f3cac67bba0b7e90c850 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -332,7 +332,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; - rcu_read_lock(); scnprintf(tmp, sizeof(tmp), "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", phba->brd_no, @@ -341,7 +340,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute 
*attr, phba->sli4_hba.scsi_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -357,7 +356,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; + + spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; @@ -384,39 +385,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. */ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -424,14 +425,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } - rcu_read_unlock(); + spin_unlock_irq(shost->host_lock); if (!lport) goto buffer_done; @@ -491,11 +492,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); - /* RCU is already unlocked. */ + /* host_lock is already unlocked. 
*/ goto buffer_done; - rcu_unlock_buf_done: - rcu_read_unlock(); + unlock_buf_done: + spin_unlock_irq(shost->host_lock); buffer_done: len = strnlen(buf, PAGE_SIZE); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 384f5cd7c3c81f3d010ceda13358db28bd778145..99b4ff78f9dced7986f9082207efa0e269f6fd94 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); /* This string MUST be consistent with other FC platforms * supported by Broadcom. @@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->SerialNumber, sizeof(ae->un.AttrString)); @@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelDesc, sizeof(ae->un.AttrString)); @@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t i, j, incr, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); /* Convert JEDEC ID to ascii for hardware version */ incr = vp->rev.biuRev; @@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, lpfc_release_version, sizeof(ae->un.AttrString)); @@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); @@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); len = strnlen(ae->un.AttrString, @@ -1942,8 +1942,8 @@ 
lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", init_utsname()->sysname, @@ -1965,7 +1965,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); size = FOURBYTES + sizeof(uint32_t); @@ -1981,8 +1981,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); len = lpfc_vport_symbolic_node_name(vport, ae->un.AttrString, 256); @@ -2000,7 +2000,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Nothing is defined for this currently */ ae->un.AttrInt = cpu_to_be32(0); @@ -2017,7 +2017,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Each driver instance corresponds to a single port */ ae->un.AttrInt = cpu_to_be32(1); @@ -2034,8 +2034,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, sizeof(struct lpfc_name)); @@ -2053,8 +2053,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); len = strnlen(ae->un.AttrString, @@ -2073,7 +2073,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Driver doesn't have access to this information */ ae->un.AttrInt = cpu_to_be32(0); @@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "EMULEX", sizeof(ae->un.AttrString)); @@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ @@ -2134,7 +2134,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { @@ -2186,7 +2186,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; 
uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (!(phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { @@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; hsp = (struct serv_parm *)&vport->fc_sparam; ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) | @@ -2273,8 +2273,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "/sys/class/scsi_host/host%d", shost->host_no); @@ -2294,8 +2294,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", init_utsname()->nodename); @@ -2315,8 +2315,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2333,8 +2333,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); @@ -2351,8 +2351,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); len += (len & 3) ? 
(4 - (len & 3)) : 4; @@ -2370,7 +2370,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); else @@ -2388,7 +2388,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2403,8 +2403,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fabric_portname, sizeof(struct lpfc_name)); @@ -2421,8 +2421,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ @@ -2442,7 +2442,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Link Up - operational */ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); size = FOURBYTES + sizeof(uint32_t); @@ -2458,7 +2458,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; vport->fdmi_num_disc = lpfc_find_map_node(vport); ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); size = FOURBYTES + sizeof(uint32_t); @@ -2474,7 +2474,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2489,8 +2489,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "Smart SAN Initiator", sizeof(ae->un.AttrString)); @@ -2510,8 +2510,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2531,8 +2531,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "Smart SAN Version 2.0", sizeof(ae->un.AttrString)); @@ -2553,8 +2553,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - 
memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -2573,7 +2573,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* SRIOV (type 3) is not supported */ if (vport->vpi) @@ -2593,7 +2593,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(0); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2608,7 +2608,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(1); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2756,7 +2756,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Registered Port List */ /* One entry (port) per adapter */ rh->rpl.EntryCnt = cpu_to_be32(1); - memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, + memcpy(&rh->rpl.pe.PortName, + &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); /* point to the HBA attribute block */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 9032793c405ebc18b9f0ec5d225351f18049de54..6a4b496081e4a32894804c0e8e72e15b557b8b95 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4112,7 +4112,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, out: if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); + if (mbox) + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; spin_unlock_irq(shost->host_lock); /* If the node is not being used by another discovery thread, diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 009aa0eee040800a3e585105631055985faad4e3..48d4d576d588e2d3792a53a2607d27cedbe46580 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1333,25 +1333,8 @@ struct fc_rdp_res_frame { /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ -/* - * Registered Port List Format - */ -struct lpfc_fdmi_reg_port_list { - uint32_t EntryCnt; - uint32_t pe; /* Variable-length array */ -}; - - /* Definitions for HBA / Port attribute entries */ -struct lpfc_fdmi_attr_def { /* Defined in TLV format */ - /* Structure is in Big Endian format */ - uint32_t AttrType:16; - uint32_t AttrLen:16; - uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ -}; - - /* Attribute Entry */ struct lpfc_fdmi_attr_entry { union { @@ -1362,7 +1345,13 @@ struct lpfc_fdmi_attr_entry { } un; }; -#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) +struct lpfc_fdmi_attr_def { /* Defined in TLV format */ + /* Structure is in Big Endian format */ + uint32_t AttrType:16; + uint32_t AttrLen:16; + /* Marks start of Value (ATTRIBUTE_ENTRY) */ + struct lpfc_fdmi_attr_entry AttrValue; +} __packed; /* * HBA Attribute Block @@ -1386,13 +1375,20 @@ struct lpfc_fdmi_hba_ident { struct lpfc_name PortName; }; +/* + * Registered Port List Format + */ +struct lpfc_fdmi_reg_port_list { + uint32_t EntryCnt; + struct 
lpfc_fdmi_port_entry pe; +} __packed; + /* * Register HBA(RHBA) */ struct lpfc_fdmi_reg_hba { struct lpfc_fdmi_hba_ident hi; - struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ -/* struct lpfc_fdmi_attr_block ab; */ + struct lpfc_fdmi_reg_port_list rpl; }; /* diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 768eba8c111d9867ddcca2875529a8d46935d10d..5bc33817568ea318a981864015ad23a00e2a9c28 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1712,7 +1712,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) lpfc_printf_log(phba, KERN_ERR, LOG_NVME, "6179 Unreg targetport %p timeout " diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a56a939792ac1ff621e396802ffce745ca15e8c6..2ab351260e8151e0956c9e753880b6bbf9fc1c58 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -17413,6 +17413,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) list_add_tail(&iocbq->list, &first_iocbq->list); } } + /* Free the sequence's header buffer */ + if (!first_iocbq) + lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); + return first_iocbq; } diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 1ff0f7de910584e11dc8f839a0d59751908903fc..64545b300dfc72a872819f6c08d1ffb16c228b32 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -653,27 +653,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) vport->port_state < LPFC_VPORT_READY) return -EAGAIN; } + /* - * This is a bit of a mess. We want to ensure the shost doesn't get - * torn down until we're done with the embedded lpfc_vport structure. - * - * Beyond holding a reference for this function, we also need a - * reference for outstanding I/O requests we schedule during delete - * processing. But once we scsi_remove_host() we can no longer obtain - * a reference through scsi_host_get(). - * - * So we take two references here. We release one reference at the - * bottom of the function -- after delinking the vport. And we - * release the other at the completion of the unreg_vpi that get's - * initiated after we've disposed of all other resources associated - * with the port. + * Take early refcount for outstanding I/O requests we schedule during + * delete processing for unreg_vpi. Always keep this before + * scsi_remove_host() as we can no longer obtain a reference through + * scsi_host_get() after scsi_remove_host() as shost is set to SHOST_DEL.
*/ if (!scsi_host_get(shost)) return VPORT_INVAL; - if (!scsi_host_get(shost)) { - scsi_host_put(shost); - return VPORT_INVAL; - } + lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); @@ -820,8 +809,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport) if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); - } else + } else { scsi_host_put(shost); + } lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 82e01dbe90af46daab1eff0f901d92a0026b0c08..7c0eaa9ea1edfb85402ba10227499ec88488f3f0 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1044,6 +1044,8 @@ static void handle_error(struct mesh_state *ms) while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); + if (ms->dma_started) + halt_dma(ms); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; @@ -1356,7 +1358,8 @@ static void halt_dma(struct mesh_state *ms) ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } - scsi_dma_unmap(cmd); + if (cmd) + scsi_dma_unmap(cmd); ms->dma_started = 0; } @@ -1711,6 +1714,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd) spin_lock_irqsave(ms->host->host_lock, flags); + if (ms->dma_started) + halt_dma(ms); + /* Reset the controller & dbdma channel */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 5be4212312cb02401961e964c4b0e95634320c3b..5becdde3ea324b88367a4f1aea2b01770813dc31 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -794,7 +794,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) - return res; + goto ex_err; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; @@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task) pm8001_ha = pm8001_find_ha_by_dev(dev); device_id = pm8001_dev->device_id; phy_id = pm8001_dev->attached_phy; - rc = pm8001_find_tag(task, &tag); - if (rc == 0) { + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { pm8001_printk("no tag for task:%p\n", task); return TMF_RESP_FUNC_FAILED; } @@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task) /* 2. Send Phy Control Hard Reset */ reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; phy->reset_success = false; phy->enable_completion = &completion; phy->reset_completion = &completion_reset; ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); - if (ret) - goto out; - PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("Waiting for local phy ctl\n")); - wait_for_completion(&completion); - if (!phy->reset_success) + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; goto out; + } - /* 3. Wait for Port Reset complete / Port reset TMO */ + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. 
+ */ PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Waiting for local phy ctl\n")); + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. Wait for Port Reset complete or + * Port reset TMO + */ + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Waiting for Port reset\n")); - wait_for_completion(&completion_reset); - if (phy->port_reset_status) { - pm8001_dev_gone_notify(dev); - goto out; + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + goto out; + } } /* diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 751941a3ed30307d35c980b021ccb94bb905d6dd..aa451c8b49e569ad87bc9572d48707118eacb68d 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) break; } + if (!abrt_conn) + wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; + qedi_ep->state = EP_STATE_DISCONN_START; ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); if (ret) { diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a9dc9c4a6382615175a127d9df9507baefa8dd72..47835d26a973018f5377f0c4b4d5f46a49d9d3e4 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -262,8 +262,8 @@ struct name_list_extended { struct get_name_list_extended *l; dma_addr_t ldma; struct list_head fcports; - spinlock_t fcports_lock; u32 size; + u8 sent; }; /* * Timeout timer counts in seconds @@ -2351,7 +2351,7 @@ typedef struct fc_port { unsigned int login_succ:1; unsigned int query:1; unsigned int id_changed:1; - unsigned int rscn_rcvd:1; + unsigned int scan_needed:1; struct work_struct nvme_del_work; struct completion nvme_del_done; @@ -2375,11 +2375,13 @@ typedef struct fc_port { unsigned long expires; struct list_head del_list_entry; struct work_struct free_work; - + struct work_struct reg_work; + uint64_t jiffies_at_registration; struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; uint16_t tgt_id; uint16_t old_tgt_id; + uint16_t sec_since_registration; uint8_t fcp_prio; @@ -2412,6 +2414,7 @@ typedef struct fc_port { struct qla_tgt_sess *tgt_session; struct ct_sns_desc ct_desc; enum discovery_state disc_state; + enum discovery_state next_disc_state; enum login_state fw_login_state; unsigned long dm_login_expire; unsigned long plogi_nack_done_deadline; @@ -3222,7 +3225,6 @@ enum qla_work_type { QLA_EVT_GPDB, QLA_EVT_PRLI, QLA_EVT_GPSC, - QLA_EVT_UPD_FCPORT, QLA_EVT_GNL, QLA_EVT_NACK, QLA_EVT_RELOGIN, diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 178974896b5c71cb14d92ab1b9772ce3be2dada2..b8e4abe804d5d3df7fe9d069a59ff3e68aa9ea64 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); extern void qla2x00_quiesce_io(scsi_qla_host_t *); extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); - +void qla_register_fcport_fn(struct work_struct *); extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); @@ -109,6 +109,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, 
u8*, int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); int qla24xx_detect_sfp(scsi_qla_host_t *vha); int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); + void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *, @@ -208,7 +209,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *); extern void qla2x00_sp_compl(void *, int); extern void qla2xxx_qpair_sp_free_dma(void *); extern void qla2xxx_qpair_sp_compl(void *, int); -extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); +extern void qla24xx_sched_upd_fcport(fc_port_t *); void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index f621cb55ccfb27196605f849d8a56cb2a4df0b6c..c3195d4c25e5c3251bd6f6afae5f1ade10473c25 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3973,7 +3973,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) continue; - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; fcport->scan_state = QLA_FCPORT_FOUND; found = true; /* @@ -4009,20 +4009,19 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) */ list_for_each_entry(fcport, &vha->vp_fcports, list) { if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; continue; } if (fcport->scan_state != QLA_FCPORT_FOUND) { - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; if ((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID) { + if (fcport->flags & FCF_FCP2_DEVICE) + fcport->logout_on_delete = 0; - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0) { ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, @@ -4033,7 +4032,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } } } else { - if (fcport->rscn_rcvd || + if (fcport->scan_needed || fcport->disc_state != DSC_LOGIN_COMPLETE) { if (fcport->login_retry == 0) { fcport->login_retry = @@ -4043,7 +4042,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) fcport->port_name, fcport->loop_id, fcport->login_retry); } - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; qla24xx_fcport_handle_login(vha, fcport); } } @@ -4058,7 +4057,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if (recheck) { list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->rscn_rcvd) { + if (fcport->scan_needed) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); break; @@ -4261,12 +4260,13 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) sp->rc = res; rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT); - if (!rc) { + if (rc) { qla24xx_sp_unmap(vha, sp); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return; } + return; } if (cmd == GPN_FT_CMD) { @@ -4316,6 +4316,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, vha->scan.scan_flags &= ~SF_SCANNING; 
spin_unlock_irqrestore(&vha->work_lock, flags); WARN_ON(1); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); goto done_free_sp; } @@ -4349,8 +4351,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->done = qla2x00_async_gpnft_gnnft_sp_done; rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) + if (rval != QLA_SUCCESS) { + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); goto done_free_sp; + } ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f45759b353bea7b11e324b47ca88df900831e7c8..2ebf4e4e02344d69ef7e00e2dc43d8898a26df2b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -800,6 +800,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) if (res == QLA_FUNCTION_TIMEOUT) return; + sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); ea.sp = sp; ea.rc = res; @@ -827,25 +828,24 @@ qla24xx_async_gnl_sp_done(void *s, int res) (loop_id & 0x7fff)); } - spin_lock_irqsave(&vha->gnl.fcports_lock, flags); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); INIT_LIST_HEAD(&h); fcport = tf = NULL; if (!list_empty(&vha->gnl.fcports)) list_splice_init(&vha->gnl.fcports, &h); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { list_del_init(&fcport->gnl_entry); - spin_lock(&vha->hw->tgt.sess_lock); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); - spin_unlock(&vha->hw->tgt.sess_lock); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ea.fcport = fcport; qla2x00_fcport_event_handler(vha, &ea); } - spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* create new fcport if fw has knowledge of new sessions */ for (i = 0; i < n; i++) { port_id_t id; @@ -878,6 +878,8 @@ qla24xx_async_gnl_sp_done(void *s, int res) } } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + vha->gnl.sent = 0; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp->free(sp); @@ -897,27 +899,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20d9, "Async-gnlist WWPN %8phC \n", fcport->port_name); - spin_lock_irqsave(&vha->gnl.fcports_lock, flags); - if (!list_empty(&fcport->gnl_entry)) { - spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); - rval = QLA_SUCCESS; - goto done; - } - - spin_lock(&vha->hw->tgt.sess_lock); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->flags |= FCF_ASYNC_SENT; fcport->disc_state = DSC_GNL; fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; - spin_unlock(&vha->hw->tgt.sess_lock); list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); - spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); + if (vha->gnl.sent) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + return QLA_SUCCESS; + } + vha->gnl.sent = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; - fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_MB_IOCB; sp->name = "gnlist"; sp->gen1 = fcport->rscn_gen; @@ -1204,11 +1203,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) vha->fcport_count++; ea->fcport->login_succ = 1; 
- ql_dbg(ql_dbg_disc, vha, 0x20d6, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, ea->fcport->port_name, - vha->fcport_count); - qla24xx_post_upd_fcport_work(vha, ea->fcport); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + qla24xx_sched_upd_fcport(ea->fcport); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else if (ea->fcport->login_succ) { /* * We have an existing session. A late RSCN delivery @@ -1326,6 +1323,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) { u16 data[2]; u64 wwn; + u16 sec; ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n", @@ -1457,6 +1455,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) qla24xx_post_prli_work(vha, fcport); break; + case DSC_UPD_FCPORT: + sec = jiffies_to_msecs(jiffies - + fcport->jiffies_at_registration)/1000; + if (fcport->sec_since_registration < sec && sec && + !(sec % 60)) { + fcport->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC - Slow Rport registration(%d Sec)\n", + __func__, fcport->port_name, sec); + } + + if (fcport->next_disc_state != DSC_DELETE_PEND) + fcport->next_disc_state = DSC_ADISC; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + default: break; } @@ -1572,8 +1586,10 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_PORT_ADDR: fcport = qla2x00_find_fcport_by_nportid (vha, &ea->id, 1); - if (fcport) - fcport->rscn_rcvd = 1; + if (fcport) { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags == 0) { @@ -4741,6 +4757,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); @@ -5221,13 +5238,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) void qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { - fcport->vha = vha; - if (IS_SW_RESV_ADDR(fcport->d_id)) return; + ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", + __func__, fcport->port_name); + + fcport->disc_state = DSC_UPD_FCPORT; + fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); - fcport->disc_state = DSC_LOGIN_COMPLETE; fcport->deleted = 0; fcport->logout_on_delete = 1; fcport->login_retry = vha->hw->login_retry_count; @@ -5289,6 +5308,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } qla2x00_set_fcport_state(fcport, FCS_ONLINE); + + fcport->disc_state = DSC_LOGIN_COMPLETE; +} + +void qla_register_fcport_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, reg_work); + u32 rscn_gen = fcport->rscn_gen; + u16 data[2]; + + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + qla2x00_update_fcport(fcport->vha, fcport); + + if (rscn_gen != fcport->rscn_gen) { + /* RSCN(s) came in while registration */ + switch (fcport->next_disc_state) { + case DSC_DELETE_PEND: + qlt_schedule_sess_for_deletion(fcport); + break; + case DSC_ADISC: + data[0] = data[1] = 0; + qla2x00_post_async_adisc_work(fcport->vha, fcport, + data); + break; + default: + break; + } + } } /* diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index ac5d2d34aeeaecc570eaf8e5117598965e069a7c..07c5d7397d425f202144a5fac1a056830d568bab 
100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -329,14 +329,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (time_after(jiffies, wait_time)) break; - /* - * Check if it's UNLOADING, cause we cannot poll in - * this case, or else a NULL pointer dereference - * is triggered. - */ - if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) - return QLA_FUNCTION_TIMEOUT; - /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 5590d6e8b57624404df0cb481ab134f2e3374a5d..3e2f8ce1d9a9712c2d36fc3ae24b4ea9db90d2cf 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -477,6 +477,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; + if (!priv) { + /* nvme association has been torn down */ + return rval; + } + fcport = qla_rport->fcport; vha = fcport->vha; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b56cf790587e52f317c0f6e073c026dc9148300a..83ef790afb5dfc3fab09563db19f2f7a2b68f10f 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1997,6 +1997,11 @@ qla2x00_iospace_config(struct qla_hw_data *ha) /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; + + /* Check if FW supports MQ or not */ + if (!(ha->fw_attributes & BIT_6)) + goto mqiobase_exit; + if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -2714,7 +2719,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work) struct scsi_qla_host, iocb_work); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - int i = 20; + int i = 2; unsigned long flags; if (test_bit(UNLOADING, &base_vha->dpc_flags)) @@ -4601,7 +4606,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); - spin_lock_init(&vha->gnl.fcports_lock); init_waitqueue_head(&vha->fcport_waitQ); init_waitqueue_head(&vha->vref_waitq); @@ -4787,16 +4791,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, return qla2x00_post_work(vha, e); } -int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) +void qla24xx_sched_upd_fcport(fc_port_t *fcport) { - struct qla_work_evt *e; + unsigned long flags; - e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); - if (!e) - return QLA_FUNCTION_FAILED; + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; - e->u.fcport.fcport = fcport; - return qla2x00_post_work(vha, e); + spin_lock_irqsave(&fcport->vha->work_lock, flags); + if (fcport->disc_state == DSC_UPD_FCPORT) { + spin_unlock_irqrestore(&fcport->vha->work_lock, flags); + return; + } + fcport->jiffies_at_registration = jiffies; + fcport->sec_since_registration = 0; + fcport->next_disc_state = DSC_DELETED; + fcport->disc_state = DSC_UPD_FCPORT; + spin_unlock_irqrestore(&fcport->vha->work_lock, flags); + + queue_work(system_unbound_wq, &fcport->reg_work); } static @@ -5052,9 +5065,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_GPSC: qla24xx_async_gpsc(vha, e->u.fcport.fcport); break; - case QLA_EVT_UPD_FCPORT: - qla2x00_update_fcport(vha, e->u.fcport.fcport); - break; case QLA_EVT_GNL: qla24xx_async_gnl(vha, e->u.fcport.fcport); break; diff --git 
a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 95206e227730c1d36d433382f56cded6e920dc00..29b79e85fa7f2a94ff3cf4459f9754015cd3fdc5 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -600,14 +600,9 @@ void qla2x00_async_nack_sp_done(void *s, int res) sp->fcport->login_succ = 1; vha->fcport_count++; - - ql_dbg(ql_dbg_disc, vha, 0x20f3, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, - sp->fcport->port_name, - vha->fcport_count); - sp->fcport->disc_state = DSC_UPD_FCPORT; - qla24xx_post_upd_fcport_work(vha, sp->fcport); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + qla24xx_sched_upd_fcport(sp->fcport); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; sp->fcport->disc_state = DSC_LOGIN_COMPLETE; @@ -1227,11 +1222,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; unsigned long flags; + u16 sec; - if (sess->disc_state == DSC_DELETE_PEND) + switch (sess->disc_state) { + case DSC_DELETE_PEND: return; - - if (sess->disc_state == DSC_DELETED) { + case DSC_DELETED: if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); if (sess->vha->fcport_count == 0) @@ -1240,6 +1236,24 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) return; + break; + case DSC_UPD_FCPORT: + /* + * This port is not done reporting to upper layer. + * let it finish + */ + sess->next_disc_state = DSC_DELETE_PEND; + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration)/1000; + if (sess->sec_since_registration < sec && sec && !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s %8phC : Slow Rport registration(%d Sec)\n", + __func__, sess->port_name, sec); + } + return; + default: + break; } if (sess->deleted == QLA_SESS_DELETED) @@ -4749,6 +4763,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha, goto out; } + if (sess->disc_state == DSC_UPD_FCPORT) { + u16 sec; + + /* + * Remote port registration is still going on from + * previous login. Allow it to finish before we + * accept the new login. 
+ */ + sess->next_disc_state = DSC_DELETE_PEND; + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration) / 1000; + if (sess->sec_since_registration < sec && sec && + !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC - Slow Rport registration (%d Sec)\n", + __func__, sess->port_name, sec); + } + + if (!conflict_sess) + kmem_cache_free(qla_tgt_plogi_cachep, pla); + + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); sess->d_id = port_id; sess->login_gen++; @@ -4908,6 +4948,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (sess != NULL) { bool delete = false; + int sec; spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); switch (sess->fw_login_state) { case DSC_LS_PLOGI_PEND: @@ -4920,9 +4961,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } switch (sess->disc_state) { + case DSC_UPD_FCPORT: + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, + flags); + + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration)/1000; + if (sess->sec_since_registration < sec && sec && + !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s %8phC : Slow Rport registration(%d Sec)\n", + __func__, sess->port_name, sec); + } + qlt_send_term_imm_notif(vha, iocb, 1); + return 0; + case DSC_LOGIN_PEND: case DSC_GPDB: - case DSC_UPD_FCPORT: case DSC_LOGIN_COMPLETE: case DSC_ADISC: delete = false; @@ -5959,10 +6015,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, case MODE_DUAL: if (newfcport) { if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { - ql_dbg(ql_dbg_disc, vha, 0x20fe, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, fcport->port_name, vha->fcport_count); - qla24xx_post_upd_fcport_work(vha, fcport); + qla24xx_sched_upd_fcport(fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ff, "%s %d %8phC post gpsc fcp_cnt %d\n", diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a1dbae806fdea847d88bcb69cbb534b683cd00bd..d2b045eb727424cf1132b637e028e2fb4b2fd053 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5384,6 +5384,12 @@ static int __init scsi_debug_init(void) pr_err("submit_queues must be 1 or more\n"); return -EINVAL; } + + if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { + pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); + return -EINVAL; + } + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), GFP_KERNEL); if (sdebug_q_arr == NULL) diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 04d095488c764726efd5da82bc09ba72457e4318..6983473011980de09299120a41c5ca175b5374b5 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3172,7 +3172,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index b382465a1afb6fc06d50e2259cb905fcc25c3228..be9f143a38d9ec5d5d12b9eac16f9dd519d74fbd 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -21,6 +21,7 @@ #define UFS_ANY_VENDOR 0xFFFF #define UFS_ANY_MODEL "ANY_MODEL" +#define UFS_VENDOR_MICRON 0x12C #define UFS_VENDOR_TOSHIBA 0x198 
#define UFS_VENDOR_SAMSUNG 0x1CE #define UFS_VENDOR_SKHYNIX 0x1AD diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 35ef6d52a0efc477743c33e2543e6b03a1309b59..3c850b3170e3aa56aa13d87ae3e72070a3c98600 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -423,6 +423,8 @@ static inline bool ufshcd_is_valid_pm_lvl(int lvl) static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_ANY_VENDOR, UFS_ANY_MODEL, @@ -799,20 +801,20 @@ static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba, u8 opcode = 0; u8 cmd_id = 0, idn = 0; sector_t lba = 0; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = 0; - if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( lrbp->ucd_req_ptr->sc.exp_data_transfer_len); } @@ -2189,6 +2191,7 @@ static void ufshcd_ungate_work(struct work_struct *work) int ufshcd_hold(struct ufs_hba *hba, bool async) { int rc = 0; + bool flush_result; unsigned long flags; if (!ufshcd_is_clkgating_allowed(hba)) @@ -2221,7 +2224,9 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) } spin_unlock_irqrestore(hba->host->host_lock, flags); - flush_work(&hba->clk_gating.ungate_work); + flush_result = flush_work(&hba->clk_gating.ungate_work); + if (hba->clk_gating.is_suspended && !flush_result) + goto out; spin_lock_irqsave(hba->host->host_lock, flags); if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) goto start; @@ -2232,7 +2237,9 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) * If the timer was active but the callback was not running * we have nothing to do, just change state and return. */ - if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) { + if ((hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) + && !(work_pending(&hba->clk_gating.gate_work)) + && !hba->clk_gating.gate_wk_in_process) { hba->clk_gating.state = CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); @@ -2284,7 +2291,9 @@ static void ufshcd_gate_work(struct work_struct *work) clk_gating.gate_work); unsigned long flags; + hba->clk_gating.gate_wk_in_process = true; spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == CLKS_OFF) goto rel_lock; /* @@ -2360,6 +2369,7 @@ static void ufshcd_gate_work(struct work_struct *work) rel_lock: spin_unlock_irqrestore(hba->host->host_lock, flags); out: + hba->clk_gating.gate_wk_in_process = false; return; } @@ -3059,13 +3069,13 @@ int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_cond_add_cmd_trace(hba, task_tag, + hba->lrb[task_tag].cmd ? 
"scsi_send" : "dev_cmd_send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_cond_add_cmd_trace(hba, task_tag, - hba->lrb[task_tag].cmd ? "scsi_send" : "dev_cmd_send"); ufshcd_update_tag_stats(hba, task_tag); return 0; } @@ -4564,7 +4574,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, } /* Check whether we need temp memory */ - if (param_offset != 0) { + if (param_offset != 0 || param_size < buff_len) { desc_buf = kzalloc(buff_len, GFP_KERNEL); if (!desc_buf) return -ENOMEM; @@ -5681,9 +5691,10 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); */ static int ufshcd_complete_dev_init(struct ufs_hba *hba) { - int i; + int i = 0; int err; bool flag_res = 1; + ktime_t timeout; err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, QUERY_FLAG_IDN_FDEVICEINIT, NULL); @@ -5694,10 +5705,30 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) goto out; } - /* poll for max. 1000 iterations for fDeviceInit flag to clear */ - for (i = 0; i < 1000 && !err && flag_res; i++) + /* + * Some vendor devices are taking longer time to complete its internal + * initialization, so set fDeviceInit flag poll time to 5 secs + */ + timeout = ktime_add_ms(ktime_get(), 5000); + + /* poll for max. 5sec for fDeviceInit flag to clear */ + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + if (err || !flag_res || timedout) + break; + + /* + * Poll for this flag in a tight loop for first 1000 iterations. + * This is same as old logic which is working for most of the + * devices, so continue using the same. + */ + if (i == 1000) + msleep(20); + else + i++; + } if (err) dev_err(hba->dev, @@ -7675,7 +7706,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status; + u32 intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; @@ -7691,7 +7722,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) * read, make sure we handle them by checking the interrupt status * again in a loop until we process all of the reqs before returning. */ - do { + while (intr_status && retries--) { enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) @@ -7702,7 +7733,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) } intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - } while (intr_status && --retries); + } if (retval == IRQ_NONE) { dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", @@ -8027,7 +8058,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) /* command completed already */ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", __func__, tag); - goto out; + goto cleanup; } else { dev_err(hba->dev, "%s: no response from device. 
tag = %d, err %d\n", @@ -8061,6 +8092,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto out; } +cleanup: scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); @@ -10534,13 +10566,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* enable the host irq as host controller would be active soon */ ufshcd_enable_irq(hba); - /* Pull up RST_n before device reset */ - if (ufshcd_is_link_off(hba)) { - ret = ufshcd_deassert_device_reset(hba); - if (ret) - goto disable_irq_and_vops_clks; - } - /* * Call vendor specific resume callback. As these callbacks may access * vendor specific host controller register space call them when the @@ -10548,7 +10573,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) */ ret = ufshcd_vops_resume(hba, pm_op); if (ret) - goto assert_device_reset; + goto disable_irq_and_vops_clks; if (ufshcd_is_link_hibern8(hba)) { ret = ufshcd_uic_hibern8_exit(hba); @@ -10639,9 +10664,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba->hibern8_on_idle.state = HIBERN8_ENTERED; vendor_suspend: ufshcd_vops_suspend(hba, pm_op); -assert_device_reset: - if (ufshcd_is_link_off(hba)) - ufshcd_assert_device_reset(hba); disable_irq_and_vops_clks: ufshcd_disable_irq(hba); if (hba->clk_scaling.is_allowed) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index a32091884dcc57884f74c00f9fbef298152f7d1c..ccdcfa4ed1acb34ddc2214851cddd3bf1d617be5 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -480,6 +480,7 @@ struct ufs_clk_gating { struct device_attribute delay_perf_attr; struct device_attribute enable_attr; bool is_enabled; + bool gate_wk_in_process; int active_reqs; struct workqueue_struct *clk_gating_workq; }; diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 74ce019939c46fc7bdee21a8a71cb3a8e74ff5e4..439df920838516ab5846c14734c87f35d6af220c 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -2270,6 +2270,9 @@ int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode) if (!dev) return -ENODEV; + if (test_bit(SKIP_QMI, &quirks)) + return 0; + if (test_bit(ICNSS_FW_DOWN, &penv->state) || !test_bit(ICNSS_FW_READY, &penv->state)) { icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n", @@ -2363,6 +2366,9 @@ int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config, enum icnss_driver_mode mode, const char *host_version) { + if (test_bit(SKIP_QMI, &quirks)) + return 0; + if (test_bit(ICNSS_FW_DOWN, &penv->state) || !test_bit(ICNSS_FW_READY, &penv->state)) { icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n", @@ -2382,6 +2388,9 @@ EXPORT_SYMBOL(icnss_wlan_enable); int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode) { + if (test_bit(SKIP_QMI, &quirks)) + return 0; + if (test_bit(ICNSS_FW_DOWN, &penv->state)) { icnss_pr_dbg("FW down, ignoring wlan_disable state: 0x%lx\n", penv->state); diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index a24323780645fd6e65df95691f38e11e82583b20..f8c997267cecf90fa6b1f42abc45f068f704a5a7 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include "main.h" #include "qmi.h" #include "debug.h" @@ -2329,7 +2331,6 @@ EXPORT_SYMBOL(icnss_set_fw_log_mode); int icnss_force_wake_request(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); - int count = 0; if (!dev) return -ENODEV; @@ -2339,9 +2340,9 @@ int 
icnss_force_wake_request(struct device *dev) return -EINVAL; } - if (atomic_read(&priv->soc_wake_ref_count)) { - count = atomic_inc_return(&priv->soc_wake_ref_count); - icnss_pr_dbg("SOC already awake, Ref count: %d", count); + if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) { + icnss_pr_dbg("SOC already awake, Ref count: %d", + atomic_read(&priv->soc_wake_ref_count)); return 0; } @@ -2757,6 +2758,8 @@ EXPORT_SYMBOL(icnss_idle_restart); int icnss_exit_power_save(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); + unsigned int value = 0; + int ret; icnss_pr_dbg("Calling Exit Power Save\n"); @@ -2764,8 +2767,18 @@ int icnss_exit_power_save(struct device *dev) !test_bit(ICNSS_MODE_ON, &priv->state)) return 0; - return wlfw_power_save_send_msg(priv, - (enum wlfw_power_save_mode_v01)ICNSS_POWER_SAVE_EXIT); + value |= priv->smp2p_info.seq++; + value <<= ICNSS_SMEM_SEQ_NO_POS; + value |= ICNSS_POWER_SAVE_EXIT; + ret = qcom_smem_state_update_bits( + priv->smp2p_info.smem_state, + ICNSS_SMEM_VALUE_MASK, + value); + if (ret) + icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); + + icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); + return ret; } EXPORT_SYMBOL(icnss_exit_power_save); @@ -3150,6 +3163,20 @@ static void icnss_init_control_params(struct icnss_priv *priv) } } +static inline void icnss_get_smp2p_info(struct icnss_priv *priv) +{ + + priv->smp2p_info.smem_state = + qcom_smem_state_get(&priv->pdev->dev, + "wlan-smp2p-out", + &priv->smp2p_info.smem_bit); + if (IS_ERR(priv->smp2p_info.smem_state)) { + icnss_pr_dbg("Failed to get smem state %d", + PTR_ERR(priv->smp2p_info.smem_state)); + } + +} + static inline void icnss_runtime_pm_init(struct icnss_priv *priv) { pm_runtime_get_sync(&priv->pdev->dev); @@ -3271,6 +3298,7 @@ static int icnss_probe(struct platform_device *pdev) icnss_runtime_pm_init(priv); icnss_get_cpr_info(priv); + icnss_get_smp2p_info(priv); set_bit(ICNSS_COLD_BOOT_CAL, &priv->state); } @@ -3340,6 +3368,7 @@ static int icnss_remove(struct platform_device *pdev) static int icnss_pm_suspend(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); + unsigned int value = 0; int ret = 0; if (priv->magic != ICNSS_MAGIC) { @@ -3351,6 +3380,7 @@ static int icnss_pm_suspend(struct device *dev) icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state); if (!priv->ops || !priv->ops->pm_suspend || + IS_ERR(priv->smp2p_info.smem_state) || !test_bit(ICNSS_DRIVER_PROBED, &priv->state)) return 0; @@ -3358,11 +3388,23 @@ static int icnss_pm_suspend(struct device *dev) if (ret == 0) { if (priv->device_id == WCN6750_DEVICE_ID) { - ret = wlfw_power_save_send_msg(priv, - (enum wlfw_power_save_mode_v01) - ICNSS_POWER_SAVE_ENTER); + if (test_bit(ICNSS_PD_RESTART, &priv->state) || + !test_bit(ICNSS_MODE_ON, &priv->state)) + return 0; + + value |= priv->smp2p_info.seq++; + value <<= ICNSS_SMEM_SEQ_NO_POS; + value |= ICNSS_POWER_SAVE_ENTER; + + ret = qcom_smem_state_update_bits( + priv->smp2p_info.smem_state, + ICNSS_SMEM_VALUE_MASK, + value); if (ret) - return priv->ops->pm_resume(dev); + icnss_pr_dbg("Error in SMP2P sent ret: %d\n", + ret); + + icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); } priv->stats.pm_suspend++; set_bit(ICNSS_PM_SUSPEND, &priv->state); @@ -3386,6 +3428,7 @@ static int icnss_pm_resume(struct device *dev) icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state); if (!priv->ops || !priv->ops->pm_resume || + IS_ERR(priv->smp2p_info.smem_state) || !test_bit(ICNSS_DRIVER_PROBED, &priv->state)) goto out; @@ -3462,6 +3505,7 @@ static int 
icnss_pm_resume_noirq(struct device *dev) static int icnss_pm_runtime_suspend(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); + unsigned int value = 0; int ret = 0; if (priv->magic != ICNSS_MAGIC) { @@ -3470,17 +3514,29 @@ static int icnss_pm_runtime_suspend(struct device *dev) return -EINVAL; } - if (!priv->ops || !priv->ops->runtime_suspend) + if (!priv->ops || !priv->ops->runtime_suspend || + IS_ERR(priv->smp2p_info.smem_state)) goto out; icnss_pr_vdbg("Runtime suspend\n"); ret = priv->ops->runtime_suspend(dev); if (!ret) { - ret = wlfw_power_save_send_msg(priv, - (enum wlfw_power_save_mode_v01) - ICNSS_POWER_SAVE_ENTER); + if (test_bit(ICNSS_PD_RESTART, &priv->state) || + !test_bit(ICNSS_MODE_ON, &priv->state)) + return 0; + + value |= priv->smp2p_info.seq++; + value <<= ICNSS_SMEM_SEQ_NO_POS; + value |= ICNSS_POWER_SAVE_ENTER; + + ret = qcom_smem_state_update_bits( + priv->smp2p_info.smem_state, + ICNSS_SMEM_VALUE_MASK, + value); if (ret) - return priv->ops->runtime_resume(dev); + icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); + + icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); } out: return ret; @@ -3497,7 +3553,8 @@ static int icnss_pm_runtime_resume(struct device *dev) return -EINVAL; } - if (!priv->ops || !priv->ops->runtime_resume) + if (!priv->ops || !priv->ops->runtime_resume || + IS_ERR(priv->smp2p_info.smem_state)) goto out; icnss_pr_vdbg("Runtime resume, state: 0x%lx\n", priv->state); diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index dc740b19ae91921f4bf92e6acdb96d89c666d20d..5be127da214d895feae17e1113078340f839e881 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -23,6 +23,9 @@ #define ADRASTEA_DEVICE_ID 0xabcd #define QMI_WLFW_MAX_NUM_MEM_SEG 32 #define THERMAL_NAME_LENGTH 20 +#define ICNSS_SMEM_VALUE_MASK 0xFFFFFFFF +#define ICNSS_SMEM_SEQ_NO_POS 16 + extern uint64_t dynamic_feature_mask; enum icnss_bdf_type { @@ -170,7 +173,7 @@ struct icnss_fw_mem { }; enum icnss_power_save_mode { - ICNSS_POWER_SAVE_ENTER, + ICNSS_POWER_SAVE_ENTER = 1, ICNSS_POWER_SAVE_EXIT, }; struct icnss_stats { @@ -311,6 +314,12 @@ struct icnss_thermal_cdev { struct thermal_cooling_device *tcdev; }; +struct smp2p_out_info { + unsigned short seq; + unsigned int smem_bit; + struct qcom_smem_state *smem_state; +}; + struct icnss_priv { uint32_t magic; struct platform_device *pdev; @@ -387,6 +396,7 @@ struct icnss_priv { struct mutex dev_lock; uint32_t fw_error_fatal_irq; uint32_t fw_early_crash_irq; + struct smp2p_out_info smp2p_info; struct completion unblock_shutdown; struct adc_tm_param vph_monitor_params; struct adc_tm_chip *adc_tm_dev; diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c index 9550acbe9f18ae2bd56c9cdaf8b0e771ca521438..7088d6b1fc4505f10c4cb88178ad8f7b756bc1ab 100644 --- a/drivers/soc/qcom/qbt_handler.c +++ b/drivers/soc/qcom/qbt_handler.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. 
*/ #define DEBUG @@ -371,6 +371,8 @@ static long qbt_ioctl( { struct qbt_wuhb_connected_status wuhb_connected_status; + memset(&wuhb_connected_status, 0, + sizeof(wuhb_connected_status)); wuhb_connected_status.is_wuhb_connected = drvdata->is_wuhb_connected; rc = copy_to_user((void __user *)priv_arg, diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c index 1555c4f444757ac05895024eb86661772bbdef9b..5de35201c7dba41dcf92ffe38e340ea995ae0b9b 100644 --- a/drivers/soc/qcom/ramdump.c +++ b/drivers/soc/qcom/ramdump.c @@ -241,6 +241,8 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, if ((unsigned long)device_mem & 0x7) { bytes_before = 8 - ((unsigned long)device_mem & 0x7); + bytes_before = min_t(unsigned long, (unsigned long)copy_size, + bytes_before); memcpy_fromio(alignbuf, device_mem, bytes_before); device_mem += bytes_before; alignbuf += bytes_before; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 1aba268c05ea8801ac94a30f9cf34904c34f4353..bfdb51ddcbaba5d1b1a7fb9b647fe1fbb268b20d 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -829,4 +829,7 @@ config SPI_SLAVE_SYSTEM_CONTROL endif # SPI_SLAVE +config SPI_DYNAMIC + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE + endif # SPI diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 1e8ff6256079f1344e5e6a1ceb2192d620bf0d5b..b8dd75b8518b5dbcbf7b75765789b67f9f9e464b 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -559,13 +559,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events) static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { struct fsl_espi *espi = context_data; - u32 events; + u32 events, mask; spin_lock(&espi->lock); /* Get interrupt events(tx/rx) */ events = fsl_espi_read_reg(espi, ESPI_SPIE); - if (!events) { + mask = fsl_espi_read_reg(espi, ESPI_SPIM); + if (!(events & mask)) { spin_unlock(&espi->lock); return IRQ_NONE; } diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c index d5976615d924b7b1d5c4a930aeca4765cf01ea47..dc740b5f720ba0cbc1919a55f361ecb0c499b8ea 100644 --- a/drivers/spi/spi-lantiq-ssc.c +++ b/drivers/spi/spi-lantiq-ssc.c @@ -187,6 +187,7 @@ struct lantiq_ssc_spi { unsigned int tx_fifo_size; unsigned int rx_fifo_size; unsigned int base_cs; + unsigned int fdx_tx_level; }; static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg) @@ -484,6 +485,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) u32 data; unsigned int tx_free = tx_fifo_free(spi); + spi->fdx_tx_level = 0; while (spi->tx_todo && tx_free) { switch (spi->bits_per_word) { case 2 ... 8: @@ -512,6 +514,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) lantiq_ssc_writel(spi, data, LTQ_SPI_TB); tx_free--; + spi->fdx_tx_level++; } } @@ -523,6 +526,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi) u32 data; unsigned int rx_fill = rx_fifo_level(spi); + /* + * Wait until all expected data to be shifted in. + * Otherwise, rx overrun may occur. 
+ */ + while (rx_fill != spi->fdx_tx_level) + rx_fill = rx_fifo_level(spi); + while (rx_fill) { data = lantiq_ssc_readl(spi, LTQ_SPI_RB); diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index bed7403bb6b3a8b1e172dd5ddb286ad058ad8e83..b9a7117b6dce3b8262af4de10d166c0d6f6e316a 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -99,7 +99,7 @@ static struct spi_test spi_tests[] = { { .description = "tx/rx-transfer - crossing PAGE_SIZE", .fill_option = FILL_COUNT_8, - .iterate_len = { ITERATE_MAX_LEN }, + .iterate_len = { ITERATE_LEN }, .iterate_tx_align = ITERATE_ALIGN, .iterate_rx_align = ITERATE_ALIGN, .transfer_count = 1, diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index ad1e55d3d5d596e26b25ca8b49aa23fc63b3748d..391a20b3d2fda5085e3501e98f95a363e5ee277d 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -254,7 +254,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) { u32 div, mbrdiv; - div = DIV_ROUND_UP(spi->clk_rate, speed_hz); + /* Ensure spi->clk_rate is even */ + div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f589d8100e957e14abc353366efefa32a57accb0..1fd529a2d2f6b961b056f131e01feb732fad7948 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -432,6 +432,12 @@ static LIST_HEAD(spi_controller_list); */ static DEFINE_MUTEX(board_lock); +/* + * Prevents addition of devices with same chip select and + * addition of devices below an unregistering controller. + */ +static DEFINE_MUTEX(spi_add_lock); + /** * spi_alloc_device - Allocate a new SPI device * @ctlr: Controller to which device is connected @@ -510,7 +516,6 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { - static DEFINE_MUTEX(spi_add_lock); struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; int status; @@ -538,6 +543,13 @@ int spi_add_device(struct spi_device *spi) goto done; } + /* Controller may unregister concurrently */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && + !device_is_registered(&ctlr->dev)) { + status = -ENODEV; + goto done; + } + if (ctlr->cs_gpios) spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; @@ -1104,8 +1116,6 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, if (msg->status && ctlr->handle_err) ctlr->handle_err(ctlr, msg); - spi_res_release(ctlr, msg); - spi_finalize_current_message(ctlr); return ret; @@ -1363,6 +1373,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spi_unmap_msg(ctlr, mesg); + /* In the prepare_messages callback the spi bus has the opportunity to + * split a transfer into smaller chunks. + * Release the split transfers here since spi_map_msg is done on the + * split transfers.
+ */ + spi_res_release(ctlr, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { @@ -2306,6 +2323,10 @@ void spi_unregister_controller(struct spi_controller *ctlr) struct spi_controller *found; int id = ctlr->bus_num; + /* Prevent addition of new devices, unregister existing ones */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_lock(&spi_add_lock); + device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ @@ -2326,6 +2347,9 @@ void spi_unregister_controller(struct spi_controller *ctlr) if (found == ctlr) idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); + + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_unlock(&spi_add_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c index aa3e69c496685e7c56542f823e8e107a575b3167..1991fbfe64d689a46419d82150f2f4e2222b6565 100644 --- a/drivers/spi/spi_qsd.c +++ b/drivers/spi/spi_qsd.c @@ -1704,28 +1704,82 @@ static int msm_spi_transfer_one(struct spi_master *master, return status_error; } -static int msm_spi_prepare_transfer_hardware(struct spi_master *master) +static int msm_spi_pm_get_sync(struct device *dev) { - struct msm_spi *dd = spi_master_get_devdata(master); - int resume_state = 0; - - resume_state = pm_runtime_get_sync(dd->dev); - if (resume_state < 0) - goto spi_finalize; + int ret; /* * Counter-part of system-suspend when runtime-pm is not enabled. * This way, resume can be left empty and device will be put in * active mode only if client requests anything on the bus */ - if (!pm_runtime_enabled(dd->dev)) - resume_state = msm_spi_pm_resume_runtime(dd->dev); + if (!pm_runtime_enabled(dev)) { + dev_info(dev, "%s: pm_runtime not enabled\n", __func__); + ret = msm_spi_pm_resume_runtime(dev); + } else { + ret = pm_runtime_get_sync(dev); + } + + return ret; +} + +static int msm_spi_pm_put_sync(struct device *dev) +{ + int ret = 0; + + if (!pm_runtime_enabled(dev)) { + dev_info(dev, "%s: pm_runtime not enabled\n", __func__); + ret = msm_spi_pm_suspend_runtime(dev); + } else { + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + } + + return ret; +} + +static int msm_spi_prepare_message(struct spi_master *master, + struct spi_message *spi_msg) +{ + struct msm_spi *dd = spi_master_get_devdata(master); + int resume_state; + + resume_state = msm_spi_pm_get_sync(dd->dev); if (resume_state < 0) - goto spi_finalize; - if (dd->suspended) { - resume_state = -EBUSY; - goto spi_finalize; + return resume_state; + + return 0; +} + +static int msm_spi_unprepare_message(struct spi_master *master, + struct spi_message *spi_msg) +{ + struct msm_spi *dd = spi_master_get_devdata(master); + int ret; + + ret = msm_spi_pm_put_sync(dd->dev); + if (ret < 0) + return ret; + + return 0; +} + +static int msm_spi_prepare_transfer_hardware(struct spi_master *master) +{ + struct msm_spi *dd = spi_master_get_devdata(master); + int resume_state; + + if (!dd->pdata->shared_ee) { + resume_state = msm_spi_pm_get_sync(dd->dev); + if (resume_state < 0) + goto spi_finalize; + + if (dd->suspended) { + resume_state = -EBUSY; + goto spi_finalize; + } } + return 0; spi_finalize: @@ -1736,9 +1790,14 @@ static int msm_spi_prepare_transfer_hardware(struct spi_master *master) static int msm_spi_unprepare_transfer_hardware(struct spi_master *master) { struct msm_spi *dd = spi_master_get_devdata(master); + int ret; + + if (!dd->pdata->shared_ee) { + ret = msm_spi_pm_put_sync(dd->dev); + if 
(ret < 0) + return ret; + } - pm_runtime_mark_last_busy(dd->dev); - pm_runtime_put_autosuspend(dd->dev); return 0; } @@ -2234,6 +2293,8 @@ static struct msm_spi_platform_data *msm_spi_dt_to_pdata( &pdata->rt_priority, DT_OPT, DT_BOOL, 0}, {"qcom,shared", &pdata->is_shared, DT_OPT, DT_BOOL, 0}, + {"qcom,shared_ee", + &pdata->shared_ee, DT_OPT, DT_BOOL, 0}, {NULL, NULL, 0, 0, 0}, }; @@ -2557,6 +2618,12 @@ static int msm_spi_probe(struct platform_device *pdev) goto err_probe_reqmem; } + /* This property is required for Dual EE use case of spi */ + if (dd->pdata->shared_ee) { + master->prepare_message = msm_spi_prepare_message; + master->unprepare_message = msm_spi_unprepare_message; + } + pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 167047760d79adec1f7ec5e2762f1ec27f491ab1..e444e7cc6968d24934bed2ab43a8fa4af33bb6a8 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -232,6 +232,11 @@ static int spidev_message(struct spidev_data *spidev, for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) { + /* Ensure that also following allocations from rx_buf/tx_buf will meet + * DMA alignment requirements. + */ + unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN); + k_tmp->len = u_tmp->len; total += k_tmp->len; @@ -247,17 +252,17 @@ static int spidev_message(struct spidev_data *spidev, if (u_tmp->rx_buf) { /* this transfer needs space in RX bounce buffer */ - rx_total += k_tmp->len; + rx_total += len_aligned; if (rx_total > bufsiz) { status = -EMSGSIZE; goto done; } k_tmp->rx_buf = rx_buf; - rx_buf += k_tmp->len; + rx_buf += len_aligned; } if (u_tmp->tx_buf) { /* this transfer needs space in TX bounce buffer */ - tx_total += k_tmp->len; + tx_total += len_aligned; if (tx_total > bufsiz) { status = -EMSGSIZE; goto done; @@ -267,7 +272,7 @@ static int spidev_message(struct spidev_data *spidev, (uintptr_t) u_tmp->tx_buf, u_tmp->len)) goto done; - tx_buf += k_tmp->len; + tx_buf += len_aligned; } k_tmp->cs_change = !!u_tmp->cs_change; @@ -297,16 +302,16 @@ static int spidev_message(struct spidev_data *spidev, goto done; /* copy any rx data out of bounce buffer */ - rx_buf = spidev->rx_buffer; - for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; + n; + n--, k_tmp++, u_tmp++) { if (u_tmp->rx_buf) { if (copy_to_user((u8 __user *) - (uintptr_t) u_tmp->rx_buf, rx_buf, + (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf, u_tmp->len)) { status = -EFAULT; goto done; } - rx_buf += u_tmp->len; } } status = total; diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index e3df4bf521b50edb008a481bab2973155ea1f897..a97bbd89fae278180e391ec84287b282da58148a 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex); static struct kmem_cache *ashmem_area_cachep __read_mostly; static struct kmem_cache *ashmem_range_cachep __read_mostly; +/* + * A separate lockdep class for the backing shmem inodes to resolve the lockdep + * warning about the race between kswapd taking fs_reclaim before inode_lock + * and write syscall taking inode_lock and then fs_reclaim. + * Note that such race is impossible because ashmem does not support write + * syscalls operating on the backing shmem. 
+ */ +static struct lock_class_key backing_shmem_inode_class; + static inline unsigned long range_size(struct ashmem_range *range) { return range->pgend - range->pgstart + 1; @@ -395,6 +404,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) if (!asma->file) { char *name = ASHMEM_NAME_DEF; struct file *vmfile; + struct inode *inode; if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') name = asma->name; @@ -406,6 +416,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) goto out; } vmfile->f_mode |= FMODE_LSEEK; + inode = file_inode(vmfile); + lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class); asma->file = vmfile; /* * override mmap operation of the vmfile so that it can't be diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 65dc6c51037e30edf30b1ad7e0d6eea7c0390e86..7956abcbae22bfdd2294b1220d892763bd431b3a 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -667,6 +667,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev) if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; + if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx)) + return -EINVAL; + return 0; } diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index b71078339e860d8d6713f28ec20f2a31588d18a4..860247d718184575780b5fa73f4652e0ac1e2708 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0] & mask; connect = !!val; + ret = gb_pm_runtime_get_sync(bundle); + if (ret) + return ret; + + ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); + if (ret) + goto exit; + /* update ucontrol */ if (gbvalue.value.integer_value[0] != val) { for (wi = 0; wi < wlist->num_widgets; wi++) { @@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, gbvalue.value.integer_value[0] = cpu_to_le32(ucontrol->value.integer.value[0]); - ret = gb_pm_runtime_get_sync(bundle); - if (ret) - return ret; - ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id, GB_AUDIO_INVALID_INDEX, &gbvalue); - - gb_pm_runtime_put_autosuspend(bundle); - - if (ret) { - dev_err_ratelimited(codec->dev, - "%d:Error in %s for %s\n", ret, - __func__, kcontrol->id.name); - return ret; - } } - return 0; +exit: + gb_pm_runtime_put_autosuspend(bundle); + if (ret) + dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, + __func__, kcontrol->id.name); + return ret; } #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index 256039ce561e6c89f4cc4e123f6046ff59308c3e..81a3370551dbcb386763d803e6db052e3fa36232 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -678,7 +678,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) /* setup default format */ fmt_src.pad = priv->src_sd_pad; fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; - v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); + ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); if (ret) { v4l2_err(sd, "failed to get src_sd format\n"); goto unreg; diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c 
b/drivers/staging/rtl8188eu/core/rtw_recv.c index 17b4b9257b49588aa505566656e356ce6805018e..0ddf41b5a734a0fd0b5428bea843662d3db88fc4 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -1535,21 +1535,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); - if (sub_skb) { - skb_reserve(sub_skb, 12); - skb_put_data(sub_skb, pdata, nSubframe_Length); - } else { - sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC); - if (sub_skb) { - sub_skb->data = pdata; - sub_skb->len = nSubframe_Length; - skb_set_tail_pointer(sub_skb, nSubframe_Length); - } else { - DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes); - break; - } + if (!sub_skb) { + DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes); + break; } + skb_reserve(sub_skb, 12); + skb_put_data(sub_skb, pdata, nSubframe_Length); + subframes[nr_subframes++] = sub_skb; if (nr_subframes >= MAX_SUBFRAME_COUNT) { diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 2066a1d9bc84e7240f508a33106678dd850b8902..87244a20897637acfe7196d2f9dc117290d15783 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -2484,7 +2484,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev) ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1)); if (ret < 0) return ret; - priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8; + priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8; } else priv->EEPROMTxPowerLevelCCK = 0x10; RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK); diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 65ad9773018ee6ffe76ed7685124798c02bc4230..7686805dfe0f2c9176cc2f7e4e7815339ff63e65 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -532,13 +532,8 @@ static void hfa384x_usb_defer(struct work_struct *data) */ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) { - memset(hw, 0, sizeof(*hw)); hw->usb = usb; - /* set up the endpoints */ - hw->endp_in = usb_rcvbulkpipe(usb, 1); - hw->endp_out = usb_sndbulkpipe(usb, 2); - /* Set up the waitq */ init_waitqueue_head(&hw->cmdq); diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 8d32b1603d10abd1630aad02b38588dcff82efd0..9eee72aff72335ec208469e2976d04b307449b29 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - const struct usb_endpoint_descriptor *epd; - const struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; - if (iface_desc->desc.bNumEndpoints != 2) { - result = -ENODEV; - goto failed; - } - - result = -EINVAL; - epd = &iface_desc->endpoint[1].desc; - if (!usb_endpoint_is_bulk_in(epd)) - goto failed; - epd = &iface_desc->endpoint[2].desc; - if (!usb_endpoint_is_bulk_out(epd)) + result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); + if (result) goto failed; dev = interface_to_usbdev(interface); @@ -96,6 +87,8 
@@ static int prism2sta_probe_usb(struct usb_interface *interface, } /* Initialize the hw data */ + hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); + hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); hfa384x_create(hw, dev); hw->wlandev = wlandev; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1633e26662687e6f41ff076f0150227fa4b06278..2602b57936d4bca1b52679532ebfa1a8211c6e10 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1381,14 +1381,27 @@ static u32 iscsit_do_crypto_hash_sg( sg = cmd->first_data_sg; page_off = cmd->first_data_sg_off; + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + while (data_length) { - u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); + u32 cur_len = min_t(u32, data_length, sg->length); ahash_request_set_crypt(hash, sg, NULL, cur_len); crypto_ahash_update(hash); data_length -= cur_len; - page_off = 0; /* iscsit_map_iovec has already checked for invalid sg pointers */ sg = sg_next(sg); } diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index f25049ba4a85b13ac3c77e8cab88ed969de5feb2..db93bd0a9b886b725ab44e69ac6db2d42997ba7f 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1183,7 +1183,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) } void iscsi_target_login_sess_out(struct iscsi_conn *conn, - struct iscsi_np *np, bool zero_tsih, bool new_sess) + bool zero_tsih, bool new_sess) { if (!new_sess) goto old_sess_out; @@ -1201,7 +1201,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, conn->sess = NULL; old_sess_out: - iscsi_stop_login_thread_timer(np); /* * If login negotiation fails check if the Time2Retain timer * needs to be restarted. 
@@ -1441,8 +1440,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: + iscsi_stop_login_thread_timer(np); tpg_np = conn->tpg_np; - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; if (tpg) { diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3b8e3639ff5d01c6dbeb31ffb1201bb2453a822c..fc95e6150253f6ed744bb9ae955960b8bb1a3230 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, - bool, bool); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); extern int iscsi_target_login_thread(void *); extern void iscsi_handle_login_thread_timeout(struct timer_list *t); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 8a5e8d17a942620d8f17bb519bbb4fffd369e494..5db8842a8026534942876bf372dc757d668d149e 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -554,12 +554,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) { - struct iscsi_np *np = login->np; bool zero_tsih = login->zero_tsih; iscsi_remove_failed_auth_entry(conn); iscsi_target_nego_release(conn); - iscsi_target_login_sess_out(conn, np, zero_tsih, true); + iscsi_target_login_sess_out(conn, zero_tsih, true); } struct conn_timeout { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8da89925a874df9740fec483c2d491715ea558df..99314e5162447368a342174e1b611d2da246bc39 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -612,7 +612,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) size = round_up(size+offset, PAGE_SIZE); while (size) { - flush_dcache_page(virt_to_page(start)); + flush_dcache_page(vmalloc_to_page(start)); start += PAGE_SIZE; size -= PAGE_SIZE; } @@ -687,8 +687,10 @@ static void scatter_data_area(struct tcmu_dev *udev, from = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } block_remaining = DATA_BLOCK_SIZE; dbi = tcmu_cmd_get_dbi(tcmu_cmd); @@ -733,7 +735,6 @@ static void scatter_data_area(struct tcmu_dev *udev, memcpy(to + offset, from + sg->length - sg_remaining, copy_bytes); - tcmu_flush_dcache_range(to, copy_bytes); } sg_remaining -= copy_bytes; @@ -742,8 +743,10 @@ static void scatter_data_area(struct tcmu_dev *udev, kunmap_atomic(from - sg->offset); } - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, @@ -789,13 +792,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = tcmu_cmd_get_dbi(cmd); page = tcmu_get_block_page(udev, dbi); from = kmap_atomic(page); + flush_dcache_page(page); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); 
if (read_len < copy_bytes) copy_bytes = read_len; offset = DATA_BLOCK_SIZE - block_remaining; - tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, copy_bytes); @@ -1018,7 +1021,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, sizeof(entry->hdr)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1083,7 +1086,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) cdb_off = CMDR_OFF + cmd_head + base_command_size; memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); entry->req.cdb_off = cdb_off; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, command_size); UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1231,7 +1234,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + /* + * Flush max. up to end of cmd ring since current entry might + * be a padding that is shorter than sizeof(*entry) + */ + size_t ring_left = head_to_end(udev->cmdr_last_cleaned, + udev->cmdr_size); + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? + ring_left : sizeof(*entry)); if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { UPDATE_HEAD(udev->cmdr_last_cleaned, diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 4dc30e7890f6c4c99eed6a8109ffd7aaf880d2a0..140386d7c75a335c26cbb80ff67aa4bfd070f114 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -505,8 +505,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); common->base = devm_ioremap_resource(dev, res); - if (IS_ERR(common->base)) - return PTR_ERR(common->base); + if (IS_ERR(common->base)) { + ret = PTR_ERR(common->base); + goto error_unregister; + } idle = 0; /* polling delay is not needed */ } diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c index c12211eaaac4dfd514cf1d908c5328133d9cc088..0b9f835d931f0fe95b8b1e9e235cc05d3d5dc11b 100644 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c @@ -46,20 +46,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { /* * Temperature values in milli degree celsius - * ADC code values from 530 to 923 + * ADC code values from 13 to 107, see TRM + * "18.4.10.2.3 ADC Codes Versus Temperature". 
*/ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, - 117000, 118000, 120000, 122000, 123000, + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, + 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h index b87c8659ec60874b628f2de8cf3234080c4cd0dc..8a081abce4b5fdd72fc5d6416f15c6de73307259 100644 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h @@ -67,9 +67,13 @@ * and thresholds for OMAP4430. */ -/* ADC conversion table limits */ -#define OMAP4430_ADC_START_VALUE 0 -#define OMAP4430_ADC_END_VALUE 127 +/* + * ADC conversion table limits. Ignore values outside the TRM listed + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter + * "18.4.10.2.3 ADC Codes Versus Temperature". + */ +#define OMAP4430_ADC_START_VALUE 13 +#define OMAP4430_ADC_END_VALUE 107 /* bandgap clock limits (no control on 4430) */ #define OMAP4430_MAX_FREQ 32768 #define OMAP4430_MIN_FREQ 32768 diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 452e034aedc13393d9bbb2f9d46fc3837d7d4442..343da0031299ff1d1094c777ffa7437e6458de09 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -183,7 +183,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, data = ti_bandgap_get_sensor_data(bgp, id); - if (!IS_ERR_OR_NULL(data)) + if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index b6b7ea6b5b82eeb7338b9cbb763dcf9dcca51648..10704522c3532108929ed2806bbcdbb48a6ee945 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -301,12 +301,16 @@ struct device *serdev_tty_port_register(struct tty_port *port, * be ignored. 
*/ if (parent->bus == &platform_bus_type) { - char tty_port_name[7]; - - sprintf(tty_port_name, "%s%d", drv->name, idx); - if (pdev_tty_port && - !strcmp(pdev_tty_port, tty_port_name)) { - platform = true; + if (pdev_tty_port) { + unsigned long pdev_idx; + int tty_len = strlen(drv->name); + + if (!strncmp(pdev_tty_port, drv->name, tty_len)) { + if (!kstrtoul(pdev_tty_port + tty_len, 10, + &pdev_idx) && pdev_idx == idx) { + platform = true; + } + } } } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e1a5887b6d91d7ee7bdd52f6f86907098501d6c2..d2df7d71d6667e74b4ec01fcdfe874e432fcffe9 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -1062,8 +1062,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) serial8250_apply_quirks(uart); ret = uart_add_one_port(&serial8250_reg, &uart->port); - if (ret == 0) - ret = uart->port.line; + if (ret) + goto err; + + ret = uart->port.line; } else { dev_info(uart->port.dev, "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", @@ -1088,6 +1090,11 @@ int serial8250_register_8250_port(struct uart_8250_port *up) mutex_unlock(&serial_mutex); return ret; + +err: + uart->port.dev = NULL; + mutex_unlock(&serial_mutex); + return ret; } EXPORT_SYMBOL(serial8250_register_8250_port); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index d39162e71f59dc6a8c9d37d1c7c9535f61b690ee..195f58c5b477fb75a371a5bac8b2a211a18b7019 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -638,6 +638,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -708,9 +726,9 @@ static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x), EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358), EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358), - EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x), + EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8), EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2), EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4), diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index a019286f8bb65c5673285bdd5ad06c7b0a63aae8..cbd006fb7fbb986bde3407be6e2e02b8bb500e4f 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -781,7 +781,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); count = dma->rx_size - state.residue; - + if (count < dma->rx_size) + dmaengine_terminate_async(dma->rxchan); + if (!count) + goto unlock; ret = tty_insert_flip_string(tty_port, dma->rx_buf, 
count); p->port.icount.rx += ret; @@ -843,7 +846,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) spin_unlock_irqrestore(&priv->rx_dma_lock, flags); __dma_rx_do_complete(p); - dmaengine_terminate_all(dma->rxchan); } static int omap_8250_rx_dma(struct uart_8250_port *p) @@ -1227,11 +1229,11 @@ static int omap8250_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_dma_lock); device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, -1); pm_runtime_irq_safe(&pdev->dev); - pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 02091782bc1e7cb1f6f19dc258a02ad78c4db84c..725e5842b8acc93a89ce805c2cc02694dde964ab 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5236,6 +5236,17 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch384_4 }, + /* + * Realtek RealManage + */ + { PCI_VENDOR_ID_REALTEK, 0x816a, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_REALTEK, 0x816b, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + /* Fintek PCI serial cards */ { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 }, { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 20b799219826e9bfd82366d2016a5edf3893c580..60ca19eca1f63941b63145f64ebc1106c7415e02 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1861,6 +1861,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) unsigned char status; unsigned long flags; struct uart_8250_port *up = up_to_u8250p(port); + bool skip_rx = false; if (iir & UART_IIR_NO_INT) return 0; @@ -1869,7 +1870,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) status = serial_port_in(port, UART_LSR); - if (status & (UART_LSR_DR | UART_LSR_BI)) { + /* + * If the port is stopped and there are no error conditions in the + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer + * overflow. Not servicing the RX FIFO would trigger auto HW flow + * control when FIFO occupancy reaches the preset threshold, thus + * halting RX. This only works when auto HW flow control is + * available. + */ + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && + !(port->read_status_mask & UART_LSR_DR)) + skip_rx = true; + + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); } @@ -2259,6 +2273,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; + + if (port->irqflags & IRQF_SHARED) + disable_irq_nosync(port->irq); + /* * Test for UARTs that do not reassert THRE when the * transmitter is idle and the interrupt has already @@ -2268,8 +2286,6 @@ int serial8250_do_startup(struct uart_port *port) * allow register changes to become visible.
*/ spin_lock_irqsave(&port->lock, flags); - if (up->port.irqflags & IRQF_SHARED) - disable_irq_nosync(port->irq); wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); @@ -2281,9 +2297,10 @@ int serial8250_do_startup(struct uart_port *port) iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + if (port->irqflags & IRQF_SHARED) enable_irq(port->irq); - spin_unlock_irqrestore(&port->lock, flags); /* * If the interrupt is not reasserted, or we otherwise diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 1d501154e9f7823e940b8a1ed7e3469ab477347f..45e4f29521430d3a4fbdb525b95723d5f4d54681 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2252,9 +2252,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_disable(uap->clk); } -static void __init -pl011_console_get_options(struct uart_amba_port *uap, int *baud, - int *parity, int *bits) +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) { if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; @@ -2287,7 +2286,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, } } -static int __init pl011_console_setup(struct console *co, char *options) +static int pl011_console_setup(struct console *co, char *options) { struct uart_amba_port *uap; int baud = 38400; @@ -2355,8 +2354,8 @@ static int __init pl011_console_setup(struct console *co, char *options) * * Returns 0 if console matches; otherwise non-zero to use default matching */ -static int __init pl011_console_match(struct console *co, char *name, int idx, - char *options) +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) { unsigned char iotype; resource_size_t addr; @@ -2594,7 +2593,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, static int pl011_register_port(struct uart_amba_port *uap) { - int ret; + int ret, i; /* Ensure interrupts from this UART are masked and cleared */ pl011_write(0, uap, REG_IMSC); @@ -2605,6 +2604,9 @@ static int pl011_register_port(struct uart_amba_port *uap) if (ret < 0) { dev_err(uap->port.dev, "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; return ret; } } diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index ec7534cd49efed81d85a395c06da753d78541f25..deb2c0266baa5f4aab6aa4b59032b60883e807e9 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -114,9 +114,9 @@ #define WAIT_XFER_MAX_ITER (2) #define WAIT_XFER_MAX_TIMEOUT_US (10000) #define WAIT_XFER_MIN_TIMEOUT_US (9000) -#define IPC_LOG_PWR_PAGES (6) -#define IPC_LOG_MISC_PAGES (10) -#define IPC_LOG_TX_RX_PAGES (10) +#define IPC_LOG_PWR_PAGES (10) +#define IPC_LOG_MISC_PAGES (30) +#define IPC_LOG_TX_RX_PAGES (30) #define DATA_BYTES_PER_LINE (32) #define M_IRQ_BITS (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN |\ @@ -188,6 +188,8 @@ struct msm_geni_serial_port { void *ipc_log_pwr; void *ipc_log_misc; void *console_log; + void *ipc_log_single; + void *ipc_log_irqstatus; unsigned int cur_baud; int ioctl_count; int edge_count; @@ -568,6 +570,7 @@ static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd, unsigned long arg) { int ret = -ENOIOCTLCMD; + struct 
msm_geni_serial_port *port = GET_DEV_PORT(uport); switch (cmd) { case TIOCPMGET: { @@ -582,6 +585,16 @@ static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd, ret = !pm_runtime_status_suspended(uport->dev); break; } + case TIOCFAULT: { + geni_se_dump_dbg_regs(&port->serial_rsc, + uport->membase, port->ipc_log_misc); + port->ipc_log_rx = port->ipc_log_single; + port->ipc_log_tx = port->ipc_log_single; + port->ipc_log_misc = port->ipc_log_single; + port->ipc_log_pwr = port->ipc_log_single; + ret = 0; + break; + } default: break; } @@ -1700,6 +1713,40 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done, return 0; } +static void check_rx_buf(char *buf, struct uart_port *uport, int size) +{ + struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); + unsigned int rx_data; + bool fault = false; + + rx_data = *(u32 *)buf; + /* check the first 4 bytes of RX data for the faulty zero pattern */ + if (rx_data == 0x0) { + if (size <= 4) { + fault = true; + } else { + /* + * check the last 4 bytes of data in the RX buffer for + * the faulty pattern + */ + if (memcmp(buf+(size-4), "\x0\x0\x0\x0", 4) == 0) + fault = true; + } + + if (fault) { + IPC_LOG_MSG(msm_port->ipc_log_rx, + "RX Invalid packet %s\n", __func__); + geni_se_dump_dbg_regs(&msm_port->serial_rsc, + uport->membase, msm_port->ipc_log_misc); + /* + * Add a 2 msec delay to allow the DMA RX transfer + * to actually complete. + */ + udelay(2000); + } + } +} + static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) { struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); @@ -1728,6 +1775,10 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) __func__, rx_bytes); goto exit_handle_dma_rx; } + + /* Check RX buffer data for faulty pattern */ + check_rx_buf((char *)msm_port->rx_buf, uport, rx_bytes); + if (drop_rx) goto exit_handle_dma_rx; @@ -1743,6 +1794,14 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) tty_flip_buffer_push(tport); dump_ipc(msm_port->ipc_log_rx, "DMA Rx", (char *)msm_port->rx_buf, 0, rx_bytes); + + /* + * The DMA_DONE interrupt doesn't confirm that the data has been copied + * to DDR memory; sometimes stale data from a previous transfer is + * queued to the tty flip_buffer. Add a memset to zero + * to identify such a scenario.
+ */ + memset(msm_port->rx_buf, 0, rx_bytes); exit_handle_dma_rx: return ret; @@ -1886,6 +1945,12 @@ static void msm_geni_serial_handle_isr(struct uart_port *uport, dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT); + if (m_irq_status || s_irq_status || + dma_tx_status || dma_rx_status) + IPC_LOG_MSG(msm_port->ipc_log_irqstatus, + "%s: sirq:0x%x mirq:0x%x dma_txirq:0x%x dma_rxirq:0x%x\n", + __func__, s_irq_status, m_irq_status, + dma_tx_status, dma_rx_status); if (dma_tx_status) { geni_write_reg_nolog(dma_tx_status, uport->membase, @@ -2837,7 +2902,7 @@ static void console_unregister(struct uart_driver *drv) static void msm_geni_serial_debug_init(struct uart_port *uport, bool console) { struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); - char name[30]; + char name[35]; msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL); if (IS_ERR_OR_NULL(msm_port->dbg)) @@ -2880,6 +2945,25 @@ static void msm_geni_serial_debug_init(struct uart_port *uport, bool console) if (!msm_port->ipc_log_misc) dev_info(uport->dev, "Err in Misc IPC Log\n"); } + /* New set of UART IPC log for RX Invalid case */ + memset(name, 0, sizeof(name)); + if (!msm_port->ipc_log_single) { + scnprintf(name, sizeof(name), "%s%s", + dev_name(uport->dev), "_single"); + msm_port->ipc_log_single = ipc_log_context_create( + IPC_LOG_MISC_PAGES, name, 0); + if (!msm_port->ipc_log_single) + dev_info(uport->dev, "Err in single IPC Log\n"); + } + memset(name, 0, sizeof(name)); + if (!msm_port->ipc_log_irqstatus) { + scnprintf(name, sizeof(name), "%s%s", + dev_name(uport->dev), "_irqstatus"); + msm_port->ipc_log_irqstatus = ipc_log_context_create( + IPC_LOG_MISC_PAGES, name, 0); + if (!msm_port->ipc_log_irqstatus) + dev_info(uport->dev, "Err in irqstatus IPC Log\n"); + } } else { memset(name, 0, sizeof(name)); if (!msm_port->console_log) { diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 0d405cc58e722fbcb72b0c726ba2d307feb3df6a..cd0768c3e773e3a7642c3f7c78cde2cb455dc618 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1050,7 +1050,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport) } #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE -static int __init qcom_geni_console_setup(struct console *co, char *options) +static int qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 2a49b6d876b87b0bdf23ce1f85bfb77bd44c0e02..1528a7ba2bf4df5ce3ef497fc24ad72b705c669f 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, struct s3c24xx_uart_info *info = ourport->info; struct clk *clk; unsigned long rate; - unsigned int cnt, baud, quot, clk_sel, best_quot = 0; + unsigned int cnt, baud, quot, best_quot = 0; char clkname[MAX_CLK_NAME_LENGTH]; int calc_deviation, deviation = (1 << 30) - 1; - clk_sel = (ourport->cfg->clk_sel) ? 
ourport->cfg->clk_sel : - ourport->info->def_clk_sel; for (cnt = 0; cnt < info->num_clks; cnt++) { - if (!(clk_sel & (1 << cnt))) + /* Keep selected clock if provided */ + if (ourport->cfg->clk_sel && + !(ourport->cfg->clk_sel & (1 << cnt))) continue; sprintf(clkname, "clk_uart_baud%d", cnt); @@ -1755,9 +1755,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret + 1; } - ret = platform_get_irq(platdev, 1); - if (ret > 0) - ourport->tx_irq = ret; + if (!s3c24xx_serial_has_interrupt_mask(port)) { + ret = platform_get_irq(platdev, 1); + if (ret > 0) + ourport->tx_irq = ret; + } /* * DMA is currently supported only on DT platforms, if DMA properties * are specified. diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 31950a38f0fb7bb5c4055b2f5fee9c9097854989..23f9b0cdff086e8d47deb3416be04ef3bf6f5cb7 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1236,6 +1236,7 @@ static int cdns_uart_console_setup(struct console *co, char *options) int bits = 8; int parity = 'n'; int flow = 'n'; + unsigned long time_out; if (!port->membase) { pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", @@ -1246,6 +1247,13 @@ static int cdns_uart_console_setup(struct console *co, char *options) if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); + /* Wait for tx_empty before setting up the console */ + time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT); + + while (time_before(jiffies, time_out) && + cdns_uart_tx_empty(port) != TIOCSER_TEMT) + cpu_relax(); + return uart_set_options(port, co, baud, parity, bits, flow); } diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 58b454c34560a76f098269644f6368049312a543..10a832a2135e2974b235e5dc0e2720cc7a3d704a 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -604,6 +604,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) port->index = vcc_table_add(port); if (port->index == -1) { pr_err("VCC: no more TTY indices left for allocation\n"); + rv = -ENOMEM; goto free_ldc; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 982d9684c65e3ef0c6908d60296f98687c2debfb..758f522f331e4401145184dd3631be48d9aaf06c 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1199,7 +1199,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned int user; - unsigned short *newscreen; + unsigned short *oldscreen, *newscreen; struct uni_screen *new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); @@ -1297,10 +1297,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - kfree(vc->vc_screenbuf); + oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); + kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 5de81431c8351c80d2b652b4d349684db52547fb..6a82030cf1efb950752cf45116d2dffc8b181906 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty, console_lock(); vcp = vc_cons[i].d; if (vcp) { + int ret; + int save_scan_lines = vcp->vc_scan_lines; + int save_font_height = vcp->vc_font.height; + if (v.v_vlin) 
vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) vcp->vc_font.height = v.v_clin; vcp->vc_resize_user = 1; - vc_resize(vcp, v.v_cols, v.v_rows); + ret = vc_resize(vcp, v.v_cols, v.v_rows); + if (ret) { + vcp->vc_scan_lines = save_scan_lines; + vcp->vc_font.height = save_font_height; + console_unlock(); + return ret; + } } console_unlock(); } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index ea7883e1fbe28a307adf233c59e2f279116bb7b2..41453bf6fc0bd025adac27a954ffe72adfe2f2e1 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb) if (current_size < expected_size) { /* notification is transmitted fragmented, reassemble */ if (acm->nb_size < expected_size) { - if (acm->nb_size) { - kfree(acm->notification_buffer); - acm->nb_size = 0; - } + u8 *new_buffer; alloc_size = roundup_pow_of_two(expected_size); - /* - * kmalloc ensures a valid notification_buffer after a - * use of kfree in case the previous allocation was too - * small. Final freeing is done on disconnect. - */ - acm->notification_buffer = - kmalloc(alloc_size, GFP_ATOMIC); - if (!acm->notification_buffer) + /* Final freeing is done on disconnect. */ + new_buffer = krealloc(acm->notification_buffer, + alloc_size, GFP_ATOMIC); + if (!new_buffer) { + acm->nb_index = 0; goto exit; + } + + acm->notification_buffer = new_buffer; acm->nb_size = alloc_size; + dr = (struct usb_cdc_notification *)acm->notification_buffer; } copy_size = min(current_size, diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 419804c9c974b7b953b45f0e18c31c5ac48d66c9..db36a796af8c316b3ef5eddc6c662c4b043dc1ac 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -827,6 +827,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo if (rv < 0) return rv; + if (!usblp->present) { + count = -ENODEV; + goto done; + } + if ((avail = usblp->rstatus) < 0) { printk(KERN_ERR "usblp%d: error %d reading from printer\n", usblp->minor, (int)avail); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 877aa2b00c7f6726e71868c708705740bcef3d2e..ffba55db10dd11699fe5e924944683d3a497bc69 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1208,6 +1208,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, } } +/* + * usb_disable_device_endpoints -- Disable all endpoints for a device + * @dev: the device whose endpoints are being disabled + * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. + */ +static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) +{ + struct usb_hcd *hcd = bus_to_hcd(dev->bus); + int i; + + if (hcd->driver->check_bandwidth) { + /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, false); + usb_disable_endpoint(dev, i + USB_DIR_IN, false); + } + /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); + } + /* Second pass: remove endpoint pointers */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, true); + usb_disable_endpoint(dev, i + USB_DIR_IN, true); + } +} + /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled @@ -1221,7 +1249,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; - struct usb_hcd *hcd = bus_to_hcd(dev->bus); /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) @@ -1267,22 +1294,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); - if (hcd->driver->check_bandwidth) { - /* First pass: Cancel URBs, leave endpoint pointers intact. */ - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, false); - usb_disable_endpoint(dev, i + USB_DIR_IN, false); - } - /* Remove endpoints from the host controller internal state */ - mutex_lock(hcd->bandwidth_mutex); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - /* Second pass: remove endpoint pointers */ - } - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + + usb_disable_device_endpoints(dev, skip_ep0); } /** @@ -1694,6 +1707,9 @@ EXPORT_SYMBOL(usb_set_interface_timeout); * The caller must own the device lock. * * Return: Zero on success, else a negative error code. + * + * If this routine fails the device will probably be in an unusable state + * with endpoints disabled, and interfaces only partially enabled. 
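From a caller's point of view, the failure note just added to usb_reset_configuration() means the return value has to be treated as fatal for the current configuration, since endpoints may already be disabled when the call returns an error. A hedged caller-side sketch; the helper name and the error policy below are assumptions for illustration only, not part of the patch:

    /* Hypothetical driver helper; usb_reset_configuration() requires the
     * caller to hold the device lock. */
    static int example_soft_reset(struct usb_interface *intf)
    {
            struct usb_device *udev = interface_to_usbdev(intf);
            int ret;

            usb_lock_device(udev);
            ret = usb_reset_configuration(udev);
            usb_unlock_device(udev);

            if (ret)
                    dev_err(&intf->dev,
                            "reset_configuration failed (%d), device left partially configured\n",
                            ret);
            return ret;
    }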
*/ int usb_reset_configuration(struct usb_device *dev) { @@ -1709,10 +1725,7 @@ int usb_reset_configuration(struct usb_device *dev) * calls during probe() are fine */ - for (i = 1; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; @@ -1725,34 +1738,10 @@ int usb_reset_configuration(struct usb_device *dev) mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } - /* Make sure we have enough bandwidth for each alternate setting 0 */ - for (i = 0; i < config->desc.bNumInterfaces; i++) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - retval = usb_hcd_alloc_bandwidth(dev, NULL, - intf->cur_altsetting, alt); - if (retval < 0) - break; - } - /* If not, reinstate the old alternate settings */ + /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ + retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { -reset_old_alts: - for (i--; i >= 0; i--) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - usb_hcd_alloc_bandwidth(dev, NULL, - alt, intf->cur_altsetting); - } usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; @@ -1761,8 +1750,12 @@ int usb_reset_configuration(struct usb_device *dev) USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (retval < 0) - goto reset_old_alts; + if (retval < 0) { + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + usb_enable_lpm(dev); + mutex_unlock(hcd->bandwidth_mutex); + return retval; + } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e0b77674869ce6de9ec0a45f7982f7a460104b7b..4ee8105310989e96082dd9050a42535c82f942fc 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -25,17 +25,23 @@ static unsigned int quirk_count; static char quirks_param[128]; -static int quirks_param_set(const char *val, const struct kernel_param *kp) +static int quirks_param_set(const char *value, const struct kernel_param *kp) { - char *p, *field; + char *val, *p, *field; u16 vid, pid; u32 flags; size_t i; int err; + val = kstrdup(value, GFP_KERNEL); + if (!val) + return -ENOMEM; + err = param_set_copystring(val, kp); - if (err) + if (err) { + kfree(val); return err; + } mutex_lock(&quirk_mutex); @@ -60,10 +66,11 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) if (!quirk_list) { quirk_count = 0; mutex_unlock(&quirk_mutex); + kfree(val); return -ENOMEM; } - for (i = 0, p = (char *)val; p && *p;) { + for (i = 0, p = val; p && *p;) { /* Each entry consists of VID:PID:flags */ field = strsep(&p, ":"); if (!field) @@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) unlock: mutex_unlock(&quirk_mutex); + kfree(val); return 0; } @@ -362,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Sound Devices MixPre-D */ + { USB_DEVICE(0x0926, 0x0208), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Keytouch QWERTY Panel keyboard */ { 
USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -385,6 +397,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Generic RTL8153 based ethernet adapters */ { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + /* SONiX USB DEVICE Touchpad */ + { USB_DEVICE(0x0c45, 0x7056), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -457,6 +473,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, + /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, @@ -501,6 +519,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { */ static const struct usb_device_id usb_endpoint_blacklist[] = { { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 }, { } }; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 7e88fdfe3cf5c7c23ba7e9878904a8e72db7686c..b93b18ba89df691820b0e88faeda4c1d533e49b0 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -888,7 +888,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, size_t srclen, n; int cfgno; void *src; + int retval; + retval = usb_lock_device_interruptible(udev); + if (retval < 0) + return -EINTR; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). @@ -913,6 +917,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, off -= srclen; } } + usb_unlock_device(udev); return count - nleft; } diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index c35c93f16a49acdc419a3b94a62cecdac55f143d..a9e86f5e6eaa3fb74a236b426160a2201aec1559 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -499,6 +499,7 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->gadget_enabled) { retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); if (retval) { + hsotg->gadget.udc = NULL; dwc2_hsotg_remove(hsotg); goto error; } @@ -507,7 +508,8 @@ static int dwc2_driver_probe(struct platform_device *dev) return 0; error: - dwc2_lowlevel_hw_disable(hsotg); + if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) + dwc2_lowlevel_hw_disable(hsotg); return retval; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f820795599b246c8315d214e59c2320e8607813d..46c7d1450a3429dbfb81440f748e7cfc45cc483e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -373,7 +373,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; - u32 timeout = 3000; + u32 timeout = 5000; u32 saved_config = 0; u32 reg; @@ -812,7 +812,7 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) dbg_log_string("START for %s(%d)", dep->name, dep->number); dwc3_stop_active_transfer(dwc, dep->number, true); - if (dep->number == 1 && dwc->ep0state != EP0_SETUP_PHASE) { + if (dep->number == 0 && dwc->ep0state != EP0_SETUP_PHASE) { unsigned int dir; dbg_log_string("CTRLPEND(%d)", dwc->ep0state); @@ -1243,26 +1243,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, * 
dwc3_prepare_one_trb - setup one TRB from one request * @dep: endpoint for which this request is prepared * @req: dwc3_request pointer + * @trb_length: buffer size of the TRB * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. */ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, - struct dwc3_request *req, unsigned chain, unsigned node) + struct dwc3_request *req, unsigned int trb_length, + unsigned chain, unsigned node) { struct dwc3_trb *trb; - unsigned int length; dma_addr_t dma; unsigned stream_id = req->request.stream_id; unsigned short_not_ok = req->request.short_not_ok; unsigned no_interrupt = req->request.no_interrupt; - if (req->request.num_sgs > 0) { - length = sg_dma_len(req->start_sg); + if (req->request.num_sgs > 0) dma = sg_dma_address(req->start_sg); - } else { - length = req->request.length; + else dma = req->request.dma; - } trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1274,7 +1272,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, + __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, stream_id, short_not_ok, no_interrupt); } @@ -1284,16 +1282,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct scatterlist *sg = req->start_sg; struct scatterlist *s; int i; - + unsigned int length = req->request.length; unsigned int remaining = req->request.num_mapped_sgs - req->num_queued_sgs; + /* + * If we resume preparing the request, then get the remaining length of + * the request and resume where we left off. + */ + for_each_sg(req->request.sg, s, req->num_queued_sgs, i) + length -= sg_dma_len(s); + for_each_sg(sg, s, remaining, i) { - unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; + unsigned int trb_length; unsigned chain = true; + trb_length = min_t(unsigned int, length, sg_dma_len(s)); + + length -= trb_length; + /* * IOMMU driver is coalescing the list of sgs which shares a * page boundary into one and giving it to USB driver. With @@ -1301,7 +1310,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, * sgs passed. So mark the chain bit to false if it isthe last * mapped sg. 
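The scatter-gather changes above come down to a running-length calculation: each TRB is capped at the smaller of the remaining request length and the current sg entry's DMA length, and the chain bit is cleared as soon as either the sg list or the remaining length runs out. A minimal model of that bookkeeping, with illustrative names rather than driver code:

    static unsigned int plan_sg_trbs(unsigned int total_len,
                                     const unsigned int *sg_len,
                                     unsigned int num_sgs)
    {
            unsigned int i, trbs = 0;

            for (i = 0; i < num_sgs && total_len; i++) {
                    unsigned int trb_len =
                            total_len < sg_len[i] ? total_len : sg_len[i];

                    total_len -= trb_len;
                    trbs++;
                    /*
                     * chain stays set unless this is the last sg entry or
                     * total_len just hit zero: the last TRB is whichever
                     * ends the sg list or the request first.
                     */
            }
            return trbs;
    }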
*/ - if (i == remaining - 1) + if ((i == remaining - 1) || !length) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { @@ -1311,7 +1320,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, i); + dwc3_prepare_one_trb(dep, req, trb_length, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1321,8 +1330,37 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + !rem && !chain) { + struct dwc3 *dwc = dep->dwc; + struct dwc3_trb *trb; + + req->needs_extra_trb = true; + + /* Prepare normal TRB */ + dwc3_prepare_one_trb(dep, req, trb_length, true, i); + + /* Prepare one extra TRB to handle ZLP */ + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, + !req->direction, 1, + req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { - dwc3_prepare_one_trb(dep, req, chain, i); + dwc3_prepare_one_trb(dep, req, trb_length, chain, i); } /* @@ -1337,6 +1375,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->num_queued_sgs++; + /* + * The number of pending SG entries may not correspond to the + * number of mapped SG entries. If all the data are queued, then + * don't include unused SG entries. 
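Both the sg path and the linear path now share one rule for appending the extra zero-length TRB, condensed below; for OUT endpoints one further maxp-sized TRB aimed at the bounce buffer follows, so a full-length packet from the host has somewhere to land. The helper is an illustration, not driver code:

    #include <stdbool.h>

    /*
     * A ZLP TRB is appended when the request asked for one, the transfer
     * is non-empty and not isochronous, and its length is an exact
     * multiple of wMaxPacketSize, so the controller would otherwise never
     * emit the terminating short packet.
     */
    static bool needs_zlp_trb(unsigned int len, unsigned int maxp,
                              bool zero_requested, bool is_isoc)
    {
            return zero_requested && len && !is_isoc && (len % maxp) == 0;
    }

The same running length is what lets the sg loop trim num_pending_sgs and stop early once the whole request has been queued.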
+ */ + if (length == 0) { + req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs; + break; + } + if (!dwc3_calc_trbs_left(dep)) break; } @@ -1356,7 +1404,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1366,6 +1414,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt); } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && (IS_ALIGNED(req->request.length, maxp))) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; @@ -1373,17 +1422,27 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); - /* Now prepare one extra TRB to handle ZLP */ + /* Prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - false, 1, req->request.stream_id, + !req->direction, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment for OUT */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { - dwc3_prepare_one_trb(dep, req, false, 0); + dwc3_prepare_one_trb(dep, req, length, false, 0); } } @@ -3035,8 +3094,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, status); if (req->needs_extra_trb) { + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + + /* Reclaim MPS padding TRB for ZLP */ + if (!req->direction && req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (IS_ALIGNED(req->request.length, maxp))) + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + req->needs_extra_trb = false; } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 2200c8f9f33371031c53029e53d10d663ff5111a..e7a4c3b67d030df2d7d039e69778d9863e151fe0 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1184,9 +1184,11 @@ static int ncm_unwrap_ntb(struct gether *port, int ndp_index; unsigned dg_len, dg_len2; unsigned ndp_len; + unsigned block_len; struct sk_buff *skb2; int ret = -EINVAL; - unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize); const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? 
sizeof(uint32_t) : 0; int dgram_counter; @@ -1208,8 +1210,9 @@ static int ncm_unwrap_ntb(struct gether *port, } tmp++; /* skip wSequence */ + block_len = get_ncm(&tmp, opts->block_length); /* (d)wBlockLength */ - if (get_ncm(&tmp, opts->block_length) > max_size) { + if (block_len > ntb_max) { INFO(port->func.config->cdev, "OUT size exceeded\n"); goto err; } @@ -1218,15 +1221,23 @@ static int ncm_unwrap_ntb(struct gether *port, /* Run through all the NDP's in the NTB */ do { - /* NCM 3.2 */ - if (((ndp_index % 4) != 0) && - (ndp_index < opts->nth_size)) { + /* + * NCM 3.2 + * dwNdpIndex + */ + if (((ndp_index % 4) != 0) || + (ndp_index < opts->nth_size) || + (ndp_index > (block_len - + opts->ndp_size))) { INFO(port->func.config->cdev, "Bad index: %#X\n", ndp_index); goto err; } - /* walk through NDP */ + /* + * walk through NDP + * dwSignature + */ tmp = (void *)(skb->data + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); @@ -1237,14 +1248,15 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len = get_unaligned_le16(tmp++); /* * NCM 3.3.1 + * wLength * entry is 2 items * item size is 16/32 bits, opts->dgram_item_len * 2 bytes * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry * Each entry is a dgram index and a dgram length. */ if ((ndp_len < opts->ndp_size - + 2 * 2 * (opts->dgram_item_len * 2)) - || (ndp_len % opts->ndplen_align != 0)) { + + 2 * 2 * (opts->dgram_item_len * 2)) || + (ndp_len % opts->ndplen_align != 0)) { INFO(port->func.config->cdev, "Bad NDP length: %#X\n", ndp_len); goto err; @@ -1261,8 +1273,21 @@ static int ncm_unwrap_ntb(struct gether *port, do { index = index2; + /* wDatagramIndex[0] */ + if ((index < opts->nth_size) || + (index > block_len - opts->dpe_size)) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index); + goto err; + } + dg_len = dg_len2; - if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ + /* + * wDatagramLength[0] + * ethernet hdr + crc or larger than max frame size + */ + if ((dg_len < 14 + crc_len) || + (dg_len > frame_max)) { INFO(port->func.config->cdev, "Bad dgram length: %#X\n", dg_len); goto err; @@ -1286,6 +1311,13 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + /* wDatagramIndex[1] */ + if (index2 > block_len - opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + /* * Copy the data into a new skb. 
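The bounds checks added to ncm_unwrap_ntb() all have the same shape: every index read from the wire must stay inside the received block and leave room for the structure it points at. The NDP index test, condensed into an illustrative helper (the datagram index and length checks against block_len - dpe_size and wMaxSegmentSize apply the same idea to each entry):

    #include <stdbool.h>

    static bool ndp_index_ok(unsigned int ndp_index, unsigned int nth_size,
                             unsigned int ndp_size, unsigned int block_len)
    {
            if (ndp_index % 4)                      /* NCM 3.2: 4-byte aligned */
                    return false;
            if (ndp_index < nth_size)               /* would overlap the NTH */
                    return false;
            if (ndp_index > block_len - ndp_size)   /* NDP would overrun the NTB */
                    return false;
            return true;
    }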
* This ensures the truesize is correct @@ -1302,7 +1334,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; - if (index2 == 0 || dg_len2 == 0) break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 106988a6661ab404743b121e3cdf2fa42c49eed3..785826ab5348ed7560d2f434cf9294543c218966 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) goto err_sts; return 0; + err_sts: - usb_ep_free_request(fu->ep_status, stream->req_status); - stream->req_status = NULL; -err_out: usb_ep_free_request(fu->ep_out, stream->req_out); stream->req_out = NULL; +err_out: + usb_ep_free_request(fu->ep_in, stream->req_in); + stream->req_in = NULL; out: return -ENOMEM; } diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 09f90447fed5d4a998ac650de2d32db9245fb412..cfa64265533759145e247cc2ada703d5fda55774 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -14,6 +14,7 @@ #define __U_F_H__ #include +#include /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 @@ -21,21 +22,36 @@ #define vla_item(groupname, type, name, n) \ size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = (n) * sizeof(type); \ - groupname##__next = offset + size; \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + size_t size = array_size(n, sizeof(type)); \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, size, \ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ offset; \ }) #define vla_item_with_sz(groupname, type, name, n) \ - size_t groupname##_##name##__sz = (n) * sizeof(type); \ - size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = groupname##_##name##__sz; \ - groupname##__next = offset + size; \ - offset; \ + size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ + size_t groupname##_##name##__offset = ({ \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, groupname##_##name##__sz,\ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ + offset; \ }) #define vla_ptr(ptr, groupname, name) \ diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 01b44e15962378d02e260ee37872ded457bc763d..e174b1b889da5d68f819f904c8d18352eab5e2b7 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -283,6 +283,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) * in that case reinit is passed as 1 */ if (reinit) { + int i; /* Enable interrupts */ temp = bdc_readl(bdc->regs, BDC_BDCSC); temp |= BDC_GIE; @@ -292,6 +293,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) /* Initialize SRR to 0 */ memset(bdc->srr.sr_bds, 0, NUM_SR_ENTRIES * sizeof(struct bdc_bd)); + /* clear 
ep flags to avoid post disconnect stops/deconfigs */ + for (i = 1; i < bdc->num_eps; ++i) + bdc->bdc_ep_array[i]->flags = 0; } else { /* One time initiaization only */ /* Enable status report function pointers */ @@ -604,9 +608,14 @@ static int bdc_remove(struct platform_device *pdev) static int bdc_suspend(struct device *dev) { struct bdc *bdc = dev_get_drvdata(dev); + int ret; - clk_disable_unprepare(bdc->clk); - return 0; + /* Halt the controller */ + ret = bdc_stop(bdc); + if (!ret) + clk_disable_unprepare(bdc->clk); + + return ret; } static int bdc_resume(struct device *dev) diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d49c6dc1082dc980b47620a63fb28e35f5780c67..9ddc0b4e92c9c94235cb24a5e5d90a6e7682a96d 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep) } bdc_dbg_bd_list(bdc, ep); /* only for ep0: config ep is called for ep0 from connect event */ - ep->flags |= BDC_EP_ENABLED; if (ep->ep_num == 1) return ret; @@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) __func__, ep->name, start_bdi, end_bdi); dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", ep, (void *)ep->usb_ep.desc); - /* Stop the ep to see where the HW is ? */ - ret = bdc_stop_ep(bdc, ep->ep_num); - /* if there is an issue with stopping ep, then no need to go further */ - if (ret) + /* if still connected, stop the ep to see where the HW is ? */ + if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) { + ret = bdc_stop_ep(bdc, ep->ep_num); + /* if there is an issue, then no need to go further */ + if (ret) + return 0; + } else return 0; /* @@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep) __func__, ep->name, ep->flags); if (!(ep->flags & BDC_EP_ENABLED)) { - dev_warn(bdc->dev, "%s is already disabled\n", ep->name); + if (bdc->gadget.speed != USB_SPEED_UNKNOWN) + dev_warn(bdc->dev, "%s is already disabled\n", + ep->name); return 0; } spin_lock_irqsave(&bdc->lock, flags); diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index ee872cad5270552cf283a50c52f7307c8a5b3f6d..a87caad8d1c7e3ebeb1360b1b0105fec229c4081 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; done: - if (dev) + if (dev) { net2280_remove(pdev); + kfree(dev); + } return retval; } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 8608ac513fb764e4c8069cf870ecd1e12e068d1d..caf9f6b1cd3464206ce14bf707e9d2082b95b409 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index ce0eaf7d7c12a11f1fd9fb0c76d7275613669bfb..087402aec5cbeb43ab851364d6a99fff96a2b53a 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -14,7 +14,6 @@ */ /*-------------------------------------------------------------------------*/ -#include #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index de764459e05a6d30ccb9ba85e51532b009d33ebb..9d93e7441bbca0f0d81553553298923cb9c212e6 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -192,12 +192,10 @@ static int mv_ehci_probe(struct platform_device 
*pdev) hcd->rsrc_len = resource_size(r); hcd->regs = ehci_mv->op_regs; - hcd->irq = platform_get_irq(pdev, 0); - if (!hcd->irq) { - dev_err(&pdev->dev, "Cannot get irq."); - retval = -ENODEV; + retval = platform_get_irq(pdev, 0); + if (retval < 0) goto err_disable_clk; - } + hcd->irq = retval; ehci = hcd_to_ehci(hcd); ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs; diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index c0c4dcca6f3cbb12a607325b4ef8773704ce6ff3..a4a88b6de3c48c143abba910304d3cb078b24f04 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -156,9 +156,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 76c3f29562d2b9d2e3263f3d3dc1b45b92728140..448d7b11dec4cab8987d4fe2999af8e65e2f3249 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) static int xhci_endpoint_context_show(struct seq_file *s, void *unused) { - int dci; + int ep_index; dma_addr_t dma; struct xhci_hcd *xhci; struct xhci_ep_ctx *ep_ctx; @@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); - for (dci = 1; dci < 32; dci++) { - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); - dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); + for (ep_index = 0; ep_index < 31; ep_index++) { + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), le32_to_cpu(ep_ctx->ep_info2), diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0af3c48453bdc0544a05ddfdefdbef4bae1a9210..64318a8935e1667593f143449dc72f5b17388ed9 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -736,15 +736,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, { u32 pls = status_reg & PORT_PLS_MASK; - /* resume state is a xHCI internal state. - * Do not report it to usb core, instead, pretend to be U3, - * thus usb core knows it's not ready for transfer - */ - if (pls == XDEV_RESUME) { - *status |= USB_SS_PORT_LS_U3; - return; - } - /* When the CAS bit is set then warm reset * should be performed on port */ @@ -766,6 +757,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, */ pls |= USB_PORT_STAT_CONNECTION; } else { + /* + * Resume state is an xHCI internal state. Do not report it to + * usb core, instead, pretend to be U3, thus usb core knows + * it's not ready for transfer. 
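The ehci-mv and ohci-exynos hunks above fix the same bug: platform_get_irq() signals failure with a negative errno (possibly -EPROBE_DEFER), so testing the result with "!irq" lets error codes pass as if they were valid interrupt numbers and also discards the original error. The corrected pattern, wrapped in an illustrative helper:

    static int example_get_hcd_irq(struct platform_device *pdev,
                                   struct usb_hcd *hcd)
    {
            int ret = platform_get_irq(pdev, 0);

            if (ret < 0)
                    return ret;     /* keeps -EPROBE_DEFER and friends intact */

            hcd->irq = ret;
            return 0;
    }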
+ */ + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; + return; + } + /* * If CAS bit isn't set but the Port is already at * Compliance Mode, fake a connection so the USB core diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 9b02e3e3f998dc4d9e8fced2c6610f8e2ee6e31c..1a6a23e57201d6f4d524e2e1dd35e48b29c8d239 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -47,7 +47,10 @@ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 +#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 +#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 static const char hcd_name[] = "xhci_hcd"; @@ -226,13 +229,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1042) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1142) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x2142) + (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 81dfc88007337324ad4af959ea1076f62493b4c1..299ff2d3b429d289ff094e427b9ffbf78c522fdc 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3189,10 +3189,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, wait_for_completion(cfg_cmd->completion); - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; xhci_free_command(xhci, cfg_cmd); cleanup: xhci_free_command(xhci, stop_cmd); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 92875a264b14a4885897d9f585708b0c9cd99650..9c1ca20d4139d83f4e2ac99529a3dc8a91f32989 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -2,8 +2,9 @@ /* * Native support for the I/O-Warrior USB devices * - * Copyright (c) 2003-2005 Code Mercenaries GmbH - * written by Christian Lucht + * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH + * written by Christian Lucht and + * Christoph Jung * * based on @@ -817,14 +818,28 @@ static int iowarrior_probe(struct usb_interface *interface, /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); - if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && - ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100))) - /* IOWarrior56 has wMaxPacketSize different from report size */ - dev->report_size = 7; + + /* + * Some devices need the report size to be different than the + * endpoint size. 
+ */ + if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { + switch (dev->product_id) { + case USB_DEVICE_ID_CODEMERCS_IOW56: + case USB_DEVICE_ID_CODEMERCS_IOW56AM: + dev->report_size = 7; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW28: + case USB_DEVICE_ID_CODEMERCS_IOW28L: + dev->report_size = 4; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW100: + dev->report_size = 13; + break; + } + } /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 10c2a71297cb60dc58a3f580a3cf45f9a374133f..377dae0f9947599a92cc9d75c19a7b41929f8ceb 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -436,7 +436,7 @@ static int lvs_rh_probe(struct usb_interface *intf, USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT); if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) { dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret); - return ret; + return ret < 0 ? ret : -EINVAL; } /* submit urb to poll interrupt endpoint */ diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 6376be1f5fd22585821bd9f8cc8a227518ed1f5e..4877bf82ad395dc4038d5f0bdef3d5f7ed5ff1f5 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, u8 swap8, fromkern = kernbuffer ? 1 : 0; u16 swap16; u32 swap32, flag = (length >> 28) & 1; - char buf[4]; + u8 buf[4]; /* if neither kernbuffer not userbuffer are given, assume * data in obuf diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index be0505b8b5d4e5515996faf25ef7485e978c9248..785080f79073823f90dee58b3ce942c5bbe253b2 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, dev->cntl_buffer[0]); - retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL); + retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); if (retval >= 0) timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 86069352013262d04b3ca4e884c152cfdf238906..408e964522ab931fc1fe9057fd65e5211152b0e2 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -128,8 +128,12 @@ static void mtu3_device_disable(struct mtu3 *mtu) mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); - if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) { mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + if (mtu->is_u3_ip) + mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_DUAL_MODE); + } mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 7ae121567098c9442350a9e140322754b3f6a18b..46ec30a2c51683bcead0a10adc52b19b7fc937a4 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -271,6 +271,8 @@ static struct usb_serial_driver cp210x_device = { .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, .tx_empty = cp210x_tx_empty, + .throttle = usb_serial_generic_throttle, + .unthrottle = usb_serial_generic_unthrottle, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_attach, @@ -893,6 
+895,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, u32 baud; u16 bits; u32 ctl_hs; + u32 flow_repl; cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud); @@ -993,6 +996,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) { dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__); + /* + * When the port is closed, the CP210x hardware disables + * auto-RTS and RTS is deasserted but it leaves auto-CTS when + * in hardware flow control mode. When re-opening the port, if + * auto-CTS is enabled on the cp210x, then auto-RTS must be + * re-enabled in the driver. + */ + flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); + flow_repl &= ~CP210X_SERIAL_RTS_MASK; + flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL); + flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); + cp210x_write_reg_block(port, + CP210X_SET_FLOW, + &flow_ctl, + sizeof(flow_ctl)); + cflag |= CRTSCTS; } else { dev_dbg(dev, "%s - flow control = NONE\n", __func__); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 3c0f38cd3a5a478225e26bb0f30b437d1316112b..b2364e3794295e424e55e1ad96ef8a03ed8daba7 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -703,6 +703,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, @@ -1026,6 +1027,11 @@ static const struct usb_device_id id_table_combined[] = { /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, + /* FreeCalypso USB adapters */ + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { } /* Terminating entry */ }; @@ -2037,12 +2043,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) static int ftdi_process_packet(struct usb_serial_port *port, - struct ftdi_private *priv, char *packet, int len) + struct ftdi_private *priv, unsigned char *buf, int len) { + unsigned char status; int i; - char status; char flag; - char *ch; if (len < 2) { dev_dbg(&port->dev, "malformed packet\n"); @@ -2052,7 +2057,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, /* Compare new line status to the old one, signal if different/ N.B. packet may be processed more than once, but differences are only processed once. */ - status = packet[0] & FTDI_STATUS_B0_MASK; + status = buf[0] & FTDI_STATUS_B0_MASK; if (status != priv->prev_status) { char diff_status = status ^ priv->prev_status; @@ -2078,13 +2083,12 @@ static int ftdi_process_packet(struct usb_serial_port *port, } /* save if the transmitter is empty or not */ - if (packet[1] & FTDI_RS_TEMT) + if (buf[1] & FTDI_RS_TEMT) priv->transmit_empty = 1; else priv->transmit_empty = 0; - len -= 2; - if (!len) + if (len == 2) return 0; /* status only */ /* @@ -2092,40 +2096,41 @@ static int ftdi_process_packet(struct usb_serial_port *port, * data payload to avoid over-reporting. 
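For readers following the ftdi_process_packet() rework: every bulk-in packet carries two status bytes ahead of the payload, which is why the byte accounting now consistently uses len - 2. A condensed view of that layout; the struct and helper names are illustrative, not driver code:

    struct ftdi_rx_view {
            unsigned char modem_status;     /* buf[0], FTDI_STATUS_B0_MASK bits */
            unsigned char line_status;      /* buf[1], FTDI_RS_* error bits     */
            const unsigned char *data;      /* payload starts at buf + 2        */
            int data_len;                   /* len - 2, may be zero             */
    };

    static int ftdi_split_packet(const unsigned char *buf, int len,
                                 struct ftdi_rx_view *v)
    {
            if (len < 2)
                    return -1;              /* malformed packet */

            v->modem_status = buf[0];
            v->line_status  = buf[1];
            v->data         = buf + 2;
            v->data_len     = len - 2;
            return 0;
    }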
*/ flag = TTY_NORMAL; - if (packet[1] & FTDI_RS_ERR_MASK) { + if (buf[1] & FTDI_RS_ERR_MASK) { /* Break takes precedence over parity, which takes precedence * over framing errors */ - if (packet[1] & FTDI_RS_BI) { + if (buf[1] & FTDI_RS_BI) { flag = TTY_BREAK; port->icount.brk++; usb_serial_handle_break(port); - } else if (packet[1] & FTDI_RS_PE) { + } else if (buf[1] & FTDI_RS_PE) { flag = TTY_PARITY; port->icount.parity++; - } else if (packet[1] & FTDI_RS_FE) { + } else if (buf[1] & FTDI_RS_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Overrun is special, not associated with a char */ - if (packet[1] & FTDI_RS_OE) { + if (buf[1] & FTDI_RS_OE) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } - port->icount.rx += len; - ch = packet + 2; + port->icount.rx += len - 2; if (port->port.console && port->sysrq) { - for (i = 0; i < len; i++, ch++) { - if (!usb_serial_handle_sysrq_char(port, *ch)) - tty_insert_flip_char(&port->port, *ch, flag); + for (i = 2; i < len; i++) { + if (usb_serial_handle_sysrq_char(port, buf[i])) + continue; + tty_insert_flip_char(&port->port, buf[i], flag); } } else { - tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len); + tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, + len - 2); } - return len; + return len - 2; } static void ftdi_process_read_urb(struct urb *urb) diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e8373528264c3634a9e474f2836a82d2b5b8b7a5..3d47c6d72256e383afad8d42cd2e5b5d1ccb01e5 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -39,6 +39,13 @@ #define FTDI_LUMEL_PD12_PID 0x6002 +/* + * Custom USB adapters made by Falconia Partners LLC + * for FreeCalypso project, ID codes allocated to Falconia by FTDI. 
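The error-flag selection in the same function follows a fixed precedence, with overrun handled separately because it is not tied to any particular character. A sketch of that precedence; the FTDI_RS_* and TTY_* constants are the kernel's own, the helper name is illustrative:

    static char ftdi_status_to_flag(unsigned char line_status)
    {
            if (line_status & FTDI_RS_BI)
                    return TTY_BREAK;       /* break wins over parity */
            if (line_status & FTDI_RS_PE)
                    return TTY_PARITY;      /* parity wins over framing */
            if (line_status & FTDI_RS_FE)
                    return TTY_FRAME;
            return TTY_NORMAL;
    }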
+ */ +#define FTDI_FALCONIA_JTAG_BUF_PID 0x7150 +#define FTDI_FALCONIA_JTAG_UNBUF_PID 0x7151 + /* Sienna Serial Interface by Secyourit GmbH */ #define FTDI_SIENNA_PID 0x8348 @@ -160,6 +167,7 @@ #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ +#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index e287fd52c575bbfe4ce03cf4e50206f16e2a04e5..734f18d0a7f730d3d6ccd3ded94c3242cc820f1a 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; - *buf_ptr++ = IUU_SET_LED; + if (xmas) { - get_random_bytes(buf_ptr, 6); - *(buf_ptr+7) = 1; + buf_ptr[0] = IUU_SET_LED; + get_random_bytes(buf_ptr + 1, 6); + buf_ptr[7] = 1; } else { iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255); } @@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; + if (xmas) { iuu_rxcmd(urb); return; - } else { - *buf_ptr++ = IUU_SET_LED; - iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); } + + iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); + usb_fill_bulk_urb(port->write_urb, port->serial->dev, usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress), diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 6e44aaafdcb1065f8c56a41398728d5d4103d4b5..c773db129bf94d2ae0b4141cafb53d18e26c0765 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -528,6 +528,7 @@ static void option_instat_callback(struct urb *urb); /* Cellient products */ #define CELLIENT_VENDOR_ID 0x2692 #define CELLIENT_PRODUCT_MEN200 0x9005 +#define CELLIENT_PRODUCT_MPL200 0x9025 /* Hyundai Petatel Inc. 
products */ #define PETATEL_VENDOR_ID 0x1ff4 @@ -1094,14 +1095,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), - .driver_info = RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, @@ -1182,6 +1187,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1819,6 +1826,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ .driver_info = RSVD(7) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), @@ -1976,6 +1985,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), + .driver_info = RSVD(1) | RSVD(4) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ diff --git 
a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 7751b94ac7f5e231465ad3e0b18cbfb970fcd1b3..2d78ad2842a4432ad4e5dbc8fbc56d1301f4f9a6 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -94,6 +94,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index c98db6b650a5caf2d14bbfb06488c585f6924fc9..a897680473a7851c56ea7ea4ea3c1f907225bbb8 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -121,6 +121,7 @@ /* Hewlett-Packard POS Pole Displays */ #define HP_VENDOR_ID 0x03f0 +#define HP_LD381GC_PRODUCT_ID 0x0183 #define HP_LM920_PRODUCT_ID 0x026b #define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index d147feae83e69484719a01189b2ffd3e4fd2f576..0f60363c1bbc8aae517beac6b9ed15e405f86726 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 27d8b4b6ff593c119162a1e4ed06b3161199b693..658b0cd8e27eef5115e1d29b727541fd1c73e812 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -669,8 +669,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, if (devinfo->resetting) { cmnd->result = DID_ERROR << 16; cmnd->scsi_done(cmnd); - spin_unlock_irqrestore(&devinfo->lock, flags); - return 0; + goto zombie; } /* Find a free uas-tag */ @@ -706,6 +705,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); err = uas_submit_urbs(cmnd, devinfo); + /* + * in case of fatal errors the SCSI layer is peculiar + * a command that has finished is a success for the purpose + * of queueing, no matter how fatal the error + */ + if (err == -ENODEV) { + cmnd->result = DID_ERROR << 16; + cmnd->scsi_done(cmnd); + goto zombie; + } if (err) { /* If we did nothing, give up now */ if (cmdinfo->state & SUBMIT_STATUS_URB) { @@ -716,6 +725,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, } devinfo->cmnd[idx] = cmnd; +zombie: spin_unlock_irqrestore(&devinfo->lock, flags); return 0; } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index f6c3681fa2e9efa4d861199aa2fe12a1c8334652..88275842219ef7a38565e319140a6c36e95b4962 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_BROKEN_FUA ), + US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), /* Reported by Andrey 
Rahmatullin */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 37157ed9a881a358968d85803428f95786628fc5..dcdfcdfd2ad134a7fa5b9bec590a34ae1d9b9435 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,13 @@ * and don't forget to CC: the USB development list */ +/* Reported-by: Till Dörges */ +UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, + "Sony", + "PSZ-HA*", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Julian Groß */ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, "LaCie", @@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported-by: Thinh Nguyen */ +UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index a18112a83faed2df09e49c0a5a93d2fce0823c5f..dda8bd39c9186624cf727f7cec2c5cb8452da27c 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -64,11 +64,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) static int ucsi_acpi_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct ucsi_acpi *ua; struct resource *res; acpi_status status; int ret; + if (adev->dep_unmet) + return -EPROBE_DEFER; + ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL); if (!ua) return -ENOMEM; diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 66783a37f450cc5df7e7935e983c0f2365b73115..58e7336b2748b94d81e74d85499c883c54c74ff3 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -181,6 +182,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); /* * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND @@ -407,6 +409,19 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); + if (vdev->err_trigger) { + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); + if (vdev->req_trigger) { + eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } + mutex_unlock(&vdev->igate); } mutex_unlock(&driver_lock); @@ -623,6 +638,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, return 0; } +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + static long vfio_pci_ioctl(void *device_data, unsigned int cmd, unsigned long arg) { @@ -696,7 +717,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; - u16 orig_cmd; + u16 cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -716,10 +737,7 @@ static long vfio_pci_ioctl(void *device_data, * Is it really there? Enable memory decode for * implicit access in pci_map_rom(). 
*/ - pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); - pci_write_config_word(pdev, PCI_COMMAND, - orig_cmd | PCI_COMMAND_MEMORY); - + cmd = vfio_pci_memory_lock_and_enable(vdev); io = pci_map_rom(pdev, &size); if (io) { info.flags = VFIO_REGION_INFO_FLAG_READ; @@ -727,8 +745,8 @@ static long vfio_pci_ioctl(void *device_data, } else { info.size = 0; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); - pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -865,8 +883,16 @@ static long vfio_pci_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - return vdev->reset_works ? - pci_try_reset_function(vdev->pdev) : -EINVAL; + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { struct vfio_pci_hot_reset_info hdr; @@ -946,8 +972,9 @@ static long vfio_pci_ioctl(void *device_data, int32_t *group_fds; struct vfio_pci_group_entry *groups; struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; bool slot = false; - int i, count = 0, ret = 0; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -999,9 +1026,9 @@ static long vfio_pci_ioctl(void *device_data, * user interface and store the group and iommu ID. This * ensures the group is held across the reset. */ - for (i = 0; i < hdr.count; i++) { + for (group_idx = 0; group_idx < hdr.count; group_idx++) { struct vfio_group *group; - struct fd f = fdget(group_fds[i]); + struct fd f = fdget(group_fds[group_idx]); if (!f.file) { ret = -EBADF; break; @@ -1014,8 +1041,9 @@ static long vfio_pci_ioctl(void *device_data, break; } - groups[i].group = group; - groups[i].id = vfio_external_user_iommu_id(group); + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); } kfree(group_fds); @@ -1034,13 +1062,63 @@ static long vfio_pci_ioctl(void *device_data, ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_validate_devs, &info, slot); - if (!ret) - /* User has access, do the reset */ - ret = pci_reset_bus(vdev->pdev); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. 
+ */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); hot_reset_release: - for (i--; i >= 0; i--) - vfio_group_put_external_user(groups[i].group); + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); kfree(groups); return ret; @@ -1121,6 +1199,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. + * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. 
+ */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &vdev->vma_list); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + mutex_unlock(&vdev->vma_lock); + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + ret = VM_FAULT_SIGBUS; + +up_out: + up_read(&vdev->memory_lock); + return ret; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_pci_device *vdev = device_data; @@ -1170,8 +1444,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - req_len, vma->vm_page_prot); + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. + */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; } static void vfio_pci_request(void *device_data, unsigned int count) @@ -1243,6 +1523,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&vdev->irqlock); mutex_init(&vdev->ioeventfds_lock); INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { @@ -1338,12 +1621,6 @@ static struct pci_driver vfio_pci_driver = { .err_handler = &vfio_err_handlers, }; -struct vfio_devices { - struct vfio_device **devices; - int cur_index; - int max_index; -}; - static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) { struct vfio_devices *devs = data; @@ -1365,6 +1642,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) return 0; } +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. 
+ */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + /* * Attempt to do a bus/slot reset if there are devices affected by a reset for * this device that are needs_reset and all of the affected devices are unused diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 36bc8f104e42ee180fe5b44273a3f961c723e475..a1a26465d224c13eb86cb697d351d4d8348b1a7e 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -398,6 +398,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) *(__le32 *)(&p->write[off]) = cpu_to_le32(write); } +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); +} + /* * Restore the *real* BARs after we detect a FLR or backdoor reset. * (backdoor = some device specific technique that we didn't catch) @@ -558,13 +572,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, new_cmd = le32_to_cpu(val); + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); - phys_io = !!(phys_cmd & PCI_COMMAND_IO); - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); - new_io = !!(new_cmd & PCI_COMMAND_IO); + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); /* * If the user is writing mem/io enable (new_mem/io) and we @@ -581,8 +600,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, } count = vfio_default_config_write(vdev, pos, count, perm, offset, val); - if (count < 0) + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); return count; + } /* * Save current memory/io enable bits in vconfig to allow for @@ -593,6 +615,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, *virt_cmd &= cpu_to_le16(~mask); *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); } /* Emulate INTx disable */ @@ -830,8 +854,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_EXP_DEVCAP, &cap); - if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } /* @@ -909,8 +936,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_AF_CAP, &cap); - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } return count; @@ -1708,6 +1738,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN]); vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no 
implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. + */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 94594dc63c4173ad79b08ac70f5f31542422d499..bdfdd506bc58814a6b2778bbc080e91abfaebfcb 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; + u16 cmd; if (!is_irq_none(vdev)) return -EINVAL; @@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); kfree(vdev->ctx); return ret; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : @@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; + u16 cmd; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; @@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, if (vdev->ctx[vector].trigger) { irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; @@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. 
*/ + cmd = vfio_pci_memory_lock_and_enable(vdev); if (msix) { struct msi_msg msg; @@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); @@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; int i; + u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); @@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); /* * Both disable paths above use pci_intx_for_msi() to clear DisINTx diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index cde3b5d3441ad5e98a579d0050317245216ce36c..17d2bae5b013c6fa1778edb104b14fc9ca9f8b00 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -76,6 +76,11 @@ struct vfio_pci_dummy_resource { struct list_head res_next; }; +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; @@ -111,6 +116,9 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) @@ -149,6 +157,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, unsigned int type, unsigned int subtype, const struct vfio_pci_regops *ops, size_t size, u32 flags, void *data); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index a6029d0a55244d2e34bd73febacfbb147117efe0..3d0ec2bbe131f046e0cffa96d39b5d640a8d3c93 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -165,6 +165,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, size_t x_start = 0, x_end = 0; resource_size_t end; void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; if (pci_resource_start(pdev, bar)) @@ -180,6 +181,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -187,13 +196,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, * filling large ROM BARs much faster. 
*/ io = pci_map_rom(pdev, &x_start); - if (!io) - return -ENOMEM; + if (!io) { + done = -ENOMEM; + goto out; + } x_end = end; } else { int ret = vfio_pci_setup_barmap(vdev, bar); - if (ret) - return ret; + if (ret) { + done = ret; + goto out; + } io = vdev->barmap[bar]; } @@ -210,6 +223,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); return done; } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 6c50de8fd10f219cd6828d63d40ff14fa2dbe049..482e2d27b47b938912b628a1b7f893aa07690c8a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -343,6 +343,32 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, int prot, unsigned long *pfn) { @@ -384,12 +410,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, vaddr = untagged_addr(vaddr); +retry: vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - if (!follow_pfn(vma, vaddr, pfn) && - is_invalid_reserved_pfn(*pfn)) - ret = 0; + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; } up_read(&mm->mmap_sem); @@ -1195,13 +1225,16 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { - struct vfio_domain *d; + struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret; /* Arbitrarily pick the first domain in the list for lookups */ - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1219,6 +1252,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys_addr_t p; dma_addr_t i; + if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ + ret = -EINVAL; + goto unwind; + } + phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { @@ -1248,7 +1286,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; - return ret; + goto unwind; } phys = pfn << PAGE_SHIFT; @@ -1257,14 +1295,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); - if (ret) - return ret; + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } iova += size; } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma->iommu_mapped = true; } + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; } /* diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 7891bd40ebd82daf8342b88fad75f9d583c482ac..6ee320259e4f7eacc9ca3f168bf0425c4bee11fb 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -383,6 +383,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) return val < vq->num; } +static struct virtio_transport vhost_transport = { + .transport = { + .get_local_cid = vhost_transport_get_local_cid, + + .init = virtio_transport_do_socket_init, + .destruct = virtio_transport_destruct, + .release = virtio_transport_release, + .connect = virtio_transport_connect, + .shutdown = virtio_transport_shutdown, + .cancel_pkt = vhost_transport_cancel_pkt, + + .dgram_enqueue = virtio_transport_dgram_enqueue, + .dgram_dequeue = virtio_transport_dgram_dequeue, + .dgram_bind = virtio_transport_dgram_bind, + .dgram_allow = virtio_transport_dgram_allow, + + .stream_enqueue = virtio_transport_stream_enqueue, + .stream_dequeue = virtio_transport_stream_dequeue, + .stream_has_data = virtio_transport_stream_has_data, + .stream_has_space = virtio_transport_stream_has_space, + .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, + .stream_is_active = virtio_transport_stream_is_active, + .stream_allow = virtio_transport_stream_allow, + + .notify_poll_in = virtio_transport_notify_poll_in, + .notify_poll_out = virtio_transport_notify_poll_out, + .notify_recv_init = virtio_transport_notify_recv_init, + .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, + .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, + .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, + .notify_send_init = virtio_transport_notify_send_init, + .notify_send_pre_block = virtio_transport_notify_send_pre_block, + .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, + .notify_send_post_enqueue = 
virtio_transport_notify_send_post_enqueue, + + .set_buffer_size = virtio_transport_set_buffer_size, + .set_min_buffer_size = virtio_transport_set_min_buffer_size, + .set_max_buffer_size = virtio_transport_set_max_buffer_size, + .get_buffer_size = virtio_transport_get_buffer_size, + .get_min_buffer_size = virtio_transport_get_min_buffer_size, + .get_max_buffer_size = virtio_transport_get_max_buffer_size, + }, + + .send_pkt = vhost_transport_send_pkt, +}; + static void vhost_vsock_handle_tx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, @@ -439,7 +485,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && le64_to_cpu(pkt->hdr.dst_cid) == vhost_transport_get_local_cid()) - virtio_transport_recv_pkt(pkt); + virtio_transport_recv_pkt(&vhost_transport, pkt); else virtio_transport_free_pkt(pkt); @@ -792,52 +838,6 @@ static struct miscdevice vhost_vsock_misc = { .fops = &vhost_vsock_fops, }; -static struct virtio_transport vhost_transport = { - .transport = { - .get_local_cid = vhost_transport_get_local_cid, - - .init = virtio_transport_do_socket_init, - .destruct = virtio_transport_destruct, - .release = virtio_transport_release, - .connect = virtio_transport_connect, - .shutdown = virtio_transport_shutdown, - .cancel_pkt = vhost_transport_cancel_pkt, - - .dgram_enqueue = virtio_transport_dgram_enqueue, - .dgram_dequeue = virtio_transport_dgram_dequeue, - .dgram_bind = virtio_transport_dgram_bind, - .dgram_allow = virtio_transport_dgram_allow, - - .stream_enqueue = virtio_transport_stream_enqueue, - .stream_dequeue = virtio_transport_stream_dequeue, - .stream_has_data = virtio_transport_stream_has_data, - .stream_has_space = virtio_transport_stream_has_space, - .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, - .stream_is_active = virtio_transport_stream_is_active, - .stream_allow = virtio_transport_stream_allow, - - .notify_poll_in = virtio_transport_notify_poll_in, - .notify_poll_out = virtio_transport_notify_poll_out, - .notify_recv_init = virtio_transport_notify_recv_init, - .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, - .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, - .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, - .notify_send_init = virtio_transport_notify_send_init, - .notify_send_pre_block = virtio_transport_notify_send_pre_block, - .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, - .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, - - .set_buffer_size = virtio_transport_set_buffer_size, - .set_min_buffer_size = virtio_transport_set_min_buffer_size, - .set_max_buffer_size = virtio_transport_set_max_buffer_size, - .get_buffer_size = virtio_transport_get_buffer_size, - .get_min_buffer_size = virtio_transport_get_min_buffer_size, - .get_max_buffer_size = virtio_transport_get_max_buffer_size, - }, - - .send_pkt = vhost_transport_send_pkt, -}; - static int __init vhost_vsock_init(void) { int ret; diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 787792c3d08d66b93012c27deed878e820c06e12..40d5fea8513cff5c4308dcee210a52a3c93aa90c 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -21,52 +21,6 @@ config VGA_CONSOLE Say Y. 
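[Aside, not part of the patch] The vhost_vsock hunk above moves the vhost_transport ops table ahead of the TX kick handler so the handler can pass the transport to virtio_transport_recv_pkt() explicitly instead of the core resolving a single global transport. A minimal standalone sketch of that dispatch pattern follows; all names (struct transport, recv_pkt, handle_tx_kick) are illustrative and are not the kernel API.

    #include <stdio.h>

    struct pkt {
            int id;
    };

    /* Per-transport ops table: the receive callback is told which
     * transport the packet arrived on, rather than looking one up
     * through a global. */
    struct transport {
            const char *name;
            void (*recv_pkt)(const struct transport *t, struct pkt *p);
    };

    static void generic_recv_pkt(const struct transport *t, struct pkt *p)
    {
            printf("%s: received pkt %d\n", t->name, p->id);
    }

    /* Defined before the handler that references it, so no forward
     * declaration is needed -- the same reason the patch moves
     * vhost_transport up in the file. */
    static const struct transport vhost_like_transport = {
            .name = "vhost-like",
            .recv_pkt = generic_recv_pkt,
    };

    static void handle_tx_kick(struct pkt *p)
    {
            /* Mirrors virtio_transport_recv_pkt(&vhost_transport, pkt). */
            vhost_like_transport.recv_pkt(&vhost_like_transport, p);
    }

    int main(void)
    {
            struct pkt p = { .id = 1 };

            handle_tx_kick(&p);
            return 0;
    }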
-config VGACON_SOFT_SCROLLBACK
-	bool "Enable Scrollback Buffer in System RAM"
-	depends on VGA_CONSOLE
-	default n
-	help
-	  The scrollback buffer of the standard VGA console is located in
-	  the VGA RAM. The size of this RAM is fixed and is quite small.
-	  If you require a larger scrollback buffer, this can be placed in
-	  System RAM which is dynamically allocated during initialization.
-	  Placing the scrollback buffer in System RAM will slightly slow
-	  down the console.
-
-	  If you want this feature, say 'Y' here and enter the amount of
-	  RAM to allocate for this buffer. If unsure, say 'N'.
-
-config VGACON_SOFT_SCROLLBACK_SIZE
-	int "Scrollback Buffer Size (in KB)"
-	depends on VGACON_SOFT_SCROLLBACK
-	range 1 1024
-	default "64"
-	help
-	  Enter the amount of System RAM to allocate for scrollback
-	  buffers of VGA consoles. Each 64KB will give you approximately
-	  16 80x25 screenfuls of scrollback buffer.
-
-config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT
-	bool "Persistent Scrollback History for each console by default"
-	depends on VGACON_SOFT_SCROLLBACK
-	default n
-	help
-	  Say Y here if the scrollback history should persist by default when
-	  switching between consoles. Otherwise, the scrollback history will be
-	  flushed each time the console is switched. This feature can also be
-	  enabled using the boot command line parameter
-	  'vgacon.scrollback_persistent=1'.
-
-	  This feature might break your tool of choice to flush the scrollback
-	  buffer, e.g. clear(1) will work fine but Debian's clear_console(1)
-	  will be broken, which might cause security issues.
-	  You can use the escape sequence \e[3J instead if this feature is
-	  activated.
-
-	  Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each
-	  created tty device.
-	  So if you use a RAM-constrained system, say N here.
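[Aside, not part of the patch] The "approximately 16 80x25 screenfuls per 64KB" figure in the help text removed above follows from the two-byte VGA text cell (one character byte plus one attribute byte). A quick worked check as a standalone illustrative program; the 80x25 geometry and 64 KiB buffer size are taken from that help text.

    #include <stdio.h>

    int main(void)
    {
            /* A VGA text cell is two bytes: character + attribute. */
            const unsigned int cell_bytes = 2;
            const unsigned int cols = 80, rows = 25;
            const unsigned int screen_bytes = cols * rows * cell_bytes; /* 4000 */
            const unsigned int buf_bytes = 64 * 1024;                   /* 65536 */

            /* 65536 / 4000 = 16 by integer division, matching the
             * "approximately 16 80x25 screenfuls" figure. */
            printf("screenfuls per 64 KiB: %u\n", buf_bytes / screen_bytes);
            return 0;
    }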
- config MDA_CONSOLE depends on !M68K && !PARISC && ISA tristate "MDA text console (dual-headed)" diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 7f2526b43b3364665df9350bb80d4f1eb168a6c4..02b24ae8b9cb6b5041d65907067ab27b9c2c5e9c 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -31,17 +31,14 @@ #include #include -#define FONT_DATA ((unsigned char *)font_vga_8x16.data) +#define NEWPORT_LEN 0x10000 -/* borrowed from fbcon.c */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FONT_EXTRA_WORDS 3 +#define FONT_DATA ((unsigned char *)font_vga_8x16.data) static unsigned char *font_data[MAX_NR_CONSOLES]; static struct newport_regs *npregs; +static unsigned long newport_addr; static int logo_active; static int topscan; @@ -519,6 +516,7 @@ static int newport_set_font(int unit, struct console_font *op) FNTSIZE(new_data) = size; FNTCHARCNT(new_data) = op->charcount; REFCOUNT(new_data) = 0; /* usage counter */ + FNTSUM(new_data) = 0; p = new_data; for (i = 0; i < op->charcount; i++) { @@ -701,7 +699,6 @@ const struct consw newport_con = { static int newport_probe(struct gio_device *dev, const struct gio_device_id *id) { - unsigned long newport_addr; int err; if (!dev->resource.start) @@ -711,7 +708,7 @@ static int newport_probe(struct gio_device *dev, return -EBUSY; /* we only support one Newport as console */ newport_addr = dev->resource.start + 0xF0000; - if (!request_mem_region(newport_addr, 0x10000, "Newport")) + if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport")) return -ENODEV; npregs = (struct newport_regs *)/* ioremap cannot fail */ @@ -719,6 +716,11 @@ static int newport_probe(struct gio_device *dev, console_lock(); err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); + + if (err) { + iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); + } return err; } @@ -726,6 +728,7 @@ static void newport_remove(struct gio_device *dev) { give_up_console(&newport_con); iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); } static struct gio_device_id newport_ids[] = { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index bfaa9ec4bc1fd12aa228d2d4c270428bfcc71d95..55507df335bdde05429a4389224f9aaad696b92a 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -165,210 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c) write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } -#ifdef CONFIG_VGACON_SOFT_SCROLLBACK -/* software scrollback */ -struct vgacon_scrollback_info { - void *data; - int tail; - int size; - int rows; - int cnt; - int cur; - int save; - int restore; -}; - -static struct vgacon_scrollback_info *vgacon_scrollback_cur; -static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; -static bool scrollback_persistent = \ - IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); -module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); -MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); - -static void vgacon_scrollback_reset(int vc_num, size_t reset_size) -{ - struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; - - if (scrollback->data && reset_size > 0) - memset(scrollback->data, 0, reset_size); - - scrollback->cnt = 0; - scrollback->tail = 0; - 
scrollback->cur = 0; -} - -static void vgacon_scrollback_init(int vc_num) -{ - int pitch = vga_video_num_columns * 2; - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - int rows = size / pitch; - void *data; - - data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, - GFP_NOWAIT); - - vgacon_scrollbacks[vc_num].data = data; - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - - vgacon_scrollback_cur->rows = rows - 1; - vgacon_scrollback_cur->size = rows * pitch; - - vgacon_scrollback_reset(vc_num, size); -} - -static void vgacon_scrollback_switch(int vc_num) -{ - if (!scrollback_persistent) - vc_num = 0; - - if (!vgacon_scrollbacks[vc_num].data) { - vgacon_scrollback_init(vc_num); - } else { - if (scrollback_persistent) { - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - } else { - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(vc_num, size); - } - } -} - -static void vgacon_scrollback_startup(void) -{ - vgacon_scrollback_cur = &vgacon_scrollbacks[0]; - vgacon_scrollback_init(0); -} - -static void vgacon_scrollback_update(struct vc_data *c, int t, int count) -{ - void *p; - - if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || - c->vc_num != fg_console) - return; - - p = (void *) (c->vc_origin + t * c->vc_size_row); - - while (count--) { - scr_memcpyw(vgacon_scrollback_cur->data + - vgacon_scrollback_cur->tail, - p, c->vc_size_row); - - vgacon_scrollback_cur->cnt++; - p += c->vc_size_row; - vgacon_scrollback_cur->tail += c->vc_size_row; - - if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) - vgacon_scrollback_cur->tail = 0; - - if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) - vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; - - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_restore_screen(struct vc_data *c) -{ - c->vc_origin = c->vc_visible_origin; - vgacon_scrollback_cur->save = 0; - - if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { - scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, - c->vc_screenbuf_size > vga_vram_size ? 
- vga_vram_size : c->vc_screenbuf_size); - vgacon_scrollback_cur->restore = 1; - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_scrolldelta(struct vc_data *c, int lines) -{ - int start, end, count, soff; - - if (!lines) { - vgacon_restore_screen(c); - return; - } - - if (!vgacon_scrollback_cur->data) - return; - - if (!vgacon_scrollback_cur->save) { - vgacon_cursor(c, CM_ERASE); - vgacon_save_screen(c); - c->vc_origin = (unsigned long)c->vc_screenbuf; - vgacon_scrollback_cur->save = 1; - } - - vgacon_scrollback_cur->restore = 0; - start = vgacon_scrollback_cur->cur + lines; - end = start + abs(lines); - - if (start < 0) - start = 0; - - if (start > vgacon_scrollback_cur->cnt) - start = vgacon_scrollback_cur->cnt; - - if (end < 0) - end = 0; - - if (end > vgacon_scrollback_cur->cnt) - end = vgacon_scrollback_cur->cnt; - - vgacon_scrollback_cur->cur = start; - count = end - start; - soff = vgacon_scrollback_cur->tail - - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); - soff -= count * c->vc_size_row; - - if (soff < 0) - soff += vgacon_scrollback_cur->size; - - count = vgacon_scrollback_cur->cnt - start; - - if (count > c->vc_rows) - count = c->vc_rows; - - if (count) { - int copysize; - - int diff = c->vc_rows - count; - void *d = (void *) c->vc_visible_origin; - void *s = (void *) c->vc_screenbuf; - - count *= c->vc_size_row; - /* how much memory to end of buffer left? */ - copysize = min(count, vgacon_scrollback_cur->size - soff); - scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); - d += copysize; - count -= copysize; - - if (count) { - scr_memcpyw(d, vgacon_scrollback_cur->data, count); - d += count; - } - - if (diff) - scr_memcpyw(d, s, diff * c->vc_size_row); - } else - vgacon_cursor(c, CM_MOVE); -} - -static void vgacon_flush_scrollback(struct vc_data *c) -{ - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(c->vc_num, size); -} -#else -#define vgacon_scrollback_startup(...) do { } while (0) -#define vgacon_scrollback_init(...) do { } while (0) -#define vgacon_scrollback_update(...) do { } while (0) -#define vgacon_scrollback_switch(...) 
do { } while (0) - static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) @@ -382,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) vga_set_mem_top(c); } -static void vgacon_flush_scrollback(struct vc_data *c) -{ -} -#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ - static const char *vgacon_startup(void) { const char *display_desc = NULL; @@ -569,10 +360,7 @@ static const char *vgacon_startup(void) vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; - if (!vga_init_done) { - vgacon_scrollback_startup(); - vga_init_done = true; - } + vga_init_done = true; return display_desc; } @@ -863,7 +651,6 @@ static int vgacon_switch(struct vc_data *c) vgacon_doresize(c, c->vc_cols, c->vc_rows); } - vgacon_scrollback_switch(c->vc_num); return 0; /* Redrawing not needed */ } @@ -1380,7 +1167,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { - vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), @@ -1444,7 +1230,6 @@ const struct consw vga_con = { .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, - .con_flush_scrollback = vgacon_flush_scrollback, }; EXPORT_SYMBOL(vga_con); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 35ebeeccde4dfda132c9e2c9dcd988f909d2d98a..436365efae7310c3559a02356161a74925d2c5d2 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, } static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index cb93a6b3816092860f8ebb9173cb0963a3910dc9..5742a0dc774e9743deba50477c1a12b1fef74039 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -102,12 +102,6 @@ static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. 
*/ static int logo_shown = FBCON_LOGO_CANSHOW; -/* Software scrollback */ -static int fbcon_softback_size = 32768; -static unsigned long softback_buf, softback_curr; -static unsigned long softback_in; -static unsigned long softback_top, softback_end; -static int softback_lines; /* console mappings */ static int first_fb_vc; static int last_fb_vc = MAX_NR_CONSOLES - 1; @@ -148,8 +142,6 @@ static int margin_color; static const struct consw fb_con; -#define CM_SOFTBACK (8) - #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) static int fbcon_set_origin(struct vc_data *); @@ -355,18 +347,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info, return color; } -static void fbcon_update_softback(struct vc_data *vc) -{ - int l = fbcon_softback_size / vc->vc_size_row; - - if (l > 5) - softback_end = softback_buf + l * vc->vc_size_row; - else - /* Smaller scrollback makes no sense, and 0 would screw - the operation totally */ - softback_top = 0; -} - static void fb_flashcursor(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, queue); @@ -396,7 +376,7 @@ static void fb_flashcursor(struct work_struct *work) c = scr_readw((u16 *) vc->vc_pos); mode = (!ops->cursor_flash || ops->cursor_state.enable) ? CM_ERASE : CM_DRAW; - ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); } @@ -453,13 +433,7 @@ static int __init fb_console_setup(char *this_opt) } if (!strncmp(options, "scrollback:", 11)) { - options += 11; - if (*options) { - fbcon_softback_size = simple_strtoul(options, &options, 0); - if (*options == 'k' || *options == 'K') { - fbcon_softback_size *= 1024; - } - } + pr_warn("Ignoring scrollback size option\n"); continue; } @@ -988,31 +962,6 @@ static const char *fbcon_startup(void) set_blitting_type(vc, info); - if (info->fix.type != FB_TYPE_TEXT) { - if (fbcon_softback_size) { - if (!softback_buf) { - softback_buf = - (unsigned long) - kmalloc(fbcon_softback_size, - GFP_KERNEL); - if (!softback_buf) { - fbcon_softback_size = 0; - softback_top = 0; - } - } - } else { - if (softback_buf) { - kfree((void *) softback_buf); - softback_buf = 0; - softback_top = 0; - } - } - if (softback_buf) - softback_in = softback_top = softback_curr = - softback_buf; - softback_lines = 0; - } - /* Setup default font */ if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) @@ -1181,9 +1130,6 @@ static void fbcon_init(struct vc_data *vc, int init) if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); - if (vc == svc && softback_buf) - fbcon_update_softback(vc); - if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); @@ -1346,7 +1292,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - int y; int c = scr_readw((u16 *) vc->vc_pos); ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); @@ -1360,16 +1305,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode) fbcon_add_cursor_timer(info); ops->cursor_flash = (mode == CM_ERASE) ? 
0 : 1; - if (mode & CM_SOFTBACK) { - mode &= ~CM_SOFTBACK; - y = softback_lines; - } else { - if (softback_lines) - fbcon_set_origin(vc); - y = 0; - } - ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } @@ -1440,8 +1377,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, if (con_is_visible(vc)) { update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -1579,99 +1514,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) scrollback_current = 0; } -static void fbcon_redraw_softback(struct vc_data *vc, struct display *p, - long delta) -{ - int count = vc->vc_rows; - unsigned short *d, *s; - unsigned long n; - int line = 0; - - d = (u16 *) softback_curr; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - n = softback_curr + delta * vc->vc_size_row; - softback_lines -= delta; - if (delta < 0) { - if (softback_curr < softback_top && n < softback_buf) { - n += softback_end - softback_buf; - if (n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else if (softback_curr >= softback_top - && n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else { - if (softback_curr > softback_in && n >= softback_end) { - n += softback_buf - softback_end; - if (n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } else if (softback_curr <= softback_in && n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } - if (n == softback_curr) - return; - softback_curr = n; - s = (u16 *) softback_curr; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - while (count--) { - unsigned short *start; - unsigned short *le; - unsigned short c; - int x = 0; - unsigned short attr = 1; - - start = s; - le = advance_row(s, 1); - do { - c = scr_readw(s); - if (attr != (c & 0xff00)) { - attr = c & 0xff00; - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start; - start = s; - } - } - if (c == scr_readw(d)) { - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start + 1; - start = s + 1; - } else { - x++; - start++; - } - } - s++; - d++; - } while (s < le); - if (s > start) - fbcon_putcs(vc, start, s - start, line, x); - line++; - if (d == (u16 *) softback_end) - d = (u16 *) softback_buf; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - if (s == (u16 *) softback_end) - s = (u16 *) softback_buf; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - } -} - static void fbcon_redraw_move(struct vc_data *vc, struct display *p, int line, int count, int dy) { @@ -1811,31 +1653,6 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p, } } -static inline void fbcon_softback_note(struct vc_data *vc, int t, - int count) -{ - unsigned short *p; - - if (vc->vc_num != fg_console) - return; - p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row); - - while (count) { - scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row); - count--; - p = advance_row(p, 1); - softback_in += vc->vc_size_row; - if (softback_in == softback_end) - softback_in = softback_buf; - if (softback_in == softback_top) { - softback_top += vc->vc_size_row; - if (softback_top == softback_end) - softback_top = softback_buf; - } - } - softback_curr = softback_in; -} - static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, enum con_scroll 
dir, unsigned int count) { @@ -1858,8 +1675,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (softback_top) - fbcon_softback_note(vc, t, count); if (logo_shown >= 0) goto redraw_up; switch (p->scrollmode) { @@ -2152,6 +1967,9 @@ static void updatescrollmode(struct display *p, } } +#define PITCH(w) (((w) + 7) >> 3) +#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ + static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user) { @@ -2161,6 +1979,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; + if (p->userfont && FNTSIZE(vc->vc_font.data)) { + int size; + int pitch = PITCH(vc->vc_font.width); + + /* + * If user font, ensure that a possible change to user font + * height or width will not allow a font data out-of-bounds access. + * NOTE: must use original charcount in calculation as font + * charcount can change and cannot be used to determine the + * font data allocated size. + */ + if (pitch <= 0) + return -EINVAL; + size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data)); + if (size > FNTSIZE(vc->vc_font.data)) + return -EINVAL; + } + virt_w = FBCON_SWAP(ops->rotate, width, height); virt_h = FBCON_SWAP(ops->rotate, height, width); virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, @@ -2209,14 +2045,6 @@ static int fbcon_switch(struct vc_data *vc) info = registered_fb[con2fb_map[vc->vc_num]]; ops = info->fbcon_par; - if (softback_top) { - if (softback_lines) - fbcon_set_origin(vc); - softback_top = softback_curr = softback_in = softback_buf; - softback_lines = 0; - fbcon_update_softback(vc); - } - if (logo_shown >= 0) { struct vc_data *conp2 = vc_cons[logo_shown].d; @@ -2442,6 +2270,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) if (font->width <= 8) { j = vc->vc_font.height; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 32 - j); @@ -2450,6 +2281,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } } else if (font->width <= 16) { j = vc->vc_font.height * 2; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 64 - j); @@ -2457,6 +2291,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) fontdata += j; } } else if (font->width <= 24) { + if (font->charcount * (vc->vc_font.height * sizeof(u32)) > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { for (j = 0; j < vc->vc_font.height; j++) { *data++ = fontdata[0]; @@ -2469,6 +2306,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } } else { j = vc->vc_font.height * 4; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 128 - j); @@ -2550,9 +2390,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int cnt; char *old_data = NULL; - if (con_is_visible(vc) && softback_lines) - fbcon_set_origin(vc); - resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); if (p->userfont) old_data = vc->vc_font.data; @@ -2578,8 +2415,6 @@ static int 
fbcon_do_set_font(struct vc_data *vc, int w, int h, cols /= w; rows /= h; vc_resize(vc, cols, rows); - if (con_is_visible(vc) && softback_buf) - fbcon_update_softback(vc); } else if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); @@ -2623,7 +2458,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, int size; int i, csum; u8 *new_data, *data = font->data; - int pitch = (font->width+7) >> 3; + int pitch = PITCH(font->width); /* Is there a reason why fbconsole couldn't handle any charcount >256? * If not this check should be changed to charcount < 256 */ @@ -2639,7 +2474,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = h * pitch * charcount; + size = CALC_FONTSZ(h, pitch, charcount); new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); @@ -2738,19 +2573,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) static u16 *fbcon_screen_pos(struct vc_data *vc, int offset) { - unsigned long p; - int line; - - if (vc->vc_num != fg_console || !softback_lines) - return (u16 *) (vc->vc_origin + offset); - line = offset / vc->vc_size_row; - if (line >= softback_lines) - return (u16 *) (vc->vc_origin + offset - - softback_lines * vc->vc_size_row); - p = softback_curr + offset; - if (p >= softback_end) - p += softback_buf - softback_end; - return (u16 *) p; + return (u16 *) (vc->vc_origin + offset); } static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, @@ -2764,22 +2587,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, x = offset % vc->vc_cols; y = offset / vc->vc_cols; - if (vc->vc_num == fg_console) - y += softback_lines; ret = pos + (vc->vc_cols - x) * 2; - } else if (vc->vc_num == fg_console && softback_lines) { - unsigned long offset = pos - softback_curr; - - if (pos < softback_curr) - offset += softback_end - softback_buf; - offset /= 2; - x = offset % vc->vc_cols; - y = offset / vc->vc_cols; - ret = pos + (vc->vc_cols - x) * 2; - if (ret == softback_end) - ret = softback_buf; - if (ret == softback_in) - ret = vc->vc_origin; } else { /* Should not happen */ x = y = 0; @@ -2807,106 +2615,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); scr_writew(a, p++); - if (p == (u16 *) softback_end) - p = (u16 *) softback_buf; - if (p == (u16 *) softback_in) - p = (u16 *) vc->vc_origin; } } -static void fbcon_scrolldelta(struct vc_data *vc, int lines) -{ - struct fb_info *info = registered_fb[con2fb_map[fg_console]]; - struct fbcon_ops *ops = info->fbcon_par; - struct display *disp = &fb_display[fg_console]; - int offset, limit, scrollback_old; - - if (softback_top) { - if (vc->vc_num != fg_console) - return; - if (vc->vc_mode != KD_TEXT || !lines) - return; - if (logo_shown >= 0) { - struct vc_data *conp2 = vc_cons[logo_shown].d; - - if (conp2->vc_top == logo_lines - && conp2->vc_bottom == conp2->vc_rows) - conp2->vc_top = 0; - if (logo_shown == vc->vc_num) { - unsigned long p, q; - int i; - - p = softback_in; - q = vc->vc_origin + - logo_lines * vc->vc_size_row; - for (i = 0; i < logo_lines; i++) { - if (p == softback_top) - break; - if (p == softback_buf) - p = softback_end; - p -= vc->vc_size_row; - q -= vc->vc_size_row; - scr_memcpyw((u16 *) q, (u16 *) p, - vc->vc_size_row); - } - softback_in = softback_curr = p; - update_region(vc, vc->vc_origin, - logo_lines * 
vc->vc_cols); - } - logo_shown = FBCON_LOGO_CANSHOW; - } - fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK); - fbcon_redraw_softback(vc, disp, lines); - fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK); - return; - } - - if (!scrollback_phys_max) - return; - - scrollback_old = scrollback_current; - scrollback_current -= lines; - if (scrollback_current < 0) - scrollback_current = 0; - else if (scrollback_current > scrollback_max) - scrollback_current = scrollback_max; - if (scrollback_current == scrollback_old) - return; - - if (fbcon_is_inactive(vc, info)) - return; - - fbcon_cursor(vc, CM_ERASE); - - offset = disp->yscroll - scrollback_current; - limit = disp->vrows; - switch (disp->scrollmode) { - case SCROLL_WRAP_MOVE: - info->var.vmode |= FB_VMODE_YWRAP; - break; - case SCROLL_PAN_MOVE: - case SCROLL_PAN_REDRAW: - limit -= vc->vc_rows; - info->var.vmode &= ~FB_VMODE_YWRAP; - break; - } - if (offset < 0) - offset += limit; - else if (offset >= limit) - offset -= limit; - - ops->var.xoffset = 0; - ops->var.yoffset = offset * vc->vc_font.height; - ops->update_start(info); - - if (!scrollback_current) - fbcon_cursor(vc, CM_DRAW); -} - static int fbcon_set_origin(struct vc_data *vc) { - if (softback_lines) - fbcon_scrolldelta(vc, softback_lines); return 0; } @@ -2970,8 +2683,6 @@ static void fbcon_modechanged(struct fb_info *info) fbcon_set_palette(vc, color_table); update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -3413,7 +3124,6 @@ static const struct consw fb_con = { .con_font_default = fbcon_set_def_font, .con_font_copy = fbcon_copy_font, .con_set_palette = fbcon_set_palette, - .con_scrolldelta = fbcon_scrolldelta, .con_set_origin = fbcon_set_origin, .con_invert_region = fbcon_invert_region, .con_screen_pos = fbcon_screen_pos, @@ -3670,9 +3380,6 @@ static void fbcon_exit(void) } #endif - kfree((void *)softback_buf); - softback_buf = 0UL; - for_each_registered_fb(i) { int pending = 0; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 21912a3ba32f87197253a55c94ad3aee5648bd7d..c023009f297897eb48a5855c3afcb71fc0cbfae8 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -62,7 +62,7 @@ struct fbcon_ops { void (*clear_margins)(struct vc_data *vc, struct fb_info *info, int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg); + int fg, int bg); int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ @@ -152,13 +152,6 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) -/* Font */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FNTSUM(fd) (((int *)(fd))[-4]) -#define FONT_EXTRA_WORDS 4 - /* * Scroll Method */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 78f3a5621478228f139602405030da219ad95d21..71ad6967a70ee1f6b05c0604768e893c3524af1a 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int 
bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index fd098ff17574b7c19042448d82f7a1e4238bc5a7..31fe5dd651d44663ec153465c9f8a27649d8fc27 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c index c0d445294aa7c3521e1045480e2636995d93db6c..ac72d4f85f7d01ecb88c55196cf32f826d48973c 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "fbcon.h" #include "fbcon_rotate.h" diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index e165a3fad29adfdee870b6b737561b6bea938aa8..b2dd1370e39b2e2f4fb45c516b1b171601cfad48 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 93390312957ffa3d89627deffc8eb6643cfb74f5..adff8d6ffe6f9f7c9d5d481a2b59de52000e4176 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "fbcon.h" @@ -80,7 +81,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, } static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_tilecursor cursor; int use_sw = (vc->vc_cursor_type & 0x10); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 
cc1006375cacb6289116cdf2975ac932e09c47b9..f50cc1a7c31a995d8bd56b10e87b6b76567b1e5f 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -449,7 +449,7 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (efi_enabled(EFI_BOOT) && + if (efi_enabled(EFI_MEMMAP) && !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c index 2af08beb4fcb9fe405bf238a392eaed78f9dc5fc..176024d06c0ef78ae5bf57660938a85fd18ea8ca 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_util.c +++ b/drivers/video/fbdev/msm/mdss_mdp_util.c @@ -1027,7 +1027,7 @@ static int mdss_mdp_get_img(struct msmfb_data *img, data->srcp_attachment->dma_map_attrs |= - DMA_ATTR_DELAYED_UNMAP; + (DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_SKIP_CPU_SYNC); data->srcp_table = dma_buf_map_attachment(data->srcp_attachment, diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index 5d3a444083f74c713b784a474586f64c69db1642..2018e1ca33eb68e707a5cf8e4975afc4752ea2e2 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info) #else printk(KERN_ERR "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n"); + kfree(info->monspecs.modedb); return -1; #endif default: diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index a06d9c25765c544bc7efa8da382edecf5943a8e1..0bd582e845f31b643e49d23935097b59b5983e08 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -531,8 +531,11 @@ int dispc_runtime_get(void) DSSDBG("dispc_runtime_get\n"); r = pm_runtime_get_sync(&dispc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dispc.pdev->dev); + return r; + } + return 0; } EXPORT_SYMBOL(dispc_runtime_get); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index 8e1d60d48dbb0edb507093581bd45833bd563d0c..50792d31533bfa73d6d128fcc4d1505618ef1228 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -1148,8 +1148,11 @@ static int dsi_runtime_get(struct platform_device *dsidev) DSSDBG("dsi_runtime_get\n"); r = pm_runtime_get_sync(&dsi->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dsi->pdev->dev); + return r; + } + return 0; } static void dsi_runtime_put(struct platform_device *dsidev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index f0cac9e0eb944ed55f455881aeea7124d456701f..faebf9a773ba55793fd7696d57f0ac10dffd8a6b 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -779,8 +779,11 @@ int dss_runtime_get(void) DSSDBG("dss_runtime_get\n"); r = pm_runtime_get_sync(&dss.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dss.pdev->dev); + return r; + } + return 0; } void dss_runtime_put(void) @@ -844,7 +847,7 @@ static const struct dss_features omap34xx_dss_feats = { }; static const struct dss_features omap3630_dss_feats = { - .fck_div_max = 32, + .fck_div_max = 31, .dss_fck_multiplier = 1, .parent_clk_name = "dpll4_ck", .dpi_select_source = &dss_dpi_select_source_omap2_omap3, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 28de56e21c74bae5e27d45c8162674e023abe599..9fd9a02bb871de5f62301c1f85f9a6854fd8febf 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -50,9 +50,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index 2e2fcc3d6d4f7b7ade7edcea0bb24463f062f799..13f3a5ce552943f419597b5039d181a55acd8522 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -54,9 +54,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index 392464da12e419ce518d790cd800d46089fd454e..96714b4596d2dbfbddf72bc3f3904a1c99a91e50 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -402,8 +402,11 @@ static int venc_runtime_get(void) DSSDBG("venc_runtime_get\n"); r = pm_runtime_get_sync(&venc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&venc.pdev->dev); + return r; + } + return 0; } static void venc_runtime_put(void) diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index d59c8a59f58270214eb859e0c27c65cae81f1f48..90dee3e6f8bc7a6388121dab5ceebb3e643cdeea 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2446,8 +2446,8 @@ static int pxafb_remove(struct platform_device *dev) free_pages_exact(fbi->video_mem, fbi->video_mem_size); - dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, - fbi->dma_buff_phys); + dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, + fbi->dma_buff_phys); return 0; } diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index f1dcc6766d1ef457d18c5e09e35521c896154162..1781ca697f66b89ce04c7a2903e059d21b26a68f 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb, static void smtc_unmap_smem(struct smtcfb_info *sfb) { if (sfb && sfb->fb->screen_base) { + if (sfb->chip_id == 0x720) + sfb->fb->screen_base -= 0x00200000; iounmap(sfb->fb->screen_base); sfb->fb->screen_base = NULL; } diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 2c6a576ed84c72851db228b38e6d3aedfee8d67a..4b83109202b1cdbde3d30ef17f3019823b70cda0 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i char oldop = setop(0); char oldsr = setsr(0); char oldmask = selectmask(); - const char *cdat = image->data; + const unsigned char *cdat = image->data; u32 dx = image->dx; char __iomem *where; int y; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 6228b48d1e1277fa149735f2eb2d3a6189da011b..df7980aef927a4641f7a85cad7d7dbce54acc2c6 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -828,6 +828,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); + if (unlikely(vq->broken)) + return false; + virtio_mb(vq->weak_barriers); return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); } diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index 9a1c761258ce4655b29ca46249df4e4a55cda64c..5d0ea419070dc6aa83a113478ee1d798e3b90ec0 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -688,9 +688,9 @@ static int __init watchdog_init(int sioaddr) * into the module have been registered yet. */ watchdog.sioaddr = sioaddr; - watchdog.ident.options = WDIOC_SETTIMEOUT - | WDIOF_MAGICCLOSE - | WDIOF_KEEPALIVEPING; + watchdog.ident.options = WDIOF_MAGICCLOSE + | WDIOF_KEEPALIVEPING + | WDIOF_CARDRESET; snprintf(watchdog.ident.identity, sizeof(watchdog.ident.identity), "%s watchdog", @@ -704,6 +704,13 @@ static int __init watchdog_init(int sioaddr) wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); + /* + * We don't want WDTMOUT_STS to stick around till regular reboot. + * Write 1 to the bit to clear it to zero. 
+ */ + superio_outb(sioaddr, F71808FG_REG_WDT_CONF, + wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS)); + superio_exit(sioaddr); err = watchdog_set_timeout(timeout); diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 10b2090f3e5e751eb7392fc39dc2e8c1eb82fa7b..1c322caecf7f1fcb9f1968ad1caa3933475a26ca 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -947,6 +947,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) if (IS_ERR_OR_NULL(watchdog_kworker)) return -ENODEV; + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + kthread_init_work(&wd_data->work, watchdog_ping_work); hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); wd_data->timer.function = watchdog_timer_expired; @@ -967,15 +976,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) } } - device_initialize(&wd_data->dev); - wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); - wd_data->dev.class = &watchdog_class; - wd_data->dev.parent = wdd->parent; - wd_data->dev.groups = wdd->groups; - wd_data->dev.release = watchdog_core_data_release; - dev_set_drvdata(&wd_data->dev, wdd); - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); - /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 6fa7209f24f43fb8c849305608c050ee96f05b51..b23edf64c2b210c8c8f3975fe0bc0b3b4119ce0b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages) if (xen_hotplug_unpopulated) { st = reserve_additional_memory(); if (st != BP_ECANCELED) { + int rc; + mutex_unlock(&balloon_mutex); - wait_event(balloon_wq, + rc = wait_event_interruptible(balloon_wq, !list_empty(&ballooned_pages)); mutex_lock(&balloon_mutex); - return 0; + return rc ? -ENOMEM : 0; } } @@ -632,6 +634,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) out_undo: mutex_unlock(&balloon_mutex); free_xenballooned_pages(pgno, pages); + /* + * NB: free_xenballooned_pages will only subtract pgno pages, but since + * target_unpopulated is incremented with nr_pages at the start we need + * to remove the remaining ones also, or accounting will be screwed. + */ + balloon_stats.target_unpopulated -= nr_pages - pgno; return ret; } EXPORT_SYMBOL(alloc_xenballooned_pages); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 8d49b91d92cd33a343eac0cb9810412cfeaab99b..95e5a9300ff041942311f2abc29a3adde0e38cb6 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -154,7 +154,7 @@ int get_evtchn_to_irq(unsigned evtchn) /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { - return irq_get_handler_data(irq); + return irq_get_chip_data(irq); } /* Constructors for packed IRQ information. 
*/ @@ -375,7 +375,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; - irq_set_handler_data(irq, info); + irq_set_chip_data(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } @@ -424,14 +424,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; list_del(&info->list); - irq_set_handler_data(irq, NULL); + irq_set_chip_data(irq, NULL); WARN_ON(info->refcnt > 0); @@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { int evtchn = evtchn_from_irq(irq); - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (info->refcnt > 0) { info->refcnt--; @@ -1105,7 +1105,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; @@ -1139,7 +1139,7 @@ int evtchn_make_refcounted(unsigned int evtchn) if (irq == -1) return -ENOENT; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) return -ENOENT; @@ -1167,7 +1167,7 @@ int evtchn_get(unsigned int evtchn) if (irq == -1) goto done; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) goto done; diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index d97fcfc5e55879ff42513d009ef26fb345369fa6..f6589563ff711bb540d9f319fd2e4e01160aaa69 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, goto fail_detach; } + /* Check that we have zero offset. */ + if (sgt->sgl->offset) { + ret = ERR_PTR(-EINVAL); + pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n", + sgt->sgl->offset); + goto fail_unmap; + } + /* Check number of pages that imported buffer has. */ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) { ret = ERR_PTR(-EINVAL); diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 5f6b77ea34fb5b5ddef4d4fb5e3ba722194ded6f..128375ff80b8c42dd1136835d70820cbb84c5538 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); asmlinkage __visible void xen_maybe_preempt_hcall(void) { if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) - && need_resched())) { + && need_resched() && !preempt_count())) { /* * Clear flag as we may be rescheduled on a different * cpu. 
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e94a61eaeceb08a0172d50a3b8070f0c72ce9705..f7b553faadb1014a2336cc363bd23dc8af09cb14 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -365,8 +365,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, int i, j; for (i = 0; i < nr_pages; i++) { - err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_gfn(vaddr), 0); + unsigned long gfn; + + if (is_vmalloc_addr(vaddr)) + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); + else + gfn = virt_to_gfn(vaddr); + + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 619128b5583702559dae61480262644e3542b3e7..c579966a0e5c2e67edbb3021bcba1186dfad9e4f 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -515,10 +515,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) } #ifdef CONFIG_9P_FSCACHE - if (v9ses->fscache) { + if (v9ses->fscache) v9fs_cache_session_put_cookie(v9ses); - kfree(v9ses->cachetag); - } + kfree(v9ses->cachetag); #endif kfree(v9ses->uname); kfree(v9ses->aname); diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 14a6c1b90c9fb8e0652ad848fa46536cfd96484b..9a1e761b64a2bc8026f65c2d783cd8967143ac05 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. + * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. + */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. 
*/ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483f742bd42955d8995c69eb5d2891a..ba084b0b214b90a52d9d303ea629fe5a568377f3 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -794,6 +812,12 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 069273a2483f9069c77fc435ce4a7671be98589b..fc6c42eeb659c6eb4735cb68da452a9e1b4619fd 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -299,15 +299,17 @@ void afs_dynroot_depopulate(struct super_block *sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); - inode_lock(root->d_inode); - - /* Remove all the pins for dirs created for manually added cells */ - list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { - if (subdir->d_fsdata) { - subdir->d_fsdata = NULL; - dput(subdir); + if (root) { + inode_lock(root->d_inode); + + /* Remove all the pins for dirs created for manually added cells */ + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { + if (subdir->d_fsdata) { + subdir->d_fsdata = NULL; + dput(subdir); + } } - } - inode_unlock(root->d_inode); + inode_unlock(root->d_inode); + } } diff --git a/fs/block_dev.c b/fs/block_dev.c index 049101f55ca98f4c7037d87bc865dc85c827ef37..800f666c75a95606b3d36dc11d26a52889f62d72 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1793,6 +1793,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) struct gendisk *disk = bdev->bd_disk; struct block_device *victim = NULL; + /* + * Sync early if it looks like we're the last one. 
If someone else + * opens the block device between now and the decrement of bd_openers + * then we did a sync that we didn't need to, but that's not the end + * of the world and we want to avoid long (could be several minute) + * syncs while holding the mutex. + */ + if (bdev->bd_openers == 1) + sync_blockdev(bdev); + mutex_lock_nested(&bdev->bd_mutex, for_part); if (for_part) bdev->bd_part_count--; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c9943d70e2cb2df002971482da998a88bd5ff554..8007b6aacec608082c2a7fca06460d9b0aa8d649 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1347,7 +1347,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); - extent_buffer_get(eb_rewin); + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), + eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > @@ -1421,8 +1422,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - extent_buffer_get(eb); - btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); @@ -1430,6 +1429,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, + btrfs_header_level(eb)); + btrfs_tree_read_lock(eb); if (tm) __tree_mod_log_rewind(fs_info, eb, time_seq, tm); else diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 15cb96ad15d8c1bc41deda728120f9cb51413bec..554727d82d432b9e7e96645333e06994554b4cd9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3271,6 +3271,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); int btrfs_parse_options(struct btrfs_fs_info *info, char *options, unsigned long new_flags); int btrfs_sync_fs(struct super_block *sb, int wait); +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid); static inline __printf(2, 3) __cold void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) 
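A minimal usage sketch for the helper whose prototype the ctree.h hunk above exports; it follows the IS_ERR()/kfree() contract that the btrfs_show_options() and mount_subvol() hunks later in this patch rely on. The caller name below is purely illustrative and is not part of the patch.

/* Illustrative caller only -- not part of this patch. */
static void print_subvol_name(struct btrfs_fs_info *fs_info, u64 subvol_objectid)
{
	char *name;

	name = btrfs_get_subvol_name_from_objectid(fs_info, subvol_objectid);
	if (IS_ERR(name))
		return;		/* helper returns ERR_PTR() on failure */

	pr_info("subvolid %llu resolves to %s\n", subvol_objectid, name);
	kfree(name);		/* caller owns the returned string */
}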
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9740f7b5d4fb4f4030b6c45a26e9bd0d0f62c5dc..cb21ffd3bba7cb0b07c98bbc79b6d2d2e0188036 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1500,9 +1500,16 @@ int btrfs_init_fs_root(struct btrfs_root *root) spin_lock_init(&root->ino_cache_lock); init_waitqueue_head(&root->ino_cache_wait); - ret = get_anon_bdev(&root->anon_dev); - if (ret) - goto fail; + /* + * Don't assign anonymous block device to roots that are not exposed to + * userspace, the id pool is limited to 1M + */ + if (is_fstree(root->root_key.objectid) && + btrfs_root_refs(&root->root_item) > 0) { + ret = get_anon_bdev(&root->anon_dev); + if (ret) + goto fail; + } mutex_lock(&root->objectid_mutex); ret = btrfs_find_highest_objectid(root, @@ -4437,6 +4444,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) cache->io_ctl.inode = NULL; iput(inode); } + ASSERT(cache->io_ctl.pages == NULL); btrfs_put_block_group(cache); } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 1f3755b3a37ae00af6e350d766c3ae65c6973302..665ec85cb09b831deee4c2d0fe4c6031ac01b1ef 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, return type; } -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, - int check_generation) +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; @@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } -static struct dentry *btrfs_get_parent(struct dentry *child) +struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = d_inode(child); struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index 57488ecd7d4ef59adb0c57f3836d2803eb3170ed..f32f4113c976a95a072a65e558810ebc561a2a51 100644 --- a/fs/btrfs/export.h +++ b/fs/btrfs/export.h @@ -18,4 +18,9 @@ struct btrfs_fid { u64 parent_root_objectid; } __attribute__ ((packed)); +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation); +struct dentry *btrfs_get_parent(struct dentry *child); + #endif diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ec3aa76d19b7f4a81f01b41e1a1e575acc510543..ce5e0f6c6af4f74d56a27783c3cdff1d8caa5be6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1057,12 +1057,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_BLOCK_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. */ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else if (is_data == BTRFS_REF_TYPE_DATA) { @@ -1071,12 +1070,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_DATA_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. 
*/ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else { @@ -1086,8 +1084,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, } btrfs_print_leaf((struct extent_buffer *)eb); - btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d", - eb->start, type); + btrfs_err(eb->fs_info, + "eb %llu iref 0x%lx invalid extent inline ref type %d", + eb->start, (unsigned long)iref, type); WARN_ON(1); return BTRFS_REF_TYPE_INVALID; @@ -9099,8 +9098,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, */ if (!for_reloc && !root_dropped) btrfs_add_dead_root(root); - if (err && err != -EAGAIN) - btrfs_handle_fs_error(fs_info, err, NULL); return err; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 520b70b543314ef26ad7963e9188d5cf6395caae..82d597b16152c109adc27efc6d35df2b6c4436ee 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4270,6 +4270,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) /* once for us */ free_extent_map(em); + + cond_resched(); /* Allow large-extent preemption. */ } } return try_release_extent_state(tree, page, mask); @@ -5375,9 +5377,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, } } -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dstv, - unsigned long start, unsigned long len) +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dstv, + unsigned long start, unsigned long len) { size_t cur; size_t offset; @@ -5398,7 +5400,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); - if (copy_to_user(dst, kaddr + offset, cur)) { + if (probe_user_write(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index a3598b24441e14a51466ff13be9613fc08822f49..d5089cadd7c49209722066dc958c07a1db9317aa 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -448,9 +448,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, void read_extent_buffer(const struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len); -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dst, unsigned long start, - unsigned long len); +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dst, unsigned long start, + unsigned long len); void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src); void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, const void *src); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index dc1841855a69abf770445f81ba8f54926d474936..646152f305843039da5e9ee0335838432c620f31 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3010,14 +3010,14 @@ static int btrfs_zero_range(struct inode *inode, if (ret < 0) goto out; space_reserved = true; - ret = btrfs_qgroup_reserve_data(inode, &data_reserved, - alloc_start, bytes_to_reserve); - if (ret) - goto out; ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); if (ret) goto out; + ret = btrfs_qgroup_reserve_data(inode, &data_reserved, + alloc_start, bytes_to_reserve); + if (ret) + goto out; ret = btrfs_prealloc_file_range(inode, mode, alloc_start, alloc_end - alloc_start, i_blocksize(inode), diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 
c9965e89097fdc2d593723c00f03fa6f5bce35f2..652b0b16e93e2566623c9b0162b8a190facd63c1 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1167,7 +1167,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root, ret = update_cache_item(trans, root, inode, path, offset, io_ctl->entries, io_ctl->bitmaps); out: - io_ctl_free(io_ctl); if (ret) { invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; @@ -1332,6 +1331,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, * them out later */ io_ctl_drop_pages(io_ctl); + io_ctl_free(io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state); @@ -2169,7 +2169,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { - struct btrfs_free_space *left_info; + struct btrfs_free_space *left_info = NULL; struct btrfs_free_space *right_info; bool merged = false; u64 offset = info->offset; @@ -2184,7 +2184,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); - else + else if (!right_info) left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8dd2702ce859e261e5300f97419001cb4fdd82d8..64d459ca76d06f9676752dd273c8341befb67480 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -628,7 +628,21 @@ static noinline void compress_file_range(struct inode *inode, PAGE_SET_WRITEBACK | page_error_op | PAGE_END_WRITEBACK); - goto free_pages_out; + + /* + * Ensure we only free the compressed pages if we have + * them allocated, as we can still reach here with + * inode_need_compress() == false. + */ + if (pages) { + for (i = 0; i < nr_pages; i++) { + WARN_ON(pages[i]->mapping); + put_page(pages[i]); + } + kfree(pages); + } + + return; } } @@ -706,13 +720,6 @@ static noinline void compress_file_range(struct inode *inode, *num_added += 1; return; - -free_pages_out: - for (i = 0; i < nr_pages; i++) { - WARN_ON(pages[i]->mapping); - put_page(pages[i]); - } - kfree(pages); } static void free_async_extent_pages(struct async_extent *async_extent) @@ -4458,6 +4465,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) } } + free_anon_bdev(dest->anon_dev); + dest->anon_dev = 0; out_end_trans: trans->block_rsv = NULL; trans->bytes_reserved = 0; @@ -5553,12 +5562,14 @@ void btrfs_evict_inode(struct inode *inode) } /* - * this returns the key found in the dir entry in the location pointer. + * Return the key found in the dir entry in the location pointer, fill @type + * with BTRFS_FT_*, and return 0. + * * If no dir entries were found, returns -ENOENT. * If found a corrupted location in dir entry, returns -EUCLEAN. 
*/ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, - struct btrfs_key *location) + struct btrfs_key *location, u8 *type) { const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; @@ -5591,6 +5602,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, __func__, name, btrfs_ino(BTRFS_I(dir)), location->objectid, location->type, location->offset); } + if (!ret) + *type = btrfs_dir_type(path->nodes[0], di); out: btrfs_free_path(path); return ret; @@ -5826,6 +5839,11 @@ static struct inode *new_simple_dir(struct super_block *s, return inode; } +static inline u8 btrfs_inode_type(struct inode *inode) +{ + return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; +} + struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) { struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); @@ -5833,18 +5851,31 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *sub_root = root; struct btrfs_key location; + u8 di_type = 0; int index; int ret = 0; if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); - ret = btrfs_inode_by_name(dir, dentry, &location); + ret = btrfs_inode_by_name(dir, dentry, &location, &di_type); if (ret < 0) return ERR_PTR(ret); if (location.type == BTRFS_INODE_ITEM_KEY) { inode = btrfs_iget(dir->i_sb, &location, root, NULL); + if (IS_ERR(inode)) + return inode; + + /* Do extra check against inode mode with di_type */ + if (btrfs_inode_type(inode) != di_type) { + btrfs_crit(fs_info, +"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", + inode->i_mode, btrfs_inode_type(inode), + di_type); + iput(inode); + return ERR_PTR(-EUCLEAN); + } return inode; } @@ -6455,11 +6486,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(ret); } -static inline u8 btrfs_inode_type(struct inode *inode) -{ - return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; -} - /* * utility function to add 'inode' into 'parent_inode' with * a give name and a given sequence number. @@ -6993,6 +7019,14 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, extent_start = found_key.offset; if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { + /* Only regular file could have regular/prealloc extent */ + if (!S_ISREG(inode->vfs_inode.i_mode)) { + err = -EUCLEAN; + btrfs_crit(fs_info, + "regular/prealloc extent found for non-regular inode %llu", + btrfs_ino(inode)); + goto out; + } extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); @@ -8879,20 +8913,17 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, /* * Qgroup reserved space handler * Page here will be either - * 1) Already written to disk - * In this case, its reserved space is released from data rsv map - * and will be freed by delayed_ref handler finally. - * So even we call qgroup_free_data(), it won't decrease reserved - * space. - * 2) Not written to disk - * This means the reserved space should be freed here. However, - * if a truncate invalidates the page (by clearing PageDirty) - * and the page is accounted for while allocating extent - * in btrfs_check_data_free_space() we let delayed_ref to - * free the entire extent. + * 1) Already written to disk or ordered extent already submitted + * Then its QGROUP_RESERVED bit in io_tree is already cleaned. + * Qgroup will be handled by its qgroup_record then. 
+ * btrfs_qgroup_free_data() call will do nothing here. + * + * 2) Not written to disk yet + * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED + * bit of its io_tree, and free the qgroup reserved data space. + * Since the IO will never happen for this page. */ - if (PageDirty(page)) - btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); + btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a5ae02bf3652b48f9ea64a38c9a7b459ec07545d..01a90fa03c24ffe99bf5eb03651fa61ecc642a07 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2079,9 +2079,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. Otherwise we'll fault and then copy the buffer in + * properly this next time through + */ + if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2089,10 +2094,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * * sk_offset so we copy the full thing again. + */ + if (read_extent_buffer_to_user_nofault(leaf, up, + item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2180,6 +2189,11 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { + ret = fault_in_pages_writeable(ubuf + sk_offset, + *buf_size - sk_offset); + if (ret) + break; + ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index df49931ffe92292bbc5c1aa400ad4c519151ff42..4b217e9a581ce63359c66a529830ad31a930a18d 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * offset is supposed to be a tree block which * must be aligned to nodesize. */ - if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = (struct btrfs_extent_data_ref *)(&iref->offset); @@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * must be aligned to nodesize. 
*/ if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; default: pr_cont("(extent %llu has INVALID ref type %d)\n", diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index dbc685ca017f2fe899da26ceaf637ad47eab7940..5dec52bd2897b8e91c6597c0528e380e9d99e46c 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -297,6 +297,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info, exist_re = insert_root_entry(&exist->roots, re); if (exist_re) kfree(re); + } else { + kfree(re); } kfree(be); return exist; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 6a5b16a119eda27c80a746b0c50948ba21c889fc..40f5b4dcb927665b93c216f7c810638bc0e63502 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -432,6 +432,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, char *compress_type; bool compress_force = false; enum btrfs_compression_type saved_compress_type; + int saved_compress_level; bool saved_compress_force; int no_compress = 0; @@ -514,6 +515,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, info->compress_type : BTRFS_COMPRESS_NONE; saved_compress_force = btrfs_test_opt(info, FORCE_COMPRESS); + saved_compress_level = info->compress_level; if (token == Opt_compress || token == Opt_compress_force || strncmp(args[0].from, "zlib", 4) == 0) { @@ -537,6 +539,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } else if (strncmp(args[0].from, "lzo", 3) == 0) { compress_type = "lzo"; info->compress_type = BTRFS_COMPRESS_LZO; + info->compress_level = 0; btrfs_set_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATASUM); @@ -552,6 +555,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, no_compress = 0; } else if (strncmp(args[0].from, "no", 2) == 0) { compress_type = "no"; + info->compress_level = 0; + info->compress_type = 0; btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); compress_force = false; @@ -572,11 +577,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, */ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); } - if ((btrfs_test_opt(info, COMPRESS) && - (info->compress_type != saved_compress_type || - compress_force != saved_compress_force)) || - (!btrfs_test_opt(info, COMPRESS) && - no_compress == 1)) { + if (no_compress == 1) { + btrfs_info(info, "use no compression"); + } else if ((info->compress_type != saved_compress_type) || + (compress_force != saved_compress_force) || + (info->compress_level != saved_compress_level)) { btrfs_info(info, "%s %s compression, level %d", (compress_force) ? 
"force" : "use", compress_type, info->compress_level); @@ -996,8 +1001,8 @@ static int btrfs_parse_subvol_options(const char *options, char **subvol_name, return error; } -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, - u64 subvol_objectid) +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *fs_root; @@ -1278,6 +1283,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); const char *compress_type; + const char *subvol_name; if (btrfs_test_opt(info, DEGRADED)) seq_puts(seq, ",degraded"); @@ -1362,8 +1368,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",ref_verify"); seq_printf(seq, ",subvolid=%llu", BTRFS_I(d_inode(dentry))->root->root_key.objectid); - seq_puts(seq, ",subvol="); - seq_dentry(seq, dentry, " \t\n\\"); + subvol_name = btrfs_get_subvol_name_from_objectid(info, + BTRFS_I(d_inode(dentry))->root->root_key.objectid); + if (!IS_ERR(subvol_name)) { + seq_puts(seq, ",subvol="); + seq_escape(seq, subvol_name, " \t\n\\"); + kfree(subvol_name); + } return 0; } @@ -1408,8 +1419,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, goto out; } } - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), - subvol_objectid); + subvol_name = btrfs_get_subvol_name_from_objectid( + btrfs_sb(mnt->mnt_sb), subvol_objectid); if (IS_ERR(subvol_name)) { root = ERR_CAST(subvol_name); subvol_name = NULL; @@ -2314,9 +2325,7 @@ static int btrfs_unfreeze(struct super_block *sb) static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); - struct btrfs_fs_devices *cur_devices; struct btrfs_device *dev, *first_dev = NULL; - struct list_head *head; /* * Lightweight locking of the devices. We should not need @@ -2326,18 +2335,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) * least until until the rcu_read_unlock. 
*/ rcu_read_lock(); - cur_devices = fs_info->fs_devices; - while (cur_devices) { - head = &cur_devices->devices; - list_for_each_entry_rcu(dev, head, dev_list) { - if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) - continue; - if (!dev->name) - continue; - if (!first_dev || dev->devid < first_dev->devid) - first_dev = dev; - } - cur_devices = cur_devices->seed; + list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) + continue; + if (!dev->name) + continue; + if (!first_dev || dev->devid < first_dev->devid) + first_dev = dev; } if (first_dev) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index aefb0169d46d75145e4c257adf8da11cc03e4b70..afec808a763b1e344dd9a4c3a7c286e10548f4e4 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" @@ -766,7 +767,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, { int error = 0; struct btrfs_device *dev; + unsigned int nofs_flag; + nofs_flag = memalloc_nofs_save(); list_for_each_entry(dev, &fs_devices->devices, dev_list) { struct hd_struct *disk; struct kobject *disk_kobj; @@ -785,6 +788,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, if (error) break; } + memalloc_nofs_restore(nofs_flag); return error; } diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 2eec1dd3803af1503e99d6c6c99c575e66a9bc33..82d874b1043837786351e4c3fb3f29c3b547fd64 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -38,7 +38,13 @@ static struct file_system_type test_type = { struct inode *btrfs_new_test_inode(void) { - return new_inode(test_mnt->mnt_sb); + struct inode *inode; + + inode = new_inode(test_mnt->mnt_sb); + if (inode) + inode_init_owner(inode, NULL, S_IFREG); + + return inode; } static int btrfs_init_test_fs(void) diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 64043f02882067b5ab94695cb0267dacbd837c65..648633aae968c7dd9a1d43bc7ba2da8d893093be 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -232,6 +232,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) return ret; } + inode->i_mode = S_IFREG; BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; BTRFS_I(inode)->location.offset = 0; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 928ac2c4899e752f2868d3e0e396780a406bb42e..3e903e6a33870293d850660f565346b10b0c0e39 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3422,11 +3422,13 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_free_path(path); out_unlock: mutex_unlock(&dir->log_mutex); - if (ret == -ENOSPC) { + if (err == -ENOSPC) { btrfs_set_log_full_commit(root->fs_info, trans); - ret = 0; - } else if (ret < 0) - btrfs_abort_transaction(trans, ret); + err = 0; + } else if (err < 0 && err != -ENOENT) { + /* ENOENT can be returned if the entry hasn't been fsynced yet */ + btrfs_abort_transaction(trans, err); + } btrfs_end_log_trans(root); @@ -3988,11 +3990,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, fs_info->csum_root, ds + cs, ds + cs + cl - 1, &ordered_sums, 0); - if (ret) { - btrfs_release_path(dst_path); - kfree(ins_data); - return ret; - } + if (ret) + break; } } } @@ -4005,7 +4004,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, * we have to do 
this after the loop above to avoid changing the * log tree while trying to change the log tree. */ - ret = 0; while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e0ba1e9ddcdf045171a19889b39ac724fab9349a..815b655b8f10a0480500ea95fc5ee13527c95045 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -155,7 +156,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * * global::fs_devs - add, remove, updates to the global list * - * does not protect: manipulation of the fs_devices::devices list! + * does not protect: manipulation of the fs_devices::devices list in general + * but in mount context it could be used to exclude list modifications by eg. + * scan ioctl * * btrfs_device::name - renames (write side), read is RCU * @@ -168,6 +171,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * + * Is not required at mount and close times, because our device list is + * protected by the uuid_mutex at that point. + * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from @@ -656,6 +662,11 @@ static void btrfs_free_stale_devices(const char *path, } } +/* + * This is only used on mount, and we are protected from competing things + * messing with our fs_devices by the uuid_mutex, thus we do not need the + * fs_devices->device_list_mutex here. + */ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, fmode_t flags, void *holder) @@ -1153,8 +1164,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int ret; lockdep_assert_held(&uuid_mutex); + /* + * The device_list_mutex cannot be taken here in case opening the + * underlying device takes further locks like bd_mutex. + * + * We also don't need the lock here as this is called during mount and + * exclusion is provided by uuid_mutex + */ - mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; @@ -1162,7 +1179,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_sort(NULL, &fs_devices->devices, devid_cmp); ret = open_fs_devices(fs_devices, flags, holder); } - mutex_unlock(&fs_devices->device_list_mutex); return ret; } @@ -4157,6 +4173,7 @@ static int btrfs_uuid_scan_kthread(void *data) goto skip; } update_tree: + btrfs_release_path(path); if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.uuid, BTRFS_UUID_KEY_SUBVOL, @@ -4181,6 +4198,7 @@ static int btrfs_uuid_scan_kthread(void *data) } skip: + btrfs_release_path(path); if (trans) { ret = btrfs_end_transaction(trans); trans = NULL; @@ -4188,7 +4206,6 @@ static int btrfs_uuid_scan_kthread(void *data) break; } - btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) { @@ -6276,8 +6293,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; + unsigned int nofs_flag; + /* + * We call this under the chunk_mutex, so we want to use NOFS for this + * allocation, however we don't want to change btrfs_alloc_device() to + * always do NOFS because we use it in a lot of other GFP_KERNEL safe + * places. 
+ */ + nofs_flag = memalloc_nofs_save(); device = btrfs_alloc_device(NULL, &devid, dev_uuid); + memalloc_nofs_restore(nofs_flag); if (IS_ERR(device)) return device; diff --git a/fs/buffer.c b/fs/buffer.c index 5205150b904bbbe0569bbbd9e0ad9cc38615e7ba..4f2c847c5c2d0fcf2cb1004ac8f22f236f415026 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3232,6 +3232,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { + /* + * The bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. + */ + if (!buffer_mapped(bh)) { + unlock_buffer(bh); + return -EIO; + } + get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(REQ_OP_WRITE, op_flags, bh); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index a2d4eed27f804c0c51e4ea9b718387946e92fa6c..c0dbf8b7762b4e91cacd69135e1bb3f0575497af 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2015,12 +2015,24 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, if (mutex_trylock(&session->s_mutex) == 0) { dout("inverting session/ino locks on %p\n", session); + session = ceph_get_mds_session(session); spin_unlock(&ci->i_ceph_lock); if (took_snap_rwsem) { up_read(&mdsc->snap_rwsem); took_snap_rwsem = 0; } - mutex_lock(&session->s_mutex); + if (session) { + mutex_lock(&session->s_mutex); + ceph_put_mds_session(session); + } else { + /* + * Because we take the reference while + * holding the i_ceph_lock, it should + * never be NULL. Throw a warning if it + * ever is. + */ + WARN_ON_ONCE(true); + } goto retry; } } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index faca455bd3c696265121f4e4fe90433bbf66ba76..4ce2752c8b71c5edbc0db06e1477921ee9eb7d93 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1819,6 +1819,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 1e438e0faf77ef976cbdf6cfb44fa3ef3314020f..3c24fb77ef325a81aeabacd77adbf8b6c9109165 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page, info_caps = le32_to_cpu(info->cap.caps); /* prealloc new cap struct */ - if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) + if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) { new_cap = ceph_get_cap(mdsc, caps_reservation); + if (!new_cap) + return -ENOMEM; + } /* * prealloc xattr data, if it looks like we'll need it. 
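
The btrfs hunks above (btrfs_sysfs_add_device_link() and add_missing_dev()) bracket their allocations with memalloc_nofs_save()/memalloc_nofs_restore() so that a GFP_KERNEL allocation made under chunk_mutex or during mount cannot recurse into filesystem reclaim. As a rough illustration of that scoped-NOFS pattern only (struct foo and alloc_foo_nofs() are made-up names, not part of any patch here):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    struct foo {
            int dummy;
    };

    /*
     * Illustrative sketch: allocate from a context where direct reclaim
     * must not re-enter the filesystem, e.g. while holding a lock that
     * the writeback path can also take.
     */
    static struct foo *alloc_foo_nofs(void)
    {
            unsigned int nofs_flag;
            struct foo *p;

            nofs_flag = memalloc_nofs_save();   /* allocations below behave as GFP_NOFS */
            p = kzalloc(sizeof(*p), GFP_KERNEL);
            memalloc_nofs_restore(nofs_flag);   /* restore the previous allocation context */

            return p;
    }
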
only diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a2e903203bf9fbc6904fa93dd85c126cfae29d36..5f3707a90e7f70bef4f1e9e2f2a20fc44becdfde 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3615,6 +3615,9 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); ceph_check_delayed_caps(mdsc); + if (mdsc->stopping) + return; + mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; renew_caps = time_after_eq(jiffies, HZ*renew_interval + @@ -3682,7 +3685,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) return -ENOMEM; } - fsc->mdsc = mdsc; init_completion(&mdsc->safe_umount_waiters); init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); @@ -3723,6 +3725,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) strscpy(mdsc->nodename, utsname()->nodename, sizeof(mdsc->nodename)); + + fsc->mdsc = mdsc; return 0; } @@ -3949,7 +3953,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) { dout("stop\n"); - cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + /* + * Make sure the delayed work stopped before releasing + * the resources. + * + * Because the cancel_delayed_work_sync() will only + * guarantee that the work finishes executing. But the + * delayed work will re-arm itself again after that. + */ + flush_delayed_work(&mdsc->delayed_work); + if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 71c2dd0c7f038ec0b70f5dbe1d3e3d8654d42a32..2c632793c88c53b0c1ec8603110a3b72060b7eec 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -259,8 +259,9 @@ struct smb_version_operations { int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); - void (*downgrade_oplock)(struct TCP_Server_Info *, - struct cifsInodeInfo *, bool); + void (*downgrade_oplock)(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); @@ -1160,6 +1161,8 @@ struct cifsFileInfo { unsigned int f_flags; bool invalidHandle:1; /* file closed via session abend */ bool oplock_break_cancelled:1; + unsigned int oplock_epoch; /* epoch from the lease break */ + __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ @@ -1300,7 +1303,7 @@ struct cifsInodeInfo { unsigned int epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ -#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ +#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 128cbd69911b46e9b2095b8776bc64e251618ad5..5cb15649adb07d8f3404b2c35fdd92dac038c649 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3804,7 +3804,8 @@ readpages_get_pages(struct 
address_space *mapping, struct list_head *page_list, break; __SetPageLocked(page); - if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { + rc = add_to_page_cache_locked(page, mapping, page->index, gfp); + if (rc) { __ClearPageLocked(page); break; } @@ -3820,6 +3821,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc; + int err = 0; struct list_head tmplist; struct cifsFileInfo *open_file = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); @@ -3860,7 +3862,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * the order of declining indexes. When we put the pages in * the rdata->pages, then we want them in increasing order. */ - while (!list_empty(page_list)) { + while (!list_empty(page_list) && !err) { unsigned int i, nr_pages, bytes, rsize; loff_t offset; struct page *page, *tpage; @@ -3883,9 +3885,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, return 0; } - rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, + nr_pages = 0; + err = readpages_get_pages(mapping, page_list, rsize, &tmplist, &nr_pages, &offset, &bytes); - if (rc) { + if (!nr_pages) { add_credits_and_wake_if(server, credits, 0); break; } @@ -4185,12 +4188,13 @@ void cifs_oplock_break(struct work_struct *work) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; int rc = 0; + bool purge_cache = false; wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); - server->ops->downgrade_oplock(server, cinode, - test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, + cfile->oplock_epoch, &purge_cache); if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && cifs_has_mand_locks(cinode)) { @@ -4205,18 +4209,21 @@ void cifs_oplock_break(struct work_struct *work) else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); - if (!CIFS_CACHE_READ(cinode)) { + if (!CIFS_CACHE_READ(cinode) || purge_cache) { rc = filemap_fdatawait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); cifs_zap_mapping(inode); } cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); + if (CIFS_CACHE_WRITE(cinode)) + goto oplock_break_ack; } rc = cifs_push_locks(cfile); if (rc) cifs_dbg(VFS, "Push locks rc = %d\n", rc); +oplock_break_ack: /* * releasing stale oplock after recent reconnect of smb session using * a now incorrect file handle is not a data integrity issue but do diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e45f8e321371c2253a871bdcf12f963edbfe4439..dd67f56ea61e56967cc7ad5316bfb21d931ced27 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -477,21 +477,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags); - /* - * Set flag if the server downgrades the oplock - * to L2 else clear. 
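
The cifs changes around here stop deciding the downgrade when the break is parsed; instead the raw level and epoch from the wire are stashed on the open file and the queued worker applies the policy later via ->downgrade_oplock(). A generic sketch of that record-now, interpret-later shape (hypothetical names, not the cifs code):

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    /* Hypothetical example: capture what the server sent, decide in a worker. */
    struct break_info {
            spinlock_t lock;
            u32 level;              /* raw level from the wire */
            unsigned int epoch;     /* raw epoch from the wire */
            struct work_struct work;
    };

    static void record_break(struct break_info *bi, u32 level, unsigned int epoch)
    {
            spin_lock(&bi->lock);
            bi->level = level;      /* no policy here, just capture the state */
            bi->epoch = epoch;
            spin_unlock(&bi->lock);
            schedule_work(&bi->work);       /* worker applies the downgrade policy */
    }
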
- */ - if (pSMB->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - - cifs_queue_oplock_break(netfile); + netfile->oplock_epoch = 0; + netfile->oplock_level = pSMB->OplockLevel; netfile->oplock_break_cancelled = false; + cifs_queue_oplock_break(netfile); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index c7f0c8566442592a9aeece43eaaf0fa312e7f84e..0b7f92451284879794105226f49d6f315bba4d94 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -381,12 +381,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) static void cifs_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - cifs_set_oplock_level(cinode, OPLOCK_READ); - else - cifs_set_oplock_level(cinode, 0); + cifs_set_oplock_level(cinode, oplock); } static bool diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 14265b4bbcc00a22a640eb5912e60a105a17639d..7d875a47d022630961fcc815bfecbfb8b8edee2b 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work) kfree(lw); } +static void +smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key, + __le32 new_lease_state) +{ + struct smb2_lease_break_work *lw; + + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); + if (!lw) { + cifs_put_tlink(tlink); + return; + } + + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); + lw->tlink = tlink; + lw->lease_state = new_lease_state; + memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE); + queue_work(cifsiod_wq, &lw->lease_break); +} + static bool -smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, - struct smb2_lease_break_work *lw) +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { - bool found; __u8 lease_state; struct list_head *tmp; struct cifsFileInfo *cfile; - struct cifs_pending_open *open; struct cifsInodeInfo *cinode; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); @@ -534,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, cifs_dbg(FYI, "found in the open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state); if (ack_req) cfile->oplock_break_cancelled = false; @@ -543,40 +559,38 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); - /* - * Set or clear flags depending on the lease state being READ. - * HANDLE caching flag should be added when the client starts - * to defer closing remote file handles with HANDLE leases. 
- */ - if (lease_state & SMB2_LEASE_READ_CACHING_HE) - set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); + cfile->oplock_level = lease_state; cifs_queue_oplock_break(cfile); - kfree(lw); return true; } - found = false; + return false; +} + +static struct cifs_pending_open * +smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, + struct smb2_lease_break *rsp) +{ + __u8 lease_state = le32_to_cpu(rsp->NewLeaseState); + int ack_req = le32_to_cpu(rsp->Flags & + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); + struct cifs_pending_open *open; + struct cifs_pending_open *found = NULL; + list_for_each_entry(open, &tcon->pending_opens, olist) { if (memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { - found = true; - memcpy(lw->lease_key, open->lease_key, - SMB2_LEASE_KEY_SIZE); - lw->tlink = cifs_get_tlink(open->tlink); - queue_work(cifsiod_wq, &lw->lease_break); + found = open; } cifs_dbg(FYI, "found in the pending open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state); open->oplock = lease_state; } @@ -592,14 +606,7 @@ smb2_is_valid_lease_break(char *buffer) struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; - struct smb2_lease_break_work *lw; - - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); - if (!lw) - return false; - - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); - lw->lease_state = rsp->NewLeaseState; + struct cifs_pending_open *open; cifs_dbg(FYI, "Checking for lease break\n"); @@ -617,11 +624,27 @@ smb2_is_valid_lease_break(char *buffer) spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); - if (smb2_tcon_has_lease(tcon, rsp, lw)) { + if (smb2_tcon_has_lease(tcon, rsp)) { spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } + open = smb2_tcon_find_pending_open_lease(tcon, + rsp); + if (open) { + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; + struct tcon_link *tlink; + + tlink = cifs_get_tlink(open->tlink); + memcpy(lease_key, open->lease_key, + SMB2_LEASE_KEY_SIZE); + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); + smb2_queue_pending_open_break(tlink, + lease_key, + rsp->NewLeaseState); + return true; + } spin_unlock(&tcon->open_file_lock); if (tcon->crfid.is_valid && @@ -639,7 +662,6 @@ smb2_is_valid_lease_break(char *buffer) } } spin_unlock(&cifs_tcp_ses_lock); - kfree(lw); cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); return false; } @@ -701,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); - /* - * Set flag if the server downgrades the oplock - * to L2 else clear. 
- */ - if (rsp->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = 0; + cfile->oplock_level = rsp->OplockLevel; + spin_unlock(&cfile->file_info_lock); cifs_queue_oplock_break(cfile); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 2a523139a05fb87ceb70bfecfe94acb16c3adffb..3d63c76ed09895533e30e6530791a549ca97c904 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -950,7 +950,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, return rc; } - len = sizeof(ea) + ea_name_len + ea_value_len + 1; + len = sizeof(*ea) + ea_name_len + ea_value_len + 1; ea = kzalloc(len, GFP_KERNEL); if (ea == NULL) { SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); @@ -2358,22 +2358,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, static void smb2_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, - 0, NULL); - else - server->ops->set_oplock_level(cinode, 0, 0, NULL); + server->ops->set_oplock_level(cinode, oplock, 0, NULL); } static void -smb21_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) +smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); + +static void +smb3_downgrade_oplock(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - server->ops->set_oplock_level(cinode, - set_level2 ? SMB2_LEASE_READ_CACHING_HE : - 0, 0, NULL); + unsigned int old_state = cinode->oplock; + unsigned int old_epoch = cinode->epoch; + unsigned int new_state; + + if (epoch > old_epoch) { + smb21_set_oplock_level(cinode, oplock, 0, NULL); + cinode->epoch = epoch; + } + + new_state = cinode->oplock; + *purge_cache = false; + + if ((old_state & CIFS_CACHE_READ_FLG) != 0 && + (new_state & CIFS_CACHE_READ_FLG) == 0) + *purge_cache = true; + else if (old_state == new_state && (epoch - old_epoch > 1)) + *purge_cache = true; } static void @@ -3449,7 +3465,7 @@ struct smb_version_operations smb21_operations = { .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, @@ -3546,7 +3562,7 @@ struct smb_version_operations smb30_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, @@ -3651,7 +3667,7 @@ struct smb_version_operations smb311_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, diff --git a/fs/cifs/smb2pdu.c 
b/fs/cifs/smb2pdu.c index e2d2b749c8f383e598fa0c2de0f371603873c376..379ac8caa29a6fc04850f1aa89c63f70ed227737 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1132,6 +1132,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); + if (rc == -ENOKEY) + cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); spnego_key = NULL; goto out; } diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 308c682fa4d3b57db358b041b390fad66390370c..44501f8cbd75e0d25cfe4c06bb4d19b045b40bdb 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -1209,7 +1209,7 @@ struct smb2_oplock_break { struct smb2_lease_break { struct smb2_sync_hdr sync_hdr; __le16 StructureSize; /* Must be 44 */ - __le16 Reserved; + __le16 Epoch; __le32 Flags; __u8 LeaseKey[16]; __le32 CurrentLeaseState; diff --git a/fs/dcache.c b/fs/dcache.c index bc5755e3c3f355e57a6a4ecd3ad2d19e40f5b1a7..98363cb3d4b193228b62a71774a6c6895dca5c7e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -851,17 +851,19 @@ struct dentry *dget_parent(struct dentry *dentry) { int gotref; struct dentry *ret; + unsigned seq; /* * Do optimistic parent lookup without any * locking. */ rcu_read_lock(); + seq = raw_seqcount_begin(&dentry->d_seq); ret = READ_ONCE(dentry->d_parent); gotref = lockref_get_not_zero(&ret->d_lockref); rcu_read_unlock(); if (likely(gotref)) { - if (likely(ret == READ_ONCE(dentry->d_parent))) + if (!read_seqcount_retry(&dentry->d_seq, seq)) return ret; dput(ret); } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index f1261fa0af8a14357ea4bc21c91de4866ab5ac0e..244b87e4dfe7f4d826dc512c5d89deec2d313908 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -633,6 +633,9 @@ static int new_lockspace(const char *name, const char *cluster, wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); + /* let kobject handle freeing of ls if there's an error */ + do_unreg = 1; + ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); @@ -640,9 +643,6 @@ static int new_lockspace(const char *name, const char *cluster, goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); - /* let kobject handle freeing of ls if there's an error */ - do_unreg = 1; - /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) 
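
The dget_parent() hunk above pairs the lock-free read of d_parent with the dentry's d_seq counter, so a concurrent rename is detected and the reference is dropped and retried. A minimal sketch of the generic seqcount read-side loop (simplified; the real dget_parent() uses raw_seqcount_begin() together with a lockref and a locked fallback):

    #include <linux/seqlock.h>

    struct node {
            seqcount_t seq;
            struct node *parent;
    };

    /* Hypothetical example of an optimistic, seqcount-validated pointer read. */
    static struct node *read_parent(struct node *n)
    {
            struct node *p;
            unsigned int seq;

            do {
                    seq = read_seqcount_begin(&n->seq);     /* snapshot the write generation */
                    p = READ_ONCE(n->parent);               /* unlocked read */
            } while (read_seqcount_retry(&n->seq, seq));    /* retry if a writer ran */

            return p;
    }
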
Once it's done that, it tells us who the diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4760c2b7ea84eb1c15cac6c7849ac844d7ff2478..9a81b5621c6cac02a2e4ddeb39a500ab07fab131 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -223,8 +223,7 @@ struct eventpoll { struct file *file; /* used to optimize loop detection check */ - int visited; - struct list_head visited_list_link; + u64 gen; #ifdef CONFIG_NET_RX_BUSY_POLL /* used to track busy poll napi_id */ @@ -274,6 +273,8 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +static u64 loop_check_gen = 0; + /* Used to check for epoll file descriptor inclusion loops */ static struct nested_calls poll_loop_ncalls; @@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; -/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ -static LIST_HEAD(visited_list); - /* * List of files with newly added links, where we may need to limit the number * of emanating paths. Protected by the epmutex. @@ -1379,7 +1377,7 @@ static int reverse_path_check(void) static int ep_create_wakeup_source(struct epitem *epi) { - const char *name; + struct name_snapshot n; struct wakeup_source *ws; if (!epi->ep->ws) { @@ -1388,8 +1386,9 @@ static int ep_create_wakeup_source(struct epitem *epi) return -ENOMEM; } - name = epi->ffd.file->f_path.dentry->d_name.name; - ws = wakeup_source_register(NULL, name); + take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); + ws = wakeup_source_register(NULL, n.name); + release_dentry_name_snapshot(&n); if (!ws) return -ENOMEM; @@ -1451,6 +1450,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, RCU_INIT_POINTER(epi->ws, NULL); } + /* Add the current item to the list of active epoll hook for this file */ + spin_lock(&tfile->f_lock); + list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); + spin_unlock(&tfile->f_lock); + + /* + * Add the current item to the RB tree. All RB tree operations are + * protected by "mtx", and ep_insert() is called with "mtx" held. + */ + ep_rbtree_insert(ep, epi); + + /* now check if we've created too many backpaths */ + error = -EINVAL; + if (full_check && reverse_path_check()) + goto error_remove_epi; + /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); @@ -1473,22 +1488,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, if (epi->nwait < 0) goto error_unregister; - /* Add the current item to the list of active epoll hook for this file */ - spin_lock(&tfile->f_lock); - list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); - spin_unlock(&tfile->f_lock); - - /* - * Add the current item to the RB tree. All RB tree operations are - * protected by "mtx", and ep_insert() is called with "mtx" held. 
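
The eventpoll rework here replaces the per-instance visited flag and the global visited_list with a single 64-bit generation number: each loop check bumps loop_check_gen once, a node counts as visited when its stored gen matches, and nothing has to be walked and cleared afterwards. Roughly, as a generic traversal sketch (not the epoll code itself):

    #include <linux/types.h>

    /* Hypothetical example: generation-based "visited" marking for a traversal. */
    struct gnode {
            u64 gen;                /* last check that visited this node */
            int nchild;
            struct gnode **child;
    };

    static u64 loop_check_gen;      /* bumped once per whole check */

    static void visit(struct gnode *n)
    {
            int i;

            if (n->gen == loop_check_gen)
                    return;                 /* already seen during this check */
            n->gen = loop_check_gen;        /* mark visited; no cleanup pass needed */

            for (i = 0; i < n->nchild; i++)
                    visit(n->child[i]);
    }

    /* A full check then starts with loop_check_gen++ before visiting the root. */
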
- */ - ep_rbtree_insert(ep, epi); - - /* now check if we've created too many backpaths */ - error = -EINVAL; - if (full_check && reverse_path_check()) - goto error_remove_epi; - /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irq(&ep->wq.lock); @@ -1517,6 +1516,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, return 0; +error_unregister: + ep_unregister_pollwait(ep, epi); error_remove_epi: spin_lock(&tfile->f_lock); list_del_rcu(&epi->fllink); @@ -1524,9 +1525,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, rb_erase_cached(&epi->rbn, &ep->rbr); -error_unregister: - ep_unregister_pollwait(ep, epi); - /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist @@ -1870,13 +1868,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) struct epitem *epi; mutex_lock_nested(&ep->mtx, call_nests + 1); - ep->visited = 1; - list_add(&ep->visited_list_link, &visited_list); + ep->gen = loop_check_gen; for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); if (unlikely(is_file_epoll(epi->ffd.file))) { ep_tovisit = epi->ffd.file->private_data; - if (ep_tovisit->visited) + if (ep_tovisit->gen == loop_check_gen) continue; error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ep_loop_check_proc, epi->ffd.file, @@ -1892,9 +1889,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * not already there, and calling reverse_path_check() * during ep_insert(). */ - if (list_empty(&epi->ffd.file->f_tfile_llink)) - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (list_empty(&epi->ffd.file->f_tfile_llink)) { + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); + } } } mutex_unlock(&ep->mtx); @@ -1915,18 +1914,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) */ static int ep_loop_check(struct eventpoll *ep, struct file *file) { - int ret; - struct eventpoll *ep_cur, *ep_next; - - ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ep_loop_check_proc, file, ep, current); - /* clear visited list */ - list_for_each_entry_safe(ep_cur, ep_next, &visited_list, - visited_list_link) { - ep_cur->visited = 0; - list_del(&ep_cur->visited_list_link); - } - return ret; } static void clear_tfile_check_list(void) @@ -1938,6 +1927,7 @@ static void clear_tfile_check_list(void) file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); + fput(file); } INIT_LIST_HEAD(&tfile_check_list); } @@ -2087,19 +2077,20 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_lock_nested(&ep->mtx, 0); if (op == EPOLL_CTL_ADD) { if (!list_empty(&f.file->f_ep_links) || + ep->gen == loop_check_gen || is_file_epoll(tf.file)) { full_check = 1; mutex_unlock(&ep->mtx); mutex_lock(&epmutex); if (is_file_epoll(tf.file)) { error = -ELOOP; - if (ep_loop_check(ep, tf.file) != 0) { - clear_tfile_check_list(); + if (ep_loop_check(ep, tf.file) != 0) goto error_tgt_fput; - } - } else + } else { + get_file(tf.file); list_add(&tf.file->f_tfile_llink, &tfile_check_list); + } mutex_lock_nested(&ep->mtx, 0); if (is_file_epoll(tf.file)) { tep = tf.file->private_data; @@ -2123,8 +2114,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, error = ep_insert(ep, &epds, tf.file, 
fd, full_check); } else error = -EEXIST; - if (full_check) - clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) @@ -2147,8 +2136,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: - if (full_check) + if (full_check) { + clear_tfile_check_list(); + loop_check_gen++; mutex_unlock(&epmutex); + } fdput(tf); error_fput: diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 28b2609f25c1c9502a813c49eacf71d7a7c24256..d39d90c1b6709e23fc41355d332b6604332560cf 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); vm_fault_t ret; + bool write = (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); - if (vmf->flags & FAULT_FLAG_WRITE) { + if (write) { sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); } @@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); up_read(&ei->dax_sem); - if (vmf->flags & FAULT_FLAG_WRITE) + if (write) sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 5c3d7b7e49755ccbe22f65b607e8197d854df091..d8a03b1afbc332dd84c3f9f59d2c8621be8b54f5 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir) if (dir) le16_add_cpu(&desc->bg_used_dirs_count, -1); spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); + percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter); if (dir) percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); mark_buffer_dirty(bh); @@ -531,7 +532,7 @@ struct inode *ext2_new_inode(struct inode *dir, umode_t mode, goto fail; } - percpu_counter_add(&sbi->s_freeinodes_counter, -1); + percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index d203cc935ff833ef023cfc32035de7fe7ae7cf43..f22a89cdb40729789e7b3205093b807c33c25594 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, unsigned int count) { - struct ext4_system_zone *new_entry = NULL, *entry; + struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; - else { - if (start_blk + count > (entry->start_blk + - entry->count)) - entry->count = (start_blk + count - - entry->start_blk); - new_node = *n; - new_entry = rb_entry(new_node, struct ext4_system_zone, - node); - break; - } + else /* Unexpected overlap of system zones. 
*/ + return -EFSCORRUPTED; } - if (!new_entry) { - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_node = &new_entry->node; - - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &system_blks->root); - } + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_node = &new_entry->node; + + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); @@ -260,14 +250,6 @@ int ext4_setup_system_zone(struct super_block *sb) int flex_size = ext4_flex_bg_size(sbi); int ret; - if (!test_opt(sb, BLOCK_VALIDITY)) { - if (sbi->system_blks) - ext4_release_system_zone(sb); - return 0; - } - if (sbi->system_blks) - return 0; - system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 976b9c3ae9d794cbda45c0ca641c0dab52e310ff..cee152522c28acd8acb0a213ee545018cc0ff6f0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3886,6 +3886,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; + loff_t offset = iocb->ki_pos; + loff_t size = i_size_read(inode); + + if (offset >= size) + return 0; /* * Shared inode_lock is enough for us - it protects against concurrent @@ -5395,7 +5400,7 @@ static int ext4_do_update_inode(handle_t *handle, raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); - if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { + if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 8dd54a8a0361029eb552a57b210a591d0c112071..054cfdd007d6997051f24c6d3091656f65c6fe04 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, BUG_ON(buddy == NULL); k = mb_find_next_zero_bit(buddy, max, 0); - BUG_ON(k >= max); - + if (k >= max) { + ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, + "%d free clusters of order %d. 
But found 0", + grp->bb_counters[i], i); + ext4_mark_group_bitmap_corrupted(ac->ac_sb, + e4b->bd_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); + break; + } ac->ac_found++; ac->ac_b_ex.fe_len = 1 << i; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 87cd2268dcd51018c900278fb3a68f4f575b27ef..ef6635131294a142f1b45797814751bf6caf25cc 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1468,8 +1468,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, lblk, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, + buf_size, lblk, offset)) return -1; *res_dir = de; return 1; @@ -1939,7 +1939,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); - /* Split the existing block in the middle, size-wise */ + /* Ensure that neither split block is over half full */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { @@ -1949,8 +1949,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, size += map[i].size; move++; } - /* map index at which we will split */ - split = count - move; + /* + * map index at which we will split + * + * If the sum of active entries didn't exceed half the block size, just + * split it in half by count; each resulting block will have at least + * half the space free. + */ + if (i > 0) + split = count - move; + else + split = count/2; + hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", @@ -2579,7 +2589,7 @@ int ext4_generic_delete_entry(handle_t *handle, de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, - bh->b_data, bh->b_size, lblk, i)) + entry_buf, buf_size, lblk, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index eed9984acd7ad6c7279e5d2b9a3c9d5b88cbd724..9b9c539e8897b4da96e610b80c29f93cd6f42c4e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); -static void ext4_mark_recovery_complete(struct super_block *sb, +static int ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); -static void ext4_clear_journal_err(struct super_block *sb, - struct ext4_super_block *es); +static int ext4_clear_journal_err(struct super_block *sb, + struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -4629,11 +4629,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_set_resv_clusters(sb); - err = ext4_setup_system_zone(sb); - if (err) { - ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)", err); - goto failed_mount4a; + if (test_opt(sb, BLOCK_VALIDITY)) { + err = ext4_setup_system_zone(sb); + if (err) { + ext4_msg(sb, KERN_ERR, "failed to initialize system " + "zone (%d)", err); + goto failed_mount4a; + } } ext4_ext_init(sb); @@ -4701,7 +4703,9 @@ static int 
ext4_fill_super(struct super_block *sb, void *data, int silent) EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); - ext4_mark_recovery_complete(sb, es); + err = ext4_mark_recovery_complete(sb, es); + if (err) + goto failed_mount8; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@ -4744,10 +4748,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); -#endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -4889,7 +4891,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, struct inode *journal_inode; journal_t *journal; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; journal_inode = ext4_get_journal_inode(sb, journal_inum); if (!journal_inode) @@ -4919,7 +4922,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, struct ext4_super_block *es; struct block_device *bdev; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) @@ -5010,8 +5014,10 @@ static int ext4_load_journal(struct super_block *sb, dev_t journal_dev; int err = 0; int really_read_only; + int journal_dev_ro; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return -EFSCORRUPTED; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5021,7 +5027,31 @@ static int ext4_load_journal(struct super_block *sb, } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); - really_read_only = bdev_read_only(sb->s_bdev); + if (journal_inum && journal_dev) { + ext4_msg(sb, KERN_ERR, + "filesystem has both journal inode and journal device!"); + return -EINVAL; + } + + if (journal_inum) { + journal = ext4_get_journal(sb, journal_inum); + if (!journal) + return -EINVAL; + } else { + journal = ext4_get_dev_journal(sb, journal_dev); + if (!journal) + return -EINVAL; + } + + journal_dev_ro = bdev_read_only(journal->j_dev); + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; + + if (journal_dev_ro && !sb_rdonly(sb)) { + ext4_msg(sb, KERN_ERR, + "journal device read-only, try mounting with '-o ro'"); + err = -EROFS; + goto err_out; + } /* * Are we loading a blank journal or performing recovery after a @@ -5036,27 +5066,14 @@ static int ext4_load_journal(struct super_block *sb, ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed " "(try mounting with noload)"); - return -EROFS; + err = -EROFS; + goto err_out; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } - if (journal_inum && journal_dev) { - ext4_msg(sb, KERN_ERR, "filesystem has both journal " - "and inode journals!"); - return -EINVAL; - } - - if (journal_inum) { - if (!(journal = ext4_get_journal(sb, journal_inum))) - return -EINVAL; - } else { - if (!(journal = ext4_get_dev_journal(sb, journal_dev))) - return -EINVAL; - } - if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); @@ -5076,12 +5093,16 @@ static int ext4_load_journal(struct super_block *sb, if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); - jbd2_journal_destroy(journal); - return err; + goto err_out; } EXT4_SB(sb)->s_journal = journal; - ext4_clear_journal_err(sb, es); + err = 
ext4_clear_journal_err(sb, es); + if (err) { + EXT4_SB(sb)->s_journal = NULL; + jbd2_journal_destroy(journal); + return err; + } if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5092,6 +5113,10 @@ static int ext4_load_journal(struct super_block *sb, } return 0; + +err_out: + jbd2_journal_destroy(journal); + return err; } static int ext4_commit_super(struct super_block *sb, int sync) @@ -5103,13 +5128,6 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!sbh || block_device_ejected(sb)) return error; - /* - * The superblock bh should be mapped, but it might not be if the - * device was hot-removed. Not much we can do but fail the I/O. - */ - if (!buffer_mapped(sbh)) - return error; - /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock @@ -5177,26 +5195,32 @@ static int ext4_commit_super(struct super_block *sb, int sync) * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ -static void ext4_mark_recovery_complete(struct super_block *sb, - struct ext4_super_block *es) +static int ext4_mark_recovery_complete(struct super_block *sb, + struct ext4_super_block *es) { + int err; journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { - BUG_ON(journal != NULL); - return; + if (journal != NULL) { + ext4_error(sb, "Journal got removed while the fs was " + "mounted!"); + return -EFSCORRUPTED; + } + return 0; } jbd2_journal_lock_updates(journal); - if (jbd2_journal_flush(journal) < 0) + err = jbd2_journal_flush(journal); + if (err < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } - out: jbd2_journal_unlock_updates(journal); + return err; } /* @@ -5204,14 +5228,17 @@ static void ext4_mark_recovery_complete(struct super_block *sb, * has recorded an error from a previous lifetime, move that error to the * main filesystem now. */ -static void ext4_clear_journal_err(struct super_block *sb, +static int ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; - BUG_ON(!ext4_has_feature_journal(sb)); + if (!ext4_has_feature_journal(sb)) { + ext4_error(sb, "Journal got removed while the fs was mounted!"); + return -EFSCORRUPTED; + } journal = EXT4_SB(sb)->s_journal; @@ -5236,6 +5263,7 @@ static void ext4_clear_journal_err(struct super_block *sb, jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } + return 0; } /* @@ -5378,7 +5406,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long old_sb_flags; + unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; @@ -5421,6 +5449,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. 
Copy those selected flags from *flags to s_flags + */ + vfs_flags = SB_LAZYTIME | SB_I_VERSION; + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags); + if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -5474,9 +5510,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } - if (*flags & SB_LAZYTIME) - sb->s_flags |= SB_LAZYTIME; - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; @@ -5506,8 +5539,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); - if (sbi->s_journal) + if (sbi->s_journal) { + /* + * We let remount-ro finish even if marking fs + * as clean failed... + */ ext4_mark_recovery_complete(sb, es); + } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); } else { @@ -5555,8 +5593,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) * been changed by e2fsck since we originally mounted * the partition.) */ - if (sbi->s_journal) - ext4_clear_journal_err(sb, es); + if (sbi->s_journal) { + err = ext4_clear_journal_err(sb, es); + if (err) + goto restore_opts; + } sbi->s_mount_state = le16_to_cpu(es->s_state); err = ext4_setup_super(sb, es, 0); @@ -5586,7 +5627,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - ext4_setup_system_zone(sb); + /* + * Handle creation of system zone data early because it can fail. + * Releasing of existing data is done when we are sure remount will + * succeed. + */ + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + } + if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); if (err) @@ -5607,8 +5658,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); + + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from s_flags to *flags + */ + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags); - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. 
Opts: %s", orig_data); kfree(orig_data); return 0; @@ -5622,6 +5681,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index cd3be4108afe2af0129e56e5ee2d9561d8cd4d05..06bad3e5309a7dd683f9278aa32a039a7b1f3a4a 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3571,6 +3571,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, unsigned long align = offset | iov_iter_alignment(iter); struct block_device *bdev = inode->i_sb->s_bdev; + if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode)) + return 1; + if (align & blocksize_mask) { if (bdev) blkbits = blksize_bits(bdev_logical_block_size(bdev)); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index dbe2a3e6c71b0d614e9f43d53ef034411a8fc109..76f5930e6c6ece28a77d888be1138908d0ccc10e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3294,7 +3294,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page); +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, @@ -3755,7 +3755,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); int f2fs_write_inline_data(struct inode *inode, struct page *page); -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); +int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, struct page **res_page); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 5e7aaa1caaa7d6330d74c34ad9219ef714e3bdb3..e85a005c7b7fc9185474ff72511d2f100e2bb4de 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -270,7 +270,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) return 0; } -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) +int f2fs_recover_inline_data(struct inode *inode, struct page *npage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; @@ -292,7 +292,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) ri && (ri->i_inline & F2FS_INLINE_DATA)) { process_inline: ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_wait_on_page_writeback(ipage, NODE, true, true); @@ -305,21 +306,25 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) set_page_dirty(ipage); f2fs_put_page(ipage, 1); - return true; + return 1; } if (f2fs_has_inline_data(inode)) { ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if 
(IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_truncate_inline_inode(inode, ipage, 0); clear_inode_flag(inode, FI_INLINE_DATA); f2fs_put_page(ipage, 1); } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { - if (f2fs_truncate_blocks(inode, 0, false)) - return false; + int ret; + + ret = f2fs_truncate_blocks(inode, 0, false); + if (ret) + return ret; goto process_inline; } - return false; + return 0; } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 64417236132026dd2caf749fe199104c91abba77..adf4eedc193c8cf1aa34147e11c444889900cf93 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2575,7 +2575,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) return nr - nr_shrink; } -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; @@ -2583,7 +2583,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) struct f2fs_inode *ri; ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); - f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); ri = F2FS_INODE(page); if (ri->i_inline & F2FS_INLINE_XATTR) { @@ -2602,6 +2603,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) update_inode: f2fs_update_inode(inode, ipage); f2fs_put_page(ipage, 1); + return 0; } int f2fs_recover_xattr_data(struct inode *inode, struct page *page) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 0f1b3c19e95da938c3a645a219c6d50762eb57e1..1ad3d1fd0c35d4c637ded383366c6af9f6837d7b 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -554,7 +554,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, /* step 1: recover xattr */ if (IS_INODE(page)) { - f2fs_recover_inline_xattr(inode, page); + err = f2fs_recover_inline_xattr(inode, page); + if (err) + goto out; } else if (f2fs_has_xattr_block(ofs_of_node(page))) { err = f2fs_recover_xattr_data(inode, page); if (!err) @@ -563,8 +565,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* step 2: recover inline data */ - if (f2fs_recover_inline_data(inode, page)) + err = f2fs_recover_inline_data(inode, page); + if (err) { + if (err == 1) + err = 0; goto out; + } /* step 3: recover data indices */ start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 0f8499b7b86aa049b9b883f83f68fc4133b18da6..40544f8d99607c50baffa8042071133ef1618b4f 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1186,6 +1186,9 @@ static void f2fs_put_super(struct super_block *sb) int i; bool dropped; + /* unregister procfs/sysfs entries in advance to avoid race case */ + f2fs_unregister_sysfs(sbi); + f2fs_quota_off_umount(sb); /* prevent remaining shrinker jobs */ @@ -1251,8 +1254,6 @@ static void f2fs_put_super(struct super_block *sb) kvfree(sbi->ckpt); - f2fs_unregister_sysfs(sbi); - sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index b9f5194dbfd9efe84ec0cbd038d03e3efea66080..adbe8fc904e119b08c54d8aa5e078e351c35e4c6 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -832,7 +832,6 @@ static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || - page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | diff --git 
a/fs/gfs2/glops.c b/fs/gfs2/glops.c index c63bee9adb6a8e175d8e2cfc78abe197264acf5e..20f08f4391c9d9cfb881af85e1135bd9dd020113 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -89,6 +89,8 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) memset(&tr, 0, sizeof(tr)); INIT_LIST_HEAD(&tr.tr_buf); INIT_LIST_HEAD(&tr.tr_databuf); + INIT_LIST_HEAD(&tr.tr_ail1_list); + INIT_LIST_HEAD(&tr.tr_ail2_list); tr.tr_revokes = atomic_read(&gl->gl_ail_count); if (!tr.tr_revokes) diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index d968b5c5df217bea373cb485747694c57cf35eb4..a52b8b0dceeb98f1d819d39e90d6b0f69fc0bc41 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_trans_begin(sdp, blocks, 0); if (error) - goto fail_gunlock2; + goto fail_free_inode; if (blocks > 1) { ip->i_eattr = ip->i_no_addr + 1; @@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_gunlock2; + goto fail_free_inode; BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags)); @@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail_gunlock2; glock_set_object(ip->i_iopen_gh.gh_gl, ip); - gfs2_glock_put(io_gl); gfs2_set_iop(inode); insert_inode_hash(inode); @@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, mark_inode_dirty(inode); d_instantiate(dentry, inode); + /* After instantiate, errors should result in evict which will destroy + * both inode and iopen glocks properly. */ if (file) { file->f_mode |= FMODE_CREATED; error = finish_open(file, dentry, gfs2_open_common); @@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); return error; fail_gunlock3: glock_clear_object(io_gl, ip); gfs2_glock_dq_uninit(&ip->i_iopen_gh); - gfs2_glock_put(io_gl); fail_gunlock2: - if (io_gl) - clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); fail_free_inode: if (ip->i_gl) { glock_clear_object(ip->i_gl, ip); diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 06752db213d218911b79012e5c8ff47ca1c1d85c..74c1fe9c4a04216fbaeddacc72b7d817734f3c36 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -806,8 +806,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) tr = sdp->sd_log_tr; if (tr) { sdp->sd_log_tr = NULL; - INIT_LIST_HEAD(&tr->tr_ail1_list); - INIT_LIST_HEAD(&tr->tr_ail2_list); tr->tr_first = sdp->sd_log_flush_head; if (unlikely (state == SFS_FROZEN)) gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 812b5d5978b27f52672101a139535522dc712aaf..9313f7904e3429425b8cc7aa0dbc7a8d2a6408d5 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -56,6 +56,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, sizeof(u64)); INIT_LIST_HEAD(&tr->tr_databuf); INIT_LIST_HEAD(&tr->tr_buf); + INIT_LIST_HEAD(&tr->tr_ail1_list); + INIT_LIST_HEAD(&tr->tr_ail2_list); sb_start_intwrite(sdp->sd_vfs); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a15a22d209090995a6ddc6440279665fff8a6f57..8a50722bca29e648541511d4bb165a1925fdb083 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ 
-1370,8 +1370,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) int ret; /* Buffer got discarded which means block device got invalidated */ - if (!buffer_mapped(bh)) + if (!buffer_mapped(bh)) { + unlock_buffer(bh); return -EIO; + } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 43693b67971051e9515e1347667e2a07abb3d9b8..8c305593fb51fce584e46feac0e4eec505f5164b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1915,6 +1915,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { + J_ASSERT_JH(jh, jh->b_transaction != NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; jbd2_journal_put_journal_head(jh); @@ -2006,6 +2009,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, { struct buffer_head *head; struct buffer_head *bh; + bool has_write_io_error = false; int ret = 0; J_ASSERT(PageLocked(page)); @@ -2030,11 +2034,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, jbd_unlock_bh_state(bh); if (buffer_jbd(bh)) goto busy; + + /* + * If we free a metadata buffer which has been failed to + * write out, the jbd2 checkpoint procedure will not detect + * this failure and may lead to filesystem inconsistency + * after cleanup journal tail. + */ + if (buffer_write_io_error(bh)) { + pr_err("JBD2: Error while async write back metadata bh %llu.", + (unsigned long long)bh->b_blocknr); + has_write_io_error = true; + } } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: + if (has_write_io_error) + jbd2_journal_abort(journal, -EIO); + return ret; } @@ -2462,6 +2481,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + + /* + * b_transaction must be set, otherwise the new b_transaction won't + * be holding jh reference + */ + J_ASSERT_JH(jh, jh->b_transaction != NULL); + /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index f20cff1194bb665c4740d5af992af242105f0532..776493713153f97b2e12942726b55ca6bfa877a5 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) int ret; uint32_t now = JFFS2_NOW(); + mutex_lock(&f->sem); for (fd = f->dents ; fd; fd = fd->next) { - if (fd->ino) + if (fd->ino) { + mutex_unlock(&f->sem); return -ENOTEMPTY; + } } + mutex_unlock(&f->sem); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, f, now); diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 72e308c3e66b91fa9a915ebcb6c31f7f62c432bc..03fe8bac36cf4ed8fdde1347704e2d0a9b7ebc5a 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -155,6 +155,25 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) return 0; } +static bool minix_check_superblock(struct super_block *sb) +{ + struct minix_sb_info *sbi = minix_sb(sb); + + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + return false; + + /* + * s_max_size must not exceed the block mapping limitation. This check + * is only needed for V1 filesystems, since V2/V3 support an extra level + * of indirect blocks which places the limit well above U32_MAX. 
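
The V1 bound enforced by the check just below follows directly from the V1 block map: an inode addresses 7 direct blocks, one indirect block of 512 pointers and one double-indirect block of 512*512 pointers, each BLOCK_SIZE (1024) bytes, so no file larger than (7 + 512 + 512*512) * 1024 = 268,966,912 bytes (about 256 MiB) can be mapped. A small standalone sketch of the same arithmetic (illustrative only, names are made up):

    #include <linux/types.h>

    #define MINIX_V1_BLOCK_SIZE     1024

    /* Largest file a MINIX V1 inode can map: direct + indirect + double indirect. */
    static inline loff_t minix_v1_max_size(void)
    {
            return (loff_t)(7 + 512 + 512 * 512) * MINIX_V1_BLOCK_SIZE;     /* 268966912 */
    }

    static inline bool minix_v1_size_ok(loff_t maxbytes)
    {
            return maxbytes <= minix_v1_max_size();  /* reject superblocks claiming more */
    }
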
+ */ + if (sbi->s_version == MINIX_V1 && + sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE) + return false; + + return true; +} + static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; @@ -190,7 +209,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; - sbi->s_max_size = ms->s_max_size; + s->s_maxbytes = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; @@ -221,7 +240,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; - sbi->s_max_size = m3s->s_max_size; + s->s_maxbytes = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; @@ -233,11 +252,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) } else goto out_no_fs; + if (!minix_check_superblock(s)) + goto out_illegal_sb; + /* * Allocate the buffer map to keep the superblock small. */ - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) - goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) @@ -471,6 +491,13 @@ static struct inode *V1_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); @@ -504,6 +531,13 @@ static struct inode *V2_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c index 043c3fdbc8e7ebe5b0d8cf8d078322c4936b2581..446148792f4115e6fc10eb7c6007f4393c0003b3 100644 --- a/fs/minix/itree_common.c +++ b/fs/minix/itree_common.c @@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode, int n = 0; int i; int parent = minix_new_block(inode); + int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { @@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode, break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + minix_free_block(inode, nr); + err = -ENOMEM; + break; + } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; @@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode, bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); - return -ENOSPC; + return err; } static inline int splice_branch(struct inode *inode, diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index 046cc96ee7adb57aefc7110bae557e7a648334bd..1fed906042aa84e5affde1cd46d3a7ca4ba68404 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: 
block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, inode->i_sb->s_bdev); - } else if (block < 7) { + return 0; + } + if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) + return 0; + + if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index f7fc7eccccccde1c980be538be928d99db2c3b13..9d00f31a2d9d15b159a665d2b7bc6a8b03adb536 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); - } else if ((u64)block * (u64)sb->s_blocksize >= - minix_sb(sb)->s_max_size) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, sb->s_bdev); - } else if (block < DIRCOUNT) { + return 0; + } + if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) + return 0; + + if (block < DIRCOUNT) { offsets[n++] = block; } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT; diff --git a/fs/minix/minix.h b/fs/minix/minix.h index df081e8afcc3cba9339e3303a07731d1150c4d0b..168d45d3de73ea3badb2660f27cd73568e5db569 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -32,7 +32,6 @@ struct minix_sb_info { unsigned long s_zmap_blocks; unsigned long s_firstdatazone; unsigned long s_log_zone_size; - unsigned long s_max_size; int s_dirsize; int s_namelen; struct buffer_head ** s_imap; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4a5d9ab02cb9053a17566bab36d728910a66dfb8..51fefea94b91ff47f23053f4c5a4c1f418f30736 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); do { + if (entry->label) + entry->label->len = NFS4_MAXLABELLEN; + status = xdr_decode(desc, entry, &stream); if (status != 0) { if (status == -EAGAIN) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 05cb68ca1ba1a29267fe107ac3cefd00df9dccc3..b2a2ff3f22a46f0267628d211da530220df117d7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3129,8 +3129,10 @@ static int _nfs4_do_setattr(struct inode *inode, /* Servers should only apply open mode checks for file size changes */ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; - if (!truncate) + if (!truncate) { + nfs4_inode_make_writeable(inode); goto zero_stateid; + } if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ @@ -5603,8 +5605,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, return ret; if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) return -ENOENT; - if (buflen < label.len) - return -ERANGE; return 0; } @@ -7008,7 +7008,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, err = nfs4_set_lock_state(state, fl); if (err != 0) return err; - err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + if (err != -NFS4ERR_DELAY) + break; + ssleep(1); + } while (err == -NFS4ERR_DELAY); return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c4cf0192d7bb8c0896fd0aed70b5a03c041ed074..0a5cae8f8aff995ebddb01ae1d24e2d5300757a1 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4280,7 +4280,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, goto out_overflow; if (len < NFS4_MAXLABELLEN) { if (label) { - memcpy(label->label, p, len); + if (label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); + } label->len = len; label->pi = pi; label->lfs = lfs; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 5dae7c85d9b6ea680b0f3b24a286ca83fc5f7bd4..2c7d76b4c5e186701c63eef4d7cb376aa5da9517 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); /* - * nfs_page_group_lock - lock the head of the page group - * @req - request in group that is to be locked + * nfs_page_set_headlock - set the request PG_HEADLOCK + * @req: request that is to be locked * - * this lock must be held when traversing or modifying the page - * group list + * this lock must be held when modifying req->wb_head * * return 0 on success, < 0 on error */ int -nfs_page_group_lock(struct nfs_page *req) +nfs_page_set_headlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - - if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) + if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) return 0; - set_bit(PG_CONTENDED1, &head->wb_flags); + set_bit(PG_CONTENDED1, &req->wb_flags); smp_mb__after_atomic(); - return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, + return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, TASK_UNINTERRUPTIBLE); } /* - * nfs_page_group_unlock - unlock the head of the page group - * @req - request in group that is to be unlocked + * nfs_page_clear_headlock - clear the request PG_HEADLOCK + * @req: request that is to be locked */ void -nfs_page_group_unlock(struct nfs_page *req) +nfs_page_clear_headlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - smp_mb__before_atomic(); - clear_bit(PG_HEADLOCK, &head->wb_flags); + clear_bit(PG_HEADLOCK, &req->wb_flags); smp_mb__after_atomic(); - if (!test_bit(PG_CONTENDED1, &head->wb_flags)) + if (!test_bit(PG_CONTENDED1, &req->wb_flags)) return; - wake_up_bit(&head->wb_flags, PG_HEADLOCK); + wake_up_bit(&req->wb_flags, PG_HEADLOCK); +} + +/* + * nfs_page_group_lock - lock the head of the page group + * @req: request in group that is to be locked + 
* + * this lock must be held when traversing or modifying the page + * group list + * + * return 0 on success, < 0 on error + */ +int +nfs_page_group_lock(struct nfs_page *req) +{ + int ret; + + ret = nfs_page_set_headlock(req); + if (ret || req->wb_head == req) + return ret; + return nfs_page_set_headlock(req->wb_head); +} + +/* + * nfs_page_group_unlock - unlock the head of the page group + * @req: request in group that is to be unlocked + */ +void +nfs_page_group_unlock(struct nfs_page *req) +{ + if (req != req->wb_head) + nfs_page_clear_headlock(req->wb_head); + nfs_page_clear_headlock(req); } /* diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 66f699e18755c9d44c275188757665c469f60f15..2b9e139a299756eecd12d68f71ad7f88ecdfb68c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1181,31 +1181,27 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid, return status; } +static bool +pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo, + enum pnfs_iomode iomode, + u32 seq) +{ + struct pnfs_layout_range recall_range = { + .length = NFS4_MAX_UINT64, + .iomode = iomode, + }; + return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, + &recall_range, seq) != -EBUSY; +} + /* Return true if layoutreturn is needed */ static bool pnfs_layout_need_return(struct pnfs_layout_hdr *lo) { - struct pnfs_layout_segment *s; - enum pnfs_iomode iomode; - u32 seq; - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) return false; - - seq = lo->plh_return_seq; - iomode = lo->plh_return_iomode; - - /* Defer layoutreturn until all recalled lsegs are done */ - list_for_each_entry(s, &lo->plh_segs, pls_list) { - if (seq && pnfs_seqid_is_newer(s->pls_seq, seq)) - continue; - if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode) - continue; - if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) - return false; - } - - return true; + return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode, + lo->plh_return_seq); } static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) @@ -2291,16 +2287,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) return ERR_PTR(-EAGAIN); } -static int -mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg, - struct list_head *tmp_list) -{ - if (!mark_lseg_invalid(lseg, tmp_list)) - return 0; - pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg); - return 1; -} - /** * pnfs_mark_matching_lsegs_return - Free or return matching layout segments * @lo: pointer to layout header @@ -2337,7 +2323,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); - if (mark_lseg_invalid_or_return(lseg, tmp_list)) + if (mark_lseg_invalid(lseg, tmp_list)) continue; remaining++; set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 63d20308a9bb7e8b0e1b71104249d749e35b4a4d..d419d89b91f7ca2ef4b4601be6b92b630da45000 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -416,22 +416,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, destroy_list = (subreq->wb_this_page == old_head) ? 
NULL : subreq->wb_this_page; + /* Note: lock subreq in order to change subreq->wb_head */ + nfs_page_set_headlock(subreq); WARN_ON_ONCE(old_head != subreq->wb_head); /* make sure old group is not used */ subreq->wb_this_page = subreq; + subreq->wb_head = subreq; clear_bit(PG_REMOVE, &subreq->wb_flags); /* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { /* Check if we raced with nfs_page_group_destroy() */ - if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { + nfs_page_clear_headlock(subreq); nfs_free_request(subreq); + } else + nfs_page_clear_headlock(subreq); continue; } + nfs_page_clear_headlock(subreq); - subreq->wb_head = subreq; nfs_release_request(old_head); if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c24306af9758f38e70f9e74764b15547e184c83d..655079ae1dd1ff30df61f24ae533cf90187c79b5 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -471,6 +471,8 @@ find_any_file(struct nfs4_file *f) { struct file *ret; + if (!f) + return NULL; spin_lock(&f->fi_lock); ret = __nfs4_get_fd(f, O_RDWR); if (!ret) { @@ -1207,6 +1209,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) nfs4_free_stateowner(sop); } +static bool +nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) +{ + return list_empty(&stp->st_perfile); +} + static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; @@ -1274,9 +1282,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); + if (!unhash_ol_stateid(stp)) + return false; list_del_init(&stp->st_locks); nfs4_unhash_stid(&stp->st_stid); - return unhash_ol_stateid(stp); + return true; } static void release_lock_stateid(struct nfs4_ol_stateid *stp) @@ -1341,13 +1351,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { - bool unhashed; - lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); - unhashed = unhash_ol_stateid(stp); + if (!unhash_ol_stateid(stp)) + return false; release_open_stateid_locks(stp, reaplist); - return unhashed; + return true; } static void release_open_stateid(struct nfs4_ol_stateid *stp) @@ -5774,21 +5783,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, } static struct nfs4_ol_stateid * -find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) +find_lock_stateid(const struct nfs4_lockowner *lo, + const struct nfs4_ol_stateid *ost) { struct nfs4_ol_stateid *lst; - struct nfs4_client *clp = lo->lo_owner.so_client; - lockdep_assert_held(&clp->cl_lock); + lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); - list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { - if (lst->st_stid.sc_type != NFS4_LOCK_STID) - continue; - if (lst->st_stid.sc_file == fp) { - refcount_inc(&lst->st_stid.sc_count); - return lst; + /* If ost is not hashed, ost->st_locks will not be valid */ + if (!nfs4_ol_stateid_unhashed(ost)) + list_for_each_entry(lst, &ost->st_locks, st_locks) { + if (lst->st_stateowner == &lo->lo_owner) { + refcount_inc(&lst->st_stid.sc_count); + return lst; + } } - } return NULL; } @@ -5804,11 +5813,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); retry: spin_lock(&clp->cl_lock); - 
spin_lock(&fp->fi_lock); - retstp = find_lock_stateid(lo, fp); + if (nfs4_ol_stateid_unhashed(open_stp)) + goto out_close; + retstp = find_lock_stateid(lo, open_stp); if (retstp) - goto out_unlock; - + goto out_found; refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); @@ -5817,22 +5826,26 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; + spin_lock(&fp->fi_lock); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); -out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&clp->cl_lock); - if (retstp) { - if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { - nfs4_put_stid(&retstp->st_stid); - goto retry; - } - /* To keep mutex tracking happy */ - mutex_unlock(&stp->st_mutex); - stp = retstp; - } return stp; +out_found: + spin_unlock(&clp->cl_lock); + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { + nfs4_put_stid(&retstp->st_stid); + goto retry; + } + /* To keep mutex tracking happy */ + mutex_unlock(&stp->st_mutex); + return retstp; +out_close: + spin_unlock(&clp->cl_lock); + mutex_unlock(&stp->st_mutex); + return NULL; } static struct nfs4_ol_stateid * @@ -5847,7 +5860,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, *new = false; spin_lock(&clp->cl_lock); - lst = find_lock_stateid(lo, fi); + lst = find_lock_stateid(lo, ost); spin_unlock(&clp->cl_lock); if (lst != NULL) { if (nfsd4_lock_ol_stateid(lst) == nfs_ok) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index c141b06811a6c2bed176be8185f3242235ae1b3c..8149fb6f1f0d20c2a27260ae4a3764a3963b9806 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2867,9 +2867,15 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex) status = ocfs2_cluster_lock(osb, lockres, ex ? 
LKM_EXMODE : LKM_PRMODE, 0, 0); - if (status < 0) + if (status < 0) { mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); + } + return status; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 2319336183005df0f0c086dbf74ea99ba2da9be7..b9f62d29355ba861818dc90f1d2e5152a38adbc1 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -338,8 +338,8 @@ struct ocfs2_super spinlock_t osb_lock; u32 s_next_generation; unsigned long osb_flags; - s16 s_inode_steal_slot; - s16 s_meta_steal_slot; + u16 s_inode_steal_slot; + u16 s_meta_steal_slot; atomic_t s_num_inodes_stolen; atomic_t s_num_meta_stolen; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 15a89c513da2f2d85b9d0fe6480463910f40139c..0230b4ece0f0e99f4598279b9fe71afc1f21db02 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -893,9 +893,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) { spin_lock(&osb->osb_lock); if (type == INODE_ALLOC_SYSTEM_INODE) - osb->s_inode_steal_slot = slot; + osb->s_inode_steal_slot = (u16)slot; else if (type == EXTENT_ALLOC_SYSTEM_INODE) - osb->s_meta_steal_slot = slot; + osb->s_meta_steal_slot = (u16)slot; spin_unlock(&osb->osb_lock); } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 3415e0b09398fb50a813499f14596db104cd414c..2658d91c1f7b621269c5be683566e3ba3833d270 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -92,7 +92,7 @@ struct mount_options unsigned long commit_interval; unsigned long mount_opt; unsigned int atime_quantum; - signed short slot; + unsigned short slot; int localalloc_opt; unsigned int resv_level; int dir_resv_level; @@ -1384,7 +1384,7 @@ static int ocfs2_parse_options(struct super_block *sb, goto bail; } if (option) - mopt->slot = (s16)option; + mopt->slot = (u16)option; break; case Opt_commit: if (match_int(&args[0], &option)) { diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index dcd9c3163587c120b6f9fc57add528f0d3d2f88f..2197bf68f2786534ad01bb8586b6398d69d2e1f7 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -250,6 +250,9 @@ static int pstore_compress(const void *in, void *out, { int ret; + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION)) + return -EINVAL; + ret = crypto_comp_compress(tfm, in, inlen, out, &outlen); if (ret) { pr_err("crypto_comp_compress failed, ret = %d!\n", ret); @@ -647,7 +650,7 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *decompressed; - if (!record->compressed) + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. 
*/ diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 6419e6dacc394dd0e3290a4fefe6af35b06039b2..70387650436cf7085a01bdff6230beca926f452c 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1553,11 +1553,7 @@ void reiserfs_read_locked_inode(struct inode *inode, * set version 1, version 2 could be used too, because stat data * key is the same in both versions */ - key.version = KEY_FORMAT_3_5; - key.on_disk_key.k_dir_id = dirino; - key.on_disk_key.k_objectid = inode->i_ino; - key.on_disk_key.k_offset = 0; - key.on_disk_key.k_type = 0; + _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3); /* look for the object's stat data */ retval = search_item(inode->i_sb, &key, &path_to_sd); diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index ee216925a7090903f77282add2e42e504a40061a..0a397f179fd6ad16495dad44e8e2f9a000f28bec 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -665,6 +665,13 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, if (get_inode_sd_version(inode) == STAT_DATA_V1) return -EOPNOTSUPP; + /* + * priv_root needn't be initialized during mount so allow initial + * lookups to succeed. + */ + if (!REISERFS_SB(inode->i_sb)->priv_root) + return 0; + dentry = xattr_lookup(inode, name, XATTR_REPLACE); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index f86f51f99acebb05df6b2c404b7186e2b6d1194c..1dcadd22b440db5c75ea547dcf2eece7f7f6ef49 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c @@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, size_t limit; limit = romfs_maxsize(sb); - if (pos >= limit) + if (pos >= limit || buflen > limit - pos) return -EIO; - if (buflen > limit - pos) - buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) diff --git a/fs/signalfd.c b/fs/signalfd.c index 4fcd1498acf522d75cced6ea13d22b9d78c9b43b..3c40a3bf772ce217f3a3ff6af9ae397b24327501 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -313,9 +313,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, flags); } @@ -324,9 +325,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, 0); } diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 099bec94b82079f8fbd03f0fb74aee2180d1dab3..fab29f899f91314e3da2b111d924404870d84a03 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc) { - int err = -EINVAL, type, node_len; + int err = -EINVAL, type, node_len, dump_node = 1; uint32_t crc, node_crc, magic; const struct ubifs_ch *ch = buf; @@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, out_len: if (!quiet) ubifs_err(c, "bad node length %d", node_len); + if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ) + dump_node = 0; out: if (!quiet) { ubifs_err(c, "bad node at LEB 
%d:%d", lnum, offs); - ubifs_dump_node(c, buf); + if (dump_node) { + ubifs_dump_node(c, buf); + } else { + int safe_len = min3(node_len, c->leb_size - offs, + (int)UBIFS_MAX_DATA_NODE_SZ); + pr_err("\tprevent out-of-bounds memory access\n"); + pr_err("\ttruncated data node length %d\n", safe_len); + pr_err("\tcorrupted data node:\n"); + print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, + buf, safe_len, 0); + } dump_stack(); } return err; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index a4e07e910f1b4c7d679645c2d3941c8e19f75ee4..6e59e45d7bfbd185949f4600881d4ec4b09ffee6 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -100,7 +100,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct inode *inode; - if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg) + if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg) return ERR_PTR(-ESTALE); inode = ufs_iget(sb, ino); diff --git a/fs/xattr.c b/fs/xattr.c index fd7bafb7aceae5bee2fea41efc46b816bd5dc709..e1f041e9b3b01ac081f3c48c07f7f4460768a299 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -203,10 +203,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -215,15 +227,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -386,8 +423,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. 
+ */ int -vfs_removexattr(struct dentry *dentry, const char *name) +__vfs_removexattr_locked(struct dentry *dentry, const char *name, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -396,11 +443,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) if (error) return error; - inode_lock(inode); error = security_inode_removexattr(dentry, name); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_removexattr(dentry, name); if (!error) { @@ -409,12 +459,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) } out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); + +int +vfs_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_removexattr_locked(dentry, name, &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } + return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); - /* * Extended attribute SET operations */ diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 2652d00842d6ba8c6479f816765c87dfc622d1cb..efb586ea508bf074631227ac5e122aacfd42e089 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -551,8 +551,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); } xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; - hdr->count = 0; + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; + memset(hdr, 0, sizeof(*hdr)); hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -935,8 +935,10 @@ xfs_attr_shortform_verify( * struct xfs_attr_sf_entry has a variable length. * Check the fixed-offset parts of the structure are * within the data buffer. + * xfs_attr_sf_entry is defined with a 1-byte variable + * array at the end, so we must subtract that off. */ - if (((char *)sfep + sizeof(*sfep)) >= endp) + if (((char *)sfep + sizeof(*sfep) - 1) >= endp) return __this_address; /* Don't allow names with known bad length. 
*/ @@ -1436,7 +1438,9 @@ xfs_attr3_leaf_add_work( for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { if (ichdr->freemap[i].base == tmp) { ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); - ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); + ichdr->freemap[i].size -= + min_t(uint16_t, ichdr->freemap[i].size, + sizeof(xfs_attr_leaf_entry_t)); } } ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index); diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 0b7145fdb8aa13566d983aca64b3ae9f919b1b81..f35e1801f1c9087a412e8735e0fc6a1b4010ba8e 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -6130,7 +6130,7 @@ xfs_bmap_validate_extent( isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; - if (isrt) { + if (isrt && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbno(mp, irec->br_startblock)) return __this_address; if (!xfs_verify_rtbno(mp, endfsb)) diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index f1bb3434f51c79d17fbc951b6e108d8c33b6865e..01e99806b941f7816f7edc3dd25195911ede853b 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -214,6 +214,7 @@ __xfs_dir3_free_read( if (fa) { xfs_verifier_error(*bpp, -EFSCORRUPTED, fa); xfs_trans_brelse(tp, *bpp); + *bpp = NULL; return -EFSCORRUPTED; } diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index f99a7aefe4184dc86aa9bb8ee7e631526b57d711..2b3cc5a8ced1b47c16c002ce623d89fcc3aca5cf 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -197,6 +197,24 @@ xfs_calc_inode_chunk_res( return res; } +/* + * Per-extent log reservation for the btree changes involved in freeing or + * allocating a realtime extent. We have to be able to log as many rtbitmap + * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents, + * as well as the realtime summary block. + */ +unsigned int +xfs_rtalloc_log_count( + struct xfs_mount *mp, + unsigned int num_ops) +{ + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + unsigned int rtbmp_bytes; + + rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY; + return (howmany(rtbmp_bytes, blksz) + 1) * num_ops; +} + /* * Various log reservation values. * @@ -219,13 +237,21 @@ xfs_calc_inode_chunk_res( /* * In a write transaction we can allocate a maximum of 2 - * extents. This gives: + * extents. 
This gives (t1): * the inode getting the new extents: inode size * the inode's bmap btree: max depth * block size * the agfs of the ags from which the extents are allocated: 2 * sector * the superblock free block counter: sector size * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size - * And the bmap_finish transaction can free bmap blocks in a join: + * Or, if we're writing to a realtime file (t2): + * the inode getting the new extents: inode size + * the inode's bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 1 block + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join (t3): * the agfs of the ags containing the blocks: 2 * sector size * the agfls of the ags containing the blocks: 2 * sector size * the super block free block counter: sector size @@ -235,40 +261,72 @@ STATIC uint xfs_calc_write_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) + + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t2 = xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), - XFS_FSB_TO_B(mp, 1)) + + blksz) + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1)))); + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz); + } else { + t2 = 0; + } + + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* - * In truncating a file we free up to two extents at once. We can modify: + * In truncating a file we free up to two extents at once. 
We can modify (t1): * the inode being truncated: inode size * the inode's bmap btree: (max depth + 1) * block size - * And the bmap_finish transaction can free the blocks and bmap blocks: + * And the bmap_finish transaction can free the blocks and bmap blocks (t2): * the agf for each of the ags: 4 * sector size * the agfl for each of the ags: 4 * sector size * the super block to reflect the freed blocks: sector size * worst case split in allocation btrees per extent assuming 4 extents: * 4 exts * 2 trees * (2 * max depth - 1) * block size + * Or, if it's a realtime file (t3): + * the agf for each of the ags: 2 * sector size + * the agfl for each of the ags: 2 * sector size + * the super block to reflect the freed blocks: sector size + * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 2 exts * 1 block + * worst case split in allocation btrees per extent assuming 2 extents: + * 2 exts * 2 trees * (2 * max depth - 1) * block size */ STATIC uint xfs_calc_itruncate_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + - xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), - XFS_FSB_TO_B(mp, 1)))); + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz); + + t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + } else { + t3 = 0; + } + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index e1d11f3223e360d72bf7c3c175da8be56c3838e1..f84a58e523bc83a72d0f50cbf34c017cc290a206 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -53,9 +53,27 @@ xchk_setup_inode_bmap( */ if (S_ISREG(VFS_I(sc->ip)->i_mode) && sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { + struct address_space *mapping = VFS_I(sc->ip)->i_mapping; + inode_dio_wait(VFS_I(sc->ip)); - error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping); - if (error) + + /* + * Try to flush all incore state to disk before we examine the + * space mappings for the data fork. Leave accumulated errors + * in the mapping for the writer threads to consume. + * + * On ENOSPC or EIO writeback errors, we continue into the + * extent mapping checks because write failures do not + * necessarily imply anything about the correctness of the file + * metadata. The metadata and the file data could be on + * completely separate devices; a media failure might only + * affect a subset of the disk, etc. We can handle delalloc + * extents in the scrubber, so leaving them in memory is fine. 
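+		 * (The filemap_fdatawrite()/filemap_fdatawait_keep_errors() pair
+		 * used below is roughly filemap_write_and_wait() minus the step
+		 * that clears the mapping's error bits, which is what keeps those
+		 * errors visible to the writer threads.)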
+ */ + error = filemap_fdatawrite(mapping); + if (!error) + error = filemap_fdatawait_keep_errors(mapping); + if (error && (error != -ENOSPC && error != -EIO)) goto out; } diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c index cd3e4d768a18ce2d6466c973dfe891174cc523cf..33dfcba72c7a055f134dbb7010ee4bc584be7613 100644 --- a/fs/xfs/scrub/dir.c +++ b/fs/xfs/scrub/dir.c @@ -156,6 +156,9 @@ xchk_dir_actor( xname.type = XFS_DIR3_FT_UNKNOWN; error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); + /* ENOENT means the hash lookup failed and the dir is corrupt */ + if (error == -ENOENT) + error = -EFSCORRUPTED; if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, &error)) goto out; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 259549698ba7e607b105360f36ececb7e92fff51..f22acfd53850b4ca094866bec1b0a9f455fc2ec4 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1095,6 +1095,14 @@ __xfs_filemap_fault( return ret; } +static inline bool +xfs_is_write_fault( + struct vm_fault *vmf) +{ + return (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); +} + static vm_fault_t xfs_filemap_fault( struct vm_fault *vmf) @@ -1102,7 +1110,7 @@ xfs_filemap_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, IS_DAX(file_inode(vmf->vma->vm_file)) && - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1115,7 +1123,7 @@ xfs_filemap_huge_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, pe_size, - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 901f27ac94abc0c16a6a4065478e0a6a3791b551..56e9043bddc7162d3dc57235dfb0560fdd07322f 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -1127,7 +1127,7 @@ xfs_reclaim_inode( goto out_ifunlock; xfs_iunpin_wait(ip); } - if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { + if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); goto reclaim; } @@ -1214,6 +1214,7 @@ xfs_reclaim_inode( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); + ASSERT(xfs_inode_clean(ip)); __xfs_inode_free(ip); return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index f2d06e1e49066614d44613d0e46dec786d3077e7..cd81d6d9848d137a48f964e91dff9cb258967009 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1772,10 +1772,31 @@ xfs_inactive_ifree( return error; } + /* + * We do not hold the inode locked across the entire rolling transaction + * here. We only need to hold it for the first transaction that + * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the + * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode + * here breaks the relationship between cluster buffer invalidation and + * stale inode invalidation on cluster buffer item journal commit + * completion, and can result in leaving dirty stale inodes hanging + * around in memory. + * + * We have no need for serialising this inode operation against other + * operations - we freed the inode and hence reallocation is required + * and that will serialise on reallocating the space the deferops need + * to free. Hence we can unlock the inode on the first commit of + * the transaction rather than roll it right through the deferops. This + * avoids relogging the XFS_ISTALE inode. 
+ * + * We check that xfs_ifree() hasn't grown an internal transaction roll + * by asserting that the inode is still locked when it returns. + */ xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, 0); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); error = xfs_ifree(tp, ip); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (error) { /* * If we fail to free the inode, shut down. The cancel @@ -1788,7 +1809,6 @@ xfs_inactive_ifree( xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); } xfs_trans_cancel(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } @@ -1806,7 +1826,6 @@ xfs_inactive_ifree( xfs_notice(mp, "%s: xfs_trans_commit returned error %d", __func__, error); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 7bba551cbf90718bb510b46cc08f47b2f0dc7762..8b1b0862e86948a7bfcc00c23e1ed264c19d4554 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -2712,7 +2712,6 @@ xlog_state_do_callback( int funcdidcallbacks; /* flag: function did callbacks */ int repeats; /* for issuing console warnings if * looping too many times */ - int wake = 0; spin_lock(&log->l_icloglock); first_iclog = iclog = log->l_iclog; @@ -2914,11 +2913,9 @@ xlog_state_do_callback( #endif if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) - wake = 1; - spin_unlock(&log->l_icloglock); - - if (wake) wake_up_all(&log->l_flush_wait); + + spin_unlock(&log->l_icloglock); } @@ -4026,7 +4023,9 @@ xfs_log_force_umount( * item committed callback functions will do this again under lock to * avoid races. */ + spin_lock(&log->l_cilp->xc_push_lock); wake_up_all(&log->l_cilp->xc_commit_wait); + spin_unlock(&log->l_cilp->xc_push_lock); xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); #ifdef XFSERRORDEBUG diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 6622652a85a809ed86f7b764c74f26847c7dfcec..0b159a79a17c63b7f3c10f6845e625b88584710d 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1010,6 +1010,7 @@ xfs_reflink_remap_extent( xfs_filblks_t rlen; xfs_filblks_t unmap_len; xfs_off_t newlen; + int64_t qres; int error; unmap_len = irec->br_startoff + irec->br_blockcount - destoff; @@ -1032,13 +1033,19 @@ xfs_reflink_remap_extent( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); - /* If we're not just clearing space, then do we have enough quota? */ - if (real_extent) { - error = xfs_trans_reserve_quota_nblks(tp, ip, - irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS); - if (error) - goto out_cancel; - } + /* + * Reserve quota for this operation. We don't know if the first unmap + * in the dest file will cause a bmap btree split, so we always reserve + * at least enough blocks for that split. If the extent being mapped + * in is written, we need to reserve quota for that too. + */ + qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + if (real_extent) + qres += irec->br_blockcount; + error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0, + XFS_QMOPT_RES_REGBLKS); + if (error) + goto out_cancel; trace_xfs_reflink_remap(ip, irec->br_startoff, irec->br_blockcount, irec->br_startblock); diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h index e9f810fc6731746ea0aa1eebf328f352688479dc..43585850f1546a6dc74f144d274584706f53ca03 100644 --- a/fs/xfs/xfs_sysfs.h +++ b/fs/xfs/xfs_sysfs.h @@ -32,9 +32,11 @@ xfs_sysfs_init( struct xfs_kobj *parent_kobj, const char *name) { + struct kobject *parent; + + parent = parent_kobj ? 
&parent_kobj->kobject : NULL; init_completion(&kobj->complete); - return kobject_init_and_add(&kobj->kobject, ktype, - &parent_kobj->kobject, "%s", name); + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); } static inline void diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index c23257a26c2b81883e7012ff0b526927ec1f77f6..b8f05d5909b59f97e1b11854b90ab41cdf87f375 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -657,7 +657,7 @@ xfs_trans_dqresv( } } if (ninos > 0) { - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; + total_count = dqp->q_res_icount + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index f2ee2b643d533b24e92c0694babb6bdddab92bdb..6fcdf7e449fe75c12c4c00d784977cac1a66217b 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -39,6 +39,7 @@ xfs_trans_ijoin( ASSERT(iip->ili_lock_flags == 0); iip->ili_lock_flags = lock_flags; + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Get a log_item_desc to point at the new item. @@ -90,6 +91,7 @@ xfs_trans_log_inode( ASSERT(ip->i_itemp != NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Don't bother with i_lock for the I_DIRTY_TIME check here, as races diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f4447bcbe8c77adb382073ec0a7cad200af90d50..eff86b97310958e2e8fbd1e26357592f41c6ac95 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -309,6 +309,7 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ + . = ALIGN(8); \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ __end_ro_after_init = .; diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 3f1ef4450a7c2c9487888c2b3a3dda8e4a7d991e..775cd10c04b0d23aeef99b024e7de32516a03b78 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -72,7 +72,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h index 04614d491318802c5b8054c89531a120156cc780..4e41cfd2bdeea41af53d2d949a97fa4c0b4f2248 100644 --- a/include/linux/bluetooth-power.h +++ b/include/linux/bluetooth-power.h @@ -52,6 +52,8 @@ struct bluetooth_power_platform_data { int bt_gpio_sys_rst; /* Bluetooth sw_ctrl gpio */ int bt_gpio_sw_ctrl; + /* Wlan reset gpio */ + int wl_gpio_sys_rst; /* Bluetooth debug gpio */ int bt_gpio_debug; struct device *slim_dev; diff --git a/include/linux/bvec.h b/include/linux/bvec.h index fe7a22dd133b5a3c62833fd2539668d99f20b6f8..bc1f16e9f3f4d4ee5723d0b3a54670430856957e 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -119,11 +119,18 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? 
(void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 7112b8a1faaa7e22f8feb0f9e4246afd5a2f7563..9f511b3e769bc9dbf711f727e34717760877ba74 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -65,6 +65,12 @@ enum { * specified at mount time and thus is implemented here. */ CGRP_CPUSET_CLONE_CHILDREN, + + /* Control group has to be frozen. */ + CGRP_FREEZE, + + /* Cgroup is frozen. */ + CGRP_FROZEN, }; /* cgroup_root->flags */ @@ -317,6 +323,25 @@ struct cgroup_rstat_cpu { struct cgroup *updated_next; /* NULL iff not on the list */ }; +struct cgroup_freezer_state { + /* Should the cgroup and its descendants be frozen. */ + bool freeze; + + /* Should the cgroup actually be frozen? */ + int e_freeze; + + /* Fields below are protected by css_set_lock */ + + /* Number of frozen descendant cgroups */ + int nr_frozen_descendants; + + /* + * Number of tasks, which are counted as frozen: + * frozen, SIGSTOPped, and PTRACEd. + */ + int nr_frozen_tasks; +}; + struct cgroup { /* self css with NULL ->ss, points back to this cgroup */ struct cgroup_subsys_state self; @@ -453,6 +478,13 @@ struct cgroup { /* If there is block congestion on this cgroup. */ atomic_t congestion_count; + /* Used to store internal freezer state */ + struct cgroup_freezer_state freezer; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + /* ids of the ancestors at each level including self */ int ancestor_ids[]; }; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0e1f062f31faa3de3a8ed6b000cef0a7892c6e34..11003a115508520220292156af1cff213b728987 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -581,20 +581,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp, static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, int ancestor_level) { - struct cgroup *ptr; - if (cgrp->level < ancestor_level) return NULL; - - for (ptr = cgrp; - ptr && ptr->level > ancestor_level; - ptr = cgroup_parent(ptr)) - ; - - if (ptr && ptr->level == ancestor_level) - return ptr; - - return NULL; + while (cgrp && cgrp->level > ancestor_level) + cgrp = cgroup_parent(cgrp); + return cgrp; } /** @@ -903,4 +894,47 @@ static inline void put_cgroup_ns(struct cgroup_namespace *ns) free_cgroup_ns(ns); } +#ifdef CONFIG_CGROUPS + +void cgroup_enter_frozen(void); +void cgroup_leave_frozen(bool always_leave); +void cgroup_update_frozen(struct cgroup *cgrp); +void cgroup_freeze(struct cgroup *cgrp, bool freeze); +void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src, + struct cgroup *dst); +void cgroup_freezer_frozen_exit(struct task_struct *task); +static inline bool cgroup_task_freeze(struct task_struct *task) +{ + bool ret; + + if (task->flags & PF_KTHREAD) + return false; + + rcu_read_lock(); + ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags); + rcu_read_unlock(); + + return ret; +} + +static inline bool cgroup_task_frozen(struct task_struct *task) +{ + return task->frozen; +} + +#else /* !CONFIG_CGROUPS */ + +static inline void cgroup_enter_frozen(void) { } +static inline void cgroup_leave_frozen(bool always_leave) { } +static inline bool cgroup_task_freeze(struct task_struct *task) +{ + return false; +} +static inline bool cgroup_task_frozen(struct task_struct *task) +{ + return false; +} + +#endif /* 
!CONFIG_CGROUPS */ + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 767a51c8a37bc0e9603e5425ea6005cac5c8e74e..2d9e155fcc40b2ad090f8b36ecbce43a710306cf 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -903,6 +903,11 @@ struct device_link { struct rcu_head rcu_head; #endif bool supplier_preactivated; /* Owned by consumer probe. */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /** @@ -937,6 +942,11 @@ struct dev_links_info { struct list_head defer_hook; bool need_for_probe; enum dl_dev_state status; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /** diff --git a/include/linux/efi.h b/include/linux/efi.h index 6797811bf1e6c7e9f706a7d7362b22cdf94f4dd7..9a5d4b499271662b99041fcaf965e86b771bf5f1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -996,7 +996,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern void efi_free_boot_services(void); extern efi_status_t efi_query_variable_store(u32 attributes, diff --git a/include/linux/font.h b/include/linux/font.h index d6821769dd1e1433901314eea114d5d58189b59b..f85e70bd4793e00dc64fc4a4f714f212b8c1bc52 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -57,4 +57,17 @@ extern const struct font_desc *get_default_font(int xres, int yres, /* Max. length for the name of a predefined font */ #define MAX_FONT_NAME 32 +/* Extra word getters */ +#define REFCOUNT(fd) (((int *)(fd))[-1]) +#define FNTSIZE(fd) (((int *)(fd))[-2]) +#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +#define FNTSUM(fd) (((int *)(fd))[-4]) + +#define FONT_EXTRA_WORDS 4 + +struct font_data { + unsigned int extra[FONT_EXTRA_WORDS]; + const unsigned char data[]; +} __packed; + #endif /* _VIDEO_FONT_H */ diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index a497b9d97f5a94344e6e3bc0888c9566f9c099eb..becec51ec9e5e2b34bd05eb6fdb04a7faf1ccdb0 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -13,6 +13,7 @@ #define _LINUX_FWNODE_H_ #include +#include struct fwnode_operations; struct device; @@ -21,6 +22,11 @@ struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; struct device *dev; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /** diff --git a/include/linux/hid.h b/include/linux/hid.h index 8506637f070d17f730d847c7f492a0365703c325..a46b6832b3733d26026bb09af041348da8367a87 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -956,34 +956,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. 
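+ *
+ * Callers can therefore detect a rejected mapping with a check along these
+ * lines (illustrative sketch only, not taken from an in-tree driver):
+ *
+ *	hid_map_usage(hidinput, usage, bit, max, EV_KEY, code);
+ *	if (*bit == NULL)
+ *		return;		(the usage was not mapped)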
*/ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -997,7 +1012,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index c43e694fef7dde9d8eeb69f5f1d07a506bd29adf..35461d49d3aee749d24e230c263836e65608ea6e 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -428,6 +428,8 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, + CHANNELMSG_22 = 22, + CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index d03071732db4add88eadef01988c6585b23b5955..7c522fdd9ea735280a2cd04ada1507e766967ff6 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -53,6 +53,20 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -64,6 +78,7 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here. 
*/ unsigned int i2c_clock; unsigned int chip; + struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index b1b4411b4c6b8d8f867a399f40a66552ca5b76cd..539f4a84412f4196fc298d36c8066532a774fe3a 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -308,8 +308,8 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ diff --git a/include/linux/ipa.h b/include/linux/ipa.h index 9ad4cb0550b388cb687600629914586e5ea1e107..c102d2b23b7fbd47b7d74a914acb17a95636bc59 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -11,6 +11,7 @@ #include #include #include "linux/msm_gsi.h" +#include #define IPA_APPS_MAX_BW_IN_MBPS 700 #define IPA_BW_THRESHOLD_MAX 3 @@ -97,6 +98,8 @@ enum ipa_aggr_mode { enum ipa_dp_evt_type { IPA_RECEIVE, IPA_WRITE_DONE, + IPA_CLIENT_START_POLL, + IPA_CLIENT_COMP_NAPI, }; /** @@ -572,6 +575,60 @@ struct ipa_inform_wlan_bw { typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt, void *data); +/** + * struct ipa_connect_params - low-level client connect input parameters. Either + * client allocates the data and desc FIFO and specifies that in data+desc OR + * specifies sizes and pipe_mem pref and IPA does the allocation. + * + * @ipa_ep_cfg: IPA EP configuration + * @client: type of "client" + * @client_bam_hdl: client SPS handle + * @client_ep_idx: client PER EP index + * @priv: callback cookie + * @notify: callback + * priv - callback cookie evt - type of event data - data relevant + * to event. May not be valid. See event_type enum for valid + * cases. 
+ * @desc_fifo_sz: size of desc FIFO + * @data_fifo_sz: size of data FIFO + * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback + * to sys mem if pipe mem alloc fails + * @desc: desc FIFO meta-data when client has allocated it + * @data: data FIFO meta-data when client has allocated it + * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + */ +struct ipa_connect_params { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + unsigned long client_bam_hdl; + u32 client_ep_idx; + void *priv; + ipa_notify_cb notify; + u32 desc_fifo_sz; + u32 data_fifo_sz; + bool pipe_mem_preferred; + struct sps_mem_buffer desc; + struct sps_mem_buffer data; + bool skip_ep_cfg; + bool keep_ipa_awake; +}; + +/** + * struct ipa_sps_params - SPS related output parameters resulting from + * low/high level client connect + * @ipa_bam_hdl: IPA SPS handle + * @ipa_ep_idx: IPA PER EP index + * @desc: desc FIFO meta-data + * @data: data FIFO meta-data + */ +struct ipa_sps_params { + unsigned long ipa_bam_hdl; + u32 ipa_ep_idx; + struct sps_mem_buffer desc; + struct sps_mem_buffer data; +}; /** * struct ipa_tx_intf - interface tx properties @@ -637,6 +694,7 @@ struct ipa_sys_connect_params { bool skip_ep_cfg; bool keep_ipa_awake; struct napi_struct *napi_obj; + bool napi_enabled; bool recycle_enabled; }; @@ -848,12 +906,15 @@ struct ipa_rx_page_data { */ enum ipa_irq_type { IPA_BAD_SNOC_ACCESS_IRQ, + IPA_EOT_COAL_IRQ, IPA_UC_IRQ_0, IPA_UC_IRQ_1, IPA_UC_IRQ_2, IPA_UC_IRQ_3, IPA_UC_IN_Q_NOT_EMPTY_IRQ, IPA_UC_RX_CMD_Q_NOT_FULL_IRQ, + IPA_UC_TX_CMD_Q_NOT_FULL_IRQ, + IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ, IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ, IPA_RX_ERR_IRQ, IPA_DEAGGR_ERR_IRQ, @@ -862,6 +923,8 @@ enum ipa_irq_type { IPA_PROC_ERR_IRQ, IPA_TX_SUSPEND_IRQ, IPA_TX_HOLB_DROP_IRQ, + IPA_BAM_IDLE_IRQ, + IPA_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ, IPA_BAM_GSI_IDLE_IRQ, IPA_PIPE_YELLOW_MARKER_BELOW_IRQ, IPA_PIPE_RED_MARKER_BELOW_IRQ, @@ -1306,6 +1369,13 @@ struct ipa_smmu_out_params { #if defined CONFIG_IPA || defined CONFIG_IPA3 +/* + * Connect / Disconnect + */ +int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps, + u32 *clnt_hdl); +int ipa_disconnect(u32 clnt_hdl); + /* * Resume / Suspend */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 2e9da0eda9d1110675fc283357a3796a349de754..62aa2877d7f35dbd118805a979476b98f6f7410c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -210,6 +210,8 @@ struct irq_data { * IRQD_CAN_RESERVE - Can use reservation mode * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * required + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. 
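
For the IRQD_AFFINITY_ON_ACTIVATE flag documented just above, a minimal sketch of the intended opt-in (hypothetical irqchip code, not part of this patch; it uses the irqd_set_affinity_on_activate() helper added in the hunk that follows):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    /* Illustrative only: an irqchip whose hardware cannot be programmed
     * before the interrupt is activated opts in, so the core only caches
     * a requested affinity until activation instead of calling
     * irq_chip::irq_set_affinity() on a deactivated interrupt.
     */
    static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                    unsigned int nr_irqs, void *arg)
    {
            struct irq_data *d = irq_domain_get_irq_data(domain, virq);

            /* ... hardware-specific allocation ... */
            irqd_set_affinity_on_activate(d);
            return 0;
    }
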
*/ enum { IRQD_TRIGGER_MASK = 0xf, @@ -233,6 +235,7 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -407,6 +410,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d) return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 082d1d2a5216977262d3a1817b30a9131efe873a..dc9a2eecc8b800cd6888f5e4594df4925c9c688f 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -15,6 +15,7 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); +extern void khugepaged_min_free_kbytes_update(void); #define khugepaged_enabled() \ (transparent_hugepage_flags & \ @@ -73,6 +74,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, { return 0; } + +static inline void khugepaged_min_free_kbytes_update(void) +{ +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index ed1453c15041dc838af4093f0bfc886dbfc5f7b8..3d076aca7ac2a86e1773801b921ef62956a5e45b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -439,6 +439,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -502,6 +503,7 @@ enum hsm_task_states { }; enum ata_completion_errors { + AC_ERR_OK = 0, /* no error */ AC_ERR_DEV = (1 << 0), /* device reported error */ AC_ERR_HSM = (1 << 1), /* host state machine violation */ AC_ERR_TIMEOUT = (1 << 2), /* timeout */ @@ -911,9 +913,9 @@ struct ata_port_operations { /* * Command execution */ - int (*qc_defer)(struct ata_queued_cmd *qc); - int (*check_atapi_dma)(struct ata_queued_cmd *qc); - void (*qc_prep)(struct ata_queued_cmd *qc); + int (*qc_defer)(struct ata_queued_cmd *qc); + int (*check_atapi_dma)(struct ata_queued_cmd *qc); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); unsigned int (*qc_issue)(struct ata_queued_cmd *qc); bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); @@ -1180,7 +1182,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); extern const char *ata_mode_string(unsigned long xfer_mask); extern unsigned long ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); -extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); @@ -1915,9 +1917,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; .sg_tablesize = 
LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY -extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); -extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); diff --git a/include/linux/log2.h b/include/linux/log2.h index 01d6ba6d8647f456a420bc7b6b897baf61c6c3ca..60d424c92d3a670da56653188e3a49a196b485e7 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -177,7 +177,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ diff --git a/include/linux/mm.h b/include/linux/mm.h index c2d62d2834cfbda92b8f9505fcdfc76a35ec21f5..803c7e7589e2f046f09de9f8de2612068220effa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2347,7 +2347,7 @@ static inline void zero_resv_unavail(void) {} extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum memmap_context, struct vmem_altmap *); + enum meminit_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern void update_kswapd_threads(void); extern int __meminit init_per_zone_wmark_min(void); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index b3a1b292ba34a0fba0dd00a8ef970f044497853c..5a73f0c6e41da4476e1327dad2b368920c395dbb 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -243,7 +243,7 @@ struct mmc_queue_req; * MMC Physical partitions */ struct mmc_part { - unsigned int size; /* partition size (in bytes) */ + u64 size; /* partition size (in bytes) */ unsigned int part_cfg; /* partition type */ char name[MAX_MMC_PART_NAME_LEN]; bool force_ro; /* to make boot parts RO by default */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b84635a886bd51772f3b5df491022cfe52ea2e6c..d24748b61e8e85eefe919030492e084e3df4fc26 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -793,10 +793,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx); -enum memmap_context { - MEMMAP_EARLY, - MEMMAP_HOTPLUG, +/* + * Memory initialization context, use to differentiate memory added by + * the platform statically or via memory hotplug interface. 
+ */ +enum meminit_context { + MEMINIT_EARLY, + MEMINIT_HOTPLUG, }; + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size); diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa822d3a484f6f42f840ad2c7d5dc760..625f491b95de87904892dc52e6e5bf5bba8a57fa 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index cf09ab37b45b7d5990c388799b7f502d500cd182..e713476ff29db29df6cc79356d0b07f4a22014e8 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index ad69430fd0eb5a9123727e2054682971de3feca3..5162fc1533c2f20229d53659500dfefce8305466 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* diff --git a/include/linux/node.h b/include/linux/node.h index 708939bae9aa89d665a236e6aeb1974ec3c104cb..a79ec4492650c729a8e78ef35bb7c24e3ea33fd5 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -32,11 +32,13 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -extern int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn); +int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else static inline int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn) + unsigned long end_pfn, + enum meminit_context context) { return 0; } @@ -61,7 +63,8 @@ static inline int register_one_node(int nid) if (error) return error; /* link memory sections under this node */ - error = link_mem_sections(nid, start_pfn, end_pfn); + error = link_mem_sections(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; diff --git a/include/linux/pci.h b/include/linux/pci.h index 01dff69a3a215888b6169aff9d2820052fd91413..cf61a019bb7611da05f8b5e1888cbc738fcf21af 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1158,7 +1158,6 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 
-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 2122468ad86a4d2d9e5903eecea583273bb6c92d..e8727a300478cf195639344a19b0072af1c5b588 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -362,6 +362,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CP_ILIM, POWER_SUPPLY_PROP_IRQ_STATUS, POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, + POWER_SUPPLY_PROP_CC_TOGGLE_ENABLE, POWER_SUPPLY_PROP_FG_TYPE, POWER_SUPPLY_PROP_CHARGER_STATUS, /* Local extensions of type int64_t */ diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 0000000000000000000000000000000000000000..aa16e6468f91e79e1f31e2f49c0ca96950600188 --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. + */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include +#include + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. 
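
A small usage sketch for the prandom_u32_max() helper documented above (illustrative kernel code, not part of this patch), showing the random-array-index case the kernel-doc mentions:

    #include <linux/kernel.h>
    #include <linux/prandom.h>

    static const char * const greetings[] = { "hi", "hello", "hey" };

    static const char *random_greeting(void)
    {
            /* prandom_u32_max(n) returns a value in [0, n), so it can be
             * used directly as an array index.
             */
            return greetings[prandom_u32_max(ARRAY_SIZE(greetings))];
    }
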
*/ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/random.h b/include/linux/random.h index 012cfeeaa1527d28b8c0fa1ca02453c258dfec5f..04e4ee93cef00a5af725d58fa5ca72fdde762f36 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -108,61 +108,12 @@ declare_get_random_var_wait(long) unsigned long randomize_page(unsigned long start, unsigned long range); -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void prandom_reseed_late(void); - -struct rnd_state { - __u32 s1, s2, s3, s4; -}; - -u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); - -#define prandom_init_once(pcpu_state) \ - DO_ONCE(prandom_seed_full_state, (pcpu_state)) - -/** - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) - * @ep_ro: right open interval endpoint - * - * Returns a pseudo-random number that is in interval [0, ep_ro). Note - * that the result depends on PRNG being well distributed in [0, ~0U] - * u32 space. Here we use maximally equidistributed combined Tausworthe - * generator, that is, prandom_u32(). This is useful when requesting a - * random index of an array containing ep_ro elements, for example. - * - * Returns: pseudo-random number in interval [0, ep_ro) - */ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return (u32)(((u64) prandom_u32() * ep_ro) >> 32); -} - /* - * Handle minimum values for seeds + * This is designed to be standalone for just prandom + * users, but for now we include it from + * for legacy reasons. */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} - -/** - * prandom_seed_state - set seed for prandom_u32_state(). - * @state: pointer to state structure to receive the seed. - * @seed: arbitrary 64-bit value to use as a seed. - */ -static inline void prandom_seed_state(struct rnd_state *state, u64 seed) -{ - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; - - state->s1 = __seed(i, 2U); - state->s2 = __seed(i, 8U); - state->s3 = __seed(i, 16U); - state->s4 = __seed(i, 128U); -} +#include #ifdef CONFIG_ARCH_RANDOM # include @@ -185,10 +136,4 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v) } #endif -/* Pseudo random number generator from numerical recipes. 
*/ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 45359686e27b281a66b44285cc40be372baf6c66..47d35cbf21baa3ad696b8255c0456c1f002d850f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -885,6 +885,9 @@ struct task_struct { #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; +#endif +#ifdef CONFIG_SCHED_TUNE + int stune_idx; #endif struct sched_dl_entity dl; @@ -1475,6 +1478,8 @@ struct task_struct { /* Used by LSM modules for access restriction: */ void *security; #endif + /* task is frozen/stopped (used by the cgroup freezer) */ + ANDROID_KABI_USE(1, unsigned frozen:1); /* * New fields for task_struct should be added above here, so that diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h index 98228bd48aeea29eb1d456586b58b17ee2aef60a..fa067de9f1a94843f7402f2fd258d8b6339b59f0 100644 --- a/include/linux/sched/jobctl.h +++ b/include/linux/sched/jobctl.h @@ -18,6 +18,7 @@ struct task_struct; #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ +#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) @@ -26,6 +27,7 @@ struct task_struct; #define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) +#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index bcf4cf26b8c8950e248ad834402df1d48490f34d..a42a29952889c008876fce06dc40a431dcebc905 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that, writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. + * * seqcount_t seq; * bool X = true, Y = false; * @@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5fb778155f61a22471225eeab12d3a02c703fd2f..667eb126fadec8ef98538625b86d6733f803559c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1688,6 +1688,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +/** + * skb_queue_len_lockless - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. 
+ * This variant can be used in lockless contexts. + */ +static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) +{ + return READ_ONCE(list_->qlen); +} + /** * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head * @list: queue to initialize @@ -1895,7 +1907,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next, *prev; - list->qlen--; + WRITE_ONCE(list->qlen, list->qlen - 1); next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; @@ -3014,8 +3026,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error if @free_on_error is true. */ -static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, - bool free_on_error) +static inline int __must_check __skb_put_padto(struct sk_buff *skb, + unsigned int len, + bool free_on_error) { unsigned int size = skb->len; @@ -3038,7 +3051,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) { return __skb_put_padto(skb, len, true); } diff --git a/include/linux/spi/qcom-spi.h b/include/linux/spi/qcom-spi.h index 1888fe509ee30333f99908632d84685a0fd034b6..3d8a8f6896bf3bf626c098ab213d25df073bc001 100644 --- a/include/linux/spi/qcom-spi.h +++ b/include/linux/spi/qcom-spi.h @@ -35,7 +35,10 @@ * @bam_producer_pipe_index BAM producer pipe * @rt_priority true if RT thread * @use_pinctrl true if pinctrl library is used - * @is_shared true when qup is shared between ee's + * @is_shared true when qup is shared between ee's and client driver is not + in control of spi pm_runtime_get_sync/put_sync. + * @shared_ee true when qup is shared between ee's and client driver is in + control of spi pm_runtime_get_sync/put_sync. */ struct msm_spi_platform_data { u32 max_clock_speed; @@ -54,4 +57,5 @@ struct msm_spi_platform_data { bool rt_priority; bool use_pinctrl; bool is_shared; + bool shared_ee; }; diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 7acb953298a73118028ae04052fab9249529380f..84ff2844df2a8bc240b7186b7258fbef20c62940 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,6 +57,7 @@ struct tk_read_base { * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds + * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. @@ -84,6 +85,9 @@ struct tk_read_base { * * wall_to_monotonic is no longer the boot time, getboottime must be * used instead. + * + * @monotonic_to_boottime is a timespec64 representation of @offs_boot to + * accelerate the VDSO update for CLOCK_BOOTTIME. 
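
Since __skb_put_padto()/skb_put_padto() earlier in this hunk are now __must_check, callers have to handle the failure case explicitly; a minimal sketch of the usual transmit-path pattern (hypothetical driver code, not part of this patch):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* skb_put_padto() frees the skb on failure, so the driver must
             * not touch it again and simply reports the frame as consumed.
             */
            if (skb_put_padto(skb, ETH_ZLEN))
                    return NETDEV_TX_OK;

            /* ... hand the padded frame to the hardware ... */
            return NETDEV_TX_OK;
    }
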
*/ struct timekeeper { struct tk_read_base tkr_mono; @@ -99,6 +103,7 @@ struct timekeeper { u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; + struct timespec64 monotonic_to_boot; /* The following members are for timekeeping internal use */ u64 cycle_interval; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index e9de8ad0bad748e9d74e0af0d851568f6378be5c..444aa73037f1906c90a211454b9bcd9b79812349 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -364,7 +364,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) #else /* * tracepoint_string() is used to save the string address for userspace diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 249ceefd14ccd3177a6f8d8f3d986083fa60d34a..1a5b23af20b39b1008f5cd8203ed38b414c3b60e 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -242,6 +242,17 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to, extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); +/* + * probe_user_read(): safely attempt to read from a location in user space + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long probe_user_read(void *dst, const void __user *src, size_t size); + /* * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to @@ -254,7 +265,22 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size); extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +/* + * probe_user_write(): safely attempt to write to a location in user space + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
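
A short sketch of the probe_user_read()/probe_user_write() helpers declared above (illustrative, not part of this patch): they let contexts that must never fault, such as tracing handlers, touch user memory and get -EFAULT back instead of an oops.

    #include <linux/uaccess.h>

    /* Hypothetical helper: peek at a u32 in user space without risking a
     * fault-induced crash; returns 0 on success or -EFAULT.
     */
    static long peek_user_u32(const void __user *uaddr, u32 *val)
    {
            return probe_user_read(val, uaddr, sizeof(*val));
    }
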
+ */ +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count); +extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index e223e2632edddda608c58210652b3dfe81fa2d98..8b8d13f01caee0a8815f11abd5988ec7886b8a67 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -149,7 +149,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk, void virtio_transport_destruct(struct vsock_sock *vsk); -void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt); +void virtio_transport_recv_pkt(struct virtio_transport *t, + struct virtio_vsock_pkt *pkt); void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt); void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt); u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 30f25e1ac57161986b8926673ac04b9562665784..e704feb938e769ce1a46db68391d83eebe8d7702 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -54,8 +54,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 710900d72562bd59dea902416a52cc1d95b0ba2b..ae614b57420f058b7240a05231aeba7e2a26032e 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -317,6 +317,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cc2d0c3b475b5d192a5b3e71b1ce6426514a4b63..3195728095752ab214772b2be7c89d31a7db50da 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1287,16 +1287,34 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) conn->security_cfm_cb(conn, status); } -static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; + __u8 encrypt; + + if (conn->state == BT_CONFIG) { + if (!status) + conn->state = BT_CONNECTED; - if (conn->sec_level == BT_SECURITY_SDP) - conn->sec_level = 
BT_SECURITY_LOW; + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + return; + } - if (conn->pending_sec_level > conn->sec_level) - conn->sec_level = conn->pending_sec_level; + if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + encrypt = 0x00; + else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) + encrypt = 0x02; + else + encrypt = 0x01; + + if (!status) { + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; + + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 0697fd41308777a4801dcf8728f2ff5aa0210804..21dbd38f724d4d2fa36bf5ad29ad4d39038dc531 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -619,6 +619,8 @@ struct l2cap_ops { struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); + int (*filter) (struct l2cap_chan * chan, + struct sk_buff *skb); }; struct l2cap_conn { diff --git a/include/net/cnss2.h b/include/net/cnss2.h index d928451f33c654feca1463a08d9c57c0b5fca7f0..52202fdafdd034984f37fec1b14cdfd7ffd42713 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -240,5 +240,5 @@ extern int cnss_athdiag_write(struct device *dev, uint32_t offset, uint32_t mem_type, uint32_t data_len, uint8_t *input); extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode); - +extern int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed); #endif /* _NET_CNSS2_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 371b3b45fd5c9696c319b4dc433d0522fbc5ce42..fc9d6e37552d32f633181fbb22801edcfb3710dd 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -139,8 +139,8 @@ struct inet_connection_sock { } icsk_mtup; u32 icsk_user_timeout; - u64 icsk_ca_priv[88 / sizeof(u64)]; -#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64)) + u64 icsk_ca_priv[104 / sizeof(u64)]; +#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64)) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ @@ -313,5 +313,9 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); +/* update the fast reuse flag when adding a socket */ +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk); + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/ip.h b/include/net/ip.h index d584d025f229495ba4eff995c4a968d844b048cd..00f3e910e8f899a05572493f0839bbb20fec9213 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -402,12 +402,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { struct net *net = dev_net(dst->dev); + unsigned int mtu; if (net->ipv4.sysctl_ip_fwd_use_pmtu || ip_mtu_locked(dst) || !forwarding) return dst_mtu(dst); + /* 'forwarding = true' case should always honour route mtu */ + mtu = dst_metric_raw(dst, RTAX_MTU); + if (mtu) + return mtu; + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); } diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index af0ede9ad4d0bf5ab74c217af2b5a782db96a3e3..c31e54a41b5c4ca52662f095986f985b9ee1a5a0 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1614,18 +1614,16 @@ static inline void 
ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ -/* Really using conntrack? */ -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, - struct sk_buff *skb) +/* Using old conntrack that can not be redirected to another real server? */ +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; - if (!(cp->flags & IP_VS_CONN_F_NFCT)) - return false; ct = nf_ct_get(skb, &ctinfo); - if (ct) + if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 024636c31adcfecfa3ec61effca202b7a4eaaa0a..93253ba1eeac3df071a130de2f75fa28d48db7ba 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -130,6 +130,8 @@ static inline u8 nft_reg_load8(u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } diff --git a/include/net/sock.h b/include/net/sock.h index c2f25f1bf7a948de673ff2a12cf8cde0fa7a80ec..688256d3478873959391c3cbcfabebf465b47721 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -855,6 +855,8 @@ static inline int sk_memalloc_socks(void) { return static_branch_unlikely(&memalloc_socks_key); } + +void __receive_sock(struct file *file); #else static inline int sk_memalloc_socks(void) @@ -862,6 +864,8 @@ static inline int sk_memalloc_socks(void) return 0; } +static inline void __receive_sock(struct file *file) +{ } #endif static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) @@ -906,11 +910,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) skb_dst_force(skb); if (!sk->sk_backlog.tail) - sk->sk_backlog.head = skb; + WRITE_ONCE(sk->sk_backlog.head, skb); else sk->sk_backlog.tail->next = skb; - sk->sk_backlog.tail = skb; + WRITE_ONCE(sk->sk_backlog.tail, skb); skb->next = NULL; } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f087c8d125b8f1e3396247c9db4fc687824cbb5e..fe8bed557691a81de11c4604d0cd615deb733675 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1016,7 +1016,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -1028,7 +1028,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } @@ -1873,21 +1873,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es static inline int xfrm_replay_clone(struct xfrm_state *x, struct xfrm_state *orig) { - x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), + + x->replay_esn = kmemdup(orig->replay_esn, + xfrm_replay_state_esn_len(orig->replay_esn), GFP_KERNEL); if (!x->replay_esn) return -ENOMEM; - - x->replay_esn->bmp_len = orig->replay_esn->bmp_len; - x->replay_esn->replay_window = orig->replay_esn->replay_window; - - x->preplay_esn = kmemdup(x->replay_esn, - xfrm_replay_state_esn_len(x->replay_esn), + x->preplay_esn = 
kmemdup(orig->preplay_esn, + xfrm_replay_state_esn_len(orig->preplay_esn), GFP_KERNEL); - if (!x->preplay_esn) { - kfree(x->replay_esn); + if (!x->preplay_esn) return -ENOMEM; - } return 0; } diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h index 9b1d43d671a3f6b157db24ba3c80b14893f318d0..8c18dc6d3fde5e44f2aba8f87c67096d5dd092d2 100644 --- a/include/soc/nps/common.h +++ b/include/soc/nps/common.h @@ -45,6 +45,12 @@ #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 +#ifndef AUX_IENABLE +#define AUX_IENABLE 0x40c +#endif + +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) + #ifndef __ASSEMBLY__ /* In order to increase compilation test coverage */ diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h index 7475c7be165aa8e44601b0f659f2941360c3c037..d4aac343659550b3afec7ec8c329c5a9bc259c5a 100644 --- a/include/trace/events/sctp.h +++ b/include/trace/events/sctp.h @@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe, __entry->pathmtu = asoc->pathmtu; __entry->rwnd = asoc->peer.rwnd; __entry->unack_data = asoc->unack_data; - - if (trace_sctp_probe_path_enabled()) { - struct sctp_transport *sp; - - list_for_each_entry(sp, &asoc->peer.transport_addr_list, - transports) { - trace_sctp_probe_path(sp, asoc); - } - } ), TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h index 457479af99166974d5f777b0b0324f933364f642..44ccfd8818f293a9705894e31b468af9600d6c0f 100644 --- a/include/uapi/asm-generic/ioctls.h +++ b/include/uapi/asm-generic/ioctls.h @@ -79,6 +79,7 @@ #define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ #define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ #define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ +#define TIOCFAULT 0x544C /* Uart fault */ #define TIOCPMGET 0x544D /* PM get */ #define TIOCPMPUT 0x544E /* PM put */ #define TIOCPMACT 0x544F /* PM is active */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 66ce6659ecb699913df13b253c331ec6dafcf507..c297abc4e669771f450d4c1ee69150a91dd6e9e0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -746,9 +746,10 @@ struct kvm_ppc_resize_hpt { #define KVM_VM_PPC_HV 1 #define KVM_VM_PPC_PR 2 -/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */ -#define KVM_VM_MIPS_TE 0 +/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */ +#define KVM_VM_MIPS_AUTO 0 #define KVM_VM_MIPS_VZ 1 +#define KVM_VM_MIPS_TE 2 #define KVM_S390_SIE_PAGE_OFFSET 1 diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 080a8df134ef3b3cd96018be8a9048947ec09697..24a1c45bd1ae2a41b945cc77e7474ba2d6617ee5 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -25,6 +25,16 @@ enum { LO_FLAGS_DIRECT_IO = 16, }; +/* LO_FLAGS that can be set using LOOP_SET_STATUS(64) */ +#define LOOP_SET_STATUS_SETTABLE_FLAGS (LO_FLAGS_AUTOCLEAR | LO_FLAGS_PARTSCAN) + +/* LO_FLAGS that can be cleared using LOOP_SET_STATUS(64) */ +#define LOOP_SET_STATUS_CLEARABLE_FLAGS (LO_FLAGS_AUTOCLEAR) + +/* LO_FLAGS that can be set using LOOP_CONFIGURE */ +#define LOOP_CONFIGURE_SETTABLE_FLAGS (LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR \ + | LO_FLAGS_PARTSCAN | LO_FLAGS_DIRECT_IO) + #include /* for __kernel_old_dev_t */ #include /* for __u64 */ @@ -37,7 +47,7 @@ struct loop_info { int lo_offset; int lo_encrypt_type; int lo_encrypt_key_size; /* ioctl w/o */ - int lo_flags; /* ioctl r/o */ + int lo_flags; char 
lo_name[LO_NAME_SIZE]; unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ unsigned long lo_init[2]; @@ -53,13 +63,29 @@ struct loop_info64 { __u32 lo_number; /* ioctl r/o */ __u32 lo_encrypt_type; __u32 lo_encrypt_key_size; /* ioctl w/o */ - __u32 lo_flags; /* ioctl r/o */ + __u32 lo_flags; __u8 lo_file_name[LO_NAME_SIZE]; __u8 lo_crypt_name[LO_NAME_SIZE]; __u8 lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ __u64 lo_init[2]; }; +/** + * struct loop_config - Complete configuration for a loop device. + * @fd: fd of the file to be used as a backing file for the loop device. + * @block_size: block size to use; ignored if 0. + * @info: struct loop_info64 to configure the loop device with. + * + * This structure is used with the LOOP_CONFIGURE ioctl, and can be used to + * atomically setup and configure all loop device parameters at once. + */ +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + /* * Loop filter types */ @@ -90,6 +116,7 @@ struct loop_info64 { #define LOOP_SET_CAPACITY 0x4C07 #define LOOP_SET_DIRECT_IO 0x4C08 #define LOOP_SET_BLOCK_SIZE 0x4C09 +#define LOOP_CONFIGURE 0x4C0A /* /dev/loop-control interface */ #define LOOP_CTL_ADD 0x4C80 diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 5eac62e1b68d57570e2b920d61c7c260a774367c..cc00be102b9fb82b94e9360dd4a8af0526de06c3 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -132,7 +132,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index e98d90b5e6a418889b652025b272172c3e3d8c60..e8bca08b2697e128bbf6ae4b8482a5ef6ce6c04c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4499,6 +4499,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) + * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4506,6 +4507,7 @@ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, + NL80211_BAND_6GHZ, NUM_NL80211_BANDS, }; diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 86eca3208b6beba89fd8462c576c09aa937baf8e..a2c006a364e0bf51e4c0cec8a601e4971d02faaa 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -74,6 +74,8 @@ #include /* for "struct sockaddr" et al */ #include /* for IFNAMSIZ and co... */ +#include /* for offsetof */ + /***************************** VERSION *****************************/ /* * This constant is used to know the availability of the wireless @@ -1090,8 +1092,7 @@ struct iw_event { /* iw_point events are special. First, the payload (extra data) come at * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second, * we omit the pointer, so start at an offset. 
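
The loop_config structure and LOOP_CONFIGURE ioctl added above replace the LOOP_SET_FD + LOOP_SET_STATUS64 pair with one atomic call; a user-space sketch (illustrative paths and values, error handling omitted):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/loop.h>

    int attach_loop(void)
    {
            int loop_fd = open("/dev/loop0", O_RDWR);      /* example device */
            int backing_fd = open("backing.img", O_RDWR);  /* example file   */
            struct loop_config cfg;

            memset(&cfg, 0, sizeof(cfg));
            cfg.fd = backing_fd;
            cfg.block_size = 4096;
            cfg.info.lo_flags = LO_FLAGS_DIRECT_IO;

            /* One atomic step instead of LOOP_SET_FD + LOOP_SET_STATUS64. */
            return ioctl(loop_fd, LOOP_CONFIGURE, &cfg);
    }
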
*/ -#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ - (char *) NULL) +#define IW_EV_POINT_OFF offsetof(struct iw_point, length) #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ IW_EV_POINT_OFF) diff --git a/init/init_task.c b/init/init_task.c index 53e6e27ea8b51a687ac11c2446b2d8a1a7031982..3f6ec9b2bf0f01f6b3d9e605fc05fb6506916ad0 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -93,6 +93,9 @@ struct task_struct init_task #endif #ifdef CONFIG_CGROUP_SCHED .sched_task_group = &root_task_group, +#endif +#ifdef CONFIG_SCHED_TUNE + .stune_idx = 0, #endif .ptraced = LIST_HEAD_INIT(init_task.ptraced), .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry), diff --git a/init/main.c b/init/main.c index 67f6ead0f37148c630ac95f15d516c7dd8427fbe..0e7b6a871aa54ab2945f29703b32f3896998f322 100644 --- a/init/main.c +++ b/init/main.c @@ -512,14 +512,16 @@ static void __init report_meminit(void) { const char *stack; - if (IS_ENABLED(CONFIG_INIT_STACK_ALL)) - stack = "all"; + if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) + stack = "all(pattern)"; + else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) + stack = "all(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) - stack = "byref_all"; + stack = "byref_all(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) - stack = "byref"; + stack = "byref(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) - stack = "__user"; + stack = "__user(zero)"; else stack = "off"; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 4f7262eba73d863be764f15e73680ea1e780eac4..50952d6d81209f31ebd0be66bd0eb92c9a28d45f 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -317,8 +317,6 @@ static void audit_update_watch(struct audit_parent *parent, if (oentry->rule.exe) audit_remove_mark(oentry->rule.exe); - audit_watch_log_rule_change(r, owatch, "updated_rules"); - call_rcu(&oentry->rcu, audit_free_rule_rcu); } diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 6fe72792312d856673f6f4f658c730bbb0d0d442..3f3ed33bd2fdc372e95d2de73b2d7d2d9b3085e8 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -667,26 +667,23 @@ static void htab_elem_free_rcu(struct rcu_head *head) struct htab_elem *l = container_of(head, struct htab_elem, rcu); struct bpf_htab *htab = l->htab; - /* must increment bpf_prog_active to avoid kprobe+bpf triggering while - * we're calling kfree, otherwise deadlock is possible if kprobes - * are placed somewhere inside of slub - */ - preempt_disable(); - __this_cpu_inc(bpf_prog_active); htab_elem_free(htab, l); - __this_cpu_dec(bpf_prog_active); - preempt_enable(); } -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) { struct bpf_map *map = &htab->map; + void *ptr; if (map->ops->map_fd_put_ptr) { - void *ptr = fd_htab_map_get_ptr(map, l); - + ptr = fd_htab_map_get_ptr(map, l); map->ops->map_fd_put_ptr(ptr); } +} + +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +{ + htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { __pcpu_freelist_push(&htab->freelist, &l->fnode); @@ -747,6 +744,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 
c04815bb15cc12230f0124de428fb88e5d6f6b4e..11fade89c1f382897ef86619afe8413244a73d13 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -207,10 +207,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) else prev_key = key; + rcu_read_lock(); if (map->ops->map_get_next_key(map, prev_key, key)) { map_iter(m)->done = true; - return NULL; + key = NULL; } + rcu_read_unlock(); return key; } diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile index bfcdae8961227acab958192a7164d98c72a48ea7..5d7a76bfbbb769c41ff5ee072584f92f847fdaaf 100644 --- a/kernel/cgroup/Makefile +++ b/kernel/cgroup/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y := cgroup.o rstat.o namespace.o cgroup-v1.o +obj-y := cgroup.o rstat.o namespace.o cgroup-v1.o freezer.o -obj-$(CONFIG_CGROUP_FREEZER) += freezer.o +obj-$(CONFIG_CGROUP_FREEZER) += legacy_freezer.o obj-$(CONFIG_CGROUP_PIDS) += pids.o obj-$(CONFIG_CGROUP_RDMA) += rdma.o obj-$(CONFIG_CPUSETS) += cpuset.o diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 75568fcf2180dd577f5e8e4903e30516d3c987db..92717bb9005914f3c85c970c494372d98ed3504f 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -196,7 +196,7 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, void cgroup_free_root(struct cgroup_root *root); void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts); -int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags); +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask); int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, struct cgroup_root *root, unsigned long magic, @@ -224,6 +224,7 @@ int cgroup_rmdir(struct kernfs_node *kn); int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, struct kernfs_root *kf_root); +int __cgroup_task_count(const struct cgroup *cgrp); int cgroup_task_count(const struct cgroup *cgrp); /* diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 405167715d5256d808cb1e97f753d3fbd373b598..262c96134c5ce4aff6f1b2e34b55736036853f15 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -27,6 +27,9 @@ /* Controllers blocked by the commandline in v1 */ static u16 cgroup_no_v1_mask; +/* disable named v1 mounts */ +static bool cgroup_no_v1_named; + /* * pidlist destructions need to be flushed on cgroup destruction. Use a * separate workqueue as flush domain. @@ -336,22 +339,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, return l; } -/** - * cgroup_task_count - count the number of tasks in a cgroup. - * @cgrp: the cgroup in question - */ -int cgroup_task_count(const struct cgroup *cgrp) -{ - int count = 0; - struct cgrp_cset_link *link; - - spin_lock_irq(&css_set_lock); - list_for_each_entry(link, &cgrp->cset_links, cset_link) - count += link->cset->nr_tasks; - spin_unlock_irq(&css_set_lock); - return count; -} - /* * Load a cgroup's pidarray with either procs' tgids or tasks' pids */ @@ -965,6 +952,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) } if (!strncmp(token, "name=", 5)) { const char *name = token + 5; + + /* blocked by boot param? 
*/ + if (cgroup_no_v1_named) + return -ENOENT; /* Can't specify an empty name */ if (!strlen(name)) return -EINVAL; @@ -1111,13 +1102,11 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, void *data, unsigned long magic, struct cgroup_namespace *ns) { - struct super_block *pinned_sb = NULL; struct cgroup_sb_opts opts; struct cgroup_root *root; struct cgroup_subsys *ss; struct dentry *dentry; int i, ret; - bool new_root = false; cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); @@ -1179,29 +1168,6 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, if (root->flags ^ opts.flags) pr_warn("new mount options do not match the existing superblock, will be ignored\n"); - /* - * We want to reuse @root whose lifetime is governed by its - * ->cgrp. Let's check whether @root is alive and keep it - * that way. As cgroup_kill_sb() can happen anytime, we - * want to block it by pinning the sb so that @root doesn't - * get killed before mount is complete. - * - * With the sb pinned, tryget_live can reliably indicate - * whether @root can be reused. If it's being killed, - * drain it. We can use wait_queue for the wait but this - * path is super cold. Let's just sleep a bit and retry. - */ - pinned_sb = kernfs_pin_sb(root->kf_root, NULL); - if (IS_ERR(pinned_sb) || - !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { - mutex_unlock(&cgroup_mutex); - if (!IS_ERR_OR_NULL(pinned_sb)) - deactivate_super(pinned_sb); - msleep(10); - ret = restart_syscall(); - goto out_free; - } - ret = 0; goto out_unlock; } @@ -1227,15 +1193,20 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, ret = -ENOMEM; goto out_unlock; } - new_root = true; init_cgroup_root(root, &opts); - ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD); + ret = cgroup_setup_root(root, opts.subsys_mask); if (ret) cgroup_free_root(root); out_unlock: + if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { + mutex_unlock(&cgroup_mutex); + msleep(10); + ret = restart_syscall(); + goto out_free; + } mutex_unlock(&cgroup_mutex); out_free: kfree(opts.release_agent); @@ -1247,25 +1218,13 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, dentry = cgroup_do_mount(&cgroup_fs_type, flags, root, CGROUP_SUPER_MAGIC, ns); - /* - * There's a race window after we release cgroup_mutex and before - * allocating a superblock. Make sure a concurrent process won't - * be able to re-use the root during this window by delaying the - * initialization of root refcnt. - */ - if (new_root) { - mutex_lock(&cgroup_mutex); - percpu_ref_reinit(&root->cgrp.self.refcnt); - mutex_unlock(&cgroup_mutex); + if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) { + struct super_block *sb = dentry->d_sb; + dput(dentry); + deactivate_locked_super(sb); + msleep(10); + dentry = ERR_PTR(restart_syscall()); } - - /* - * If @pinned_sb, we're reusing an existing root and holding an - * extra ref on its sb. Mount is complete. Put the extra ref. 
- */ - if (pinned_sb) - deactivate_super(pinned_sb); - return dentry; } @@ -1294,7 +1253,12 @@ static int __init cgroup_no_v1(char *str) if (!strcmp(token, "all")) { cgroup_no_v1_mask = U16_MAX; - break; + continue; + } + + if (!strcmp(token, "named")) { + cgroup_no_v1_named = true; + continue; } for_each_subsys(ss, i) { diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 26f42fb73de3ca63a861b6ae86cde83994a0ab89..e6bad6b3f6048c8fa3b8702719c8e39076f8ef0c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -563,6 +563,39 @@ static void cgroup_get_live(struct cgroup *cgrp) css_get(&cgrp->self); } +/** + * __cgroup_task_count - count the number of tasks in a cgroup. The caller + * is responsible for taking the css_set_lock. + * @cgrp: the cgroup in question + */ +int __cgroup_task_count(const struct cgroup *cgrp) +{ + int count = 0; + struct cgrp_cset_link *link; + + lockdep_assert_held(&css_set_lock); + + list_for_each_entry(link, &cgrp->cset_links, cset_link) + count += link->cset->nr_tasks; + + return count; +} + +/** + * cgroup_task_count - count the number of tasks in a cgroup. + * @cgrp: the cgroup in question + */ +int cgroup_task_count(const struct cgroup *cgrp) +{ + int count; + + spin_lock_irq(&css_set_lock); + count = __cgroup_task_count(cgrp); + spin_unlock_irq(&css_set_lock); + + return count; +} + struct cgroup_subsys_state *of_css(struct kernfs_open_file *of) { struct cgroup *cgrp = of->kn->parent->priv; @@ -1898,7 +1931,7 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); } -int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) { LIST_HEAD(tmp_links); struct cgroup *root_cgrp = &root->cgrp; @@ -1915,7 +1948,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) root_cgrp->ancestor_ids[0] = ret; ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, - ref_flags, GFP_KERNEL); + 0, GFP_KERNEL); if (ret) goto out; @@ -2092,18 +2125,16 @@ static void cgroup_kill_sb(struct super_block *sb) struct cgroup_root *root = cgroup_root_from_kf(kf_root); /* - * If @root doesn't have any mounts or children, start killing it. + * If @root doesn't have any children, start killing it. * This prevents new mounts by disabling percpu_ref_tryget_live(). * cgroup_mount() may wait for @root's release. * * And don't kill the default root. */ - if (!list_empty(&root->cgrp.self.children) || - root == &cgrp_dfl_root) - cgroup_put(&root->cgrp); - else + if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && + !percpu_ref_is_dying(&root->cgrp.self.refcnt)) percpu_ref_kill(&root->cgrp.self.refcnt); - + cgroup_put(&root->cgrp); kernfs_kill_sb(sb); } @@ -2332,8 +2363,15 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) get_css_set(to_cset); to_cset->nr_tasks++; css_set_move_task(task, from_cset, to_cset, true); - put_css_set_locked(from_cset); from_cset->nr_tasks--; + /* + * If the source or destination cgroup is frozen, + * the task might require to change its state. 
+ */ + cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp, + to_cset->dfl_cgrp); + put_css_set_locked(from_cset); + } } spin_unlock_irq(&css_set_lock); @@ -2532,7 +2570,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp); if (!dst_cset) - goto err; + return -ENOMEM; WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); @@ -2564,9 +2602,6 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) } return 0; -err: - cgroup_migrate_finish(mgctx); - return -ENOMEM; } /** @@ -3378,8 +3413,11 @@ static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of, static int cgroup_events_show(struct seq_file *seq, void *v) { - seq_printf(seq, "populated %d\n", - cgroup_is_populated(seq_css(seq)->cgroup)); + struct cgroup *cgrp = seq_css(seq)->cgroup; + + seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp)); + seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags)); + return 0; } @@ -3499,6 +3537,40 @@ static void cgroup_pressure_release(struct kernfs_open_file *of) } #endif /* CONFIG_PSI */ +static int cgroup_freeze_show(struct seq_file *seq, void *v) +{ + struct cgroup *cgrp = seq_css(seq)->cgroup; + + seq_printf(seq, "%d\n", cgrp->freezer.freeze); + + return 0; +} + +static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct cgroup *cgrp; + ssize_t ret; + int freeze; + + ret = kstrtoint(strstrip(buf), 0, &freeze); + if (ret) + return ret; + + if (freeze < 0 || freeze > 1) + return -ERANGE; + + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENOENT; + + cgroup_freeze(cgrp, freeze); + + cgroup_kn_unlock(of->kn); + + return nbytes; +} + static int cgroup_file_open(struct kernfs_open_file *of) { struct cftype *cft = of->kn->priv; @@ -4687,6 +4759,12 @@ static struct cftype cgroup_base_files[] = { .name = "cgroup.stat", .seq_show = cgroup_stat_show, }, + { + .name = "cgroup.freeze", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cgroup_freeze_show, + .write = cgroup_freeze_write, + }, { .name = "cpu.stat", .flags = CFTYPE_NOT_ON_ROOT, @@ -5046,12 +5124,37 @@ static struct cgroup *cgroup_create(struct cgroup *parent) if (ret) goto out_psi_free; + /* + * New cgroup inherits effective freeze counter, and + * if the parent has to be frozen, the child has too. + */ + cgrp->freezer.e_freeze = parent->freezer.e_freeze; + if (cgrp->freezer.e_freeze) { + /* + * Set the CGRP_FREEZE flag, so when a process will be + * attached to the child cgroup, it will become frozen. + * At this point the new cgroup is unpopulated, so we can + * consider it frozen immediately. + */ + set_bit(CGRP_FREEZE, &cgrp->flags); + set_bit(CGRP_FROZEN, &cgrp->flags); + } + spin_lock_irq(&css_set_lock); for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { cgrp->ancestor_ids[tcgrp->level] = tcgrp->id; - if (tcgrp != cgrp) + if (tcgrp != cgrp) { tcgrp->nr_descendants++; + + /* + * If the new cgroup is frozen, all ancestor cgroups + * get a new frozen descendant, but their state can't + * change because of this. + */ + if (cgrp->freezer.e_freeze) + tcgrp->freezer.nr_frozen_descendants++; + } } spin_unlock_irq(&css_set_lock); @@ -5342,6 +5445,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) { tcgrp->nr_descendants--; tcgrp->nr_dying_descendants++; + /* + * If the dying cgroup is frozen, decrease frozen descendants + * counters of ancestor cgroups. 
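+ * This keeps nr_frozen_descendants in sync with the nr_descendants + * decrement above.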
+ */ + if (test_bit(CGRP_FROZEN, &cgrp->flags)) + tcgrp->freezer.nr_frozen_descendants--; } spin_unlock_irq(&css_set_lock); @@ -5504,7 +5613,7 @@ int __init cgroup_init(void) hash_add(css_set_table, &init_css_set.hlist, css_set_hash(init_css_set.subsys)); - BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0)); + BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); mutex_unlock(&cgroup_mutex); @@ -5795,6 +5904,26 @@ void cgroup_post_fork(struct task_struct *child) cset->nr_tasks++; css_set_move_task(child, NULL, cset, false); } + + /* + * If the cgroup has to be frozen, the new task has too. + * Let's set the JOBCTL_TRAP_FREEZE jobctl bit to get + * the task into the frozen state. + */ + if (unlikely(cgroup_task_freeze(child))) { + spin_lock(&child->sighand->siglock); + WARN_ON_ONCE(child->frozen); + child->jobctl |= JOBCTL_TRAP_FREEZE; + spin_unlock(&child->sighand->siglock); + + /* + * Calling cgroup_update_frozen() isn't required here, + * because it will be called anyway a bit later + * from do_freezer_trap(). So we avoid cgroup's + * transient switch from the frozen state and back. + */ + } + spin_unlock_irq(&css_set_lock); } @@ -5844,6 +5973,12 @@ void cgroup_exit(struct task_struct *tsk) css_set_move_task(tsk, cset, NULL, false); list_add_tail(&tsk->cg_list, &cset->dying_tasks); cset->nr_tasks--; + + if (unlikely(cgroup_task_frozen(tsk))) + cgroup_freezer_frozen_exit(tsk); + else if (unlikely(cgroup_task_freeze(tsk))) + cgroup_update_frozen(task_dfl_cgroup(tsk)); + spin_unlock_irq(&css_set_lock); } else { get_css_set(cset); @@ -6077,6 +6212,8 @@ void cgroup_sk_clone(struct sock_cgroup_data *skcd) { /* Socket clone path */ if (skcd->val) { + if (skcd->no_refcnt) + return; /* * We might be cloning a socket which is left in an empty * cgroup and the cgroup might have already been rmdir'd. @@ -6145,10 +6282,8 @@ static ssize_t show_delegatable_files(struct cftype *files, char *buf, ret += snprintf(buf + ret, size - ret, "%s\n", cft->name); - if (unlikely(ret >= size)) { - WARN_ON(1); + if (WARN_ON(ret >= size)) break; - } } return ret; diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c index 08236798d17315622d73540d62a89dcbafdc5c77..9d8cda478fc9fd2733e443a04caa354818b9f8b4 100644 --- a/kernel/cgroup/freezer.c +++ b/kernel/cgroup/freezer.c @@ -1,481 +1,317 @@ -/* - * cgroup_freezer.c - control group freezer subsystem - * - * Copyright IBM Corporation, 2007 - * - * Author : Cedric Le Goater - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2.1 of the GNU Lesser General Public License - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - */ - -#include -#include +//SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include +#include +#include +#include + +#include "cgroup-internal.h" /* - * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is - * set if "FROZEN" is written to freezer.state cgroupfs file, and cleared - * for "THAWED". FREEZING_PARENT is set if the parent freezer is FREEZING - * for whatever reason. IOW, a cgroup has FREEZING_PARENT set if one of - * its ancestors has FREEZING_SELF set. + * Propagate the cgroup frozen state upwards by the cgroup tree. 
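+ * A freezing ancestor becomes frozen only when all of its descendants + * are frozen, so a state change in one cgroup may have to be walked + * up through all of its ancestors.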
*/ -enum freezer_state_flags { - CGROUP_FREEZER_ONLINE = (1 << 0), /* freezer is fully online */ - CGROUP_FREEZING_SELF = (1 << 1), /* this freezer is freezing */ - CGROUP_FREEZING_PARENT = (1 << 2), /* the parent freezer is freezing */ - CGROUP_FROZEN = (1 << 3), /* this and its descendants frozen */ - - /* mask for all FREEZING flags */ - CGROUP_FREEZING = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT, -}; - -struct freezer { - struct cgroup_subsys_state css; - unsigned int state; -}; - -static DEFINE_MUTEX(freezer_mutex); - -static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) +static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen) { - return css ? container_of(css, struct freezer, css) : NULL; -} + int desc = 1; -static inline struct freezer *task_freezer(struct task_struct *task) -{ - return css_freezer(task_css(task, freezer_cgrp_id)); + /* + * If the new state is frozen, some freezing ancestor cgroups may change + * their state too, depending on if all their descendants are frozen. + * + * Otherwise, all ancestor cgroups are forced into the non-frozen state. + */ + while ((cgrp = cgroup_parent(cgrp))) { + if (frozen) { + cgrp->freezer.nr_frozen_descendants += desc; + if (!test_bit(CGRP_FROZEN, &cgrp->flags) && + test_bit(CGRP_FREEZE, &cgrp->flags) && + cgrp->freezer.nr_frozen_descendants == + cgrp->nr_descendants) { + set_bit(CGRP_FROZEN, &cgrp->flags); + cgroup_file_notify(&cgrp->events_file); + desc++; + } + } else { + cgrp->freezer.nr_frozen_descendants -= desc; + if (test_bit(CGRP_FROZEN, &cgrp->flags)) { + clear_bit(CGRP_FROZEN, &cgrp->flags); + cgroup_file_notify(&cgrp->events_file); + desc++; + } + } + } } -static struct freezer *parent_freezer(struct freezer *freezer) +/* + * Revisit the cgroup frozen state. + * Checks if the cgroup is really frozen and perform all state transitions. + */ +void cgroup_update_frozen(struct cgroup *cgrp) { - return css_freezer(freezer->css.parent); -} + bool frozen; -bool cgroup_freezing(struct task_struct *task) -{ - bool ret; + lockdep_assert_held(&css_set_lock); - rcu_read_lock(); - ret = task_freezer(task)->state & CGROUP_FREEZING; - rcu_read_unlock(); + /* + * If the cgroup has to be frozen (CGRP_FREEZE bit set), + * and all tasks are frozen and/or stopped, let's consider + * the cgroup frozen. Otherwise it's not frozen. + */ + frozen = test_bit(CGRP_FREEZE, &cgrp->flags) && + cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp); - return ret; -} + if (frozen) { + /* Already there? */ + if (test_bit(CGRP_FROZEN, &cgrp->flags)) + return; -static const char *freezer_state_strs(unsigned int state) -{ - if (state & CGROUP_FROZEN) - return "FROZEN"; - if (state & CGROUP_FREEZING) - return "FREEZING"; - return "THAWED"; -}; - -static struct cgroup_subsys_state * -freezer_css_alloc(struct cgroup_subsys_state *parent_css) -{ - struct freezer *freezer; + set_bit(CGRP_FROZEN, &cgrp->flags); + } else { + /* Already there? */ + if (!test_bit(CGRP_FROZEN, &cgrp->flags)) + return; - freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL); - if (!freezer) - return ERR_PTR(-ENOMEM); + clear_bit(CGRP_FROZEN, &cgrp->flags); + } + cgroup_file_notify(&cgrp->events_file); - return &freezer->css; + /* Update the state of ancestor cgroups. */ + cgroup_propagate_frozen(cgrp, frozen); } -/** - * freezer_css_online - commit creation of a freezer css - * @css: css being created - * - * We're committing to creation of @css. 
Mark it online and inherit - * parent's freezing state while holding both parent's and our - * freezer->lock. +/* + * Increment cgroup's nr_frozen_tasks. */ -static int freezer_css_online(struct cgroup_subsys_state *css) +static void cgroup_inc_frozen_cnt(struct cgroup *cgrp) { - struct freezer *freezer = css_freezer(css); - struct freezer *parent = parent_freezer(freezer); - - mutex_lock(&freezer_mutex); - - freezer->state |= CGROUP_FREEZER_ONLINE; - - if (parent && (parent->state & CGROUP_FREEZING)) { - freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN; - atomic_inc(&system_freezing_cnt); - } - - mutex_unlock(&freezer_mutex); - return 0; + cgrp->freezer.nr_frozen_tasks++; } -/** - * freezer_css_offline - initiate destruction of a freezer css - * @css: css being destroyed - * - * @css is going away. Mark it dead and decrement system_freezing_count if - * it was holding one. +/* + * Decrement cgroup's nr_frozen_tasks. */ -static void freezer_css_offline(struct cgroup_subsys_state *css) +static void cgroup_dec_frozen_cnt(struct cgroup *cgrp) { - struct freezer *freezer = css_freezer(css); - - mutex_lock(&freezer_mutex); - - if (freezer->state & CGROUP_FREEZING) - atomic_dec(&system_freezing_cnt); - - freezer->state = 0; - - mutex_unlock(&freezer_mutex); + cgrp->freezer.nr_frozen_tasks--; + WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0); } -static void freezer_css_free(struct cgroup_subsys_state *css) +/* + * Enter frozen/stopped state, if not yet there. Update cgroup's counters, + * and revisit the state of the cgroup, if necessary. + */ +void cgroup_enter_frozen(void) { - kfree(css_freezer(css)); + struct cgroup *cgrp; + + if (current->frozen) + return; + + spin_lock_irq(&css_set_lock); + current->frozen = true; + cgrp = task_dfl_cgroup(current); + cgroup_inc_frozen_cnt(cgrp); + cgroup_update_frozen(cgrp); + spin_unlock_irq(&css_set_lock); } /* - * Tasks can be migrated into a different freezer anytime regardless of its - * current state. freezer_attach() is responsible for making new tasks - * conform to the current state. + * Conditionally leave frozen/stopped state. Update cgroup's counters, + * and revisit the state of the cgroup, if necessary. * - * Freezer state changes and task migration are synchronized via - * @freezer->lock. freezer_attach() makes the new tasks conform to the - * current state and all following state changes can see the new tasks. + * If always_leave is not set, and the cgroup is freezing, + * we're racing with the cgroup freezing. In this case, we don't + * drop the frozen counter to avoid a transient switch to + * the unfrozen state. */ -static void freezer_attach(struct cgroup_taskset *tset) +void cgroup_leave_frozen(bool always_leave) { - struct task_struct *task; - struct cgroup_subsys_state *new_css; - - mutex_lock(&freezer_mutex); - - /* - * Make the new tasks conform to the current state of @new_css. - * For simplicity, when migrating any task to a FROZEN cgroup, we - * revert it to FREEZING and let update_if_frozen() determine the - * correct state later. - * - * Tasks in @tset are on @new_css but may not conform to its - * current state before executing the following - !frozen tasks may - * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. 
- */ - cgroup_taskset_for_each(task, new_css, tset) { - struct freezer *freezer = css_freezer(new_css); - - if (!(freezer->state & CGROUP_FREEZING)) { - __thaw_task(task); - } else { - freeze_task(task); - /* clear FROZEN and propagate upwards */ - while (freezer && (freezer->state & CGROUP_FROZEN)) { - freezer->state &= ~CGROUP_FROZEN; - freezer = parent_freezer(freezer); - } - } + struct cgroup *cgrp; + + spin_lock_irq(&css_set_lock); + cgrp = task_dfl_cgroup(current); + if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) { + cgroup_dec_frozen_cnt(cgrp); + cgroup_update_frozen(cgrp); + WARN_ON_ONCE(!current->frozen); + current->frozen = false; + } + spin_unlock_irq(&css_set_lock); + + if (unlikely(current->frozen)) { + /* + * If the task remained in the frozen state, + * make sure it won't reach userspace without + * entering the signal handling loop. + */ + spin_lock_irq(¤t->sighand->siglock); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); } - - mutex_unlock(&freezer_mutex); } -/** - * freezer_fork - cgroup post fork callback - * @task: a task which has just been forked - * - * @task has just been created and should conform to the current state of - * the cgroup_freezer it belongs to. This function may race against - * freezer_attach(). Losing to freezer_attach() means that we don't have - * to do anything as freezer_attach() will put @task into the appropriate - * state. +/* + * Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE + * jobctl bit. */ -static void freezer_fork(struct task_struct *task) +static void cgroup_freeze_task(struct task_struct *task, bool freeze) { - struct freezer *freezer; + unsigned long flags; - /* - * The root cgroup is non-freezable, so we can skip locking the - * freezer. This is safe regardless of race with task migration. - * If we didn't race or won, skipping is obviously the right thing - * to do. If we lost and root is the new cgroup, noop is still the - * right thing to do. - */ - if (task_css_is_root(task, freezer_cgrp_id)) + /* If the task is about to die, don't bother with freezing it. */ + if (!lock_task_sighand(task, &flags)) return; - mutex_lock(&freezer_mutex); - rcu_read_lock(); - - freezer = task_freezer(task); - if (freezer->state & CGROUP_FREEZING) - freeze_task(task); + if (freeze) { + task->jobctl |= JOBCTL_TRAP_FREEZE; + signal_wake_up(task, false); + } else { + task->jobctl &= ~JOBCTL_TRAP_FREEZE; + wake_up_process(task); + } - rcu_read_unlock(); - mutex_unlock(&freezer_mutex); + unlock_task_sighand(task, &flags); } -/** - * update_if_frozen - update whether a cgroup finished freezing - * @css: css of interest - * - * Once FREEZING is initiated, transition to FROZEN is lazily updated by - * calling this function. If the current state is FREEZING but not FROZEN, - * this function checks whether all tasks of this cgroup and the descendant - * cgroups finished freezing and, if so, sets FROZEN. - * - * The caller is responsible for grabbing RCU read lock and calling - * update_if_frozen() on all descendants prior to invoking this function. - * - * Task states and freezer state might disagree while tasks are being - * migrated into or out of @css, so we can't verify task states against - * @freezer state here. See freezer_attach() for details. +/* + * Freeze or unfreeze all tasks in the given cgroup. 
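+ * The cgroup's CGRP_FREEZE flag is updated first, and then every + * user task in the cgroup gets the corresponding jobctl bit.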
*/ -static void update_if_frozen(struct cgroup_subsys_state *css) +static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze) { - struct freezer *freezer = css_freezer(css); - struct cgroup_subsys_state *pos; struct css_task_iter it; struct task_struct *task; - lockdep_assert_held(&freezer_mutex); + lockdep_assert_held(&cgroup_mutex); - if (!(freezer->state & CGROUP_FREEZING) || - (freezer->state & CGROUP_FROZEN)) - return; - - /* are all (live) children frozen? */ - rcu_read_lock(); - css_for_each_child(pos, css) { - struct freezer *child = css_freezer(pos); - - if ((child->state & CGROUP_FREEZER_ONLINE) && - !(child->state & CGROUP_FROZEN)) { - rcu_read_unlock(); - return; - } - } - rcu_read_unlock(); - - /* are all tasks frozen? */ - css_task_iter_start(css, 0, &it); + spin_lock_irq(&css_set_lock); + if (freeze) + set_bit(CGRP_FREEZE, &cgrp->flags); + else + clear_bit(CGRP_FREEZE, &cgrp->flags); + spin_unlock_irq(&css_set_lock); + css_task_iter_start(&cgrp->self, 0, &it); while ((task = css_task_iter_next(&it))) { - if (freezing(task)) { - /* - * freezer_should_skip() indicates that the task - * should be skipped when determining freezing - * completion. Consider it frozen in addition to - * the usual frozen condition. - */ - if (!frozen(task) && !freezer_should_skip(task)) - goto out_iter_end; - } + /* + * Ignore kernel threads here. Freezing cgroups containing + * kthreads isn't supported. + */ + if (task->flags & PF_KTHREAD) + continue; + cgroup_freeze_task(task, freeze); } - - freezer->state |= CGROUP_FROZEN; -out_iter_end: css_task_iter_end(&it); + + /* + * Cgroup state should be revisited here to cover empty leaf cgroups + * and cgroups which descendants are already in the desired state. + */ + spin_lock_irq(&css_set_lock); + if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants) + cgroup_update_frozen(cgrp); + spin_unlock_irq(&css_set_lock); } -static int freezer_read(struct seq_file *m, void *v) +/* + * Adjust the task state (freeze or unfreeze) and revisit the state of + * source and destination cgroups. + */ +void cgroup_freezer_migrate_task(struct task_struct *task, + struct cgroup *src, struct cgroup *dst) { - struct cgroup_subsys_state *css = seq_css(m), *pos; - - mutex_lock(&freezer_mutex); - rcu_read_lock(); + lockdep_assert_held(&css_set_lock); - /* update states bottom-up */ - css_for_each_descendant_post(pos, css) { - if (!css_tryget_online(pos)) - continue; - rcu_read_unlock(); - - update_if_frozen(pos); + /* + * Kernel threads are not supposed to be frozen at all. + */ + if (task->flags & PF_KTHREAD) + return; - rcu_read_lock(); - css_put(pos); + /* + * Adjust counters of freezing and frozen tasks. + * Note, that if the task is frozen, but the destination cgroup is not + * frozen, we bump both counters to keep them balanced. + */ + if (task->frozen) { + cgroup_inc_frozen_cnt(dst); + cgroup_dec_frozen_cnt(src); } + cgroup_update_frozen(dst); + cgroup_update_frozen(src); - rcu_read_unlock(); - mutex_unlock(&freezer_mutex); - - seq_puts(m, freezer_state_strs(css_freezer(css)->state)); - seq_putc(m, '\n'); - return 0; + /* + * Force the task to the desired state. 
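+ * The destination cgroup's CGRP_FREEZE bit determines whether the + * task has to be frozen or unfrozen after the migration.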
+ */ + cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags)); } -static void freeze_cgroup(struct freezer *freezer) +void cgroup_freezer_frozen_exit(struct task_struct *task) { - struct css_task_iter it; - struct task_struct *task; + struct cgroup *cgrp = task_dfl_cgroup(task); - css_task_iter_start(&freezer->css, 0, &it); - while ((task = css_task_iter_next(&it))) - freeze_task(task); - css_task_iter_end(&it); -} + lockdep_assert_held(&css_set_lock); -static void unfreeze_cgroup(struct freezer *freezer) -{ - struct css_task_iter it; - struct task_struct *task; - - css_task_iter_start(&freezer->css, 0, &it); - while ((task = css_task_iter_next(&it))) - __thaw_task(task); - css_task_iter_end(&it); + cgroup_dec_frozen_cnt(cgrp); + cgroup_update_frozen(cgrp); } -/** - * freezer_apply_state - apply state change to a single cgroup_freezer - * @freezer: freezer to apply state change to - * @freeze: whether to freeze or unfreeze - * @state: CGROUP_FREEZING_* flag to set or clear - * - * Set or clear @state on @cgroup according to @freeze, and perform - * freezing or thawing as necessary. - */ -static void freezer_apply_state(struct freezer *freezer, bool freeze, - unsigned int state) +void cgroup_freeze(struct cgroup *cgrp, bool freeze) { - /* also synchronizes against task migration, see freezer_attach() */ - lockdep_assert_held(&freezer_mutex); - - if (!(freezer->state & CGROUP_FREEZER_ONLINE)) - return; - - if (freeze) { - if (!(freezer->state & CGROUP_FREEZING)) - atomic_inc(&system_freezing_cnt); - freezer->state |= state; - freeze_cgroup(freezer); - } else { - bool was_freezing = freezer->state & CGROUP_FREEZING; + struct cgroup_subsys_state *css; + struct cgroup *dsct; + bool applied = false; - freezer->state &= ~state; + lockdep_assert_held(&cgroup_mutex); - if (!(freezer->state & CGROUP_FREEZING)) { - if (was_freezing) - atomic_dec(&system_freezing_cnt); - freezer->state &= ~CGROUP_FROZEN; - unfreeze_cgroup(freezer); - } - } -} + /* + * Nothing changed? Just exit. + */ + if (cgrp->freezer.freeze == freeze) + return; -/** - * freezer_change_state - change the freezing state of a cgroup_freezer - * @freezer: freezer of interest - * @freeze: whether to freeze or thaw - * - * Freeze or thaw @freezer according to @freeze. The operations are - * recursive - all descendants of @freezer will be affected. - */ -static void freezer_change_state(struct freezer *freezer, bool freeze) -{ - struct cgroup_subsys_state *pos; + cgrp->freezer.freeze = freeze; /* - * Update all its descendants in pre-order traversal. Each - * descendant will try to inherit its parent's FREEZING state as - * CGROUP_FREEZING_PARENT. + * Propagate changes downwards the cgroup tree. 
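+ * Each descendant's effective freeze counter (e_freeze) is adjusted, + * and the actual freezing or unfreezing happens only when the counter + * changes between zero and non-zero.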
*/ - mutex_lock(&freezer_mutex); - rcu_read_lock(); - css_for_each_descendant_pre(pos, &freezer->css) { - struct freezer *pos_f = css_freezer(pos); - struct freezer *parent = parent_freezer(pos_f); + css_for_each_descendant_pre(css, &cgrp->self) { + dsct = css->cgroup; - if (!css_tryget_online(pos)) + if (cgroup_is_dead(dsct)) continue; - rcu_read_unlock(); - - if (pos_f == freezer) - freezer_apply_state(pos_f, freeze, - CGROUP_FREEZING_SELF); - else - freezer_apply_state(pos_f, - parent->state & CGROUP_FREEZING, - CGROUP_FREEZING_PARENT); - - rcu_read_lock(); - css_put(pos); - } - rcu_read_unlock(); - mutex_unlock(&freezer_mutex); -} - -static ssize_t freezer_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - bool freeze; - - buf = strstrip(buf); - - if (strcmp(buf, freezer_state_strs(0)) == 0) - freeze = false; - else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0) - freeze = true; - else - return -EINVAL; - - freezer_change_state(css_freezer(of_css(of)), freeze); - return nbytes; -} -static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css, - struct cftype *cft) -{ - struct freezer *freezer = css_freezer(css); + if (freeze) { + dsct->freezer.e_freeze++; + /* + * Already frozen because of ancestor's settings? + */ + if (dsct->freezer.e_freeze > 1) + continue; + } else { + dsct->freezer.e_freeze--; + /* + * Still frozen because of ancestor's settings? + */ + if (dsct->freezer.e_freeze > 0) + continue; - return (bool)(freezer->state & CGROUP_FREEZING_SELF); -} + WARN_ON_ONCE(dsct->freezer.e_freeze < 0); + } -static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css, - struct cftype *cft) -{ - struct freezer *freezer = css_freezer(css); + /* + * Do change actual state: freeze or unfreeze. + */ + cgroup_do_freeze(dsct, freeze); + applied = true; + } - return (bool)(freezer->state & CGROUP_FREEZING_PARENT); + /* + * Even if the actual state hasn't changed, let's notify a user. + * The state can be enforced by an ancestor cgroup: the cgroup + * can already be in the desired state or it can be locked in the + * opposite state, so that the transition will never happen. + * In both cases it's better to notify a user, that there is + * nothing to wait for. 
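+ * The notification is delivered through the cgroup.events file.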
+ */ + if (!applied) + cgroup_file_notify(&cgrp->events_file); } - -static struct cftype files[] = { - { - .name = "state", - .flags = CFTYPE_NOT_ON_ROOT, - .seq_show = freezer_read, - .write = freezer_write, - }, - { - .name = "self_freezing", - .flags = CFTYPE_NOT_ON_ROOT, - .read_u64 = freezer_self_freezing_read, - }, - { - .name = "parent_freezing", - .flags = CFTYPE_NOT_ON_ROOT, - .read_u64 = freezer_parent_freezing_read, - }, - { } /* terminate */ -}; - -struct cgroup_subsys freezer_cgrp_subsys = { - .css_alloc = freezer_css_alloc, - .css_online = freezer_css_online, - .css_offline = freezer_css_offline, - .css_free = freezer_css_free, - .attach = freezer_attach, - .fork = freezer_fork, - .legacy_cftypes = files, -}; diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c new file mode 100644 index 0000000000000000000000000000000000000000..08236798d17315622d73540d62a89dcbafdc5c77 --- /dev/null +++ b/kernel/cgroup/legacy_freezer.c @@ -0,0 +1,481 @@ +/* + * cgroup_freezer.c - control group freezer subsystem + * + * Copyright IBM Corporation, 2007 + * + * Author : Cedric Le Goater + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is + * set if "FROZEN" is written to freezer.state cgroupfs file, and cleared + * for "THAWED". FREEZING_PARENT is set if the parent freezer is FREEZING + * for whatever reason. IOW, a cgroup has FREEZING_PARENT set if one of + * its ancestors has FREEZING_SELF set. + */ +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = (1 << 0), /* freezer is fully online */ + CGROUP_FREEZING_SELF = (1 << 1), /* this freezer is freezing */ + CGROUP_FREEZING_PARENT = (1 << 2), /* the parent freezer is freezing */ + CGROUP_FROZEN = (1 << 3), /* this and its descendants frozen */ + + /* mask for all FREEZING flags */ + CGROUP_FREEZING = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; +}; + +static DEFINE_MUTEX(freezer_mutex); + +static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) +{ + return css ? 
container_of(css, struct freezer, css) : NULL; +} + +static inline struct freezer *task_freezer(struct task_struct *task) +{ + return css_freezer(task_css(task, freezer_cgrp_id)); +} + +static struct freezer *parent_freezer(struct freezer *freezer) +{ + return css_freezer(freezer->css.parent); +} + +bool cgroup_freezing(struct task_struct *task) +{ + bool ret; + + rcu_read_lock(); + ret = task_freezer(task)->state & CGROUP_FREEZING; + rcu_read_unlock(); + + return ret; +} + +static const char *freezer_state_strs(unsigned int state) +{ + if (state & CGROUP_FROZEN) + return "FROZEN"; + if (state & CGROUP_FREEZING) + return "FREEZING"; + return "THAWED"; +}; + +static struct cgroup_subsys_state * +freezer_css_alloc(struct cgroup_subsys_state *parent_css) +{ + struct freezer *freezer; + + freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL); + if (!freezer) + return ERR_PTR(-ENOMEM); + + return &freezer->css; +} + +/** + * freezer_css_online - commit creation of a freezer css + * @css: css being created + * + * We're committing to creation of @css. Mark it online and inherit + * parent's freezing state while holding both parent's and our + * freezer->lock. + */ +static int freezer_css_online(struct cgroup_subsys_state *css) +{ + struct freezer *freezer = css_freezer(css); + struct freezer *parent = parent_freezer(freezer); + + mutex_lock(&freezer_mutex); + + freezer->state |= CGROUP_FREEZER_ONLINE; + + if (parent && (parent->state & CGROUP_FREEZING)) { + freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN; + atomic_inc(&system_freezing_cnt); + } + + mutex_unlock(&freezer_mutex); + return 0; +} + +/** + * freezer_css_offline - initiate destruction of a freezer css + * @css: css being destroyed + * + * @css is going away. Mark it dead and decrement system_freezing_count if + * it was holding one. + */ +static void freezer_css_offline(struct cgroup_subsys_state *css) +{ + struct freezer *freezer = css_freezer(css); + + mutex_lock(&freezer_mutex); + + if (freezer->state & CGROUP_FREEZING) + atomic_dec(&system_freezing_cnt); + + freezer->state = 0; + + mutex_unlock(&freezer_mutex); +} + +static void freezer_css_free(struct cgroup_subsys_state *css) +{ + kfree(css_freezer(css)); +} + +/* + * Tasks can be migrated into a different freezer anytime regardless of its + * current state. freezer_attach() is responsible for making new tasks + * conform to the current state. + * + * Freezer state changes and task migration are synchronized via + * @freezer->lock. freezer_attach() makes the new tasks conform to the + * current state and all following state changes can see the new tasks. + */ +static void freezer_attach(struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct cgroup_subsys_state *new_css; + + mutex_lock(&freezer_mutex); + + /* + * Make the new tasks conform to the current state of @new_css. + * For simplicity, when migrating any task to a FROZEN cgroup, we + * revert it to FREEZING and let update_if_frozen() determine the + * correct state later. + * + * Tasks in @tset are on @new_css but may not conform to its + * current state before executing the following - !frozen tasks may + * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. 
+ */ + cgroup_taskset_for_each(task, new_css, tset) { + struct freezer *freezer = css_freezer(new_css); + + if (!(freezer->state & CGROUP_FREEZING)) { + __thaw_task(task); + } else { + freeze_task(task); + /* clear FROZEN and propagate upwards */ + while (freezer && (freezer->state & CGROUP_FROZEN)) { + freezer->state &= ~CGROUP_FROZEN; + freezer = parent_freezer(freezer); + } + } + } + + mutex_unlock(&freezer_mutex); +} + +/** + * freezer_fork - cgroup post fork callback + * @task: a task which has just been forked + * + * @task has just been created and should conform to the current state of + * the cgroup_freezer it belongs to. This function may race against + * freezer_attach(). Losing to freezer_attach() means that we don't have + * to do anything as freezer_attach() will put @task into the appropriate + * state. + */ +static void freezer_fork(struct task_struct *task) +{ + struct freezer *freezer; + + /* + * The root cgroup is non-freezable, so we can skip locking the + * freezer. This is safe regardless of race with task migration. + * If we didn't race or won, skipping is obviously the right thing + * to do. If we lost and root is the new cgroup, noop is still the + * right thing to do. + */ + if (task_css_is_root(task, freezer_cgrp_id)) + return; + + mutex_lock(&freezer_mutex); + rcu_read_lock(); + + freezer = task_freezer(task); + if (freezer->state & CGROUP_FREEZING) + freeze_task(task); + + rcu_read_unlock(); + mutex_unlock(&freezer_mutex); +} + +/** + * update_if_frozen - update whether a cgroup finished freezing + * @css: css of interest + * + * Once FREEZING is initiated, transition to FROZEN is lazily updated by + * calling this function. If the current state is FREEZING but not FROZEN, + * this function checks whether all tasks of this cgroup and the descendant + * cgroups finished freezing and, if so, sets FROZEN. + * + * The caller is responsible for grabbing RCU read lock and calling + * update_if_frozen() on all descendants prior to invoking this function. + * + * Task states and freezer state might disagree while tasks are being + * migrated into or out of @css, so we can't verify task states against + * @freezer state here. See freezer_attach() for details. + */ +static void update_if_frozen(struct cgroup_subsys_state *css) +{ + struct freezer *freezer = css_freezer(css); + struct cgroup_subsys_state *pos; + struct css_task_iter it; + struct task_struct *task; + + lockdep_assert_held(&freezer_mutex); + + if (!(freezer->state & CGROUP_FREEZING) || + (freezer->state & CGROUP_FROZEN)) + return; + + /* are all (live) children frozen? */ + rcu_read_lock(); + css_for_each_child(pos, css) { + struct freezer *child = css_freezer(pos); + + if ((child->state & CGROUP_FREEZER_ONLINE) && + !(child->state & CGROUP_FROZEN)) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + + /* are all tasks frozen? */ + css_task_iter_start(css, 0, &it); + + while ((task = css_task_iter_next(&it))) { + if (freezing(task)) { + /* + * freezer_should_skip() indicates that the task + * should be skipped when determining freezing + * completion. Consider it frozen in addition to + * the usual frozen condition. 
+ */ + if (!frozen(task) && !freezer_should_skip(task)) + goto out_iter_end; + } + } + + freezer->state |= CGROUP_FROZEN; +out_iter_end: + css_task_iter_end(&it); +} + +static int freezer_read(struct seq_file *m, void *v) +{ + struct cgroup_subsys_state *css = seq_css(m), *pos; + + mutex_lock(&freezer_mutex); + rcu_read_lock(); + + /* update states bottom-up */ + css_for_each_descendant_post(pos, css) { + if (!css_tryget_online(pos)) + continue; + rcu_read_unlock(); + + update_if_frozen(pos); + + rcu_read_lock(); + css_put(pos); + } + + rcu_read_unlock(); + mutex_unlock(&freezer_mutex); + + seq_puts(m, freezer_state_strs(css_freezer(css)->state)); + seq_putc(m, '\n'); + return 0; +} + +static void freeze_cgroup(struct freezer *freezer) +{ + struct css_task_iter it; + struct task_struct *task; + + css_task_iter_start(&freezer->css, 0, &it); + while ((task = css_task_iter_next(&it))) + freeze_task(task); + css_task_iter_end(&it); +} + +static void unfreeze_cgroup(struct freezer *freezer) +{ + struct css_task_iter it; + struct task_struct *task; + + css_task_iter_start(&freezer->css, 0, &it); + while ((task = css_task_iter_next(&it))) + __thaw_task(task); + css_task_iter_end(&it); +} + +/** + * freezer_apply_state - apply state change to a single cgroup_freezer + * @freezer: freezer to apply state change to + * @freeze: whether to freeze or unfreeze + * @state: CGROUP_FREEZING_* flag to set or clear + * + * Set or clear @state on @cgroup according to @freeze, and perform + * freezing or thawing as necessary. + */ +static void freezer_apply_state(struct freezer *freezer, bool freeze, + unsigned int state) +{ + /* also synchronizes against task migration, see freezer_attach() */ + lockdep_assert_held(&freezer_mutex); + + if (!(freezer->state & CGROUP_FREEZER_ONLINE)) + return; + + if (freeze) { + if (!(freezer->state & CGROUP_FREEZING)) + atomic_inc(&system_freezing_cnt); + freezer->state |= state; + freeze_cgroup(freezer); + } else { + bool was_freezing = freezer->state & CGROUP_FREEZING; + + freezer->state &= ~state; + + if (!(freezer->state & CGROUP_FREEZING)) { + if (was_freezing) + atomic_dec(&system_freezing_cnt); + freezer->state &= ~CGROUP_FROZEN; + unfreeze_cgroup(freezer); + } + } +} + +/** + * freezer_change_state - change the freezing state of a cgroup_freezer + * @freezer: freezer of interest + * @freeze: whether to freeze or thaw + * + * Freeze or thaw @freezer according to @freeze. The operations are + * recursive - all descendants of @freezer will be affected. + */ +static void freezer_change_state(struct freezer *freezer, bool freeze) +{ + struct cgroup_subsys_state *pos; + + /* + * Update all its descendants in pre-order traversal. Each + * descendant will try to inherit its parent's FREEZING state as + * CGROUP_FREEZING_PARENT. 
+ */ + mutex_lock(&freezer_mutex); + rcu_read_lock(); + css_for_each_descendant_pre(pos, &freezer->css) { + struct freezer *pos_f = css_freezer(pos); + struct freezer *parent = parent_freezer(pos_f); + + if (!css_tryget_online(pos)) + continue; + rcu_read_unlock(); + + if (pos_f == freezer) + freezer_apply_state(pos_f, freeze, + CGROUP_FREEZING_SELF); + else + freezer_apply_state(pos_f, + parent->state & CGROUP_FREEZING, + CGROUP_FREEZING_PARENT); + + rcu_read_lock(); + css_put(pos); + } + rcu_read_unlock(); + mutex_unlock(&freezer_mutex); +} + +static ssize_t freezer_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + bool freeze; + + buf = strstrip(buf); + + if (strcmp(buf, freezer_state_strs(0)) == 0) + freeze = false; + else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0) + freeze = true; + else + return -EINVAL; + + freezer_change_state(css_freezer(of_css(of)), freeze); + return nbytes; +} + +static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct freezer *freezer = css_freezer(css); + + return (bool)(freezer->state & CGROUP_FREEZING_SELF); +} + +static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct freezer *freezer = css_freezer(css); + + return (bool)(freezer->state & CGROUP_FREEZING_PARENT); +} + +static struct cftype files[] = { + { + .name = "state", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = freezer_read, + .write = freezer_write, + }, + { + .name = "self_freezing", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = freezer_self_freezing_read, + }, + { + .name = "parent_freezing", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = freezer_parent_freezing_read, + }, + { } /* terminate */ +}; + +struct cgroup_subsys freezer_cgrp_subsys = { + .css_alloc = freezer_css_alloc, + .css_online = freezer_css_online, + .css_offline = freezer_css_offline, + .css_free = freezer_css_free, + .attach = freezer_attach, + .fork = freezer_fork, + .legacy_cftypes = files, +}; diff --git a/kernel/events/core.c b/kernel/events/core.c index 879c3efe11f6fb85edeaea120bddf919ac8fb5a1..6992a7abcd3c7c93bbddabb07186cf7216a932b6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -98,7 +98,7 @@ static void remote_function(void *data) * retry due to any failures in smp_call_function_single(), such as if the * task_cpu() goes offline concurrently. * - * returns @func return value or -ESRCH when the process isn't running + * returns @func return value or -ESRCH or -ENXIO when the process isn't running */ static int task_function_call(struct task_struct *p, remote_function_f func, void *info) @@ -114,7 +114,8 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) for (;;) { ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); - ret = !ret ? 
data.ret : -EAGAIN; + if (!ret) + ret = data.ret; if (ret != -EAGAIN) break; diff --git a/kernel/fork.c b/kernel/fork.c index 0e5b27f80745b7cba1ce7aac6933c66026f415db..ef4295ac552e34616ac6f6b0ca7c7bff7cb59032 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1239,7 +1239,9 @@ static int wait_for_vfork_done(struct task_struct *child, int killed; freezer_do_not_count(); + cgroup_enter_frozen(); killed = wait_for_completion_killable(vfork); + cgroup_leave_frozen(false); freezer_count(); if (killed) { diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ec37563674d62497d1d4d356ab9b547242469f63..60c7be5ff5c8cb5e35d53062bf0438f94cc7a59e 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 92b445f4d62dda4d072a2728233aa548a3e0be81..39d858364333fede1aef4b34177c6b169e4f95ea 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -280,12 +280,16 @@ static bool irq_set_affinity_deactivated(struct irq_data *data, struct irq_desc *desc = irq_data_to_desc(data); /* + * Handle irq chips which can handle affinity only in activated + * state correctly + * * If the interrupt is not yet activated, just store the affinity * mask and do not call the chip driver at all. On activation the * driver has to make sure anyway that the interrupt is in a * useable state so startup works. */ - if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data)) + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || + irqd_is_activated(data) || !irqd_affinity_on_activate(data)) return false; cpumask_copy(desc->irq_common_data.affinity, mask); diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 30cc217b863184989987f2fa76c76c8bc9a082b4..651a4ad6d711f82d8192d07a6b7616ef9337651f 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, unsigned int cpu, bit; struct cpumap *cm; + /* + * Not required in theory, but matrix_find_best_cpu() uses + * for_each_cpu() which ignores the cpumask on UP . + */ + if (cpumask_empty(msk)) + return -EINVAL; + cpu = matrix_find_best_cpu(m, msk); if (cpu == UINT_MAX) return -ENOSPC; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 178327a75e7330e86bfbfda0d8755d5fc48c9068..2161f519d4812f0e0fd179cd7fa5d503e6a200f3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1065,9 +1065,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p) return ret; } #else /* !CONFIG_KPROBES_ON_FTRACE */ -#define prepare_kprobe(p) arch_prepare_kprobe(p) -#define arm_kprobe_ftrace(p) (-ENODEV) -#define disarm_kprobe_ftrace(p) (-ENODEV) +static inline int prepare_kprobe(struct kprobe *p) +{ + return arch_prepare_kprobe(p); +} + +static inline int arm_kprobe_ftrace(struct kprobe *p) +{ + return -ENODEV; +} + +static inline int disarm_kprobe_ftrace(struct kprobe *p) +{ + return -ENODEV; +} #endif /* Arm a kprobe with text_mutex */ @@ -2061,6 +2072,9 @@ static void kill_kprobe(struct kprobe *p) { struct kprobe *kp; + if (WARN_ON_ONCE(kprobe_gone(p))) + return; + p->flags |= KPROBE_FLAG_GONE; if (kprobe_aggrprobe(p)) { /* @@ -2077,6 +2091,14 @@ static void kill_kprobe(struct kprobe *p) * the original probed function (which will be freed soon) any more. 
*/ arch_remove_kprobe(p); + + /* + * The module is going away. We should disarm the kprobe which + * is using ftrace, because ftrace framework is still available at + * MODULE_STATE_GOING notification. + */ + if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) + disarm_kprobe_ftrace(p); } /* Disable one kprobe */ @@ -2236,7 +2258,10 @@ static int kprobes_module_callback(struct notifier_block *nb, mutex_lock(&kprobe_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; - hlist_for_each_entry_rcu(p, head, hlist) + hlist_for_each_entry_rcu(p, head, hlist) { + if (kprobe_gone(p)) + continue; + if (within_module_init((unsigned long)p->addr, mod) || (checkcore && within_module_core((unsigned long)p->addr, mod))) { @@ -2253,6 +2278,7 @@ static int kprobes_module_callback(struct notifier_block *nb, */ kill_kprobe(p); } + } } mutex_unlock(&kprobe_mutex); return NOTIFY_DONE; diff --git a/kernel/kthread.c b/kernel/kthread.c index e3dfad2dbd375e565ffdbdce3d5da5228d258912..4b3644b85760146ac6f6fa698794b1533b331414 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -191,8 +191,15 @@ static void __kthread_parkme(struct kthread *self) if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; + /* + * Thread is going to call schedule(), do not preempt it, + * or the caller of kthread_park() may spend more time in + * wait_task_inactive(). + */ + preempt_disable(); complete(&self->parked); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); } __set_current_state(TASK_RUNNING); } @@ -237,8 +244,14 @@ static int kthread(void *_create) /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; + /* + * Thread is going to call schedule(), do not preempt it, + * or the creator may spend more time in wait_task_inactive(). + */ + preempt_disable(); complete(done); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 6fcc4650f0c489bb51ee97caf8aab44ea819f7e8..53cc3bb7025a5209f086897812d234153c97808f 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -394,7 +394,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) seq_time(m, lt->min); seq_time(m, lt->max); seq_time(m, lt->total); - seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0); + seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0); } static void seq_stats(struct seq_file *m, struct lock_stat_data *data) diff --git a/kernel/module.c b/kernel/module.c index 3efc7a5145753436c7e6ef39623676e100c72596..9dfc374bfeb40fed42a5fa20bbe02061430d9569 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1461,18 +1461,34 @@ struct module_sect_attrs { struct module_sect_attr attrs[0]; }; +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) static ssize_t module_sect_read(struct file *file, struct kobject *kobj, struct bin_attribute *battr, char *buf, loff_t pos, size_t count) { struct module_sect_attr *sattr = container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; if (pos != 0) return -EINVAL; - return sprintf(buf, "0x%px\n", - kallsyms_show_value(file->f_cred) ? 
(void *)sattr->address : NULL); + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? (void *)sattr->address : NULL); + count = min(count, wrote); + memcpy(buf, bounce, count); + + return count; } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) @@ -1521,7 +1537,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info) goto out; sect_attrs->nsections++; sattr->battr.read = module_sect_read; - sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4); + sattr->battr.size = MODULE_SECT_READ_SIZE; sattr->battr.attr.mode = 0400; *(gattr++) = &(sattr++)->battr; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 13aa12967d0517f0a10ba1e4273e6f21922e4974..9e7494f6e1729224ee07a072212dcb0877777513 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2150,6 +2150,9 @@ static int __init console_setup(char *str) char *s, *options, *brl_options = NULL; int idx; + if (str[0] == 0) + return 1; + if (_braille_console_setup(&str, &brl_options)) return 1; diff --git a/kernel/relay.c b/kernel/relay.c index 13c19f39e31e23e3d5f9ba2110699c5c7d079a82..735cb208f023b132077ff53bd6ed9eb9f7d97863 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -197,6 +197,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); + free_percpu(chan->buf); kfree(chan); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 88d38452307877e1b39cb9aad8c35d078b4c4afb..20dcf11e090bb12ea513f5f6a62a891515303ccf 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11403,7 +11403,12 @@ static void kick_ilb(unsigned int flags) { int ilb_cpu; - nohz.next_balance++; + /* + * Increase nohz.next_balance only when if full ilb is triggered but + * not if we only update stats. + */ + if (flags & NOHZ_BALANCE_KICK) + nohz.next_balance = jiffies+1; ilb_cpu = find_new_ilb(); @@ -11716,6 +11721,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, } } + /* + * next_balance will be updated only when there is a need. + * When the CPU is attached to null domain for ex, it will not be + * updated. + */ + if (likely(update_next_balance)) + nohz.next_balance = next_balance; + /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); @@ -11736,14 +11749,6 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, if (has_blocked_load) WRITE_ONCE(nohz.has_blocked, 1); - /* - * next_balance will be updated only when there is a need. - * When the CPU is attached to null domain for ex, it will not be - * updated. 
- */ - if (likely(update_next_balance)) - nohz.next_balance = next_balance; - return ret; } diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index d756a103bffaf4af69bc3150089261df3e34ecdc..3e724560a8b5c9392894fee6b34f9de346b2eb8c 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1334,7 +1334,7 @@ sd_init(struct sched_domain_topology_level *tl, sd_flags = (*tl->sd_flags)(); if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, "wrong sd_flags in topology description\n")) - sd_flags &= ~TOPOLOGY_SD_FLAGS; + sd_flags &= TOPOLOGY_SD_FLAGS; /* Apply detected topology flags */ sd_flags |= dflags; diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 3bee1ae40180796c14e1769307834af6c681153e..2f6e4cbb004c07b58ef2092350079106f776f8bc 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -420,7 +420,6 @@ void schedtune_enqueue_task(struct task_struct *p, int cpu) { struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu); unsigned long irq_flags; - struct schedtune *st; int idx; if (unlikely(!schedtune_initialized)) @@ -432,90 +431,16 @@ void schedtune_enqueue_task(struct task_struct *p, int cpu) * do_exit()::cgroup_exit() and task migration. */ raw_spin_lock_irqsave(&bg->lock, irq_flags); - rcu_read_lock(); - st = task_schedtune(p); - idx = st->idx; + idx = p->stune_idx; schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK); - rcu_read_unlock(); raw_spin_unlock_irqrestore(&bg->lock, irq_flags); } int schedtune_can_attach(struct cgroup_taskset *tset) { - struct task_struct *task; - struct cgroup_subsys_state *css; - struct boost_groups *bg; - struct rq_flags rq_flags; - unsigned int cpu; - struct rq *rq; - int src_bg; /* Source boost group index */ - int dst_bg; /* Destination boost group index */ - int tasks; - u64 now; - - if (unlikely(!schedtune_initialized)) - return 0; - - - cgroup_taskset_for_each(task, css, tset) { - - /* - * Lock the CPU's RQ the task is enqueued to avoid race - * conditions with migration code while the task is being - * accounted - */ - rq = task_rq_lock(task, &rq_flags); - - if (!task->on_rq) { - task_rq_unlock(rq, task, &rq_flags); - continue; - } - - /* - * Boost group accouting is protected by a per-cpu lock and requires - * interrupt to be disabled to avoid race conditions on... - */ - cpu = cpu_of(rq); - bg = &per_cpu(cpu_boost_groups, cpu); - raw_spin_lock(&bg->lock); - - dst_bg = css_st(css)->idx; - src_bg = task_schedtune(task)->idx; - - /* - * Current task is not changing boostgroup, which can - * happen when the new hierarchy is in use. - */ - if (unlikely(dst_bg == src_bg)) { - raw_spin_unlock(&bg->lock); - task_rq_unlock(rq, task, &rq_flags); - continue; - } - - /* - * This is the case of a RUNNABLE task which is switching its - * current boost group. 
- */ - - /* Move task from src to dst boost group */ - tasks = bg->group[src_bg].tasks - 1; - bg->group[src_bg].tasks = max(0, tasks); - bg->group[dst_bg].tasks += 1; - - /* Update boost hold start for this group */ - now = sched_clock_cpu(cpu); - bg->group[dst_bg].ts = now; - - /* Force boost group re-evaluation at next boost check */ - bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS; - - raw_spin_unlock(&bg->lock); - task_rq_unlock(rq, task, &rq_flags); - } - return 0; } @@ -580,7 +505,6 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu) { struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu); unsigned long irq_flags; - struct schedtune *st; int idx; if (unlikely(!schedtune_initialized)) @@ -591,14 +515,11 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu) * interrupt to be disabled to avoid race conditions on... */ raw_spin_lock_irqsave(&bg->lock, irq_flags); - rcu_read_lock(); - st = task_schedtune(p); - idx = st->idx; + idx = p->stune_idx; schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK); - rcu_read_unlock(); raw_spin_unlock_irqrestore(&bg->lock, irq_flags); } @@ -678,11 +599,19 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft) return st->boost; } -#ifdef CONFIG_SCHED_WALT static void schedtune_attach(struct cgroup_taskset *tset) { struct task_struct *task; struct cgroup_subsys_state *css; + struct boost_groups *bg; + struct rq_flags rq_flags; + unsigned int cpu; + struct rq *rq; + int src_idx; /* Source boost group index */ + int dst_idx; /* Destination boost group index */ + int tasks; + u64 now; +#ifdef CONFIG_SCHED_WALT struct schedtune *st; bool colocate; @@ -693,13 +622,68 @@ static void schedtune_attach(struct cgroup_taskset *tset) cgroup_taskset_for_each(task, css, tset) sync_cgroup_colocation(task, colocate); -} -#else -static void schedtune_attach(struct cgroup_taskset *tset) -{ -} #endif + cgroup_taskset_for_each(task, css, tset) { + /* + * Lock the CPU's RQ the task is enqueued to avoid race + * conditions with migration code while the task is being + * accounted + */ + rq = task_rq_lock(task, &rq_flags); + + /* + * Boost group accouting is protected by a per-cpu lock and + * requires interrupt to be disabled to avoid race conditions + * on... + */ + cpu = cpu_of(rq); + bg = &per_cpu(cpu_boost_groups, cpu); + raw_spin_lock(&bg->lock); + + dst_idx = task_schedtune(task)->idx; + src_idx = task->stune_idx; + + /* + * Current task is not changing boostgroup, which can + * happen when the new hierarchy is in use. + */ + if (unlikely(dst_idx == src_idx)) { + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); + continue; + } + + task->stune_idx = dst_idx; + + if (!task_on_rq_queued(task)) { + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); + continue; + } + + /* + * This is the case of a RUNNABLE task which is switching its + * current boost group. 
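+ * Its contribution to the per-CPU boost group task counts has to be + * moved from the source group to the destination group.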
+ */ + + /* Move task from src to dst boost group */ + tasks = bg->group[src_idx].tasks - 1; + bg->group[src_idx].tasks = max(0, tasks); + bg->group[dst_idx].tasks += 1; + + /* Update boost hold start for this group */ + now = sched_clock_cpu(cpu); + bg->group[dst_idx].ts = now; + + /* Force boost group re-evaluation at next boost check */ + bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS; + + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); + } +} + static int boost_write(struct cgroup_subsys_state *css, struct cftype *cft, s64 boost) @@ -832,8 +816,8 @@ struct cgroup_subsys schedtune_cgrp_subsys = { .css_alloc = schedtune_css_alloc, .css_free = schedtune_css_free, .attach = schedtune_attach, - .can_attach = schedtune_can_attach, - .cancel_attach = schedtune_cancel_attach, + .can_attach = schedtune_can_attach, + .cancel_attach = schedtune_cancel_attach, .legacy_cftypes = files, .early_init = 1, }; diff --git a/kernel/signal.c b/kernel/signal.c index fcf68cde9e7a722c3704434b460ff7cd196f93e6..89c41b76f975af041000af9d427bc3ff2230f0ed 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -45,6 +45,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -153,9 +154,10 @@ static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked) static bool recalc_sigpending_tsk(struct task_struct *t) { - if ((t->jobctl & JOBCTL_PENDING_MASK) || + if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) || PENDING(&t->pending, &t->blocked) || - PENDING(&t->signal->shared_pending, &t->blocked)) { + PENDING(&t->signal->shared_pending, &t->blocked) || + cgroup_task_frozen(t)) { set_tsk_thread_flag(t, TIF_SIGPENDING); return true; } @@ -2138,8 +2140,10 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) */ preempt_disable(); read_unlock(&tasklist_lock); + cgroup_enter_frozen(); preempt_enable_no_resched(); freezable_schedule(); + cgroup_leave_frozen(true); } else { /* * By the time we got the lock, our tracer went away. @@ -2317,6 +2321,7 @@ static bool do_signal_stop(int signr) } /* Now we don't run again until woken by SIGCONT or SIGKILL */ + cgroup_enter_frozen(); freezable_schedule(); return true; } else { @@ -2363,6 +2368,43 @@ static void do_jobctl_trap(void) } } +/** + * do_freezer_trap - handle the freezer jobctl trap + * + * Puts the task into frozen state, if only the task is not about to quit. + * In this case it drops JOBCTL_TRAP_FREEZE. + * + * CONTEXT: + * Must be called with @current->sighand->siglock held, + * which is always released before returning. + */ +static void do_freezer_trap(void) + __releases(¤t->sighand->siglock) +{ + /* + * If there are other trap bits pending except JOBCTL_TRAP_FREEZE, + * let's make another loop to give it a chance to be handled. + * In any case, we'll return back. + */ + if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != + JOBCTL_TRAP_FREEZE) { + spin_unlock_irq(¤t->sighand->siglock); + return; + } + + /* + * Now we're sure that there is no pending fatal signal and no + * pending traps. Clear TIF_SIGPENDING to not get out of schedule() + * immediately (if there is a non-fatal signal pending), and + * put the task into sleep. 
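+ * cgroup_enter_frozen() accounts the task as frozen in its cgroup, + * and freezable_schedule() puts it to sleep until it is woken up.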
+ */ + __set_current_state(TASK_INTERRUPTIBLE); + clear_thread_flag(TIF_SIGPENDING); + spin_unlock_irq(&current->sighand->siglock); + cgroup_enter_frozen(); + freezable_schedule(); +} + static int ptrace_signal(int signr, siginfo_t *info) { /* @@ -2475,6 +2517,10 @@ bool get_signal(struct ksignal *ksig) trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, &sighand->action[SIGKILL - 1]); recalc_sigpending(); + current->jobctl &= ~JOBCTL_TRAP_FREEZE; + spin_unlock_irq(&sighand->siglock); + if (unlikely(cgroup_task_frozen(current))) + cgroup_leave_frozen(true); goto fatal; } @@ -2485,9 +2531,24 @@ bool get_signal(struct ksignal *ksig) do_signal_stop(0)) goto relock; - if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { - do_jobctl_trap(); + if (unlikely(current->jobctl & + (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { + if (current->jobctl & JOBCTL_TRAP_MASK) { + do_jobctl_trap(); + spin_unlock_irq(&sighand->siglock); + } else if (current->jobctl & JOBCTL_TRAP_FREEZE) + do_freezer_trap(); + + goto relock; + } + + /* + * If the task is leaving the frozen state, let's update + * cgroup counters and reset the frozen bit. + */ + if (unlikely(cgroup_task_frozen(current))) { spin_unlock_irq(&sighand->siglock); + cgroup_leave_frozen(true); goto relock; } @@ -2581,8 +2642,8 @@ bool get_signal(struct ksignal *ksig) continue; } - fatal: spin_unlock_irq(&sighand->siglock); + fatal: /* * Anything else is fatal, maybe with a core dump. diff --git a/kernel/sys.c b/kernel/sys.c index 0a1cdee858decb8cacae7133b6f6c5dd2a34a7c1..db88cc3ac9e36390bf0aae7f40cffed580d1ab23 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1283,11 +1283,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) { - struct oldold_utsname tmp = {}; + struct oldold_utsname tmp; if (!name) return -EFAULT; + memset(&tmp, 0, sizeof(tmp)); + down_read(&uts_sem); memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 28d8ae8ba7225ec7697218bf9fa8e62aef6f54fd..cb91395ac46171b4d1fa0f8fbaae23e3e4936c76 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1271,6 +1271,15 @@ void tick_irq_enter(void) #ifdef CONFIG_HIGH_RES_TIMERS static void (*wake_callback)(void); +void register_tick_sched_wakeup_callback(void (*cb)(void)) +{ + if (!wake_callback) + wake_callback = cb; + else + pr_warn("tick-sched wake cb already exists; skipping.\n"); +} +EXPORT_SYMBOL_GPL(register_tick_sched_wakeup_callback); + /* * We rearm the timer until we get disabled by the idle code. * Called with interrupts disabled.
@@ -1414,15 +1423,6 @@ int tick_check_oneshot_change(int allow_nohz) return 0; } -void register_tick_sched_wakeup_callback(void (*cb)(void)) -{ - if (!wake_callback) - wake_callback = cb; - else - pr_warn("tick-sched wake cb already exists; skipping.\n"); -} -EXPORT_SYMBOL_GPL(register_tick_sched_wakeup_callback); - ktime_t *get_next_event_cpu(unsigned int cpu) { return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 81ee5b83c92007702b59e374aa6296e8190f5dc7..aa798afcb08904b2366cfa6046f2ab433fbcb3c0 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -150,6 +150,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) { tk->offs_boot = ktime_add(tk->offs_boot, delta); + /* + * Timespec representation for VDSO update to avoid 64bit division + * on every update. + */ + tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot); } /* @@ -1004,9 +1009,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) return -EOVERFLOW; tmp *= mult; - rem *= mult; - do_div(rem, div); + rem = div64_u64(rem * mult, div); *base = tmp + rem; return 0; } diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 4ff60e33d2d00a5b67dba695b87f9c7d4a1a846b..dd4f75d98b6adc9a01b9137e3b26f585cb05a45d 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1698,6 +1699,13 @@ void update_process_times(int user_tick) scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(p); + + /* The current CPU might make use of net randoms without receiving IRQs + * to renew them often enough. Let's update the net_rand_state from a + * non-constant value that's not affine to the number of calls to make + * sure it's updated when there's some activity (we don't care in idle). 
+ */ + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); } /** diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c index 565ab06917a78761e00fd0a948f4ead65a1c643d..15882e4b5c83bb309ef33a87496fea799f122067 100644 --- a/kernel/time/vsyscall.c +++ b/kernel/time/vsyscall.c @@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata, struct timekeeper *tk) { struct vdso_timestamp *vdso_ts; - u64 nsec; + u64 nsec, sec; vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; @@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata, } vdso_ts->nsec = nsec; - /* CLOCK_MONOTONIC_RAW */ - vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; - vdso_ts->sec = tk->raw_sec; - vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + /* Copy MONOTONIC time for BOOTTIME */ + sec = vdso_ts->sec; + /* Add the boot offset */ + sec += tk->monotonic_to_boot.tv_sec; + nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift; /* CLOCK_BOOTTIME */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; - vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - nsec = tk->tkr_mono.xtime_nsec; - nsec += ((u64)(tk->wall_to_monotonic.tv_nsec + - ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift); + vdso_ts->sec = sec; + while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); vdso_ts->sec++; } vdso_ts->nsec = nsec; + /* CLOCK_MONOTONIC_RAW */ + vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; + vdso_ts->sec = tk->raw_sec; + vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + /* CLOCK_TAI */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7a4ca2deb39bc1d4e2576bbbb25935e903b28e66..1442f6152abc26aa90d4e0fde6ddc495b49a1f2c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -529,6 +529,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (!dir) goto err; + /* + * As blktrace relies on debugfs for its interface the debugfs directory + * is required, contrary to the usual mantra of not checking for debugfs + * files or directories. 
+ */ + if (IS_ERR_OR_NULL(dir)) { + pr_warn("debugfs_dir not present for %s so skipping\n", + buts->name); + ret = -ENOENT; + goto err; + } + bt->dev = dev; atomic_set(&bt->dropped, 0); INIT_LIST_HEAD(&bt->running_list); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0441265d518c78f196bafbad14dd55f06fadd3b0..3b8422a88b7e033d05056c5dc450611c38a9a3da 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5666,8 +5666,11 @@ static int referenced_filters(struct dyn_ftrace *rec) int cnt = 0; for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { - if (ops_references_rec(ops, rec)) - cnt++; + if (ops_references_rec(ops, rec)) { + cnt++; + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + rec->flags |= FTRACE_FL_REGS; + } } return cnt; @@ -5844,8 +5847,8 @@ void ftrace_module_enable(struct module *mod) if (ftrace_start_up) cnt += referenced_filters(rec); - /* This clears FTRACE_FL_DISABLED */ - rec->flags = cnt; + rec->flags &= ~FTRACE_FL_DISABLED; + rec->flags += cnt; if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(rec, 1); @@ -6369,16 +6372,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, { int bit; - if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) - return; - bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); if (bit < 0) return; preempt_disable_notrace(); - op->func(ip, parent_ip, op, regs); + if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) + op->func(ip, parent_ip, op, regs); preempt_enable_notrace(); trace_clear_recursion(bit); @@ -6449,12 +6450,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } else { unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9a669358fe354785484953134cfc85b1a258d742..13eeeb9d7c4c53b701b6f92019cde157a724c18d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3043,6 +3043,9 @@ int trace_array_printk(struct trace_array *tr, if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; + if (!tr) + return -ENOENT; + va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); @@ -8537,7 +8540,7 @@ __init static int tracer_alloc_buffers(void) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ - if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) + if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 06bb2fd9a56c596c00c4c5e1a93c081e10dd8bc8..a97aad105d3673b234fe9faad531ba758cc41112 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry, F_STRUCT( __field( int, size ) - __dynamic_array(unsigned long, caller ) + __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) ), F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n" diff --git a/kernel/trace/trace_events.c 
b/kernel/trace/trace_events.c index 4337193ef156388b374c3d9fc656ca9ceebae894..78204c2176352bf7481b67987617218e2371865f 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -534,12 +534,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork, tr, INT_MIN); - register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit, + register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit, tr, INT_MAX); } else { unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork, tr); - unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit, + unregister_trace_sched_process_free(event_filter_pid_sched_process_exit, tr); } } @@ -800,6 +800,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) char *event = NULL, *sub = NULL, *match; int ret; + if (!tr) + return -ENOENT; /* * The buf format can be : * *: means any event by that name. diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index b949c3917c679c72f275ef2fb24157ddcfe36f77..9be3d1d1fcb47f466e4506b5fdebaed1575c2a1a 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -451,8 +451,10 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, switch (*next) { case '(': /* #2 */ - if (top - op_stack > nr_parens) - return ERR_PTR(-EINVAL); + if (top - op_stack > nr_parens) { + ret = -EINVAL; + goto out_free; + } *(++top) = invert; continue; case '!': /* #3 */ diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 0d98c7738066d86654c05f51e413880094b125d9..11d3cd205de93660242c928b8c5d79be053ff61d 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -4225,7 +4225,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data) s = kstrdup(field_str, GFP_KERNEL); if (!s) { - kfree(hist_data->attrs->var_defs.name[n_vars]); ret = -ENOMEM; goto free; } diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 8030e24dbf1481bed0452d5cf63114917fbf7ba9..568918fae8d41f6c7679e5965b192eb14c135ead 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -270,6 +270,7 @@ static bool disable_migrate; static void move_to_next_cpu(void) { struct cpumask *current_mask = &save_cpumask; + struct trace_array *tr = hwlat_trace; int next_cpu; if (disable_migrate) @@ -283,7 +284,7 @@ static void move_to_next_cpu(void) goto disable; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); next_cpu = cpumask_next(smp_processor_id(), current_mask); put_online_cpus(); @@ -360,7 +361,7 @@ static int start_kthread(struct trace_array *tr) /* Just pick the first CPU on first iteration */ current_mask = &save_cpumask; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); put_online_cpus(); next_cpu = cpumask_first(current_mask); diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c index 71f553cceb3c1ee07bafe04b419797e59793f8b7..0e373cb0106bbf41f84ea242188464f65aeddb4c 100644 --- a/kernel/trace/trace_preemptirq.c +++ b/kernel/trace/trace_preemptirq.c @@ -59,14 +59,14 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller); __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { + 
lockdep_hardirqs_off(CALLER_ADDR0); + if (!this_cpu_read(tracing_irq_cpu)) { this_cpu_write(tracing_irq_cpu, 1); tracer_hardirqs_off(CALLER_ADDR0, caller_addr); if (!in_nmi()) trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr); } - - lockdep_hardirqs_off(CALLER_ADDR0); } EXPORT_SYMBOL(trace_hardirqs_off_caller); #endif /* CONFIG_TRACE_IRQFLAGS */ diff --git a/kernel/umh.c b/kernel/umh.c index 52a9084f85419b8127aee4b1ef2c6f380b3fa870..16653319c8ce8790fd862bc526f9a80547ce1ebb 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -72,6 +73,14 @@ static int call_usermodehelper_exec_async(void *data) flush_signal_handlers(current, 1); spin_unlock_irq(&current->sighand->siglock); + /* + * Initial kernel threads share their FS with init, in order to + * get the init root directory. But we've now created a new + * thread that is going to execve a user process and has its own + * 'struct fs_struct'. Reset umask to the default. + */ + current->fs->umask = 0022; + /* * Our parent (unbound workqueue) runs with elevated scheduling * priority. Avoid propagating that into the userspace child. diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index d27e1d51112c262926d8f63dbc1bbd50cfcf7cad..7366a7614898e187eb3512a295c67e227e492738 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -85,22 +85,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = { { _DPRINTK_FLAGS_NONE, '_' }, }; +struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; }; + /* format a string into buf[] which describes the _ddebug's flags */ -static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, - size_t maxlen) +static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb) { - char *p = buf; + char *p = fb->buf; int i; - BUG_ON(maxlen < 6); for (i = 0; i < ARRAY_SIZE(opt_array); ++i) - if (dp->flags & opt_array[i].flag) + if (flags & opt_array[i].flag) *p++ = opt_array[i].opt_char; - if (p == buf) + if (p == fb->buf) *p++ = '_'; *p = '\0'; - return buf; + return fb->buf; } #define vpr_info(fmt, ...)
\ @@ -142,7 +142,7 @@ static int ddebug_change(const struct ddebug_query *query, struct ddebug_table *dt; unsigned int newflags; unsigned int nfound = 0; - char flagbuf[10]; + struct flagsbuf fbuf; /* search for matching ddebugs */ mutex_lock(&ddebug_lock); @@ -199,8 +199,7 @@ static int ddebug_change(const struct ddebug_query *query, vpr_info("changed %s:%d [%s]%s =%s\n", trim_prefix(dp->filename), dp->lineno, dt->mod_name, dp->function, - ddebug_describe_flags(dp, flagbuf, - sizeof(flagbuf))); + ddebug_describe_flags(dp->flags, &fbuf)); } } mutex_unlock(&ddebug_lock); @@ -779,7 +778,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) { struct ddebug_iter *iter = m->private; struct _ddebug *dp = p; - char flagsbuf[10]; + struct flagsbuf flags; vpr_info("called m=%p p=%p\n", m, p); @@ -792,7 +791,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) seq_printf(m, "%s:%u [%s]%s =%s \"", trim_prefix(dp->filename), dp->lineno, iter->table->mod_name, dp->function, - ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); + ddebug_describe_flags(dp->flags, &flags)); seq_escape(m, dp->format, "\t\r\n\""); seq_puts(m, "\"\n"); diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c index 532f0ff89a962d389a09efabd37ef81a1c5d8347..0e2deac97da0dc251ff22cb91ea22947750318a5 100644 --- a/lib/fonts/font_10x18.c +++ b/lib/fonts/font_10x18.c @@ -8,8 +8,8 @@ #define FONTDATAMAX 9216 -static const unsigned char fontdata_10x18[FONTDATAMAX] = { - +static struct font_data fontdata_10x18 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ @@ -5129,8 +5129,7 @@ static const unsigned char fontdata_10x18[FONTDATAMAX] = { 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ - -}; +} }; const struct font_desc font_10x18 = { @@ -5138,7 +5137,7 @@ const struct font_desc font_10x18 = { .name = "10x18", .width = 10, .height = 18, - .data = fontdata_10x18, + .data = fontdata_10x18.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c index 09b2cc03435b938be9f73264d6937cf944149f54..87da8acd07db0c2700ca311820ff1f244ec72bde 100644 --- a/lib/fonts/font_6x10.c +++ b/lib/fonts/font_6x10.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -static const unsigned char fontdata_6x10[] = { +#define FONTDATAMAX 2560 +static struct font_data fontdata_6x10 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -3074,14 +3076,13 @@ static const unsigned char fontdata_6x10[] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_6x10 = { .idx = FONT6x10_IDX, .name = "6x10", .width = 6, .height = 10, - .data = fontdata_6x10, + .data = fontdata_6x10.data, .pref = 0, }; diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c index d7136c33f1f018b854a887c58d2da54e7496d27b..5e975dfa10a53a3dd257738f8cc5c1340dd0cbe6 100644 --- a/lib/fonts/font_6x11.c +++ b/lib/fonts/font_6x11.c @@ -9,8 +9,8 @@ #define FONTDATAMAX (11*256) -static const unsigned char fontdata_6x11[FONTDATAMAX] = { - +static struct font_data fontdata_6x11 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -3338,8 +3338,7 @@ static const unsigned char fontdata_6x11[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_6x11 = { @@ -3347,7 +3346,7 @@ const struct font_desc font_vga_6x11 = { .name = 
"ProFont6x11", .width = 6, .height = 11, - .data = fontdata_6x11, + .data = fontdata_6x11.data, /* Try avoiding this font if possible unless on MAC */ .pref = -2000, }; diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c index 89752d0b23e8b04c0ec0b6c55887de1fb90464b6..86d298f38505886cc36c4af28ba0107b1dc7a1de 100644 --- a/lib/fonts/font_7x14.c +++ b/lib/fonts/font_7x14.c @@ -8,8 +8,8 @@ #define FONTDATAMAX 3584 -static const unsigned char fontdata_7x14[FONTDATAMAX] = { - +static struct font_data fontdata_7x14 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ @@ -4105,8 +4105,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = { 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ - -}; +} }; const struct font_desc font_7x14 = { @@ -4114,6 +4113,6 @@ const struct font_desc font_7x14 = { .name = "7x14", .width = 7, .height = 14, - .data = fontdata_7x14, + .data = fontdata_7x14.data, .pref = 0, }; diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c index b7ab1f5fbdb8a523b37b7e9fc23e0a7534adabe5..37cedd36ca5ef236d544eb70e09ead940de97e5f 100644 --- a/lib/fonts/font_8x16.c +++ b/lib/fonts/font_8x16.c @@ -10,8 +10,8 @@ #define FONTDATAMAX 4096 -static const unsigned char fontdata_8x16[FONTDATAMAX] = { - +static struct font_data fontdata_8x16 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -4619,8 +4619,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_8x16 = { @@ -4628,7 +4627,7 @@ const struct font_desc font_vga_8x16 = { .name = "VGA8x16", .width = 8, .height = 16, - .data = fontdata_8x16, + .data = fontdata_8x16.data, .pref = 0, }; EXPORT_SYMBOL(font_vga_8x16); diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c index 2328ebc8bab5d7a4cab1f7d600f48200b11c9712..8ab695538395df9f7c573a8d62aaa323b7bf35e4 100644 --- a/lib/fonts/font_8x8.c +++ b/lib/fonts/font_8x8.c @@ -9,8 +9,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_8x8[FONTDATAMAX] = { - +static struct font_data fontdata_8x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -2570,8 +2570,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_8x8 = { @@ -2579,6 +2578,6 @@ const struct font_desc font_vga_8x8 = { .name = "VGA8x8", .width = 8, .height = 8, - .data = fontdata_8x8, + .data = fontdata_8x8.data, .pref = 0, }; diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c index 0ff0e85d4481ba93a24b4c727b6756c5694e210b..069b3e80c43449270f0937e60fe3c5dce1fdbdbd 100644 --- a/lib/fonts/font_acorn_8x8.c +++ b/lib/fonts/font_acorn_8x8.c @@ -3,7 +3,10 @@ #include -static const unsigned char acorndata_8x8[] = { +#define FONTDATAMAX 2048 + +static struct font_data acorndata_8x8 = { +{ 0, 0, FONTDATAMAX, 0 }, { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ @@ -260,14 +263,14 @@ static const unsigned char acorndata_8x8[] = { /* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00, /* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -}; +} }; const struct font_desc font_acorn_8x8 = { .idx = ACORN8x8_IDX, .name = 
"Acorn8x8", .width = 8, .height = 8, - .data = acorndata_8x8, + .data = acorndata_8x8.data, #ifdef CONFIG_ARCH_ACORN .pref = 20, #else diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c index 838caa1cfef70d1eec3ee3ae9ecdb2c24a933add..1449876c6a27027b7a3cb1af2c4bb2cbc1296917 100644 --- a/lib/fonts/font_mini_4x6.c +++ b/lib/fonts/font_mini_4x6.c @@ -43,8 +43,8 @@ __END__; #define FONTDATAMAX 1536 -static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { - +static struct font_data fontdata_mini_4x6 = { + { 0, 0, FONTDATAMAX, 0 }, { /*{*/ /* Char 0: ' ' */ 0xee, /*= [*** ] */ @@ -2145,14 +2145,14 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ -}; +} }; const struct font_desc font_mini_4x6 = { .idx = MINI4x6_IDX, .name = "MINI4x6", .width = 4, .height = 6, - .data = fontdata_mini_4x6, + .data = fontdata_mini_4x6.data, .pref = 3, }; diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c index b15d3c342c5bbc9682b12ccb081b6b9c66b83cdf..32d65551e7ed29be9265cd73e4f48aee5d816934 100644 --- a/lib/fonts/font_pearl_8x8.c +++ b/lib/fonts/font_pearl_8x8.c @@ -14,8 +14,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { - +static struct font_data fontdata_pearl8x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -2575,14 +2575,13 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_pearl_8x8 = { .idx = PEARL8x8_IDX, .name = "PEARL8x8", .width = 8, .height = 8, - .data = fontdata_pearl8x8, + .data = fontdata_pearl8x8.data, .pref = 2, }; diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c index 955d6eee3959d7b295047952087b9f357754ebb5..641a6b4dca424fb6843126d9d2cc6d1585d04d79 100644 --- a/lib/fonts/font_sun12x22.c +++ b/lib/fonts/font_sun12x22.c @@ -3,8 +3,8 @@ #define FONTDATAMAX 11264 -static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { - +static struct font_data fontdata_sun12x22 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ @@ -6148,8 +6148,7 @@ static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ - -}; +} }; const struct font_desc font_sun_12x22 = { @@ -6157,7 +6156,7 @@ const struct font_desc font_sun_12x22 = { .name = "SUN12x22", .width = 12, .height = 22, - .data = fontdata_sun12x22, + .data = fontdata_sun12x22.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c index 03d71e53954abddea323add037b8d9d28d8e232a..193fe6d988e08e95d1b172d34651ee920f4674a5 100644 --- a/lib/fonts/font_sun8x16.c +++ b/lib/fonts/font_sun8x16.c @@ -3,7 +3,8 @@ #define FONTDATAMAX 4096 -static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { +static struct font_data fontdata_sun8x16 = { +{ 0, 0, FONTDATAMAX, 0 }, { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, @@ -260,14 +261,14 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { /* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 
0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -}; +} }; const struct font_desc font_sun_8x16 = { .idx = SUN8x16_IDX, .name = "SUN8x16", .width = 8, .height = 16, - .data = fontdata_sun8x16, + .data = fontdata_sun8x16.data, #ifdef __sparc__ .pref = 10, #else diff --git a/lib/random32.c b/lib/random32.c index 4aaa76404d561b86609bbd1c6eb3ca4620ca81bf..b6f3325e38e4347923bc4db34d4acaeb90773203 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; /** * prandom_u32_state - seeded pseudo-random number generator. diff --git a/lib/string.c b/lib/string.c index 72125fd5b4a64dda92169faec990a2a2d4afbc08..edf4907ec946fe3c66a8f936bd8702521f0a069c 100644 --- a/lib/string.c +++ b/lib/string.c @@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count) EXPORT_SYMBOL(strscpy); #endif +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is a pointer + * to the new %NUL-terminating character in @dest. (For strcpy, the return + * value is a pointer to the start of @dest). This interface is considered + * unsafe as it doesn't perform bounds checking of the inputs. As such it's + * not recommended for usage. Instead, its definition is provided in case + * the compiler lowers other libcalls to stpcpy. + */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/lib/test_kmod.c b/lib/test_kmod.c index 9cf77628fc913e0e63ae29e843c34f6a685e9f3d..87a0cc750ea2363d15f85d10efff14f68d209bbd 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev, break; case TEST_KMOD_FS_TYPE: kfree_const(config->test_fs); - config->test_driver = NULL; + config->test_fs = NULL; copied = config_copy_test_fs(config, test_str, strlen(test_str)); break; diff --git a/mm/filemap.c b/mm/filemap.c index cc85d52cf6ff19160f44fabbd54340b74ea4da95..774d8bee2528407677a521f0a80776c872f62de0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3039,6 +3039,14 @@ static struct page *do_read_cache_page(struct address_space *mapping, unlock_page(page); goto out; } + + /* + * A previous I/O error may have been due to temporary + * failures. + * Clear page error before actual read, PG_error will be + * set again if read page fails. 
+ */ + ClearPageError(page); goto filler; out: diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 325f3c852088fd3d67c77f8110fbd4703144075a..b48a74f3df6216f20d83eb5d1db90310fa2e0df5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2143,7 +2143,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, put_page(page); add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); return; - } else if (is_huge_zero_pmd(*pmd)) { + } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) { /* * FIXME: Do we want to invalidate secondary mmu by calling * mmu_notifier_invalidate_range() see comments below inside @@ -2231,27 +2231,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - atomic_inc(&page[i]._mapcount); - pte_unmap(pte); - } - - /* - * Set PG_double_map before dropping compound_mapcount to avoid - * false-negative page_mapped(). - */ - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { - for (i = 0; i < HPAGE_PMD_NR; i++) + if (!pmd_migration) atomic_inc(&page[i]._mapcount); + pte_unmap(pte); } - if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { - /* Last compound_mapcount is gone. */ - __dec_node_page_state(page, NR_ANON_THPS); - if (TestClearPageDoubleMap(page)) { - /* No need in mapcount reference anymore */ + if (!pmd_migration) { + /* + * Set PG_double_map before dropping compound_mapcount to avoid + * false-negative page_mapped(). + */ + if (compound_mapcount(page) > 1 && + !TestSetPageDoubleMap(page)) { for (i = 0; i < HPAGE_PMD_NR; i++) - atomic_dec(&page[i]._mapcount); + atomic_inc(&page[i]._mapcount); + } + + lock_page_memcg(page); + if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { + /* Last compound_mapcount is gone. */ + __dec_lruvec_page_state(page, NR_ANON_THPS); + if (TestClearPageDoubleMap(page)) { + /* No need in mapcount reference anymore */ + for (i = 0; i < HPAGE_PMD_NR; i++) + atomic_dec(&page[i]._mapcount); + } } + unlock_page_memcg(page); } smp_wmb(); /* make pte visible before pmd */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c3adb9d4ccfc829f51f4c922936065913463f4f2..df155e1419f8e21a2717820682cc4ef0d75af1bf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2918,6 +2918,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, + void *buffer, size_t *length, + loff_t *ppos, unsigned long *out) +{ + struct ctl_table dup_table; + + /* + * In order to avoid races with __do_proc_doulongvec_minmax(), we + * can duplicate the @table and alter the duplicate of it. 
+ */ + dup_table = *table; + dup_table.data = out; + + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); +} + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -2929,9 +2945,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (!hugepages_supported()) return -EOPNOTSUPP; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -2975,9 +2990,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, if (write && hstate_is_gigantic(h)) return -EINVAL; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -4652,25 +4666,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - unsigned long check_addr = *start; + unsigned long a_start, a_end; if (!(vma->vm_flags & VM_MAYSHARE)) return; - for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { - unsigned long a_start = check_addr & PUD_MASK; - unsigned long a_end = a_start + PUD_SIZE; + /* Extend the range to be PUD aligned for a worst case scenario */ + a_start = ALIGN_DOWN(*start, PUD_SIZE); + a_end = ALIGN(*end, PUD_SIZE); - /* - * If sharing is possible, adjust start/end if necessary. - */ - if (range_in_vma(vma, a_start, a_end)) { - if (a_start < *start) - *start = a_start; - if (a_end > *end) - *end = a_end; - } - } + /* + * Intersect the range with the vma range, since pmd sharing won't be + * across vma after all + */ + *start = max(vma->vm_start, a_start); + *end = min(vma->vm_end, a_end); } /* diff --git a/mm/khugepaged.c b/mm/khugepaged.c index d11af6114a925f6b47da35ed46c98d6fac545bfe..aedfaf0801556d50e93265e3824b83361cd86cd2 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -53,6 +53,9 @@ enum scan_result { #define CREATE_TRACE_POINTS #include +static struct task_struct *khugepaged_thread __read_mostly; +static DEFINE_MUTEX(khugepaged_mutex); + /* default scan 8*512 pte (or vmas) every 30 second */ static unsigned int khugepaged_pages_to_scan __read_mostly; static unsigned int khugepaged_pages_collapsed; @@ -394,7 +397,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, static inline int khugepaged_test_exit(struct mm_struct *mm) { - return atomic_read(&mm->mm_users) == 0; + return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm); } static bool hugepage_vma_check(struct vm_area_struct *vma, @@ -427,7 +430,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; @@ -820,6 +823,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait) static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) { + /* + * If the hpage allocated earlier was briefly exposed in page cache + * before collapse_file() failed, it is possible that racing lookups + * have not yet completed, and would then be 
unpleasantly surprised by + * finding the hpage reused for the same mapping at a different offset. + * Just release the previous allocation if there is any danger of that. + */ + if (*hpage && page_count(*hpage) > 1) { + put_page(*hpage); + *hpage = NULL; + } + if (!*hpage) *hpage = khugepaged_alloc_hugepage(wait); @@ -1007,9 +1022,6 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. */ down_write(&mm->mmap_sem); - result = SCAN_ANY_PROCESS; - if (!mmget_still_valid(mm)) - goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; @@ -1256,6 +1268,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) { struct vm_area_struct *vma; + struct mm_struct *mm; unsigned long addr; pmd_t *pmd, _pmd; @@ -1269,7 +1282,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) continue; if (vma->vm_end < addr + HPAGE_PMD_SIZE) continue; - pmd = mm_find_pmd(vma->vm_mm, addr); + mm = vma->vm_mm; + pmd = mm_find_pmd(mm, addr); if (!pmd) continue; /* @@ -1278,14 +1292,16 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * re-fault. Not ideal, but it's more important to not disturb * the system too much. */ - if (down_write_trylock(&vma->vm_mm->mmap_sem)) { - spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); - /* assume page table is clear */ - _pmd = pmdp_collapse_flush(vma, addr, pmd); - spin_unlock(ptl); - up_write(&vma->vm_mm->mmap_sem); - mm_dec_nr_ptes(vma->vm_mm); - pte_free(vma->vm_mm, pmd_pgtable(_pmd)); + if (down_write_trylock(&mm->mmap_sem)) { + if (!khugepaged_test_exit(mm)) { + spinlock_t *ptl = pmd_lock(mm, pmd); + /* assume page table is clear */ + _pmd = pmdp_collapse_flush(vma, addr, pmd); + spin_unlock(ptl); + mm_dec_nr_ptes(mm); + pte_free(mm, pmd_pgtable(_pmd)); + } + up_write(&mm->mmap_sem); } } i_mmap_unlock_write(mapping); @@ -1944,8 +1960,6 @@ static void set_recommended_min_free_kbytes(void) int start_stop_khugepaged(void) { - static struct task_struct *khugepaged_thread __read_mostly; - static DEFINE_MUTEX(khugepaged_mutex); int err = 0; mutex_lock(&khugepaged_mutex); @@ -1972,3 +1986,11 @@ int start_stop_khugepaged(void) mutex_unlock(&khugepaged_mutex); return err; } + +void khugepaged_min_free_kbytes_update(void) +{ + mutex_lock(&khugepaged_mutex); + if (khugepaged_enabled() && khugepaged_thread) + set_recommended_min_free_kbytes(); + mutex_unlock(&khugepaged_mutex); +} diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 1589165a002e3b1512c8b2a5766167b9f7e7d128..740cba23f4a35edb79f4425204a92dfab5749198 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -2051,7 +2051,7 @@ void __init kmemleak_init(void) create_object((unsigned long)__bss_start, __bss_stop - __bss_start, KMEMLEAK_GREY, GFP_ATOMIC); /* only register .data..ro_after_init if not within .data */ - if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) + if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata) create_object((unsigned long)__start_ro_after_init, __end_ro_after_init - __start_ro_after_init, KMEMLEAK_GREY, GFP_ATOMIC); diff --git a/mm/maccess.c b/mm/maccess.c index ec00be51a24fd6a9639897fafeea3abd45b9d3f4..6e41ba452e5e94983113b9fc126c8a6954892efa 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -5,8 +5,32 @@ #include #include +static __always_inline long +probe_read_common(void *dst, const void __user *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = 
__copy_from_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + +static __always_inline long +probe_write_common(void __user *dst, const void *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_to_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** - * probe_kernel_read(): safely attempt to read from a location + * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data * @src: address to read from * @size: size of the data chunk @@ -29,16 +53,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_from_user_inatomic(dst, - (__force const void __user *)src, size); - pagefault_enable(); + ret = probe_read_common(dst, (__force const void __user *)src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_read); +/** + * probe_user_read(): safely attempt to read from a user-space location + * @dst: pointer to the buffer that shall take the data + * @src: address to read from. This must be a user address. + * @size: size of the data chunk + * + * Safely read from user address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_read(void *dst, const void __user *src, size_t size) + __attribute__((alias("__probe_user_read"))); + +long __probe_user_read(void *dst, const void __user *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(VERIFY_READ, src, size)) + ret = probe_read_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_read); + /** * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to @@ -48,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ + long __weak probe_kernel_write(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_write"))); @@ -57,15 +106,40 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - pagefault_enable(); + ret = probe_write_common((__force void __user *)dst, src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_write); +/** + * probe_user_write(): safely attempt to write to a user-space location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_write(void __user *dst, const void *src, size_t size) + __attribute__((alias("__probe_user_write"))); + +long __probe_user_write(void __user *dst, const void *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(VERIFY_WRITE, dst, size)) + ret = probe_write_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_write); + /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. * @dst: Destination address, in kernel space. 
This buffer must be at @@ -105,3 +179,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) return ret ? -EFAULT : src - unsafe_addr; } + +/** + * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user + * address. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @unsafe_addr: Unsafe user address. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from unsafe user address to kernel buffer. + * + * On success, returns the length of the string INCLUDING the trailing NUL. + * + * If access fails, returns -EFAULT (some data may have been copied + * and the trailing NUL added). + * + * If @count is smaller than the length of the string, copies @count-1 bytes, + * sets the last byte of @dst buffer to NUL and returns @count. + */ +long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count) +{ + mm_segment_t old_fs = get_fs(); + long ret; + + if (unlikely(count <= 0)) + return 0; + + set_fs(USER_DS); + pagefault_disable(); + ret = strncpy_from_user(dst, unsafe_addr, count); + pagefault_enable(); + set_fs(old_fs); + + if (ret >= count) { + ret = count; + dst[ret - 1] = '\0'; + } else if (ret > 0) { + ret++; + } + + return ret; +} + +/** + * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL. + * @unsafe_addr: The string to measure. + * @count: Maximum count (including NUL) + * + * Get the size of a NUL-terminated string in user space without pagefault. + * + * Returns the size of the string INCLUDING the terminating NUL. + * + * If the string is too long, returns a number larger than @count. User + * has to check the return value against "> count". + * On exception (or invalid count), returns 0. + * + * Unlike strnlen_user, this can be used from IRQ handler etc. because + * it disables pagefaults. + */ +long strnlen_unsafe_user(const void __user *unsafe_addr, long count) +{ + mm_segment_t old_fs = get_fs(); + int ret; + + set_fs(USER_DS); + pagefault_disable(); + ret = strnlen_user(unsafe_addr, count); + pagefault_enable(); + set_fs(old_fs); + + return ret; +} diff --git a/mm/memory.c b/mm/memory.c index 68b1002cc7bdcec0b3d8f5c7fc7efbb78c06e4af..2122259cc9d8ba2228044e0ac69b42d34773731f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -121,6 +121,18 @@ int randomize_va_space __read_mostly = 2; #endif +#ifndef arch_faults_on_old_pte +static inline bool arch_faults_on_old_pte(void) +{ + /* + * Those arches which don't have hw access flag feature need to + * implement their own helper. By default, "true" means pagefault + * will be hit on old pte. + */ + return true; +} +#endif + static int __init disable_randmaps(char *s) { randomize_va_space = 0; @@ -2519,32 +2531,101 @@ static inline int pte_unmap_same(struct vm_fault *vmf) return ret; } -static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) +static inline bool cow_user_page(struct page *dst, struct page *src, + struct vm_fault *vmf) { + bool ret; + void *kaddr; + void __user *uaddr; + bool locked = false; + struct vm_area_struct *vma = vmf->vma; + struct mm_struct *mm = vma->vm_mm; + unsigned long addr = vmf->address; + debug_dma_assert_idle(src); + if (likely(src)) { + copy_user_highpage(dst, src, addr, vma); + return true; + } + /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. 
If that * fails, we just zero-fill it. Live with it. */ - if (unlikely(!src)) { - void *kaddr = kmap_atomic(dst); - void __user *uaddr = (void __user *)(va & PAGE_MASK); + kaddr = kmap_atomic(dst); + uaddr = (void __user *)(addr & PAGE_MASK); + + /* + * On architectures with software "accessed" bits, we would + * take a double page fault, so mark it accessed here. + */ + if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { + pte_t entry; + + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* + * Other thread has already handled the fault + * and we don't need to do anything. If it's + * not the case, the fault will be triggered + * again on the same address. + */ + ret = false; + goto pte_unlock; + } + + entry = pte_mkyoung(vmf->orig_pte); + if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) + update_mmu_cache(vma, addr, vmf->pte); + } + + /* + * This really shouldn't fail, because the page is there + * in the page tables. But it might just be unreadable, + * in which case we just give up and fill the result with + * zeroes. + */ + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + if (locked) + goto warn; + + /* Re-validate under PTL if the page is still mapped */ + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* The PTE changed under us. Retry page fault. */ + ret = false; + goto pte_unlock; + } /* - * This really shouldn't fail, because the page is there - * in the page tables. But it might just be unreadable, - * in which case we just give up and fill the result with - * zeroes. + * The same page can be mapped back since last copy attempt. + * Try to copy again under PTL. */ - if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + /* + * Give a warn in case there can be some obscure + * use-case + */ +warn: + WARN_ON_ONCE(1); clear_page(kaddr); - kunmap_atomic(kaddr); - flush_dcache_page(dst); - } else - copy_user_highpage(dst, src, va, vma); + } + } + + ret = true; + +pte_unlock: + if (locked) + pte_unmap_unlock(vmf->pte, vmf->ptl); + kunmap_atomic(kaddr); + flush_dcache_page(dst); + + return ret; } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) @@ -2703,7 +2784,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) vmf->address); if (!new_page) goto out; - cow_user_page(new_page, old_page, vmf->address, vma); + + if (!cow_user_page(new_page, old_page, vmf)) { + /* + * COW failed; if the fault was resolved by another thread, + * it's fine. If not, userspace would re-fault on + * the same address and we will handle the fault + * from the second attempt.
+ */ + put_page(new_page); + if (old_page) + put_page(old_page); + return 0; + } } if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false)) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 7d575c6d44b0a897da7be43a25350ea989dd87b1..5f8e0960e0abc71de26ab3aa59a8631c8b07d34b 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -777,7 +777,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, * are reserved so nobody should be touching them so we should be safe */ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, - MEMMAP_HOTPLUG, altmap); + MEMINIT_HOTPLUG, altmap); set_zone_contiguous(zone); } @@ -1174,7 +1174,8 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) } /* link memory sections under this node.*/ - ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); + ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1), + MEMINIT_HOTPLUG); BUG_ON(ret); /* create new memmap entry */ diff --git a/mm/mmap.c b/mm/mmap.c index af62c59039532438b115a72e11400f6dacee4216..9f4e340e5632de91926b454d3c9bb8bf8f664c30 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2170,6 +2170,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.low_limit = mm->mmap_base; info.high_limit = TASK_SIZE; info.align_mask = 0; + info.align_offset = 0; return vm_unmapped_area(&info); } #endif @@ -2211,6 +2212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = mm->mmap_base; info.align_mask = 0; + info.align_offset = 0; addr = vm_unmapped_area(&info); /* @@ -3218,6 +3220,7 @@ void exit_mmap(struct mm_struct *mm) if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); + cond_resched(); } vm_unacct_memory(nr_accounted); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7cce28206ec9d8f4384627e00fa27803492f2891..7baa337182ac109e8eea24c9f8e71e66ae9cc617 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -67,6 +67,7 @@ #include #include #include +#include #include #include @@ -1260,6 +1261,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, struct page *page, *tmp; LIST_HEAD(head); + /* + * Ensure proper count is passed which otherwise would get stuck in the + * below while (list_empty(list)) loop. + */ + count = min(pcp->count, count); while (count) { struct list_head *list; @@ -5932,7 +5938,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat) * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn, enum memmap_context context, + unsigned long start_pfn, enum meminit_context context, struct vmem_altmap *altmap) { unsigned long end_pfn = start_pfn + size; @@ -5959,7 +5965,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * There can be holes in boot-time mem_map[]s handed to this * function. They do not exist on hotplugged memory.
*/ - if (context != MEMMAP_EARLY) + if (context != MEMINIT_EARLY) goto not_early; if (!early_pfn_valid(pfn)) @@ -5994,7 +6000,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, not_early: page = pfn_to_page(pfn); __init_single_page(page, pfn, zone, nid); - if (context == MEMMAP_HOTPLUG) + if (context == MEMINIT_HOTPLUG) SetPageReserved(page); /* @@ -6009,7 +6015,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * check here not to call set_pageblock_migratetype() against * pfn out of zone. * - * Please note that MEMMAP_HOTPLUG path doesn't clear memmap + * Please note that MEMINIT_HOTPLUG path doesn't clear memmap * because this is done early in sparse_add_one_section */ if (!(pfn & (pageblock_nr_pages - 1))) { @@ -6030,7 +6036,8 @@ static void __meminit zone_init_free_lists(struct zone *zone) #ifndef __HAVE_ARCH_MEMMAP_INIT #define memmap_init(size, nid, zone, start_pfn) \ - memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL) + memmap_init_zone((size), (nid), (zone), (start_pfn), \ + MEMINIT_EARLY, NULL) #endif static int zone_batchsize(struct zone *zone) @@ -7862,9 +7869,11 @@ int __meminit init_per_zone_wmark_min(void) setup_min_slab_ratio(); #endif + khugepaged_min_free_kbytes_update(); + return 0; } -core_initcall(init_per_zone_wmark_min) +postcore_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so diff --git a/mm/page_counter.c b/mm/page_counter.c index de31470655f66c3492b1858bd308eedd95917afd..147ff99187b8197142746ef13cc28acf77dd05cf 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) long new; new = atomic_long_add_return(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is indeed racy, but we can live with some * inaccuracy in the watermark. @@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter, new = atomic_long_add_return(nr_pages, &c->usage); if (new > c->max) { atomic_long_sub(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is racy, but we can live with some * inaccuracy in the failcnt. @@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter, *fail = c; goto failed; } - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * Just like with failcnt, we can live with some * inaccuracy in the watermark. 
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index c3084ff2569d2f297ed369874ba839aadc93785e..3c0930d94a2953894a98f5fe27629d54b05b4c57 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); if (err) break; - addr += PAGE_SIZE; - if (addr == end) + if (addr >= end - PAGE_SIZE) break; + addr += PAGE_SIZE; pte++; } diff --git a/mm/percpu.c b/mm/percpu.c index 38393016f204b0b3ea97cf1cf18a98f80729c9ac..2bbae237a8e8055276ed1999408095e218233b49 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1103,7 +1103,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, /* allocate chunk */ chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) + - BITS_TO_LONGS(region_size >> PAGE_SHIFT), + BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long), 0); INIT_LIST_HEAD(&chunk->list); diff --git a/mm/slub.c b/mm/slub.c index 52edbbe341b268a245fba178d33388fad1000640..363a41f34166e6b1980f12bb9d105672d38245d0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -657,12 +657,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) } static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { if ((s->flags & SLAB_CONSISTENCY_CHECKS) && - !check_valid_pointer(s, page, nextfree)) { - object_err(s, page, freelist, "Freechain corrupt"); - freelist = NULL; + !check_valid_pointer(s, page, nextfree) && freelist) { + object_err(s, page, *freelist, "Freechain corrupt"); + *freelist = NULL; slab_fix(s, "Isolate corrupted freechain"); return true; } @@ -1382,7 +1382,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { return false; } @@ -2106,7 +2106,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, * 'freelist' is already corrupted. So isolate all objects * starting at 'freelist'. 
*/ - if (freelist_corrupted(s, page, freelist, nextfree)) + if (freelist_corrupted(s, page, &freelist, nextfree)) break; do { diff --git a/mm/swap_state.c b/mm/swap_state.c index 1b2b986c8220cb12fea9e936afa3bc5332cdeb7b..565e3464a3647f0bdbb844b91c242639a0bb4543 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -538,10 +538,11 @@ static unsigned long swapin_nr_pages(unsigned long offset) return 1; hits = atomic_xchg(&swapin_readahead_hits, 0); - pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages, + pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits, + max_pages, atomic_read(&last_readahead_pages)); if (!hits) - prev_offset = offset; + WRITE_ONCE(prev_offset, offset); atomic_set(&last_readahead_pages, pages); return pages; diff --git a/mm/swapfile.c b/mm/swapfile.c index 67eb4f67553419d9bf84d9ff0211feec58743c93..c1eb8bdc83000ec7aa554f8ecb888e91258e2f0d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1019,7 +1019,7 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size) goto nextsi; } if (size == SWAPFILE_CLUSTER) { - if (!(si->flags & SWP_FILE)) + if (si->flags & SWP_BLKDEV) n_ret = swap_alloc_cluster(si, swp_entries); } else n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, @@ -2760,10 +2760,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) else type = si->type + 1; + ++(*pos); for (; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; - ++*pos; return si; } diff --git a/mm/vmscan.c b/mm/vmscan.c index cf077f0a6c55abceb938ef5834968b02ea16692f..bfe52b7ed24e73a4c50021f6f0d662beb4c64353 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2782,6 +2782,14 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) unsigned long reclaimed; unsigned long scanned; + /* + * This loop can become CPU-bound when target memcgs + * aren't eligible for reclaim - either because they + * don't have any reclaimable pages, or because their + * memory is explicitly protected. Avoid soft lockups. + */ + cond_resched(); + switch (mem_cgroup_protected(root, memcg)) { case MEMCG_PROT_MIN: /* @@ -3182,8 +3190,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) /* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { - pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, - (enum zone_type)ZONE_NORMAL); + if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL) + WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL); + wake_up_interruptible(&pgdat->kswapd_wait); } @@ -3815,9 +3824,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, enum zone_type prev_classzone_idx) { - if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - return prev_classzone_idx; - return pgdat->kswapd_classzone_idx; + enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, @@ -3861,8 +3870,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * the previous request that slept prematurely. 
*/ if (remaining) { - pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); + WRITE_ONCE(pgdat->kswapd_classzone_idx, + kswapd_classzone_idx(pgdat, classzone_idx)); + + if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) + WRITE_ONCE(pgdat->kswapd_order, reclaim_order); } finish_wait(&pgdat->kswapd_wait, &wait); @@ -3944,12 +3956,12 @@ static int kswapd(void *p) tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); for ( ; ; ) { bool ret; - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); kswapd_try_sleep: @@ -3957,10 +3969,10 @@ static int kswapd(void *p) classzone_idx); /* Read the new order and classzone_idx */ - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); ret = try_to_freeze(); if (kthread_should_stop()) @@ -4005,20 +4017,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; + enum zone_type curr_idx; if (!managed_zone(zone)) return; if (!cpuset_zone_allowed(zone, gfp_flags)) return; + pgdat = zone->zone_pgdat; + curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx) + WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx); + + if (READ_ONCE(pgdat->kswapd_order) < order) + WRITE_ONCE(pgdat->kswapd_order, order); - if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - pgdat->kswapd_classzone_idx = classzone_idx; - else - pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, - classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, order); if (!waitqueue_active(&pgdat->kswapd_wait)) return; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index f868cf6fba7946c87d0bb152dbd35ba61e3eb0fb..b6dcb40fa8a7d2e0dc6ed6fee957533544435125 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -377,6 +377,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. */ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -718,11 +722,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. 
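Aside, not part of the patches: the mm/vmscan.c hunks above switch kswapd_order and kswapd_classzone_idx to READ_ONCE()/WRITE_ONCE() because wakers and kswapd access them without a shared lock. A rough user-space approximation of that annotation (the real kernel macros are more elaborate):

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct pgdat_demo {
	unsigned int kswapd_order;	/* written by wakers, read by kswapd */
};

/* Waker side: only raise the pending order, never lower it. */
static void wake_demo(struct pgdat_demo *p, unsigned int order)
{
	if (READ_ONCE(p->kswapd_order) < order)
		WRITE_ONCE(p->kswapd_order, order);
}

/* kswapd side: consume the pending request. */
static unsigned int fetch_order(struct pgdat_demo *p)
{
	unsigned int order = READ_ONCE(p->kswapd_order);

	WRITE_ONCE(p->kswapd_order, 0);
	return order;
}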
*/ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); @@ -818,20 +831,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) diff --git a/net/atm/lec.c b/net/atm/lec.c index ad4f829193f053c8a0c0846f1e9f619617dcd18e..5a6186b809874ecaa0f2239019292dc7abdde5c0 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1270,6 +1270,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry) entry->vcc = NULL; } if (entry->recv_vcc) { + struct atm_vcc *vcc = entry->recv_vcc; + struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); + + kfree(vpriv); + vcc->user_back = NULL; + entry->recv_vcc->push = entry->old_recv_push; vcc_release_async(entry->recv_vcc, -EPIPE); entry->recv_vcc = NULL; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 0458de53cb64b2da51de492ffa27f33068351cc8..04a620fd13014463ed0c7c047f3a61a05d862e39 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -716,6 +716,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); + if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. 
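Aside, not part of the net/9p hunks above: the p9_fd_open() rework replaces the combined error branch with the usual goto-based unwind, so each acquired file reference is released exactly once on failure. A tiny sketch of the pattern with hypothetical resources:

#include <stdio.h>

/* Open a read/write pair; on failure, release only what was acquired. */
static int open_pair(const char *rpath, const char *wpath,
		     FILE **rd, FILE **wr)
{
	FILE *r, *w;

	r = fopen(rpath, "r");
	if (!r)
		goto out_err;

	w = fopen(wpath, "w");
	if (!w)
		goto out_close_r;

	*rd = r;
	*wr = w;
	return 0;

out_close_r:
	fclose(r);
out_err:
	return -1;
}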
*/ @@ -843,11 +849,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; - ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 85faf25c2912249849a907e29bf058535d19e280..1401031f4bb4a59f436ddc95720d522b43cff115 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -96,11 +97,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size) */ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) { - const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data; + const struct batadv_bla_backbone_gw *gw; u32 hash = 0; - hash = jhash(&claim->addr, sizeof(claim->addr), hash); - hash = jhash(&claim->vid, sizeof(claim->vid), hash); + gw = (struct batadv_bla_backbone_gw *)data; + hash = jhash(&gw->orig, sizeof(gw->orig), hash); + hash = jhash(&gw->vid, sizeof(gw->vid), hash); return hash % size; } @@ -450,7 +452,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); @@ -1589,13 +1594,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv) } /** - * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the bcast_packet to be checked + * @skb: contains the multicast packet to be checked + * @payload_ptr: pointer to position inside the head buffer of the skb + * marking the start of the data to be CRC'ed + * @orig: originator mac address, NULL if unknown * - * check if it is on our broadcast list. Another gateway might - * have sent the same packet because it is connected to the same backbone, - * so we have to remove this duplicate. + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. * * This is performed by checking the CRC, which will tell us * with a good chance that it is the same packet. If it is furthermore @@ -1604,19 +1612,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv) * * Return: true if a packet is in the duplicate list, false otherwise. */ -bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) +static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb, u8 *payload_ptr, + const u8 *orig) { - int i, curr; - __be32 crc; - struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_duplist_entry *entry; bool ret = false; - - bcast_packet = (struct batadv_bcast_packet *)skb->data; + int i, curr; + __be32 crc; /* calculate the crc ... 
*/ - crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1)); + crc = batadv_skb_crc32(skb, payload_ptr); spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); @@ -1635,8 +1641,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, if (entry->crc != crc) continue; - if (batadv_compare_eth(entry->orig, bcast_packet->orig)) - continue; + /* are the originators both known and not anonymous? */ + if (orig && !is_zero_ether_addr(orig) && + !is_zero_ether_addr(entry->orig)) { + /* If known, check if the new frame came from + * the same originator: + * We are safe to take identical frames from the + * same orig, if known, as multiplications in + * the mesh are detected via the (orig, seqno) pair. + * So we can be a bit more liberal here and allow + * identical frames from the same orig which the source + * host might have sent multiple times on purpose. + */ + if (batadv_compare_eth(entry->orig, orig)) + continue; + } /* this entry seems to match: same crc, not too old, * and from another gw. therefore return true to forbid it. @@ -1652,7 +1671,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, entry = &bat_priv->bla.bcast_duplist[curr]; entry->crc = crc; entry->entrytime = jiffies; - ether_addr_copy(entry->orig, bcast_packet->orig); + + /* known originator */ + if (orig) + ether_addr_copy(entry->orig, orig); + /* anonymous originator */ + else + eth_zero_addr(entry->orig); + bat_priv->bla.bcast_duplist_curr = curr; out: @@ -1661,6 +1687,48 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, return ret; } +/** + * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the multicast packet to be checked, decapsulated from a + * unicast_packet + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. + */ +static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL); +} + +/** + * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the bcast_packet to be checked + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. + */ +bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + struct batadv_bcast_packet *bcast_packet; + u8 *payload_ptr; + + bcast_packet = (struct batadv_bcast_packet *)skb->data; + payload_ptr = (u8 *)(bcast_packet + 1); + + return batadv_bla_check_duplist(bat_priv, skb, payload_ptr, + bcast_packet->orig); +} + /** * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for * the VLAN identified by vid. @@ -1822,7 +1890,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame - * @is_bcast: the packet came in a broadcast packet type. 
+ * @packet_type: the batman packet type this frame came in * * batadv_bla_rx avoidance checks if: * * we have to race for a claim @@ -1834,7 +1902,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * further process the skb. */ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast) + unsigned short vid, int packet_type) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; @@ -1856,9 +1924,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto handled; if (unlikely(atomic_read(&bat_priv->bla.num_requests))) - /* don't allow broadcasts while requests are in flight */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) - goto handled; + /* don't allow multicast packets while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + /* Both broadcast flooding or multicast-via-unicasts + * delivery might send to multiple backbone gateways + * sharing the same LAN and therefore need to coordinate + * which backbone gateway forwards into the LAN, + * by claiming the payload source address. + * + * Broadcast flooding and multicast-via-unicasts + * delivery use the following two batman packet types. + * Note: explicitly exclude BATADV_UNICAST_4ADDR, + * as the DHCP gateway feature will send explicitly + * to only one BLA gateway, so the claiming process + * should be avoided there. + */ + if (packet_type == BATADV_BCAST || + packet_type == BATADV_UNICAST) + goto handled; + + /* potential duplicates from foreign BLA backbone gateways via + * multicast-in-unicast packets + */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + packet_type == BATADV_UNICAST && + batadv_bla_check_ucast_duplist(bat_priv, skb)) + goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; @@ -1893,13 +1984,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; } - /* if it is a broadcast ... */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { + /* if it is a multicast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { /* ... drop it. the responsible gateway is in charge. * - * We need to check is_bcast because with the gateway + * We need to check packet type because with the gateway * feature, broadcasts (like DHCP requests) may be sent - * using a unicast packet type. + * using a unicast 4 address packet type. See comment above. 
*/ goto handled; } else { diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 71f95a3e4d3f335890408685432f18e5d7411a76..af28fdb01467ce290c2a00d0741f01a6e4f347ee 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -48,7 +48,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac) #ifdef CONFIG_BATMAN_ADV_BLA bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast); + unsigned short vid, int packet_type); bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); bool batadv_bla_is_backbone_gw(struct sk_buff *skb, @@ -79,7 +79,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, - bool is_bcast) + int packet_type) { return false; } diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 140c61a3f1ecfec4fe23c5ddca19e18e2e86fd56..0c59fefc137196899f97e0fa7882cf55ceebe34c 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -714,8 +714,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ - if (ret == BATADV_DHCP_TO_CLIENT && - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { + if (ret == BATADV_DHCP_TO_CLIENT) { + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) + return BATADV_DHCP_NO; + /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index cc3ed93a6d513dffd4711cac50545d65ef7d640e..98af41e3810dcdf96edad8dff89d4d2b624c5d7f 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -838,6 +838,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, vid = batadv_get_vid(skb, hdr_len); ethhdr = (struct ethhdr *)(skb->data + hdr_len); + /* do not reroute multicast frames in a unicast header */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + return true; + /* check if the destination client was served by this node and it is now * roaming. In this case, it means that the node has got a ROAM_ADV * message and that it knows the new destination in the mesh to re-route diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index a2976adeeedce449144a1b3fd9986126018e2d8d..6ff78080ec7fb4b6dcf15f87a3a0aa2e6187a5fa 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -426,10 +426,10 @@ void batadv_interface_rx(struct net_device *soft_iface, struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; - bool is_bcast; + int packet_type; batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; - is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); + packet_type = batadv_bcast_packet->packet_type; skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb); @@ -472,7 +472,7 @@ void batadv_interface_rx(struct net_device *soft_iface, /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. 
*/ - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) + if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) goto out; if (orig_node) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 357475cceec61ba264fd49621e75de624f4afff4..9a75f9b00b5129c4faae0f16f1ad894a5361951b 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -57,6 +57,7 @@ static bool enable_6lowpan; /* We are listening incoming connections via this channel */ static struct l2cap_chan *listen_chan; +static DEFINE_MUTEX(set_lock); struct lowpan_peer { struct list_head list; @@ -1082,12 +1083,14 @@ static void do_enable_set(struct work_struct *work) enable_6lowpan = set_enable->flag; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } listen_chan = bt_6lowpan_listen(); + mutex_unlock(&set_lock); kfree(set_enable); } @@ -1139,11 +1142,13 @@ static ssize_t lowpan_control_write(struct file *fp, if (ret == -EINVAL) return ret; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); listen_chan = NULL; } + mutex_unlock(&set_lock); if (conn) { struct lowpan_peer *peer; diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 51c2cf2d8923ae8dcb174355f26b0b08634a3892..be9640e9ca00674b15a342e98fb60ac4099b6734 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -233,6 +233,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_info_req req; found = true; + + memset(&req, 0, sizeof(req)); + req.id = cl->id; a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), sizeof(req), &req); @@ -312,6 +315,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, if (!hdev || hdev->dev_type != HCI_AMP) { struct a2mp_info_rsp rsp; + memset(&rsp, 0, sizeof(rsp)); + rsp.id = req->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; @@ -355,6 +360,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, if (!ctrl) return -ENOMEM; + memset(&req, 0, sizeof(req)); + req.id = rsp->id; a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), &req); @@ -383,6 +390,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_amp_assoc_rsp rsp; rsp.id = req->id; + memset(&rsp, 0, sizeof(rsp)); + if (tmp) { rsp.status = A2MP_STATUS_COLLISION_OCCURED; amp_mgr_put(tmp); @@ -471,7 +480,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; struct hci_dev *hdev; struct hci_conn *hcon; @@ -482,6 +490,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); + memset(&rsp, 0, sizeof(rsp)); + rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; @@ -560,6 +570,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); + memset(&rsp, 0, sizeof(rsp)); + rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; rsp.status = A2MP_STATUS_SUCCESS; @@ -682,6 +694,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) if (err) { struct a2mp_cmd_rej rej; + memset(&rej, 0, sizeof(rej)); + rej.reason = cpu_to_le16(0); hdr = (void *) skb->data; @@ -905,6 +919,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev) BT_DBG("%s mgr %p", hdev->name, mgr); + memset(&rsp, 0, sizeof(rsp)); + 
rsp.id = hdev->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; @@ -1002,6 +1018,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) if (!mgr) return; + memset(&rsp, 0, sizeof(rsp)); + hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); if (!hs_hcon) { rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; @@ -1034,6 +1052,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan) mgr->bredr_chan = chan; + memset(&req, 0, sizeof(req)); + req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); req.ext_feat = 0; a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index db735d0d931e62346464df9f775b91d42c44d5dd..1b50e4ef2c6833c7b8b2d34982b08d2460ae3a96 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1282,6 +1282,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 0; } + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a044e6bb12b840d3962f5e97d3c2e978287b664b..d98d8e78b7363d62f02af4d7d78f935c18abe0cf 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -41,12 +41,27 @@ /* Handle HCI Event packets */ -static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, + u8 *new_status) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%2.2x", hdev->name, status); + /* It is possible that we receive Inquiry Complete event right + * before we receive Inquiry Cancel Command Complete event, in + * which case the latter event should have status of Command + * Disallowed (0x0c). This should not be treated as error, since + * we actually achieve what Inquiry Cancel wants to achieve, + * which is to end the last Inquiry session. 
+ */ + if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); + status = 0x00; + } + + *new_status = status; + if (status) return; @@ -1229,6 +1244,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -2357,7 +2375,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -2738,7 +2756,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); - hci_encrypt_cfm(conn, ev->status, 0x00); + hci_encrypt_cfm(conn, ev->status); } } @@ -2823,22 +2841,7 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, conn->enc_key_size = rp->key_size; } - if (conn->state == BT_CONFIG) { - conn->state = BT_CONNECTED; - hci_connect_cfm(conn, 0); - hci_conn_drop(conn); - } else { - u8 encrypt; - - if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) - encrypt = 0x00; - else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) - encrypt = 0x02; - else - encrypt = 0x01; - - hci_encrypt_cfm(conn, 0, encrypt); - } + hci_encrypt_cfm(conn, 0); unlock: hci_dev_unlock(hdev); @@ -2887,27 +2890,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = HCI_ERROR_AUTH_FAILURE; + if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + /* Notify upper layers so they can cleanup before + * disconnecting. + */ + hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - /* In Secure Connections Only mode, do not allow any connections - * that are not encrypted with AES-CCM using a P-256 authenticated - * combination key. 
- */ - if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && - (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || - conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_drop(conn); - goto unlock; - } - /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; @@ -2937,14 +2936,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) } notify: - if (conn->state == BT_CONFIG) { - if (!ev->status) - conn->state = BT_CONNECTED; - - hci_connect_cfm(conn, ev->status); - hci_conn_drop(conn); - } else - hci_encrypt_cfm(conn, ev->status, ev->encrypt); + hci_encrypt_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); @@ -3036,7 +3028,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, switch (*opcode) { case HCI_OP_INQUIRY_CANCEL: - hci_cc_inquiry_cancel(hdev, skb); + hci_cc_inquiry_cancel(hdev, skb, status); break; case HCI_OP_PERIODIC_INQ: @@ -3945,6 +3937,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -3966,6 +3961,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -3986,6 +3984,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } } +unlock: hci_dev_unlock(hdev); } @@ -4148,7 +4147,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -5116,7 +5115,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5138,6 +5138,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * @@ -5197,7 +5202,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5251,7 +5256,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. 
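Aside, not part of the hci_event.c hunks above: several of them add the same guard, verifying that the received event buffer really contains num_rsp fixed-size records (plus the count byte) before iterating over them. A standalone sketch of that bounds check, with a hypothetical record layout:

#include <stddef.h>
#include <stdint.h>

struct rsp_info {			/* hypothetical fixed-size record */
	uint8_t addr[6];
	int8_t  rssi;
};

/* Parse records that follow a one-byte count header. */
static int parse_results(const uint8_t *buf, size_t len)
{
	size_t num_rsp, need;

	if (len < 1)
		return -1;
	num_rsp = buf[0];

	/* Reject truncated input before touching any record. */
	need = 1 + num_rsp * sizeof(struct rsp_info);
	if (len < need)
		return -1;

	/* ... safe to walk num_rsp records starting at buf + 1 ... */
	return 0;
}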
*/ @@ -5286,7 +5291,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5326,7 +5332,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5400,7 +5406,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length + 1; @@ -5598,7 +5605,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); + ev->direct_addr_type, ev->rssi, NULL, 0, + false); ptr += sizeof(*ev); } @@ -5719,6 +5727,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) u8 status = 0, event = hdr->evt, req_evt = 0; u16 opcode = HCI_OP_NOP; + if (!event) { + bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); + goto done; + } + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(cmd_hdr->opcode); @@ -5930,6 +5943,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) req_complete_skb(hdev, status, opcode, orig_skb); } +done: kfree_skb(orig_skb); kfree_skb(skb); hdev->stat.evt_rx++; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0d84d1f820d4c20fcc9ac29c09fe716d730dd4a3..f1ff83321023182e8fb52f6b3263fb6054d68532 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); mutex_lock(&conn->chan_lock); + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling + * this work. No need to call l2cap_chan_hold(chan) here again. 
+ */ l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) @@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_close(chan, reason); - l2cap_chan_unlock(chan); - chan->ops->close(chan); - mutex_unlock(&conn->chan_lock); + l2cap_chan_unlock(chan); l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); } struct l2cap_chan *l2cap_chan_create(void) @@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_del(chan, err); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@ -4114,7 +4117,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, return 0; } - if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && + chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock; @@ -4337,6 +4341,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); rsp.dcid = cpu_to_le16(chan->scid); @@ -4345,12 +4350,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, chan->ops->set_shutdown(chan); - l2cap_chan_hold(chan); l2cap_chan_del(chan, ECONNRESET); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -4382,20 +4386,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); return 0; } - l2cap_chan_hold(chan); l2cap_chan_del(chan, 0); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -6678,9 +6683,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) goto drop; } - if ((chan->mode == L2CAP_MODE_ERTM || - chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb)) - goto drop; + if (chan->ops->filter) { + if (chan->ops->filter(chan, skb)) + goto drop; + } if (!control->sframe) { int err; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a3a2cd55e23a9caa288663e69243db71cdf211fc..2a85dc3be8bf39569ff53a9502f8fefc5efc3b79 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1039,7 +1039,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, } /* Kill socket (only if zapped and orphan) - * Must be called on unlocked socket. + * Must be called on unlocked socket, with l2cap channel lock. 
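Aside, not part of the l2cap_core.c/l2cap_sock.c hunks above: they consistently take a channel reference before locking it and drop that reference only after unlocking, so the object cannot be freed while its lock is still held. A minimal pthread sketch of that ordering:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct chan {
	atomic_int refs;
	pthread_mutex_t lock;
};

static void chan_hold(struct chan *c)
{
	atomic_fetch_add(&c->refs, 1);
}

static void chan_put(struct chan *c)
{
	if (atomic_fetch_sub(&c->refs, 1) == 1) {
		pthread_mutex_destroy(&c->lock);
		free(c);		/* last reference: object goes away */
	}
}

/* hold -> lock -> work -> unlock -> put */
static void teardown(struct chan *c)
{
	chan_hold(c);
	pthread_mutex_lock(&c->lock);
	/* ... close / detach the channel ... */
	pthread_mutex_unlock(&c->lock);
	chan_put(c);
}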
*/ static void l2cap_sock_kill(struct sock *sk) { @@ -1190,6 +1190,7 @@ static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; + struct l2cap_chan *chan; BT_DBG("sock %p, sk %p", sock, sk); @@ -1199,9 +1200,17 @@ static int l2cap_sock_release(struct socket *sock) bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, 2); + chan = l2cap_pi(sk)->chan; + + l2cap_chan_hold(chan); + l2cap_chan_lock(chan); sock_orphan(sk); l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return err; } @@ -1219,12 +1228,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) BT_DBG("child chan %p state %s", chan, state_to_string(chan->state)); + l2cap_chan_hold(chan); l2cap_chan_lock(chan); + __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } } @@ -1464,6 +1476,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) sk->sk_state_change(sk); } +static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct sock *sk = chan->data; + + switch (chan->mode) { + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + return sk_filter(sk, skb); + } + + return 0; +} + static const struct l2cap_ops l2cap_chan_ops = { .name = "L2CAP Socket Interface", .new_connection = l2cap_sock_new_connection_cb, @@ -1478,6 +1503,7 @@ static const struct l2cap_ops l2cap_chan_ops = { .set_shutdown = l2cap_sock_set_shutdown_cb, .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, + .filter = l2cap_sock_filter, }; static void l2cap_sock_destruct(struct sock *sk) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ccce954f814682a40ba5d8af0ab463d5b0bfda3b..5340b1097afb7a2e3a8b9b9e27335776b943a03a 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -756,7 +756,8 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_ssp_capable(hdev)) { settings |= MGMT_SETTING_SSP; - settings |= MGMT_SETTING_HS; + if (IS_ENABLED(CONFIG_BT_HS)) + settings |= MGMT_SETTING_HS; } if (lmp_sc_capable(hdev)) @@ -1771,6 +1772,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) BT_DBG("request for %s", hdev->name); + if (!IS_ENABLED(CONFIG_BT_HS)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_NOT_SUPPORTED); + status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); diff --git a/net/compat.c b/net/compat.c index 3c4b0283b29a1045b073484b6b4571f9f9f9d460..2a8c7cb5f06a87426d4aa915dea1534b8f92bb83 100644 --- a/net/compat.c +++ b/net/compat.c @@ -289,6 +289,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) break; } /* Bump the usage count and install the file. 
*/ + __receive_sock(fp[i]); fd_install(new_fd, get_file(fp[i])); } diff --git a/net/core/dev.c b/net/core/dev.c index e8097b88f95be417a363769c2c6597bc7892162b..e696349f9e19ab9fd2ab365f4a67f87cff5ecc19 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6215,12 +6215,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, pr_err_once("netif_napi_add() called with weight %d on device %s\n", weight, dev->name); napi->weight = weight; - list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); diff --git a/net/core/filter.c b/net/core/filter.c index 2ed842de8d137c1dd8042c9b4025ff78a0e33202..a14eecd9e4131345fdf4a14b113e9b2e70133513 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5421,8 +5421,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, bool indirect = BPF_MODE(orig->code) == BPF_IND; struct bpf_insn *insn = insn_buf; - /* We're guaranteed here that CTX is in R6. */ - *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); } else { @@ -5430,6 +5428,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, if (orig->imm) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); } + /* We're guaranteed here that CTX is in R6. */ + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); switch (BPF_SIZE(orig->code)) { case BPF_B: diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9a21bc3efb888174f1e3dc577671bcce45db768a..89725f7aec6c0410357b5c86faa28eb93866ff05 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return per_cpu_ptr(tbl->stats, cpu); } + (*pos)++; return NULL; } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a581cf101cd9c799aa0d54839c41500a9ecd2827..023ce0fbb496de5a7280bcb80e4c017bdc237689 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -161,7 +161,7 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int cpu = smp_processor_id(); - list_for_each_entry(napi, &dev->napi_list, dev_list) { + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { poll_one_napi(napi); smp_store_release(&napi->poll_owner, -1); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ef92eacb0bb8703eb0f9933b7a57ebcbe5a2ccb4..d5324eb855f9436a0f78f67742cc9b911d9d1b13 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -183,6 +183,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, u8 *data; bool pfmemalloc; + if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE)) + gfp_mask |= GFP_DMA; + cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; @@ -5128,8 +5131,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; - - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) + /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). 
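Aside, not part of the net/core hunks above: the neigh_stat_seq_next() change (like the matching ones elsewhere in this series) makes the seq_file ->next() callback advance *pos even when it returns NULL. A sketch of the corrected shape, with a hypothetical item table:

#include <stddef.h>

#define NR_ITEMS 4
static int items[NR_ITEMS];

/*
 * seq_file-style ->next(): the position must be advanced even when the
 * walk is finished, otherwise the core can keep asking for the same slot.
 */
static void *demo_seq_next(long *pos)
{
	long i = *pos;

	if (i < NR_ITEMS) {
		*pos = i + 1;
		return &items[i];
	}
	(*pos)++;		/* past the end: still consume the position */
	return NULL;
}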
*/ + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) goto err_free; vhdr = (struct vlan_hdr *)skb->data; @@ -5521,9 +5524,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - if (k == 0) { - /* split line is in frag list */ - pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); + /* split line is in frag list */ + if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { + /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ + if (skb_has_frag_list(skb)) + kfree_skb_list(skb_shinfo(skb)->frag_list); + kfree(data); + return -ENOMEM; } skb_release_data(skb); diff --git a/net/core/sock.c b/net/core/sock.c index 13322df4bd17dacddc16fb19ada5a56d24f22f8b..47d5bcb9256d666ed2d9a82553ae118edaa870a9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2636,6 +2636,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct * } EXPORT_SYMBOL(sock_no_mmap); +/* + * When a file is received (via SCM_RIGHTS, etc), we must bump the + * various sock-based usage counts. + */ +void __receive_sock(struct file *file) +{ + struct socket *sock; + int error; + + /* + * The resulting value of "error" is ignored here since we only + * need to take action when the file is a socket and testing + * "sock" for NULL is sufficient. + */ + sock = sock_from_file(file, &error); + if (sock) { + sock_update_netprioidx(&sock->sk->sk_cgrp_data); + sock_update_classid(&sock->sk->sk_cgrp_data); + } +} + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index a556cd708885a798ba13e1cf8443c8cf96251e28..5ee6b94131b233d7b0ffaeb18c55a7f31d661c5e 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1421,6 +1421,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; + int prio; int err; if (!ops) @@ -1469,6 +1470,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, struct dcbnl_buffer *buffer = nla_data(ieee[DCB_ATTR_DCB_BUFFER]); + for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { + if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { + err = -EINVAL; + goto err; + } + } + err = ops->dcbnl_setbuffer(netdev, buffer); if (err) goto err; diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 4083326b806e848eef3dec04d3e04dede2f4a957..d62d28d358d910ec09527f79f445ad49cc2051f5 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -17,6 +17,16 @@ #define DSA_HLEN 4 #define EDSA_HLEN 8 +#define FRAME_TYPE_TO_CPU 0x00 +#define FRAME_TYPE_FORWARD 0x03 + +#define TO_CPU_CODE_MGMT_TRAP 0x00 +#define TO_CPU_CODE_FRAME2REG 0x01 +#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02 +#define TO_CPU_CODE_POLICY_TRAP 0x03 +#define TO_CPU_CODE_ARP_MIRROR 0x04 +#define TO_CPU_CODE_POLICY_MIRROR 0x05 + static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); @@ -81,6 +91,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { u8 *edsa_header; + int frame_type; + int code; int source_device; int source_port; @@ -95,8 +107,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, /* * Check that frame type is either TO_CPU or FORWARD. 
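Aside, not part of the net/dcb/dcbnl.c hunk above: it validates every priority-to-buffer index received from userspace before the table is handed to the driver. A standalone sketch of that validation, with hypothetical limits standing in for DCBX_MAX_BUFFERS:

#include <stddef.h>

#define MAX_BUFFERS 8		/* hypothetical, plays the role of DCBX_MAX_BUFFERS */
#define NUM_PRIOS   8

/* Reject any out-of-range mapping before handing it to a driver. */
static int validate_prio2buffer(const unsigned char map[NUM_PRIOS])
{
	size_t prio;

	for (prio = 0; prio < NUM_PRIOS; prio++) {
		if (map[prio] >= MAX_BUFFERS)
			return -1;	/* -EINVAL in the kernel hunk */
	}
	return 0;
}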
*/ - if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0) + frame_type = edsa_header[0] >> 6; + + switch (frame_type) { + case FRAME_TYPE_TO_CPU: + code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1); + + /* + * Mark the frame to never egress on any port of the same switch + * unless it's a trapped IGMP/MLD packet, in which case the + * bridge might want to forward it. + */ + if (code != TO_CPU_CODE_IGMP_MLD_TRAP) + skb->offload_fwd_mark = 1; + + break; + + case FRAME_TYPE_FORWARD: + skb->offload_fwd_mark = 1; + break; + + default: return NULL; + } /* * Determine source device and port. @@ -160,8 +193,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, 2 * ETH_ALEN); } - skb->offload_fwd_mark = 1; - return skb; } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 3047fc4737c4d848b5fc5cff46adee5781ef05f1..48d7125501b4ece8a19c1efd9e0ca7d7bebb66d1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1749,7 +1749,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6c63524f598a9b5171bfda0692df824883faa136..89c613f195664c93501de2fc848c600cb744ae30 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -19,12 +19,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + bool need_csum, need_recompute_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -45,6 +45,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); + need_recompute_csum = skb->csum_not_inet; skb->encap_hdr_csum = need_csum; features &= skb->dev->hw_enc_features; @@ -102,7 +103,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, } *(pcsum + 1) = 0; - *pcsum = gso_make_checksum(skb, 0); + if (need_recompute_csum && !skb_is_gso(skb)) { + __wsum csum; + + csum = skb_checksum(skb, gre_offset, + skb->len - gre_offset, 0); + *pcsum = csum_fold(csum); + } else { + *pcsum = gso_make_checksum(skb, 0); + } } while ((skb = skb->next)); out: return segs; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c81fe129f7d973aaecd53729974ed2ee1fad0aa1..56e86467078de25b599c3853dbbd202cbddc2a0c 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -285,6 +285,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, ipv6_only_sock(sk), true, false); } +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk) +{ + kuid_t uid = sock_i_uid(sk); + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + + if (hlist_empty(&tb->owners)) { + tb->fastreuse = reuse; + if (sk->sk_reuseport) { + tb->fastreuseport = FASTREUSEPORT_ANY; + tb->fastuid = uid; + tb->fast_rcv_saddr = sk->sk_rcv_saddr; + tb->fast_ipv6_only = ipv6_only_sock(sk); + tb->fast_sk_family = sk->sk_family; +#if IS_ENABLED(CONFIG_IPV6) + 
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; +#endif + } else { + tb->fastreuseport = 0; + } + } else { + if (!reuse) + tb->fastreuse = 0; + if (sk->sk_reuseport) { + /* We didn't match or we don't have fastreuseport set on + * the tb, but we have sk_reuseport set on this socket + * and we know that there are no bind conflicts with + * this socket in this tb, so reset our tb's reuseport + * settings so that any subsequent sockets that match + * our current socket will be put on the fast path. + * + * If we reset we need to set FASTREUSEPORT_STRICT so we + * do extra checking for all subsequent sk_reuseport + * socks. + */ + if (!sk_reuseport_match(tb, sk)) { + tb->fastreuseport = FASTREUSEPORT_STRICT; + tb->fastuid = uid; + tb->fast_rcv_saddr = sk->sk_rcv_saddr; + tb->fast_ipv6_only = ipv6_only_sock(sk); + tb->fast_sk_family = sk->sk_family; +#if IS_ENABLED(CONFIG_IPV6) + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; +#endif + } + } else { + tb->fastreuseport = 0; + } + } +} + /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. * We try to allocate an odd port (and leave even ports for connect()) @@ -297,7 +348,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); struct inet_bind_bucket *tb = NULL; - kuid_t uid = sock_i_uid(sk); if (!port) { head = inet_csk_find_open_port(sk, &tb, &port); @@ -337,49 +387,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) goto fail_unlock; } success: - if (hlist_empty(&tb->owners)) { - tb->fastreuse = reuse; - if (sk->sk_reuseport) { - tb->fastreuseport = FASTREUSEPORT_ANY; - tb->fastuid = uid; - tb->fast_rcv_saddr = sk->sk_rcv_saddr; - tb->fast_ipv6_only = ipv6_only_sock(sk); - tb->fast_sk_family = sk->sk_family; -#if IS_ENABLED(CONFIG_IPV6) - tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; -#endif - } else { - tb->fastreuseport = 0; - } - } else { - if (!reuse) - tb->fastreuse = 0; - if (sk->sk_reuseport) { - /* We didn't match or we don't have fastreuseport set on - * the tb, but we have sk_reuseport set on this socket - * and we know that there are no bind conflicts with - * this socket in this tb, so reset our tb's reuseport - * settings so that any subsequent sockets that match - * our current socket will be put on the fast path. - * - * If we reset we need to set FASTREUSEPORT_STRICT so we - * do extra checking for all subsequent sk_reuseport - * socks. 
- */ - if (!sk_reuseport_match(tb, sk)) { - tb->fastreuseport = FASTREUSEPORT_STRICT; - tb->fastuid = uid; - tb->fast_rcv_saddr = sk->sk_rcv_saddr; - tb->fast_ipv6_only = ipv6_only_sock(sk); - tb->fast_sk_family = sk->sk_family; -#if IS_ENABLED(CONFIG_IPV6) - tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; -#endif - } - } else { - tb->fastreuseport = 0; - } - } + inet_csk_update_fastreuse(tb, sk); + if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index b53da2691adb6e50be4b7a1e35807d7fe182338f..3a5f12f011cb447296a720356e6b8a17677e7161 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -161,6 +161,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) return -ENOMEM; } } + inet_csk_update_fastreuse(tb, child); } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index fbf30122e8bf26b997dc50e14c47f7c5b0828b54..f0faf1193dd899f0849a953208645033aa355262 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include #include @@ -1582,7 +1583,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, if (IS_ERR(rt)) return; - inet_sk(sk)->tos = arg->tos; + inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; sk->sk_priority = skb->priority; sk->sk_protocol = ip_hdr(skb)->protocol; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f752d22cc8a5938487886530085e558617c125aa..3db428242b22d2cd738aad49547dbbf73eb8d3be 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -274,6 +274,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } + (*pos)++; return NULL; } @@ -777,8 +778,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow neigh_event_send(n, NULL); } else { if (fib_lookup(net, fl4, &res, 0) == 0) { - struct fib_nh *nh = &FIB_RES_NH(res); + struct fib_nh *nh; + fib_select_path(net, &res, fl4, skb); + nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, new_gw, 0, false, jiffies + ip_rt_gc_timeout); @@ -1004,6 +1007,7 @@ out: kfree_skb(skb); static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; + struct net *net = dev_net(dst->dev); u32 old_mtu = ipv4_mtu(dst); struct fib_result res; bool lock = false; @@ -1024,9 +1028,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) return; rcu_read_lock(); - if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { - struct fib_nh *nh = &FIB_RES_NH(res); + if (fib_lookup(net, fl4, &res, 0) == 0) { + struct fib_nh *nh; + fib_select_path(net, &res, fl4, NULL); + nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock, jiffies + ip_rt_mtu_expires); } @@ -2536,8 +2542,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, fib_select_path(net, res, fl4, skb); dev_out = FIB_RES_DEV(*res); - fl4->flowi4_oif = dev_out->ifindex; - make_route: rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4c665c832dc18a332e54ceb5395311a707295532..6aaa4d54e1a9669cd9fe4b82526f4cb290da35d0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2046,7 +2046,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, /* Well, if we have 
backlog, try to process it now yet. */ - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index b371e66502c3665e9f9720fe6062d44e1f2bc175..93f176336297745f609f89edba7c673c3b3262f2 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -115,6 +115,14 @@ struct bbr { unused_b:5; u32 prior_cwnd; /* prior cwnd upon entering loss recovery */ u32 full_bw; /* recent bw, to estimate if pipe is full */ + + /* For tracking ACK aggregation: */ + u64 ack_epoch_mstamp; /* start of ACK sampling epoch */ + u16 extra_acked[2]; /* max excess data ACKed in epoch */ + u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */ + extra_acked_win_rtts:5, /* age of extra_acked, in round trips */ + extra_acked_win_idx:1, /* current index in extra_acked array */ + unused_c:6; }; #define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */ @@ -174,6 +182,15 @@ static const u32 bbr_lt_bw_diff = 4000 / 8; /* If we estimate we're policed, use lt_bw for this many round trips: */ static const u32 bbr_lt_bw_max_rtts = 48; +/* Gain factor for adding extra_acked to target cwnd: */ +static const int bbr_extra_acked_gain = BBR_UNIT; +/* Window length of extra_acked window. */ +static const u32 bbr_extra_acked_win_rtts = 5; +/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */ +static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20; +/* Time period for clamping cwnd increment due to ack aggregation */ +static const u32 bbr_extra_acked_max_us = 100 * 1000; + static void bbr_check_probe_rtt_done(struct sock *sk); /* Do we estimate that STARTUP filled the pipe? */ @@ -200,6 +217,16 @@ static u32 bbr_bw(const struct sock *sk) return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk); } +/* Return maximum extra acked in past k-2k round trips, + * where k = bbr_extra_acked_win_rtts. + */ +static u16 bbr_extra_acked(const struct sock *sk) +{ + struct bbr *bbr = inet_csk_ca(sk); + + return max(bbr->extra_acked[0], bbr->extra_acked[1]); +} + /* Return rate in bytes per second, optionally with a gain. * The order here is chosen carefully to avoid overflow of u64. This should * work for input rates of up to 2.9Tbit/sec and gain of 2.89x. @@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) if (event == CA_EVENT_TX_START && tp->app_limited) { bbr->idle_restart = 1; + bbr->ack_epoch_mstamp = tp->tcp_mstamp; + bbr->ack_epoch_acked = 0; /* Avoid pointless buffer overflows: pace at est. bw if we don't * need more speed (we're restarting from idle and app-limited). */ @@ -315,30 +344,19 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) } } -/* Find target cwnd. Right-size the cwnd based on min RTT and the - * estimated bottleneck bandwidth: +/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth: * - * cwnd = bw * min_rtt * gain = BDP * gain + * bdp = bw * min_rtt * gain * * The key factor, gain, controls the amount of queue. While a small gain * builds a smaller queue, it becomes more vulnerable to noise in RTT * measurements (e.g., delayed ACKs or other ACK compression effects). This * noise may cause BBR to under-estimate the rate. 
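The comment being rewritten here accompanies a split of bbr_target_cwnd() into bbr_bdp() and bbr_quantization_budget(). The core fixed-point computation, bdp = bw * min_rtt * gain, can be sketched standalone as follows (constants mirror the BBR scaling; values are illustrative):

    #include <stdint.h>

    #define BBR_SCALE 8                /* gain: fixed point, 1.0 == 1 << 8 */
    #define BW_SCALE  24
    #define BW_UNIT   (1U << BW_SCALE)

    /* bdp = ceil(bw * min_rtt * gain): bw is packets/usec scaled by BW_UNIT,
     * gain is scaled by BBR_SCALE, result is whole packets. */
    static uint32_t bdp_packets(uint64_t bw, uint32_t min_rtt_us, int gain)
    {
        uint64_t w = bw * min_rtt_us;

        return (uint32_t)((((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT);
    }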
- * - * To achieve full performance in high-speed paths, we budget enough cwnd to - * fit full-sized skbs in-flight on both end hosts to fully utilize the path: - * - one skb in sending host Qdisc, - * - one skb in sending host TSO/GSO engine - * - one skb being received by receiver host LRO/GRO/delayed-ACK engine - * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because - * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets, - * which allows 2 outstanding 2-packet sequences, to try to keep pipe - * full even with ACK-every-other-packet delayed ACKs. */ -static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) +static u32 bbr_bdp(struct sock *sk, u32 bw, int gain) { struct bbr *bbr = inet_csk_ca(sk); - u32 cwnd; + u32 bdp; u64 w; /* If we've never had a valid RTT sample, cap cwnd at the initial @@ -353,7 +371,24 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) w = (u64)bw * bbr->min_rtt_us; /* Apply a gain to the given value, then remove the BW_SCALE shift. */ - cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT; + bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT; + + return bdp; +} + +/* To achieve full performance in high-speed paths, we budget enough cwnd to + * fit full-sized skbs in-flight on both end hosts to fully utilize the path: + * - one skb in sending host Qdisc, + * - one skb in sending host TSO/GSO engine + * - one skb being received by receiver host LRO/GRO/delayed-ACK engine + * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because + * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets, + * which allows 2 outstanding 2-packet sequences, to try to keep pipe + * full even with ACK-every-other-packet delayed ACKs. + */ +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain) +{ + struct bbr *bbr = inet_csk_ca(sk); /* Allow enough full-sized skbs in flight to utilize end systems. */ cwnd += 3 * bbr_tso_segs_goal(sk); @@ -368,6 +403,33 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) return cwnd; } +/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */ +static u32 bbr_inflight(struct sock *sk, u32 bw, int gain) +{ + u32 inflight; + + inflight = bbr_bdp(sk, bw, gain); + inflight = bbr_quantization_budget(sk, inflight, gain); + + return inflight; +} + +/* Find the cwnd increment based on estimate of ack aggregation */ +static u32 bbr_ack_aggregation_cwnd(struct sock *sk) +{ + u32 max_aggr_cwnd, aggr_cwnd = 0; + + if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) { + max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us) + / BW_UNIT; + aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk)) + >> BBR_SCALE; + aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd); + } + + return aggr_cwnd; +} + /* An optimization in BBR to reduce losses: On the first round of recovery, we * follow the packet conservation principle: send P packets per P packets acked. * After that, we slow-start and send at most 2*P packets per P packets acked. @@ -428,8 +490,15 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) goto done; + target_cwnd = bbr_bdp(sk, bw, gain); + + /* Increment the cwnd to account for excess ACKed data that seems + * due to aggregation (of data and/or ACKs) visible in the ACK stream. 
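At this point the cwnd target is assembled in three steps: the BDP from above, an allowance for ACK aggregation capped at roughly 100 ms worth of bandwidth, and the quantization budget (extra full-sized skbs plus rounding). A simplified standalone composition, with hypothetical helper parameters; the kernel derives each term from bbr_bdp(), bbr_ack_aggregation_cwnd() and bbr_quantization_budget():

    #include <stdint.h>

    static uint32_t assemble_target_cwnd(uint32_t bdp, uint32_t extra_acked,
                                         uint32_t max_aggr_cwnd,
                                         uint32_t tso_segs_goal)
    {
        uint32_t aggr = extra_acked < max_aggr_cwnd ? extra_acked : max_aggr_cwnd;
        uint32_t cwnd = bdp + aggr;

        cwnd += 3 * tso_segs_goal;     /* room for skbs queued on both hosts */
        cwnd = (cwnd + 1) & ~1U;       /* round up to an even packet count */
        return cwnd;
    }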
+ */ + target_cwnd += bbr_ack_aggregation_cwnd(sk); + target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain); + /* If we're below target cwnd, slow start cwnd toward target cwnd. */ - target_cwnd = bbr_target_cwnd(sk, bw, gain); if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */ cwnd = min(cwnd + acked, target_cwnd); else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND) @@ -470,14 +539,14 @@ static bool bbr_is_next_cycle_phase(struct sock *sk, if (bbr->pacing_gain > BBR_UNIT) return is_full_length && (rs->losses || /* perhaps pacing_gain*BDP won't fit */ - inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain)); + inflight >= bbr_inflight(sk, bw, bbr->pacing_gain)); /* A pacing_gain < 1.0 tries to drain extra queue we added if bw * probing didn't find more bw. If inflight falls to match BDP then we * estimate queue is drained; persisting would underutilize the pipe. */ return is_full_length || - inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT); + inflight <= bbr_inflight(sk, bw, BBR_UNIT); } static void bbr_advance_cycle_phase(struct sock *sk) @@ -699,6 +768,67 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs) } } +/* Estimates the windowed max degree of ack aggregation. + * This is used to provision extra in-flight data to keep sending during + * inter-ACK silences. + * + * Degree of ack aggregation is estimated as extra data acked beyond expected. + * + * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval" + * cwnd += max_extra_acked + * + * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms). + * Max filter is an approximate sliding window of 5-10 (packet timed) round + * trips. + */ +static void bbr_update_ack_aggregation(struct sock *sk, + const struct rate_sample *rs) +{ + u32 epoch_us, expected_acked, extra_acked; + struct bbr *bbr = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 || + rs->delivered < 0 || rs->interval_us <= 0) + return; + + if (bbr->round_start) { + bbr->extra_acked_win_rtts = min(0x1F, + bbr->extra_acked_win_rtts + 1); + if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) { + bbr->extra_acked_win_rtts = 0; + bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ? + 0 : 1; + bbr->extra_acked[bbr->extra_acked_win_idx] = 0; + } + } + + /* Compute how many packets we expected to be delivered over epoch. */ + epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp, + bbr->ack_epoch_mstamp); + expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT; + + /* Reset the aggregation epoch if ACK rate is below expected rate or + * significantly large no. of ack received since epoch (potentially + * quite old epoch). + */ + if (bbr->ack_epoch_acked <= expected_acked || + (bbr->ack_epoch_acked + rs->acked_sacked >= + bbr_ack_epoch_acked_reset_thresh)) { + bbr->ack_epoch_acked = 0; + bbr->ack_epoch_mstamp = tp->delivered_mstamp; + expected_acked = 0; + } + + /* Compute excess data delivered, beyond what was expected. 
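The epoch bookkeeping referred to in these comments compares what was actually (S)ACKed against what the bandwidth estimate predicts for the elapsed time, restarting the epoch when the flow falls behind (or, in the kernel, when the 20-bit counter would overflow). A reduced sketch with simplified types:

    #include <stdint.h>

    struct ack_epoch {
        uint64_t start_us;      /* when the sampling epoch began */
        uint32_t acked;         /* packets (S)ACKed since then */
    };

    /* Returns the "extra acked" sample: data delivered beyond bw * elapsed,
     * clamped by cwnd before being fed to the windowed max filter. */
    static uint32_t extra_acked_sample(struct ack_epoch *ep, uint64_t now_us,
                                       uint64_t bw_pkts_per_sec,
                                       uint32_t newly_acked, uint32_t cwnd)
    {
        uint32_t expected =
            (uint32_t)(bw_pkts_per_sec * (now_us - ep->start_us) / 1000000);
        uint32_t extra;

        if (ep->acked <= expected) {    /* behind the estimated rate: restart */
            ep->acked = 0;
            ep->start_us = now_us;
            expected = 0;
        }
        ep->acked += newly_acked;
        extra = ep->acked - expected;
        return extra < cwnd ? extra : cwnd;
    }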
*/ + bbr->ack_epoch_acked = min_t(u32, 0xFFFFF, + bbr->ack_epoch_acked + rs->acked_sacked); + extra_acked = bbr->ack_epoch_acked - expected_acked; + extra_acked = min(extra_acked, tp->snd_cwnd); + if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx]) + bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked; +} + /* Estimate when the pipe is full, using the change in delivery rate: BBR * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited @@ -736,11 +866,11 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */ bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */ tcp_sk(sk)->snd_ssthresh = - bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT); + bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT); } /* fall through to check if in-flight is already small: */ if (bbr->mode == BBR_DRAIN && tcp_packets_in_flight(tcp_sk(sk)) <= - bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT)) + bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT)) bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ } @@ -828,6 +958,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) { bbr_update_bw(sk, rs); + bbr_update_ack_aggregation(sk, rs); bbr_update_cycle_phase(sk, rs); bbr_check_full_bw_reached(sk, rs); bbr_check_drain(sk, rs); @@ -878,6 +1009,13 @@ static void bbr_init(struct sock *sk) bbr_reset_lt_bw_sampling(sk); bbr_reset_startup_mode(sk); + bbr->ack_epoch_mstamp = tp->tcp_mstamp; + bbr->ack_epoch_acked = 0; + bbr->extra_acked_win_rtts = 0; + bbr->extra_acked_win_idx = 0; + bbr->extra_acked[0] = 0; + bbr->extra_acked[1] = 0; + cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); } diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 613282c65a10750cccc5985be0ffc65a68138ae1..a32cf50c237d8dd6613222bfd62cd92fcabdd79c 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -321,6 +321,7 @@ config IPV6_SEG6_LWTUNNEL config IPV6_SEG6_HMAC bool "IPv6: Segment Routing HMAC support" depends on IPV6 + select CRYPTO select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 4e0ff7031edd55ce6dbb3f2c62e22b9040cc7fec..55fbe330471c706b3c475049015098f3a9e52559 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -173,7 +173,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -181,10 +181,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -201,6 +198,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 5e8979c1f76d86a2c570d6e0581a899ce2658aa1..b924941b96a31974384a36f7e904d67ca66d4631 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1811,14 +1811,19 @@ static void fib6_del_route(struct fib6_table *table, struct 
fib6_node *fn, /* Need to own table->tb6_lock */ int fib6_del(struct fib6_info *rt, struct nl_info *info) { - struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, - lockdep_is_held(&rt->fib6_table->tb6_lock)); - struct fib6_table *table = rt->fib6_table; struct net *net = info->nl_net; struct fib6_info __rcu **rtp; struct fib6_info __rcu **rtp_next; + struct fib6_table *table; + struct fib6_node *fn; - if (!fn || rt == net->ipv6.fib6_null_entry) + if (rt == net->ipv6.fib6_null_entry) + return -ENOENT; + + table = rt->fib6_table; + fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&table->tb6_lock)); + if (!fn) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); @@ -2372,14 +2377,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; + ++(*pos); if (!v) goto iter_table; n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); - if (n) { - ++*pos; + if (n) return n; - } iter_table: ipv6_route_check_sernum(iter); @@ -2387,8 +2391,6 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) r = fib6_walk_continue(&iter->w); spin_unlock_bh(&iter->tbl->tb6_lock); if (r > 0) { - if (v) - ++*pos; return iter->w.leaf; } else if (r < 0) { fib6_walker_unlink(net, &iter->w); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8e70a015c7928d4c38020445ab291440a4dbea97..b825ac025d5bd05fc5fce85c5d3632bdee7e3245 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -865,7 +865,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, struct metadata_dst *tun_dst, bool log_ecn_err) { - return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb); + + dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; + if (tpi->proto == htons(ETH_P_IP)) + dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; + + return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, log_ecn_err); } EXPORT_SYMBOL(ip6_tnl_rcv); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index aa54303c43a629d5e770ccad22f05bf1038adc0f..4e1da6cb9ed717cf02be6de44366ec670b7940eb 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -207,6 +207,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so diff --git a/net/key/af_key.c b/net/key/af_key.c index 1982f9f31debb26c615a91fffdc45887d01cbcba..e340e97224c3ade65f0d201909d8615ff8b3f14d 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1855,6 +1855,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + if ((xfilter->sadb_x_filter_splen >= + (sizeof(xfrm_address_t) << 3)) || + (xfilter->sadb_x_filter_dplen >= + (sizeof(xfrm_address_t) << 3))) { + mutex_unlock(&pfk->dump_lock); + return -EINVAL; + } filter = kmalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) { mutex_unlock(&pfk->dump_lock); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6ead3c39f3566a367f6992c448106d8f95afcf8d..bcba579e292ff81754f1b51483527708c3162c71 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -785,7 +785,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t 
len, } /* Well, if we have backlog, try to process it now yet. */ - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index cb7076d9a76986456b805519216c3d112227c285..b6670e74aeb7b037e8602a364f44f037678636fd 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2011,6 +2011,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index ac1f5db529945399375c7ab25fc5cd9f44c35373..4fc720c77e37e830b9bfdad8ca51d46e24fe7f04 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -532,6 +532,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ec2e83272f9d87afa5af99ecc6c52d7012b12590..2a82d438991b5648689b5f8652e3ca705e0769de 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -979,7 +979,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) might_sleep(); lockdep_assert_held(&local->sta_mtx); - while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); WARN_ON_ONCE(ret); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3160ffd93a153abfc265efa4fd67c00c57e9e5f8..7d7eb75ce901c9bf1b8d422cc767df6fed071f4e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -166,6 +166,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, break; } case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 259325cbcc314d7c4f351c38291752368774dfef..4d154efb80c88b6a0ba98f6c42f35d7fe466c3d2 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -170,10 +170,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, /* take some capabilities as-is */ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); vht_cap->cap = cap_info; - vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | - IEEE80211_VHT_CAP_RXLDPC | + vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_VHT_TXOP_PS | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | @@ -182,6 +179,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, + own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); + /* and some based on our own capabilities */ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index bcd1a5e6ebf4202440d51b3d2e2c3eadaae27674..2f873a0dc5836272d52fa0b2419d1bbde915f558 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work) if (res) goto err_tx; - 
ieee802154_xmit_complete(&local->hw, skb, false); - dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; + ieee802154_xmit_complete(&local->hw, skb, false); + return; err_tx: @@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) /* async is priority, otherwise sync is fallback */ if (local->ops->xmit_async) { + unsigned int len = skb->len; + ret = drv_xmit_async(local, skb); if (ret) { ieee802154_wake_queue(&local->hw); @@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } else { local->tx_skb = skb; queue_work(local->workqueue, &local->tx_work); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index a71f777d1353abf16509c8389fd3863860ba0562..d5e4329579e28ce24672c8496d0f8413c5a54234 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1928,14 +1928,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { - bool uses_ct = false, resched = false; + bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); } else if (is_new_conn_expected(cp, conn_reuse_mode)) { - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; } else { @@ -1943,15 +1943,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int * that uses conntrack while it is still * referenced by controlled connection(s). 
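Both mac802154 changes a little further up address the same hazard: once the skb is handed to ieee802154_xmit_complete() or drv_xmit_async() it may already have been freed, so its length has to be read into a local first. The pattern, reduced to plain C with stand-in types:

    #include <stddef.h>
    #include <stdlib.h>

    struct frame { size_t len; void *data; };

    /* Stands in for the completion path, which consumes and frees the frame. */
    static void tx_complete(struct frame *f) { free(f->data); free(f); }

    static void account_tx(struct frame *f, unsigned long *tx_packets,
                           unsigned long *tx_bytes)
    {
        size_t len = f->len;    /* capture before ownership is handed away */

        tx_complete(f);
        (*tx_packets)++;
        *tx_bytes += len;       /* f must not be dereferenced here any more */
    }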
*/ - resched = !uses_ct; + resched = !old_ct; } } if (resched) { + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; if (!atomic_read(&cp->n_control)) ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); - if (uses_ct) + if (old_ct) return NF_DROP; cp = NULL; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 31fa94064a620976c30f80517239d3e14ecde21e..0b89609a6e9d61083fd6852d4d19b09a641b7e87 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1129,6 +1129,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], if (!tb[CTA_TUPLE_IP]) return -EINVAL; + if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) + return -EOPNOTSUPP; tuple->src.l3num = l3num; err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 7d7e30ea0ecf9d5ace089cc6d16737406554ffa0..a937d4f75613f1b37e0ceeac679a7790e20af3b2 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -65,6 +65,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 + #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT @@ -288,6 +290,7 @@ static int sctp_packet(struct nf_conn *ct, u_int32_t offset, count; unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + bool ignore = false; sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); if (sh == NULL) @@ -332,15 +335,39 @@ static int sctp_packet(struct nf_conn *ct, /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; - } else if (sch->type == SCTP_CID_HEARTBEAT || - sch->type == SCTP_CID_HEARTBEAT_ACK) { + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.last_dir = dir; + ignore = true; + continue; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { if (ct->proto.sctp.vtag[dir] == 0) { pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir); ct->proto.sctp.vtag[dir] = sh->vtag; } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { - pr_debug("Verification tag check failed\n"); - goto out_unlock; + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || + ct->proto.sctp.last_dir == dir) + goto out_unlock; + + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.vtag[dir] = sh->vtag; + ct->proto.sctp.vtag[!dir] = 0; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; } } @@ -375,6 +402,10 @@ static int sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + /* allow but do not refresh timeout */ + if (ignore) + return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = sctp_pernet(nf_ct_net(ct))->timeouts; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c 
index 1b8a53081632ffab249737a89af59ef3fe2906da..5b4632826dc6696f331de83b850151a41486adbb 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -718,11 +718,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) - goto err; + goto err_fill_table_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_table_info: kfree_skb(skb2); return err; } @@ -1383,11 +1383,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) - goto err; + goto err_fill_chain_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_chain_info: kfree_skb(skb2); return err; } @@ -2488,11 +2488,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule); if (err < 0) - goto err; + goto err_fill_rule_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_rule_info: kfree_skb(skb2); return err; } @@ -3204,7 +3204,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + if (set->udata && + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure; desc = nla_nest_start(skb, NFTA_SET_DESC); @@ -3376,11 +3377,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0); if (err < 0) - goto err; + goto err_fill_set_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_set_info: kfree_skb(skb2); return err; } @@ -4156,24 +4157,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb == NULL) - goto err1; + return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, NFT_MSG_NEWSETELEM, 0, set, &elem); if (err < 0) - goto err2; + goto err_fill_setelem; - err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT); - /* This avoids a loop in nfnetlink. */ - if (err < 0) - goto err1; + return nfnetlink_unicast(skb, ctx->net, ctx->portid); - return 0; -err2: +err_fill_setelem: kfree_skb(skb); -err1: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + return err; } /* called with rcu_read_lock held */ @@ -5272,10 +5267,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); if (err < 0) - goto err; + goto err_fill_obj_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_obj_info: kfree_skb(skb2); return err; } @@ -5932,10 +5928,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, NFT_MSG_NEWFLOWTABLE, 0, family, flowtable); if (err < 0) - goto err; + goto err_fill_flowtable_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_flowtable_info: kfree_skb(skb2); return err; } @@ -6096,10 +6093,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk, err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) - goto err; + goto err_fill_gen_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_gen_info: kfree_skb(skb2); return err; } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 7f2c1915763f8caec6e2b2b5a460e6db5767d094..9bacddc761ba42560c43e452fffb4657ff2f97be 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -148,10 +148,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid) { - return netlink_unicast(net->nfnl, skb, portid, flags); + int err; + + err = nlmsg_unicast(net->nfnl, skb, portid); + if (err == -EAGAIN) + err = -ENOBUFS; + + return err; } EXPORT_SYMBOL_GPL(nfnetlink_unicast); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 332c69d27b4782698f6730befc7bc26d431ff735..25298b3eb8546b63f0bbc9ffae5689ead889a7fd 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -359,8 +359,7 @@ __nfulnl_send(struct nfulnl_instance *inst) goto out; } } - nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); out: inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index d33094f4ec41e380c57b57b26975f263cfe6bf8f..f81a3ce0fe48e1d23ecc3acc6619ddd090aa354c 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -685,7 +685,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, net, queue->peer_portid); if (err < 0) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 19446a89a2a8158317c011fb662e406c4adafa91..b1a9f330a51fe26b73b7d9c7f1d8a5cfa4b65435 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -79,7 +79,9 @@ static void nft_payload_eval(const struct nft_expr *expr, u32 *dest = ®s->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % 
NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c index c42724469759b8742cff4354b38b473621a572bd..c9a06d0652fe4b306690a2ceea4ea71b1d84e27c 100644 --- a/net/netfilter/xt_quota2.c +++ b/net/netfilter/xt_quota2.c @@ -306,6 +306,8 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_mtinfo2 *q = (void *)par->matchinfo; struct xt_quota_counter *e = q->master; + int charge = (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + bool no_change = q->flags & XT_QUOTA_NO_CHANGE; bool ret = q->flags & XT_QUOTA_INVERT; spin_lock_bh(&e->lock); @@ -314,24 +316,21 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) * While no_change is pointless in "grow" mode, we will * implement it here simply to have a consistent behavior. */ - if (!(q->flags & XT_QUOTA_NO_CHANGE)) { - e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; - } - ret = true; + if (!no_change) + e->quota += charge; + ret = true; /* note: does not respect inversion (bug??) */ } else { - if (e->quota >= skb->len) { - if (!(q->flags & XT_QUOTA_NO_CHANGE)) - e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + if (e->quota > charge) { + if (!no_change) + e->quota -= charge; ret = !ret; - } else { + } else if (e->quota) { /* We are transitioning, log that fact. */ - if (e->quota) { - quota2_log(xt_hooknum(par), - skb, - xt_in(par), - xt_out(par), - q->name); - } + quota2_log(xt_hooknum(par), + skb, + xt_in(par), + xt_out(par), + q->name); /* we do not allow even small packets from now on */ e->quota = 0; } diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 41d0e95d171e185828054733f8a26d099da65cff..b1a1718495f34679528602fcd4f2edae794bee13 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL; @@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ if (entry == NULL) return -ENOENT; @@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); + if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, @@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, audit_log_end(audit_buf); } - if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - 
cipso_v4_doi_putdef(map4->def.cipso); - } + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); + } #if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } #endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; #if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; #endif /* IPv6 */ - } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry); return ret_val; } diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index e2188deb08dc3bb16e2a60808b274a4a092fd2ee..b927730d9ab06888a57150acd080ddf1b9980171 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -344,10 +344,13 @@ static int rawsock_create(struct net *net, struct socket *sock, if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; - if (sock->type == SOCK_RAW) + if (sock->type == SOCK_RAW) { + if (!capable(CAP_NET_RAW)) + return -EPERM; sock->ops = &rawsock_raw_ops; - else + } else { sock->ops = &rawsock_ops; + } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 6dcb59f272e16d9bf486cf9909f488dcf2fe4677..fb13fcfedaf4f55be9c40060621a146c825f48c9 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -283,10 +283,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) ovs_ct_update_key(skb, NULL, key, false, false); } -#define IN6_ADDR_INITIALIZER(ADDR) \ - { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \ - (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] } - int ovs_ct_put_key(const struct sw_flow_key *swkey, const struct sw_flow_key *output, struct sk_buff *skb) { @@ -308,24 +304,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, if (swkey->ct_orig_proto) { if (swkey->eth.type == htons(ETH_P_IP)) { - struct ovs_key_ct_tuple_ipv4 orig = { - output->ipv4.ct_orig.src, - output->ipv4.ct_orig.dst, - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv4 orig; + + memset(&orig, 0, sizeof(orig)); + orig.ipv4_src = output->ipv4.ct_orig.src; + orig.ipv4_dst = output->ipv4.ct_orig.dst; + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv4_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, sizeof(orig), &orig)) return -EMSGSIZE; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - struct ovs_key_ct_tuple_ipv6 orig = { - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src), - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv6 orig; + + memset(&orig, 0, sizeof(orig)); + memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32, + sizeof(orig.ipv6_src)); + memcpy(orig.ipv6_dst, 
output->ipv6.ct_orig.dst.s6_addr32, + sizeof(orig.ipv6_dst)); + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv6_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, sizeof(orig), &orig)) return -EMSGSIZE; @@ -897,15 +899,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); - if (err == NF_ACCEPT && - ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { - if (maniptype == NF_NAT_MANIP_SRC) - maniptype = NF_NAT_MANIP_DST; - else - maniptype = NF_NAT_MANIP_SRC; - - err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, - maniptype); + if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { + if (ct->status & IPS_SRC_NAT) { + if (maniptype == NF_NAT_MANIP_SRC) + maniptype = NF_NAT_MANIP_DST; + else + maniptype = NF_NAT_MANIP_SRC; + + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, + maniptype); + } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { + err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, + NF_NAT_MANIP_SRC); + } } /* Mark NAT done if successful and update the flow key. */ diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index c038e021a591685cd9cca521c5b32259ea48bdbd..5ea2471ffc03f46b34f6c4d7acdd9be40e72f5f4 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -255,8 +255,8 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) * * Start with a full bucket. */ - band->bucket = (band->burst_size + band->rate) * 1000; - band_max_delta_t = band->bucket / band->rate; + band->bucket = (band->burst_size + band->rate) * 1000ULL; + band_max_delta_t = div_u64(band->bucket, band->rate); if (band_max_delta_t > meter->max_delta_t) meter->max_delta_t = band_max_delta_t; band++; diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h index 964ace2650f89627531afb89d68c32011bc1792b..970557ed5b5b6c580496b1598ac4a964273a8b15 100644 --- a/net/openvswitch/meter.h +++ b/net/openvswitch/meter.h @@ -26,7 +26,7 @@ struct dp_meter_band { u32 type; u32 rate; u32 burst_size; - u32 bucket; /* 1/1000 packets, or in bits */ + u64 bucket; /* 1/1000 packets, or in bits */ struct ovs_flow_stats stats; }; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1d35a9a14cc0cd34f234c6c6e15c78eb7feb8ec4..fe39290bc276dd954d4796cb3dfe5b83f7518040 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -949,6 +949,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) + __releases(&pkc->blk_fill_in_prog_lock) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); @@ -996,6 +997,7 @@ static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) + __acquires(&pkc->blk_fill_in_prog_lock) { struct tpacket3_hdr *ppd; @@ -2160,7 +2162,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; - unsigned short macoff, netoff, hdrlen; + unsigned short macoff, hdrlen; + unsigned int netoff; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; @@ -2223,6 +2226,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } macoff = netoff - maclen; } + if (netoff > USHRT_MAX) { + spin_lock(&sk->sk_receive_queue.lock); + po->stats.stats1.tp_drops++; + 
spin_unlock(&sk->sk_receive_queue.lock); + goto drop_n_restore; + } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && @@ -2272,8 +2281,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (do_vnet && virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) + vio_le(), true, 0)) { + if (po->tp_version == TPACKET_V3) + prb_clear_blk_fill_status(&po->rx_ring); goto drop_n_account; + } if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); @@ -2379,7 +2391,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, __clear_bit(slot_id, po->rx_ring.rx_owner_map); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); - } else { + } else if (po->tp_version == TPACKET_V3) { prb_clear_blk_fill_status(&po->rx_ring); } diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index a5836f49a57e594f024316f9971192cb16b15e63..a69ead5c2e4524fa57d0a8e2dfb23e8e39b55647 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1334,25 +1334,29 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { + u32 min_port; int rc; if (!*port) { - rc = idr_alloc_cyclic(&qrtr_ports, ipc, QRTR_MIN_EPH_SOCKET, - QRTR_MAX_EPH_SOCKET + 1, GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = QRTR_MIN_EPH_SOCKET; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, + QRTR_MAX_EPH_SOCKET, GFP_ATOMIC); + if (!rc) + *port = min_port; } else if (*port < QRTR_MIN_EPH_SOCKET && !(capable(CAP_NET_ADMIN) || in_egroup_p(AID_VENDOR_QRTR) || in_egroup_p(GLOBAL_ROOT_GID))) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + min_port = 0; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC); } else { - rc = idr_alloc_cyclic(&qrtr_ports, ipc, *port, *port + 1, + min_port = *port; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC); - if (rc >= 0) - *port = rc; + if (!rc) + *port = min_port; } if (rc == -ENOSPC) diff --git a/net/rds/recv.c b/net/rds/recv.c index c0b945516cdbce681c09cf2d7377dfcfd9612162..3ca278988b5291bbec1cd22c66fd7929cfc56df6 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -455,12 +455,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* put_cmsg copies to user space and thus may sleep. 
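The rds change just above (and the smc_diag one later in this series) replaces a partial initializer with an explicit memset(): zero-initialization of named members does not guarantee that padding holes are cleared, and the whole struct is subsequently copied out via put_cmsg() or netlink, which could otherwise leak stack bytes. A minimal illustration:

    #include <string.h>

    struct notify_cmsg {
        unsigned char  type;    /* compiler may insert padding after this */
        unsigned int   value;
    };

    static void build_cmsg(struct notify_cmsg *out, unsigned int value)
    {
        struct notify_cmsg c;

        memset(&c, 0, sizeof(c));   /* clears padding as well as members */
        c.type = 1;
        c.value = value;
        *out = c;                   /* every byte of *out is now well defined */
    }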
We can't do this * with rs_lock held, so first grab as many notifications as we can stuff diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 964c4e45de11f5a3fa68d5b832e856043415f08c..39f5fa3501fff973c93fe5d734720720d276dd32 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -290,7 +290,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -310,18 +310,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), - here, ERR_PTR(ret)); + here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. + */ +error_attached_to_socket: + trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), + here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 126154a97a5921697d1c385dec78a3291d19179b..04213afd7710f71cb25b1723a8d19fc34e76dc4a 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -342,18 +342,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, return ret; spin_lock(&conn->channel_lock); - spin_lock(&conn->state_lock); + spin_lock_bh(&conn->state_lock); if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { conn->state = RXRPC_CONN_SERVICE; - spin_unlock(&conn->state_lock); + spin_unlock_bh(&conn->state_lock); for (loop = 0; loop < RXRPC_MAXCALLS; loop++) rxrpc_call_is_secure( rcu_dereference_protected( conn->channels[loop].call, lockdep_is_held(&conn->channel_lock))); } else { - spin_unlock(&conn->state_lock); + spin_unlock_bh(&conn->state_lock); } spin_unlock(&conn->channel_lock); diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index c4c4450891e0ff3d49e160ad2259e003708c3a6c..2adb7c5c8966136dfff29db73ea7a947c7099137 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -215,9 +215,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index ad9d1b21cb0ba4ead61569fec8a125e30576e1c7..2fe2add62a8ede3350a8f477e47d927ee0488182 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -905,7 +905,7 @@ int rxrpc_request_key(struct rxrpc_sock 
*rx, char __user *optval, int optlen) _enter(""); - if (optlen <= 0 || optlen > PAGE_SIZE - 1) + if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) return -EINVAL; description = memdup_user_nul(optval, optlen); @@ -1075,7 +1075,7 @@ static long rxrpc_read(const struct key *key, switch (token->security_index) { case RXRPC_SECURITY_RXKAD: - toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, + toksize += 8 * 4; /* viceid, kvno, key*2, begin, * end, primary, tktlen */ toksize += RND(token->kad->ticket_len); break; @@ -1110,7 +1110,8 @@ static long rxrpc_read(const struct key *key, break; default: /* we have a ticket we can't encode */ - BUG(); + pr_err("Unsupported key token type (%u)\n", + token->security_index); continue; } @@ -1141,6 +1142,14 @@ static long rxrpc_read(const struct key *key, memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ xdr += (_l + 3) >> 2; \ } while(0) +#define ENCODE_BYTES(l, s) \ + do { \ + u32 _l = (l); \ + memcpy(xdr, (s), _l); \ + if (_l & 3) \ + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ + xdr += (_l + 3) >> 2; \ + } while(0) #define ENCODE64(x) \ do { \ __be64 y = cpu_to_be64(x); \ @@ -1168,7 +1177,7 @@ static long rxrpc_read(const struct key *key, case RXRPC_SECURITY_RXKAD: ENCODE(token->kad->vice_id); ENCODE(token->kad->kvno); - ENCODE_DATA(8, token->kad->session_key); + ENCODE_BYTES(8, token->kad->session_key); ENCODE(token->kad->start); ENCODE(token->kad->expiry); ENCODE(token->kad->primary_flag); @@ -1218,7 +1227,6 @@ static long rxrpc_read(const struct key *key, break; default: - BUG(); break; } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 6e9d977f07971902be198fd86d0d5177552ec893..e4fde33b887e7c8ebda7e237f56522308cc05bd4 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -530,7 +530,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index caee7632c257dd8d97ce5728705cfd04c62efb60..edd76c41765fe88d082c2c165d537867aada011a 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -654,6 +654,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. 
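The ENCODE_BYTES macro introduced above writes only the payload, padded to the next 32-bit boundary, with no leading length word; ENCODE_DATA additionally emits the length. That is why the rxkad token size estimate shrinks from 9*4 to 8*4 and the 8-byte session key is now emitted with ENCODE_BYTES. A standalone equivalent of the padding rule:

    #include <stdint.h>
    #include <string.h>

    /* Copy len bytes into a 32-bit XDR stream, zero-pad to a 4-byte boundary
     * and return the advanced cursor; no length word is written. */
    static uint32_t *encode_bytes(uint32_t *xdr, const void *src, uint32_t len)
    {
        static const uint32_t zero;

        memcpy(xdr, src, len);
        if (len & 3)
            memcpy((uint8_t *)xdr + len, &zero, 4 - (len & 3));
        return xdr + ((len + 3) >> 2);
    }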
*/ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 119e20cad662b3b63f8617779c0f3c9625557c3a..bd96fd261dba3efc7f888d97140287712ea79f0f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1115,27 +1115,36 @@ static void dev_deactivate_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc_default) { - struct Qdisc *qdisc_default = _qdisc_default; - struct Qdisc *qdisc; + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc); - qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { - bool nolock = qdisc->flags & TCQ_F_NOLOCK; - - if (nolock) - spin_lock_bh(&qdisc->seqlock); - spin_lock_bh(qdisc_lock(qdisc)); - if (!(qdisc->flags & TCQ_F_BUILTIN)) set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); + } +} - rcu_assign_pointer(dev_queue->qdisc, qdisc_default); - qdisc_reset(qdisc); +static void dev_reset_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_unused) +{ + struct Qdisc *qdisc; + bool nolock; - spin_unlock_bh(qdisc_lock(qdisc)); - if (nolock) - spin_unlock_bh(&qdisc->seqlock); - } + qdisc = dev_queue->qdisc_sleeping; + if (!qdisc) + return; + + nolock = qdisc->flags & TCQ_F_NOLOCK; + + if (nolock) + spin_lock_bh(&qdisc->seqlock); + spin_lock_bh(qdisc_lock(qdisc)); + + qdisc_reset(qdisc); + + spin_unlock_bh(qdisc_lock(qdisc)); + if (nolock) + spin_unlock_bh(&qdisc->seqlock); } static bool some_qdisc_is_busy(struct net_device *dev) @@ -1196,12 +1205,20 @@ void dev_deactivate_many(struct list_head *head) dev_watchdog_down(dev); } - /* Wait for outstanding qdisc-less dev_queue_xmit calls. + /* Wait for outstanding qdisc-less dev_queue_xmit calls or + * outstanding qdisc enqueuing calls. * This is avoided if all devices are in dismantle phase : * Caller will call synchronize_net() for us */ synchronize_net(); + list_for_each_entry(dev, head, close_list) { + netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); + + if (dev_ingress_queue(dev)) + dev_reset_queue(dev, dev_ingress_queue(dev), NULL); + } + /* Wait for outstanding qdisc_run calls. */ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 5b537613946fcaaabcb2716b74b5a7188a828142..2bd8c80bd85fb39b786d7a401bfe9cf4047c6d4f 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -515,6 +515,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) out_err: /* Clean up any successful allocations */ sctp_auth_destroy_hmacs(ep->auth_hmacs); + ep->auth_hmacs = NULL; return -ENOMEM; } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 7bb8e5603298d2e22ac004ee98da2ee4ed2d45e2..d6e83a37a1adfa1b9deec4a85d2fd306dbc8cd10 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -51,6 +51,7 @@ #include #include #include +#include /* Declare internal functions here. */ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); @@ -1257,6 +1258,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) /* Grab the association's destination address list. */ transport_list = &asoc->peer.transport_addr_list; + /* SCTP path tracepoint for congestion control debugging. 
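The sctp_auth_init_hmacs() error path above now clears ep->auth_hmacs after freeing it, so the endpoint's normal teardown cannot free the same array a second time. The general shape of the fix, in standalone form with a hypothetical later_step_fails parameter:

    #include <stdlib.h>

    struct endpoint { void *auth_hmacs; };

    static int init_hmacs(struct endpoint *ep, int later_step_fails)
    {
        ep->auth_hmacs = calloc(1, 64);
        if (!ep->auth_hmacs)
            return -1;

        if (later_step_fails) {
            free(ep->auth_hmacs);
            ep->auth_hmacs = NULL;  /* teardown also frees this; avoid double free */
            return -1;
        }
        return 0;
    }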
*/ + list_for_each_entry(transport, transport_list, transports) { + trace_sctp_probe_path(transport, asoc); + } + sack_ctsn = ntohl(sack->cum_tsn_ack); gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); asoc->stats.gapcnt += gap_ack_blocks; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c93be3ba5df2989ed41e62a904119a983c70c1c3..4a2873f70b372074dd06fccfde1c1cebb0dde2d8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1931,7 +1931,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, if (sctp_wspace(asoc) < (int)msg_len) sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); - if (sctp_wspace(asoc) <= 0) { + if (sk_under_memory_pressure(sk)) + sk_mem_reclaim(sk); + + if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) { timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) @@ -7640,8 +7643,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) pr_debug("%s: begins, snum:%d\n", __func__, snum); - local_bh_disable(); - if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; @@ -7660,20 +7661,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: - spin_unlock(&head->lock); + spin_unlock_bh(&head->lock); + cond_resched(); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) - goto fail; + return ret; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's @@ -7688,7 +7690,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) * port iterator, pp being NULL. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; @@ -7770,10 +7772,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ret = 0; fail_unlock: - spin_unlock(&head->lock); - -fail: - local_bh_enable(); + spin_unlock_bh(&head->lock); return ret; } @@ -8515,7 +8514,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, goto do_error; if (signal_pending(current)) goto do_interrupted; - if ((int)msg_len <= sctp_wspace(asoc)) + if (sk_under_memory_pressure(sk)) + sk_mem_reclaim(sk); + if ((int)msg_len <= sctp_wspace(asoc) && + sk_wmem_schedule(sk, msg_len)) break; /* Let another process have a go. 
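The sctp_get_port_local() rework earlier in this hunk drops the long local_bh_disable() section in favour of per-bucket spin_lock_bh() plus a cond_resched() between probes, so scanning a large ephemeral range no longer keeps bottom halves disabled for the whole search. The loop structure, reduced to plain C; bucket_in_use() stands in for the bh-locked hash bucket scan and yield() for cond_resched():

    static int find_free_port(int low, int high,
                              int (*bucket_in_use)(int port), void (*yield)(void))
    {
        int remaining = high - low + 1;
        int rover = low;

        do {
            if (!bucket_in_use(rover))
                return rover;
            yield();                /* give softirqs and other tasks a chance */
            if (++rover > high)
                rover = low;
        } while (--remaining > 0);

        return -1;                  /* local port range exhausted */
    }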
Since we are going diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 2379a02c319d704b89a684c5160ad26ac8ebdb7b..6c4a7a5938b7a8c389e3042b3ed7d52a953310b6 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -169,13 +169,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_connection *conn = &smc->conn; - struct smcd_diag_dmbinfo dinfo = { - .linkid = *((u32 *)conn->lgr->id), - .peer_gid = conn->lgr->peer_gid, - .my_gid = conn->lgr->smcd->local_gid, - .token = conn->rmb_desc->token, - .peer_token = conn->peer_token - }; + struct smcd_diag_dmbinfo dinfo; + + memset(&dinfo, 0, sizeof(dinfo)); + + dinfo.linkid = *((u32 *)conn->lgr->id); + dinfo.peer_gid = conn->lgr->peer_gid; + dinfo.my_gid = conn->lgr->smcd->local_gid; + dinfo.token = conn->rmb_desc->token; + dinfo.peer_token = conn->peer_token; if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) goto errout; diff --git a/net/socket.c b/net/socket.c index a86adaf9b52e2435a8b6384131eaeff37249119e..8fa21173c4b9c0d3a2f06d721accdcb4218509ab 100644 --- a/net/socket.c +++ b/net/socket.c @@ -484,7 +484,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) if (f.file) { sock = sock_from_file(f.file, err); if (likely(sock)) { - *fput_needed = f.flags; + *fput_needed = f.flags & FDPUT_FPUT; return sock; } fdput(f); diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 08b5fa4a2852a9abdef38a2c6115272485aafca7..ba8f36731228931d5532b7df86b6be689b05da58 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -981,8 +981,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, p = xdr_inline_decode(xdr, len); if (unlikely(p == NULL)) goto out_fail; - dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid, - req->rq_task->tk_msg.rpc_proc->p_name, (char *)p); + dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid, + req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p); if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len, sap, sizeof(address)) == 0) diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c8ee8e801edb847e7869243c4fc64f5986931e25..709c082dc905933a304b986f3005c82de990b879 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) } EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); -/* - * Format the transport list for printing +/** + * svc_print_xprts - Format the transport list for printing + * @buf: target buffer for formatted address + * @maxlen: length of target buffer + * + * Fills in @buf with a string containing a list of transport names, each name + * terminated with '\n'. If the buffer is too small, some entries may be + * missing, but it is guaranteed that all lines in the output buffer are + * complete. + * + * Returns positive length of the filled-in string. 
*/ int svc_print_xprts(char *buf, int maxlen) { @@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen) list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { int slen; - sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); - slen = strlen(tmpstr); - if (len + slen > maxlen) + slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n", + xcl->xcl_name, xcl->xcl_max_payload); + if (slen >= sizeof(tmpstr) || len + slen >= maxlen) break; len += slen; strcat(buf, tmpstr); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index b9827665ff35570491f09976ad7cd4b972a6bee5..d183d4aee822ccab369c453c7cefa817f5e4263c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -256,6 +256,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt) { dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); + xprt_rdma_free_addresses(xprt); xprt_free(xprt); module_put(THIS_MODULE); } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 16c8174658fd1545be721ac519d851ff1abf8cf4..252495ff9010d422ba75bb6efe0b94520d51dc4d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -268,6 +268,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma) { struct svc_rdma_recv_ctxt *ctxt; + if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) + return 0; ctxt = svc_rdma_recv_ctxt_get(rdma); if (!ctxt) return -ENOMEM; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 4fc0ce12708949443425cf5831170fd587998fcc..22f135263815181c9ac8b07a5b905162b0245db4 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -679,7 +679,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, struct svc_rdma_read_info *info, __be32 *p) { - unsigned int i; int ret; ret = -EINVAL; @@ -702,12 +701,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, info->ri_chunklen += rs_length; } - /* Pages under I/O have been copied to head->rc_pages. - * Prevent their premature release by svc_xprt_release() . - */ - for (i = 0; i < info->ri_readctxt->rc_page_count; i++) - rqstp->rq_pages[i] = NULL; - return ret; } @@ -802,6 +795,26 @@ static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp, return ret; } +/* Pages under I/O have been copied to head->rc_pages. Ensure they + * are not released by svc_xprt_release() until the I/O is complete. + * + * This has to be done after all Read WRs are constructed to properly + * handle a page that is part of I/O on behalf of two different RDMA + * segments. + * + * Do this only if I/O has been posted. Otherwise, we do indeed want + * svc_xprt_release() to clean things up properly. 
+ */ +static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, + const unsigned int start, + const unsigned int num_pages) +{ + unsigned int i; + + for (i = start; i < num_pages + start; i++) + rqstp->rq_pages[i] = NULL; +} + /** * svc_rdma_recv_read_chunk - Pull a Read chunk from the client * @rdma: controlling RDMA transport @@ -855,6 +868,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, ret = svc_rdma_post_chunk_ctxt(&info->ri_cc); if (ret < 0) goto out_err; + svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count); return 0; out_err: diff --git a/net/tipc/group.c b/net/tipc/group.c index 9a9138de4eca5e256e62d55f844d8ffee62c2d62..b656385efad654cd52e9fc747e841a2665ce6029 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp, return NULL; } -static void tipc_group_add_to_tree(struct tipc_group *grp, - struct tipc_member *m) +static int tipc_group_add_to_tree(struct tipc_group *grp, + struct tipc_member *m) { u64 nkey, key = (u64)m->node << 32 | m->port; struct rb_node **n, *parent = NULL; @@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, else if (key > nkey) n = &(*n)->rb_right; else - return; + return -EEXIST; } rb_link_node(&m->tree_node, parent, n); rb_insert_color(&m->tree_node, &grp->members); + return 0; } static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, @@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, u32 instance, int state) { struct tipc_member *m; + int ret; m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) @@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, m->port = port; m->instance = instance; m->bc_acked = grp->bc_snd_nxt - 1; + ret = tipc_group_add_to_tree(grp, m); + if (ret < 0) { + kfree(m); + return NULL; + } grp->member_cnt++; - tipc_group_add_to_tree(grp, m); tipc_nlist_add(&grp->dests, m->node); m->state = state; return m; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index cbccf1791d3c5d97cb31070b178b8423a4c441db..b078b77620f189d27a2ff9aab8e5badb85e2ed0d 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; - if (unlikely(skb_unclone(frag, GFP_ATOMIC))) + frag = skb_unshare(frag, GFP_ATOMIC); + if (unlikely(!frag)) goto err; head = *headbuf = frag; *buf = NULL; diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 29e684054abe3b67bb03fb8d569245f0b7cdf642..f8e111218a0ec12da5e176e8bf5462edc231ee97 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -255,8 +255,9 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg) { - int err; + struct nlmsghdr *nlh; struct sk_buff *arg; + int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) @@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; } + nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); + if (!nlh) { + kfree_skb(arg); + kfree_skb(msg->rep); + msg->rep = NULL; + return -EMSGSIZE; + } + nlmsg_end(arg, nlh); + err = __tipc_nl_compat_dumpit(cmd, msg, arg); if (err) { kfree_skb(msg->rep); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 
f0184a5e83aa3c8f3d1dd1521fcb464ec08f7d4f..16e2af3a00ccb8ccf5cf45d895b24f571a079ce5 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2565,18 +2565,18 @@ static int tipc_shutdown(struct socket *sock, int how) lock_sock(sk); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - sk->sk_shutdown = SEND_SHUTDOWN; + sk->sk_shutdown = SHUTDOWN_MASK; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue); - /* Wake up anyone sleeping in poll */ - sk->sk_state_change(sk); res = 0; } else { res = -ENOTCONN; } + /* Wake up anyone sleeping in poll. */ + sk->sk_state_change(sk); release_sock(sk); return res; diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 41f4464ac6cc5b5f04c278574a7bd6c47e4646eb..ec9a7137d2677901c1eae1f770b15303e3f96171 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -407,7 +407,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) return -EWOULDBLOCK; if (ret == sizeof(s)) { read_lock_bh(&sk->sk_callback_lock); - ret = tipc_conn_rcv_sub(srv, con, &s); + /* RACE: the connection can be closed in the meantime */ + if (likely(connected(con))) + ret = tipc_conn_rcv_sub(srv, con, &s); read_unlock_bh(&sk->sk_callback_lock); if (!ret) return 0; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 8f40bbfd60ea98ad7484ab87e0b998bf8165436f..575d621305786796d518dfa5524a770500401661 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -476,7 +476,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct iov_iter msg_iter; - char *kaddr = kmap(page); + char *kaddr; struct kvec iov; int rc; @@ -490,6 +490,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, goto out; } + kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2318e2e2748f45f2e6f2b8aecef235098beb37b8..2020306468af4e4e9950aa7ae0c7c6c1a6e36ff8 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) return unix_peer(osk) == NULL || unix_our_peer(sk, osk); } -static inline int unix_recvq_full(struct sock const *sk) +static inline int unix_recvq_full(const struct sock *sk) { return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } +static inline int unix_recvq_full_lockless(const struct sock *sk) +{ + return skb_queue_len_lockless(&sk->sk_receive_queue) > + READ_ONCE(sk->sk_max_ack_backlog); +} + struct sock *unix_peer_get(struct sock *s) { struct sock *peer; @@ -1788,7 +1794,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, * - unix_peer(sk) == sk by time of get but disconnected before lock */ if (other != sk && - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { + unlikely(unix_peer(other) != sk && + unix_recvq_full_lockless(other))) { if (timeo) { timeo = unix_wait_for_peer(other, timeo); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 96ab344f17bbc1bfd466dfdbf5845df4b6a49c8d..cc70d651d13e00d3bbc60352269482a79cf36878 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -39,6 +39,7 @@ struct virtio_vsock { * must be accessed with tx_lock held. 
*/ struct mutex tx_lock; + bool tx_run; struct work_struct send_pkt_work; spinlock_t send_pkt_list_lock; @@ -54,6 +55,7 @@ struct virtio_vsock { * must be accessed with rx_lock held. */ struct mutex rx_lock; + bool rx_run; int rx_buf_nr; int rx_buf_max_nr; @@ -61,46 +63,28 @@ struct virtio_vsock { * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held. */ struct mutex event_lock; + bool event_run; struct virtio_vsock_event event_list[8]; u32 guest_cid; }; -static struct virtio_vsock *virtio_vsock_get(void) -{ - return the_virtio_vsock; -} - static u32 virtio_transport_get_local_cid(void) { - struct virtio_vsock *vsock = virtio_vsock_get(); - - if (!vsock) - return VMADDR_CID_ANY; - - return vsock->guest_cid; -} - -static void virtio_transport_loopback_work(struct work_struct *work) -{ - struct virtio_vsock *vsock = - container_of(work, struct virtio_vsock, loopback_work); - LIST_HEAD(pkts); - - spin_lock_bh(&vsock->loopback_list_lock); - list_splice_init(&vsock->loopback_list, &pkts); - spin_unlock_bh(&vsock->loopback_list_lock); - - mutex_lock(&vsock->rx_lock); - while (!list_empty(&pkts)) { - struct virtio_vsock_pkt *pkt; - - pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); - list_del_init(&pkt->list); + struct virtio_vsock *vsock; + u32 ret; - virtio_transport_recv_pkt(pkt); + rcu_read_lock(); + vsock = rcu_dereference(the_virtio_vsock); + if (!vsock) { + ret = VMADDR_CID_ANY; + goto out_rcu; } - mutex_unlock(&vsock->rx_lock); + + ret = vsock->guest_cid; +out_rcu: + rcu_read_unlock(); + return ret; } static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock, @@ -128,6 +112,9 @@ virtio_transport_send_pkt_work(struct work_struct *work) mutex_lock(&vsock->tx_lock); + if (!vsock->tx_run) + goto out; + vq = vsock->vqs[VSOCK_VQ_TX]; for (;;) { @@ -186,6 +173,7 @@ virtio_transport_send_pkt_work(struct work_struct *work) if (added) virtqueue_kick(vq); +out: mutex_unlock(&vsock->tx_lock); if (restart_rx) @@ -198,14 +186,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) struct virtio_vsock *vsock; int len = pkt->len; - vsock = virtio_vsock_get(); + rcu_read_lock(); + vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { virtio_transport_free_pkt(pkt); - return -ENODEV; + len = -ENODEV; + goto out_rcu; } - if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) - return virtio_transport_send_pkt_loopback(vsock, pkt); + if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) { + len = virtio_transport_send_pkt_loopback(vsock, pkt); + goto out_rcu; + } if (pkt->reply) atomic_inc(&vsock->queued_replies); @@ -215,6 +207,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) spin_unlock_bh(&vsock->send_pkt_list_lock); queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); + +out_rcu: + rcu_read_unlock(); return len; } @@ -223,12 +218,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk) { struct virtio_vsock *vsock; struct virtio_vsock_pkt *pkt, *n; - int cnt = 0; + int cnt = 0, ret; LIST_HEAD(freeme); - vsock = virtio_vsock_get(); + rcu_read_lock(); + vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { - return -ENODEV; + ret = -ENODEV; + goto out_rcu; } spin_lock_bh(&vsock->send_pkt_list_lock); @@ -256,7 +253,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk) queue_work(virtio_vsock_workqueue, &vsock->rx_work); } - return 0; + ret = 0; + +out_rcu: + rcu_read_unlock(); + return ret; } static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) @@ -308,6 +309,10 @@ static void virtio_transport_tx_work(struct work_struct *work) vq 
= vsock->vqs[VSOCK_VQ_TX]; mutex_lock(&vsock->tx_lock); + + if (!vsock->tx_run) + goto out; + do { struct virtio_vsock_pkt *pkt; unsigned int len; @@ -318,6 +323,8 @@ static void virtio_transport_tx_work(struct work_struct *work) added = true; } } while (!virtqueue_enable_cb(vq)); + +out: mutex_unlock(&vsock->tx_lock); if (added) @@ -336,56 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock) return val < virtqueue_get_vring_size(vq); } -static void virtio_transport_rx_work(struct work_struct *work) -{ - struct virtio_vsock *vsock = - container_of(work, struct virtio_vsock, rx_work); - struct virtqueue *vq; - - vq = vsock->vqs[VSOCK_VQ_RX]; - - mutex_lock(&vsock->rx_lock); - - do { - virtqueue_disable_cb(vq); - for (;;) { - struct virtio_vsock_pkt *pkt; - unsigned int len; - - if (!virtio_transport_more_replies(vsock)) { - /* Stop rx until the device processes already - * pending replies. Leave rx virtqueue - * callbacks disabled. - */ - goto out; - } - - pkt = virtqueue_get_buf(vq, &len); - if (!pkt) { - break; - } - - vsock->rx_buf_nr--; - - /* Drop short/long packets */ - if (unlikely(len < sizeof(pkt->hdr) || - len > sizeof(pkt->hdr) + pkt->len)) { - virtio_transport_free_pkt(pkt); - continue; - } - - pkt->len = len - sizeof(pkt->hdr); - virtio_transport_deliver_tap_pkt(pkt); - virtio_transport_recv_pkt(pkt); - } - } while (!virtqueue_enable_cb(vq)); - -out: - if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) - virtio_vsock_rx_fill(vsock); - mutex_unlock(&vsock->rx_lock); -} - /* event_lock must be held */ static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock, struct virtio_vsock_event *event) @@ -455,6 +412,9 @@ static void virtio_transport_event_work(struct work_struct *work) mutex_lock(&vsock->event_lock); + if (!vsock->event_run) + goto out; + do { struct virtio_vsock_event *event; unsigned int len; @@ -469,7 +429,7 @@ static void virtio_transport_event_work(struct work_struct *work) } while (!virtqueue_enable_cb(vq)); virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); - +out: mutex_unlock(&vsock->event_lock); } @@ -546,6 +506,86 @@ static struct virtio_transport virtio_transport = { .send_pkt = virtio_transport_send_pkt, }; +static void virtio_transport_loopback_work(struct work_struct *work) +{ + struct virtio_vsock *vsock = + container_of(work, struct virtio_vsock, loopback_work); + LIST_HEAD(pkts); + + spin_lock_bh(&vsock->loopback_list_lock); + list_splice_init(&vsock->loopback_list, &pkts); + spin_unlock_bh(&vsock->loopback_list_lock); + + mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + + while (!list_empty(&pkts)) { + struct virtio_vsock_pkt *pkt; + + pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); + list_del_init(&pkt->list); + + virtio_transport_recv_pkt(&virtio_transport, pkt); + } +out: + mutex_unlock(&vsock->rx_lock); +} + +static void virtio_transport_rx_work(struct work_struct *work) +{ + struct virtio_vsock *vsock = + container_of(work, struct virtio_vsock, rx_work); + struct virtqueue *vq; + + vq = vsock->vqs[VSOCK_VQ_RX]; + + mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + + do { + virtqueue_disable_cb(vq); + for (;;) { + struct virtio_vsock_pkt *pkt; + unsigned int len; + + if (!virtio_transport_more_replies(vsock)) { + /* Stop rx until the device processes already + * pending replies. Leave rx virtqueue + * callbacks disabled. 
+ */ + goto out; + } + + pkt = virtqueue_get_buf(vq, &len); + if (!pkt) { + break; + } + + vsock->rx_buf_nr--; + + /* Drop short/long packets */ + if (unlikely(len < sizeof(pkt->hdr) || + len > sizeof(pkt->hdr) + pkt->len)) { + virtio_transport_free_pkt(pkt); + continue; + } + + pkt->len = len - sizeof(pkt->hdr); + virtio_transport_deliver_tap_pkt(pkt); + virtio_transport_recv_pkt(&virtio_transport, pkt); + } + } while (!virtqueue_enable_cb(vq)); + +out: + if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) + virtio_vsock_rx_fill(vsock); + mutex_unlock(&vsock->rx_lock); +} + static int virtio_vsock_probe(struct virtio_device *vdev) { vq_callback_t *callbacks[] = { @@ -566,7 +606,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev) return ret; /* Only one virtio-vsock device per guest is supported */ - if (the_virtio_vsock) { + if (rcu_dereference_protected(the_virtio_vsock, + lockdep_is_held(&the_virtio_vsock_mutex))) { ret = -EBUSY; goto out; } @@ -591,8 +632,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) vsock->rx_buf_max_nr = 0; atomic_set(&vsock->queued_replies, 0); - vdev->priv = vsock; - the_virtio_vsock = vsock; mutex_init(&vsock->tx_lock); mutex_init(&vsock->rx_lock); mutex_init(&vsock->event_lock); @@ -606,14 +645,23 @@ static int virtio_vsock_probe(struct virtio_device *vdev) INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work); INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work); + mutex_lock(&vsock->tx_lock); + vsock->tx_run = true; + mutex_unlock(&vsock->tx_lock); + mutex_lock(&vsock->rx_lock); virtio_vsock_rx_fill(vsock); + vsock->rx_run = true; mutex_unlock(&vsock->rx_lock); mutex_lock(&vsock->event_lock); virtio_vsock_event_fill(vsock); + vsock->event_run = true; mutex_unlock(&vsock->event_lock); + vdev->priv = vsock; + rcu_assign_pointer(the_virtio_vsock, vsock); + mutex_unlock(&the_virtio_vsock_mutex); return 0; @@ -628,6 +676,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev) struct virtio_vsock *vsock = vdev->priv; struct virtio_vsock_pkt *pkt; + mutex_lock(&the_virtio_vsock_mutex); + + vdev->priv = NULL; + rcu_assign_pointer(the_virtio_vsock, NULL); + synchronize_rcu(); + flush_work(&vsock->loopback_work); flush_work(&vsock->rx_work); flush_work(&vsock->tx_work); @@ -637,6 +691,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev) /* Reset all connected sockets when the device disappear */ vsock_for_each_connected_socket(virtio_vsock_reset_sock); + /* Stop all work handlers to make sure no one is accessing the device, + * so we can safely call vdev->config->reset(). + */ + mutex_lock(&vsock->rx_lock); + vsock->rx_run = false; + mutex_unlock(&vsock->rx_lock); + + mutex_lock(&vsock->tx_lock); + vsock->tx_run = false; + mutex_unlock(&vsock->tx_lock); + + mutex_lock(&vsock->event_lock); + vsock->event_run = false; + mutex_unlock(&vsock->event_lock); + + /* Flush all device writes and interrupts, device will not use any + * more buffers. 
+ */ vdev->config->reset(vdev); mutex_lock(&vsock->rx_lock); @@ -667,12 +739,11 @@ static void virtio_vsock_remove(struct virtio_device *vdev) } spin_unlock_bh(&vsock->loopback_list_lock); - mutex_lock(&the_virtio_vsock_mutex); - the_virtio_vsock = NULL; - mutex_unlock(&the_virtio_vsock_mutex); - + /* Delete virtqueues and flush outstanding callbacks if any */ vdev->config->del_vqs(vdev); + mutex_unlock(&the_virtio_vsock_mutex); + kfree(vsock); } diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 52242a148c705c567a53fbc26699703b31b766e3..5f8a72d34d3136a629df9c086d1b1e02ec35b364 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -669,9 +669,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk, /* Normally packets are associated with a socket. There may be no socket if an * attempt was made to connect to a socket that does not exist. */ -static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) +static int virtio_transport_reset_no_sock(const struct virtio_transport *t, + struct virtio_vsock_pkt *pkt) { - const struct virtio_transport *t; struct virtio_vsock_pkt *reply; struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RST, @@ -691,7 +691,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) if (!reply) return -ENOMEM; - t = virtio_transport_get_ops(); if (!t) { virtio_transport_free_pkt(reply); return -ENOTCONN; @@ -993,7 +992,8 @@ static bool virtio_transport_space_update(struct sock *sk, /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex * lock. */ -void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) +void virtio_transport_recv_pkt(struct virtio_transport *t, + struct virtio_vsock_pkt *pkt) { struct sockaddr_vm src, dst; struct vsock_sock *vsk; @@ -1015,7 +1015,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) le32_to_cpu(pkt->hdr.fwd_cnt)); if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) { - (void)virtio_transport_reset_no_sock(pkt); + (void)virtio_transport_reset_no_sock(t, pkt); goto free_pkt; } @@ -1026,7 +1026,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) if (!sk) { sk = vsock_find_bound_socket(&dst); if (!sk) { - (void)virtio_transport_reset_no_sock(pkt); + (void)virtio_transport_reset_no_sock(t, pkt); goto free_pkt; } } @@ -1060,6 +1060,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) virtio_transport_free_pkt(pkt); break; default: + (void)virtio_transport_reset_no_sock(t, pkt); virtio_transport_free_pkt(pkt); break; } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 5d5333a56f4f98d8a35553d8f4741f3f80cad699..46f65757ccc5317e9d7a9a1cde823f724e83afdf 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -1050,7 +1050,8 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, if (chan == other_chan) return true; - if (chan->band != NL80211_BAND_5GHZ) + if (chan->band != NL80211_BAND_5GHZ && + chan->band != NL80211_BAND_6GHZ) continue; r1 = cfg80211_get_unii(chan->center_freq); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4f6d59cdb271bf1381303ad0682b264a0a94d76d..bfca30df73943a80c29a7d31a29e3d1d1e8d783b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3727,6 +3727,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) if (err) return err; + if (key.idx < 0) + return -EINVAL; + if (info->attrs[NL80211_ATTR_MAC]) mac_addr = 
nla_data(info->attrs[NL80211_ATTR_MAC]); @@ -12533,13 +12536,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 9d19bd2a8ecc729149caf4f44cebfc8f9a3042ea..66d2a69bceb6485d53171f73bf934fd0b3846af9 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2983,6 +2983,9 @@ int regulatory_hint_user(const char *alpha2, if (WARN_ON(!alpha2)) return -EINVAL; + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; @@ -3826,8 +3829,9 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy) } /* - * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for - * UNII band definitions + * See FCC notices for UNII band definitions + * 5GHz: https://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii + * 6GHz: https://www.fcc.gov/document/fcc-proposes-more-spectrum-unlicensed-use-0 */ int cfg80211_get_unii(int freq) { @@ -3851,6 +3855,22 @@ int cfg80211_get_unii(int freq) if (freq > 5725 && freq <= 5825) return 4; + /* UNII-5 */ + if (freq > 5925 && freq <= 6425) + return 5; + + /* UNII-6 */ + if (freq > 6425 && freq <= 6525) + return 6; + + /* UNII-7 */ + if (freq > 6525 && freq <= 6875) + return 7; + + /* UNII-8 */ + if (freq > 6875 && freq <= 7125) + return 8; + return -EINVAL; } diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 713c15d6c5e614234c838f82a0c5ea115b3b0c01..d039ec0f98c9ffe4c32b13d0bda1a0d3d30d2d23 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -3222,10 +3222,11 @@ TRACE_EVENT(rdev_set_mcast_rate, sizeof(int) * NUM_NL80211_BANDS); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " - "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]", + "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 6GHz=0x%x, 60GHz=0x%x]", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mcast_rate[NL80211_BAND_2GHZ], __entry->mcast_rate[NL80211_BAND_5GHZ], + __entry->mcast_rate[NL80211_BAND_6GHZ], __entry->mcast_rate[NL80211_BAND_60GHZ]) ); diff --git a/net/wireless/util.c b/net/wireless/util.c index ddc6d95e65c3f0f63eb7952e0f32262895123931..52c2ac4d79e3d0c3cc5de57c593c7471e81fd6f6 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -87,6 +87,11 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band) else return 5000 + chan * 5; break; + case NL80211_BAND_6GHZ: + /* see 802.11ax D4.1 27.3.22.2 */ + if (chan <= 253) + return 5940 + chan * 5; + break; case NL80211_BAND_60GHZ: if (chan < 5) return 56160 + chan * 2160; @@ -107,8 +112,11 @@ int ieee80211_frequency_to_channel(int freq) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; - else if (freq <= 45000) /* DMG band lower limit */ + else if (freq < 5940) return (freq - 5000) / 5; + else if (freq <= 45000) /* DMG band lower limit */ + /* see 802.11ax D4.1 27.3.22.2 */ + return (freq - 5940) / 5; else if (freq >= 58320 && freq <= 64800) return (freq - 56160) / 2160; else @@ -1481,6 +1489,9 @@ bool ieee80211_operating_class_to_band(u8 operating_class, case 128 ... 130: *band = NL80211_BAND_5GHZ; return true; + case 131 ... 
135: + *band = NL80211_BAND_6GHZ; + return true; case 81: case 82: case 83: diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 743103786652d71824a7f43d64799826f361d8c6..f3d34582581b9fc7142291586e4e97018fbe91ac 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -362,6 +362,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } + if (x25->neighbour) { + read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); + x25->neighbour = NULL; + read_unlock_bh(&x25_list_lock); + } } /* diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 6f00f88adab9088a9149f499da30a4378ca3f058..6cc9f6e2dd2b7e3de696737c791e622857d048c2 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -293,7 +293,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) } mtu = dst_mtu(dst); - if (!skb->ignore_df && skb->len > mtu) { + if (skb->len > mtu) { skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c1900e44bbbca203cd7000ee9f216d3bbecf5e21..c7c8d08a914841483ee9743c7f6f716ee3379b5e 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -923,7 +923,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, */ if (x->km.state == XFRM_STATE_VALID) { if ((x->sel.family && - !xfrm_selector_match(&x->sel, fl, x->sel.family)) || + (x->sel.family != family || + !xfrm_selector_match(&x->sel, fl, family))) || !security_xfrm_state_pol_flow_match(x, pol, fl)) return; @@ -936,7 +937,9 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, *acq_in_progress = 1; } else if (x->km.state == XFRM_STATE_ERROR || x->km.state == XFRM_STATE_EXPIRED) { - if (xfrm_selector_match(&x->sel, fl, x->sel.family) && + if ((!x->sel.family || + (x->sel.family == family && + xfrm_selector_match(&x->sel, fl, family))) && security_xfrm_state_pol_flow_match(x, pol, fl)) *error = -ESRCH; } @@ -976,7 +979,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) - xfrm_state_look_at(pol, x, fl, encap_family, + xfrm_state_look_at(pol, x, fl, family, &best, &acquire_in_progress, &error); } if (best || acquire_in_progress) @@ -993,7 +996,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) - xfrm_state_look_at(pol, x, fl, encap_family, + xfrm_state_look_at(pol, x, fl, family, &best, &acquire_in_progress, &error); } @@ -1341,6 +1344,30 @@ int xfrm_state_add(struct xfrm_state *x) EXPORT_SYMBOL(xfrm_state_add); #ifdef CONFIG_XFRM_MIGRATE +static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security) +{ + struct xfrm_user_sec_ctx *uctx; + int size = sizeof(*uctx) + security->ctx_len; + int err; + + uctx = kmalloc(size, GFP_KERNEL); + if (!uctx) + return -ENOMEM; + + uctx->exttype = XFRMA_SEC_CTX; + uctx->len = size; + uctx->ctx_doi = security->ctx_doi; + uctx->ctx_alg = security->ctx_alg; + uctx->ctx_len = security->ctx_len; + memcpy(uctx + 1, security->ctx_str, security->ctx_len); + err = security_xfrm_state_alloc(x, uctx); + kfree(uctx); + if (err) + return err; + + return 0; +} + static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, struct 
xfrm_encap_tmpl *encap) { @@ -1397,6 +1424,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, goto error; } + if (orig->security) + if (clone_security(x, orig->security)) + goto error; + if (orig->coaddr) { x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr), GFP_KERNEL); @@ -1410,6 +1441,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, } memcpy(&x->mark, &orig->mark, sizeof(x->mark)); + memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark)); if (xfrm_init_state(x) < 0) goto error; @@ -1421,7 +1453,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->tfcpad = orig->tfcpad; x->replay_maxdiff = orig->replay_maxdiff; x->replay_maxage = orig->replay_maxage; - x->curlft.add_time = orig->curlft.add_time; + memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft)); x->km.state = orig->km.state; x->km.seq = orig->km.seq; x->replay = orig->replay; diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index f49524e148516af2d591ace5c6ed1d5a81089c5b..8e11362b837427db21228316bdd177482eae1ec7 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2676,8 +2676,8 @@ sub process { # Check if the commit log has what seems like a diff which can confuse patch if ($in_commit_log && !$commit_log_has_diff && - (($line =~ m@^\s+diff\b.*a/[\w/]+@ && - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { ERROR("DIFF_IN_COMMIT_MSG", diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 8f004db6f6034d21efff9ef99396ae0a6e8a4a73..1ee33d2e15bf83e51bf6f6a92d310ad04dae2557 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e) void ConfigList::contextMenuEvent(QContextMenuEvent *e) { - if (e->y() <= header()->geometry().bottom()) { - if (!headerPopup) { - QAction *action; - - headerPopup = new QMenu(this); - action = new QAction("Show Name", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowName(bool))); - connect(parent(), SIGNAL(showNameChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showName); - headerPopup->addAction(action); - action = new QAction("Show Range", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowRange(bool))); - connect(parent(), SIGNAL(showRangeChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showRange); - headerPopup->addAction(action); - action = new QAction("Show Data", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowData(bool))); - connect(parent(), SIGNAL(showDataChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showData); - headerPopup->addAction(action); - } - headerPopup->exec(e->globalPos()); - e->accept(); - } else - e->ignore(); + if (!headerPopup) { + QAction *action; + + headerPopup = new QMenu(this); + action = new QAction("Show Name", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowName(bool))); + connect(parent(), SIGNAL(showNameChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showName); + headerPopup->addAction(action); + + action = new QAction("Show Range", this); + action->setCheckable(true); + connect(action, 
SIGNAL(toggled(bool)), + parent(), SLOT(setShowRange(bool))); + connect(parent(), SIGNAL(showRangeChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showRange); + headerPopup->addAction(action); + + action = new QAction("Show Data", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowData(bool))); + connect(parent(), SIGNAL(showDataChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showData); + headerPopup->addAction(action); + } + + headerPopup->exec(e->globalPos()); + e->accept(); } ConfigView*ConfigView::viewList; @@ -1228,7 +1228,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) action->setCheckable(true); connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool))); action->setChecked(showDebug()); popup->addSeparator(); popup->addAction(action); diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index ccfbfde615563a7e6738c7d5b33fc725f79a1d51..9f8339c7ce54009230a4db7c3b54a3e6add87613 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -30,6 +30,11 @@ #undef has_rel_mcount #undef tot_relsize #undef get_mcountsym +#undef find_symtab +#undef get_shnum +#undef set_shnum +#undef get_shstrndx +#undef get_symindex #undef get_sym_str_and_relp #undef do_func #undef Elf_Addr @@ -59,6 +64,11 @@ # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize +# define find_symtab find_symtab64 +# define get_shnum get_shnum64 +# define set_shnum set_shnum64 +# define get_shstrndx get_shstrndx64 +# define get_symindex get_symindex64 # define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 # define get_mcountsym get_mcountsym_64 @@ -92,6 +102,11 @@ # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize +# define find_symtab find_symtab32 +# define get_shnum get_shnum32 +# define set_shnum set_shnum32 +# define get_shstrndx get_shstrndx32 +# define get_symindex get_symindex32 # define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 # define get_mcountsym get_mcountsym_32 @@ -174,6 +189,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp) return is_fake; } +static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx) +{ + unsigned long offset; + int index; + + if (sym->st_shndx != SHN_XINDEX) + return w2(sym->st_shndx); + + offset = (unsigned long)sym - (unsigned long)symtab; + index = offset / sizeof(*sym); + + return w(symtab_shndx[index]); +} + +static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (shdr0 && !ehdr->e_shnum) + return w(shdr0->sh_size); + + return w2(ehdr->e_shnum); +} + +static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum) +{ + if (new_shnum >= SHN_LORESERVE) { + ehdr->e_shnum = 0; + shdr0->sh_size = w(new_shnum); + } else + ehdr->e_shnum = w2(new_shnum); +} + +static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (ehdr->e_shstrndx != SHN_XINDEX) + return w2(ehdr->e_shstrndx); + + return w(shdr0->sh_link); +} + +static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0, + unsigned const nhdr, Elf32_Word **symtab, + Elf32_Word **symtab_shndx) +{ + Elf_Shdr const *relhdr; + unsigned k; + + 
*symtab = NULL; + *symtab_shndx = NULL; + + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { + if (relhdr->sh_type == SHT_SYMTAB) + *symtab = (void *)ehdr + relhdr->sh_offset; + else if (relhdr->sh_type == SHT_SYMTAB_SHNDX) + *symtab_shndx = (void *)ehdr + relhdr->sh_offset; + + if (*symtab && *symtab_shndx) + break; + } +} + /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */ static void append_func(Elf_Ehdr *const ehdr, Elf_Shdr *const shstr, @@ -189,10 +265,12 @@ static void append_func(Elf_Ehdr *const ehdr, char const *mc_name = (sizeof(Elf_Rela) == rel_entsize) ? ".rela__mcount_loc" : ".rel__mcount_loc"; - unsigned const old_shnum = w2(ehdr->e_shnum); uint_t const old_shoff = _w(ehdr->e_shoff); uint_t const old_shstr_sh_size = _w(shstr->sh_size); uint_t const old_shstr_sh_offset = _w(shstr->sh_offset); + Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr); + unsigned int const old_shnum = get_shnum(ehdr, shdr0); + unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size); uint_t new_e_shoff; @@ -202,6 +280,8 @@ static void append_func(Elf_Ehdr *const ehdr, t += (_align & -t); /* word-byte align */ new_e_shoff = t; + set_shnum(ehdr, shdr0, new_shnum); + /* body for new shstrtab */ ulseek(fd_map, sb.st_size, SEEK_SET); uwrite(fd_map, old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size); @@ -246,7 +326,6 @@ static void append_func(Elf_Ehdr *const ehdr, uwrite(fd_map, mrel0, (void *)mrelp - (void *)mrel0); ehdr->e_shoff = _w(new_e_shoff); - ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */ ulseek(fd_map, 0, SEEK_SET); uwrite(fd_map, ehdr, sizeof(*ehdr)); } @@ -419,6 +498,8 @@ static unsigned find_secsym_ndx(unsigned const txtndx, char const *const txtname, uint_t *const recvalp, Elf_Shdr const *const symhdr, + Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx, Elf_Ehdr const *const ehdr) { Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset) @@ -430,7 +511,7 @@ static unsigned find_secsym_ndx(unsigned const txtndx, for (symp = sym0, t = nsym; t; --t, ++symp) { unsigned int const st_bind = ELF_ST_BIND(symp->st_info); - if (txtndx == w2(symp->st_shndx) + if (txtndx == get_symindex(symp, symtab, symtab_shndx) /* avoid STB_WEAK */ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) { /* function symbols on ARM have quirks, avoid them */ @@ -498,21 +579,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, return totrelsz; } - /* Overall supervision for Elf32 ET_REL file. */ static void do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); - unsigned const nhdr = w2(ehdr->e_shnum); - Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)]; + unsigned const nhdr = get_shnum(ehdr, shdr0); + Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)]; char const *const shstrtab = (char const *)(_w(shstr->sh_offset) + (void *)ehdr); Elf_Shdr const *relhdr; unsigned k; + Elf32_Word *symtab; + Elf32_Word *symtab_shndx; + /* Upper bound on space: assume all relevant relocs are for mcount. 
*/ unsigned const totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname); Elf_Rel *const mrel0 = umalloc(totrelsz); @@ -525,6 +608,8 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) unsigned rel_entsize = 0; unsigned symsec_sh_link = 0; + find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx); + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); @@ -533,6 +618,7 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) unsigned const recsym = find_secsym_ndx( w(relhdr->sh_info), txtname, &recval, &shdr0[symsec_sh_link = w(relhdr->sh_link)], + symtab, symtab_shndx, ehdr); rel_entsize = _w(relhdr->sh_entsize); diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index b0e9cc0845060c05b57ec9602336fb2b4c4eaffd..234250ce0b6cbd921d757c2bc20bb5c3696ce216 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -19,13 +19,16 @@ config GCC_PLUGIN_STRUCTLEAK menu "Memory initialization" -config CC_HAS_AUTO_VAR_INIT +config CC_HAS_AUTO_VAR_INIT_PATTERN def_bool $(cc-option,-ftrivial-auto-var-init=pattern) +config CC_HAS_AUTO_VAR_INIT_ZERO + def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang) + choice prompt "Initialize kernel stack variables at function entry" default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS - default INIT_STACK_ALL if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT + default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN default INIT_STACK_NONE help This option enables initialization of stack variables at @@ -58,9 +61,9 @@ choice of uninitialized stack variable exploits and information exposures. - config INIT_STACK_ALL + config INIT_STACK_ALL_PATTERN bool "0xAA-init everything on the stack (strongest)" - depends on CC_HAS_AUTO_VAR_INIT + depends on CC_HAS_AUTO_VAR_INIT_PATTERN help Initializes everything on the stack with a 0xAA pattern. This is intended to eliminate all classes @@ -68,6 +71,24 @@ choice exposures, even variables that were warned to have been left uninitialized. + Pattern initialization is known to provoke many existing bugs + related to uninitialized locals, e.g. pointers receive + non-NULL values, buffer sizes and indices are very big. + + config INIT_STACK_ALL_ZERO + bool "zero-init everything on the stack (strongest and safest)" + depends on CC_HAS_AUTO_VAR_INIT_ZERO + help + Initializes everything on the stack with a zero + value. This is intended to eliminate all classes + of uninitialized stack variable exploits and information + exposures, even variables that were warned to have been + left uninitialized. + + Zero initialization provides safe defaults for strings, + pointers, indices and sizes, and is therefore + more suitable as a security mitigation measure. + endchoice config GCC_PLUGIN_STRUCTLEAK_VERBOSE diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e7778880c09327820d068822d6f5523b7d39ba6d..305890cab97b2047ad45c226eb05ccb78102a936 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3305,6 +3305,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, return dentry_has_perm(current_cred(), dentry, FILE__SETATTR); } + if (!selinux_state.initialized) + return (inode_owner_or_capable(inode) ? 
0 : -EPERM); + sbsec = inode->i_sb->s_security; if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; @@ -3388,6 +3391,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name, return; } + if (!selinux_state.initialized) { + /* If we haven't even been initialized, then we can't validate + * against a policy, so leave the label as invalid. It may + * resolve to a valid label on the next revalidation try if + * we've since initialized. + */ + return; + } + rc = security_context_to_sid_force(&selinux_state, value, size, &newsid); if (rc) { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 4e4114dd4b30f5a804c4ad4969fe224b0d7a560f..f6bc78aba75af3f0b3da4966ac615dccfdec7982 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1535,6 +1535,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) *idx = cpu + 1; return &per_cpu(avc_cache_stats, cpu); } + (*idx)++; return NULL; } diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 371ae368da3555204ea12f7965b077ddce91bc03..accd3846f1e3e3b926517a7e3b261dd24424d9d7 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -912,7 +912,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, } ret = sscanf(rule, "%d", &maplevel); - if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; rule += SMK_DIGITLEN; @@ -933,6 +933,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, for (i = 0; i < catlen; i++) { rule += SMK_DIGITLEN; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } ret = sscanf(rule, "%u", &cat); if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM) goto out; @@ -2746,7 +2750,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file) static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct task_smack *tsp = current_security(); char *data; int rc; LIST_HEAD(list_tmp); @@ -2771,11 +2774,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, kfree(data); if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) { + struct cred *new; + struct task_smack *tsp; + + new = prepare_creds(); + if (!new) { + rc = -ENOMEM; + goto out; + } + tsp = new->security; smk_destroy_label_list(&tsp->smk_relabel); list_splice(&list_tmp, &tsp->smk_relabel); + commit_creds(new); return count; } - +out: smk_destroy_label_list(&list_tmp); return rc; } diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 3788906421a73d2b34979fbf4fbfbd33fc8010e8..fe27034f28460e5e09c3f8e9029eec52daf114b9 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, snd_BUG(); return -EINVAL; } - if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) - return -ENXIO; + if (!snd_pcm_format_linear(format->format)) + return -EINVAL; err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", src_format, dst_format, diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c index e1f44fc8688563d78f9e0433494090e93a113943..ed5bca0db3e73ccb3bac1d0e208e378a2198de82 100644 --- a/sound/core/seq/oss/seq_oss.c +++ b/sound/core/seq/oss/seq_oss.c @@ -181,10 +181,16 @@ static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seq_oss_devinfo *dp; + long rc; + dp = file->private_data; if (snd_BUG_ON(!dp)) 
return -ENXIO; - return snd_seq_oss_ioctl(dp, cmd, arg); + + mutex_lock(®ister_mutex); + rc = snd_seq_oss_ioctl(dp, cmd, arg); + mutex_unlock(®ister_mutex); + return rc; } #ifdef CONFIG_COMPAT diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index ef689997d6a5b55f3d273ab4f8125fc87796cdd9..bf53e342788e2adc7879d90c6504b8d83ec430a7 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2"); #define VENDOR_DIGIDESIGN 0x00a07e #define MODEL_CONSOLE 0x000001 #define MODEL_RACK 0x000002 +#define SPEC_VERSION 0x000001 static int name_card(struct snd_dg00x *dg00x) { @@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { /* Both of 002/003 use the same ID. */ { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_CONSOLE, }, { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_RACK, }, {} diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index d3fdc463a884e31e15518f2780c4ca080573a9de..1e61cdce289525537b1a6b478881408a8b2cd680 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit) } static const struct ieee1394_device_id snd_tscm_id_table[] = { + // Tascam, FW-1884. { .match_flags = IEEE1394_MATCH_VENDOR_ID | - IEEE1394_MATCH_SPECIFIER_ID, + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, .vendor_id = 0x00022e, .specifier_id = 0x00022e, + .version = 0x800000, + }, + // Tascam, FE-8 (.version = 0x800001) + // This kernel module doesn't support FE-8 because the most of features + // can be implemented in userspace without any specific support of this + // module. + // + // .version = 0x800002 is unknown. + // + // Tascam, FW-1082. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800003, + }, + // Tascam, FW-1804. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800004, }, /* FE-08 requires reverse-engineering because it just has faders. 
*/ {} diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 714a51721a313cef9e47309662b778e5c5b3c55b..ab9236e4c157e523604bc76039881aa771a698e1 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c @@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work) struct hdac_driver *drv; unsigned int rp, caddr, res; + spin_lock_irq(&bus->reg_lock); while (bus->unsol_rp != bus->unsol_wp) { rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE; bus->unsol_rp = rp; @@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work) codec = bus->caddr_tbl[caddr & 0x0f]; if (!codec || !codec->dev.driver) continue; + spin_unlock_irq(&bus->reg_lock); drv = drv_to_hdac_driver(codec->dev.driver); if (drv->unsol_event) drv->unsol_event(codec, res); + spin_lock_irq(&bus->reg_lock); } + spin_unlock_irq(&bus->reg_lock); } /** diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index dbf02a3a8d2f28d4dab93c06eddd8148f9dacc7c..58b53a4bc4d014f2a046d6f0997fac8329fa2cd8 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -124,6 +124,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); void snd_hdac_device_exit(struct hdac_device *codec) { pm_runtime_put_noidle(&codec->dev); + /* keep balance of runtime PM child_count in parent device */ + pm_runtime_set_suspended(&codec->dev); snd_hdac_bus_remove_device(codec->bus, codec); kfree(codec->vendor_name); kfree(codec->chip_name); diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 7d049569012c105bdacfff9ac1f406c5e35ecc11..3f06986fbecf81ed80a941f31b4dbe206c132c0d 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -350,7 +350,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, struct hpi_message hm; struct hpi_response hr; struct hpi_adapter adapter; - struct hpi_pci pci; + struct hpi_pci pci = { 0 }; memset(&adapter, 0, sizeof(adapter)); @@ -506,7 +506,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, return 0; err: - for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) { + while (--idx >= 0) { if (pci.ap_mem_base[idx]) { iounmap(pci.ap_mem_base[idx]); pci.ap_mem_base[idx] = NULL; diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index cd27b5536654431643a108dd04887f1f76696a65..675b812e96d63c9efe57099a21e3f0473416cc80 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, else /* Power down */ chip->spi_dac_reg[reg] |= bit; - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) + return -ENXIO; } return 0; } diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c index 146e1a3498c7352c53751871fb1dfd166df8dac1..419da70cd942a94a249dd3620595b55afa0c0ff7 100644 --- a/sound/pci/cs46xx/cs46xx_lib.c +++ b/sound/pci/cs46xx/cs46xx_lib.c @@ -780,7 +780,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned rate = 48000 / 9; /* - * We can not capture at at rate greater than the Input Rate (48000). + * We can not capture at a rate greater than the Input Rate (48000). * Return an error if an attempt is made to stray outside that limit. 
*/ if (rate > 48000) diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c index 8d0a3d357345776658610abb7c314018a38a5b10..8ef51a29380af395d050daf38ea6db8d353c60b9 100644 --- a/sound/pci/cs46xx/dsp_spos_scb_lib.c +++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c @@ -1739,7 +1739,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip) struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { - /* remove AsynchFGTxSCB and and PCMSerialInput_II */ + /* remove AsynchFGTxSCB and PCMSerialInput_II */ cs46xx_dsp_disable_spdif_out (chip); /* save state */ diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 3ef2b27ebbe8c15fa60bb301ade947298e4ce8fe..f32c55ffffc79853bae67d82c37d758f876e3ea3 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2216,7 +2216,6 @@ static int snd_echo_resume(struct device *dev) if (err < 0) { kfree(commpage_bak); dev_err(dev, "resume init_hw err=%d\n", err); - snd_echo_free(chip); return err; } @@ -2243,7 +2242,6 @@ static int snd_echo_resume(struct device *dev) if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { dev_err(chip->card->dev, "cannot grab irq\n"); - snd_echo_free(chip); return -EBUSY; } chip->irq = pci->irq; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index f3a6b1d869d8a2e06cf4ff05999e859b227b490f..dbeb62362f1c3954af07d31835fdaf3ecc84ceca 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3410,7 +3410,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save); * @nid: NID to check / update * * Check whether the given NID is in the amp list. If it's in the list, - * check the current AMP status, and update the the power-status according + * check the current AMP status, and update the power-status according * to the mute status. * * This function is supposed to be set or called from the check_power_status diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index fa261b27d85886a905555cfd6aa27383f6740bbc..8198d2e53b7df54c72c3374b6996070514dcf4cf 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1169,16 +1169,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) active = true; - /* clear rirb int */ status = azx_readb(chip, RIRBSTS); if (status & RIRB_INT_MASK) { + /* + * Clearing the interrupt status here ensures that no + * interrupt gets masked after the RIRB wp is read in + * snd_hdac_bus_update_rirb. This avoids a possible + * race condition where codec response in RIRB may + * remain unserviced by IRQ, eventually falling back + * to polling mode in azx_rirb_get_response. 
+ */ + azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) udelay(80); snd_hdac_bus_update_rirb(bus); } - azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); } } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 2609161707a4141da8b3b1e62cb4eba50b56b2bc..97adb7e340f99895da11e791545edf31061306a6 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -825,7 +825,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path, } } -/* sync power of each widget in the the given path */ +/* sync power of each widget in the given path */ static hda_nid_t path_power_update(struct hda_codec *codec, struct nid_path *path, bool allow_powerdown) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8e1eb5f243a278676408b3e6867d7b6859f7d4a8..d43245937db7e4454cc13114cd83da1cc5f483f9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2478,7 +2478,6 @@ static int azx_probe_continue(struct azx *chip) if (azx_has_pm_runtime(chip)) { pm_runtime_use_autosuspend(&pci->dev); - pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 419d099b5582b3fb45244d9951443ca7a203df9f..708efb9b43877fa3e890cd4d70520a85fb7701ce 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2574,6 +2574,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec, hda_nid_t cvt_nid) { if (per_pin) { + haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid); snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); intel_verify_pin_cvt_connect(codec, per_pin); @@ -3430,6 +3431,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) static int patch_tegra_hdmi(struct hda_codec *codec) { + struct hdmi_spec *spec; int err; err = patch_generic_hdmi(codec); @@ -3437,6 +3439,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) return err; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; + spec = codec->spec; + spec->chmap.ops.chmap_cea_alloc_validate_get_type = + nvhdmi_chmap_cea_alloc_validate_get_type; + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index fed9df479ef8a5e230001cda727b118c1ffe6544..24bc9e4460473b7a716c4a1e51cc78be74635e45 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3290,7 +3290,11 @@ static void alc256_shutup(struct hda_codec *codec) /* 3k pull low control for Headset jack. */ /* NOTE: call this before clearing the pin, otherwise codec stalls */ - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); + /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly + * when booting with headset plugged. 
So skip setting it for the codec alc257 + */ + if (codec->core.vendor_id != 0x10ec0257) + alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, @@ -4072,6 +4076,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, { struct alc_spec *spec = codec->spec; + spec->micmute_led_polarity = 1; alc_fixup_hp_gpio_led(codec, action, 0, 0x04); if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->init_amp = ALC_INIT_DEFAULT; @@ -5611,6 +5616,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, #include "hp_x360_helper.c" enum { + ALC269_FIXUP_GPIO2, ALC269_FIXUP_SONY_VAIO, ALC275_FIXUP_SONY_VAIO_GPIO2, ALC269_FIXUP_DELL_M101Z, @@ -5763,6 +5769,10 @@ enum { }; static const struct hda_fixup alc269_fixups[] = { + [ALC269_FIXUP_GPIO2] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_gpio2, + }, [ALC269_FIXUP_SONY_VAIO] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { @@ -6558,6 +6568,8 @@ static const struct hda_fixup alc269_fixups[] = { [ALC233_FIXUP_LENOVO_MULTI_CODECS] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_alc662_fixup_lenovo_dual_codecs, + .chained = true, + .chain_id = ALC269_FIXUP_GPIO2 }, [ALC233_FIXUP_ACER_HEADSET_MIC] = { .type = HDA_FIXUP_VERBS, diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index d8168aa2cef381fc05199d1e4044b4ee8a3c4202..85c33f528d7b3e2f45e0e108018c796a893c5be9 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -845,7 +845,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec, static struct snd_kcontrol_new beep_vol_ctl = HDA_CODEC_VOLUME(NULL, 0, 0, 0); - /* check for mute support for the the amp */ + /* check for mute support for the amp */ if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) { const struct snd_kcontrol_new *temp; if (spec->anabeep_nid == nid) diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c index 3919aed39ca03ab3005e0bfec8ffd2e65f20a147..5e52086d7b98637357f6a4354ac472e773e1eb0d 100644 --- a/sound/pci/ice1712/prodigy192.c +++ b/sound/pci/ice1712/prodigy192.c @@ -31,7 +31,7 @@ * Experimentally I found out that only a combination of * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 - * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct - * sampling rate. That means the the FPGA doubles the + * sampling rate. That means that the FPGA doubles the * MCK01 rate. 
* * Copyright (c) 2003 Takashi Iwai diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c index 4cf3200e988b03adf40129af71cfe1c9bb51bb69..df44135e1b0c9c03e643052e1c76e566626412b9 100644 --- a/sound/pci/oxygen/xonar_dg.c +++ b/sound/pci/oxygen/xonar_dg.c @@ -39,7 +39,7 @@ * GPIO 4 <- headphone detect * GPIO 5 -> enable ADC analog circuit for the left channel * GPIO 6 -> enable ADC analog circuit for the right channel - * GPIO 7 -> switch green rear output jack between CS4245 and and the first + * GPIO 7 -> switch green rear output jack between CS4245 and the first * channel of CS4361 (mechanical relay) * GPIO 8 -> enable output to speakers * diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 89b6e187ac235f3161e10a8bb1a6035af8d56d0d..a5b0c40ee545f5c791fd2770894e6d1e83782a6f 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2130,10 +2130,16 @@ static void max98090_pll_work(struct max98090_priv *max98090) dev_info_ratelimited(component->dev, "PLL unlocked\n"); + /* + * As the datasheet suggested, the maximum PLL lock time should be + * 7 msec. The workaround resets the codec softly by toggling SHDN + * off and on if PLL failed to lock for 10 msec. Notably, there is + * no suggested hold time for SHDN off. + */ + /* Toggle shutdown OFF then ON */ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, 0); - msleep(10); snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, M98090_SHDNN_MASK); diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index cbdb6d4bb91ef1c50161b970c2603ccd68e7bcb9..f4aba065c9257348f2bf120a4a915d473f935dc4 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -16,8 +16,8 @@ #define CDC_D_REVISION1 (0xf000) #define CDC_D_PERPH_SUBTYPE (0xf005) -#define CDC_D_INT_EN_SET (0x015) -#define CDC_D_INT_EN_CLR (0x016) +#define CDC_D_INT_EN_SET (0xf015) +#define CDC_D_INT_EN_CLR (0xf016) #define MBHC_SWITCH_INT BIT(7) #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6) #define MBHC_BUTTON_PRESS_DET BIT(5) diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index 108e8bf42a346e3651076b9b320e973787046eb2..f0a409504a13b6e2a39aca9a2d91c8ab819685c3 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -419,8 +419,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wm8994 *control = dev_get_drvdata(component->dev->parent); int i; + if (control->type != WM8958) + return 0; + switch (event) { case SND_SOC_DAPM_POST_PMU: case SND_SOC_DAPM_PRE_PMU: diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 01acb8da2f48e969739b617f34424c52c2bc8e56..e3e069277a3ff568075c711919781b4e4f8421ca 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -3376,6 +3376,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * return -EINVAL; } + pm_runtime_get_sync(component->dev); + switch (micbias) { case 1: micdet = &wm8994->micdet[0]; @@ -3423,6 +3425,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * snd_soc_dapm_sync(dapm); + pm_runtime_put(component->dev); + return 0; } EXPORT_SYMBOL_GPL(wm8994_mic_detect); @@ -3790,6 +3794,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * 
return -EINVAL; } + pm_runtime_get_sync(component->dev); + if (jack) { snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS"); snd_soc_dapm_sync(dapm); @@ -3858,6 +3864,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * snd_soc_dapm_sync(dapm); } + pm_runtime_put(component->dev); + return 0; } EXPORT_SYMBOL_GPL(wm8958_mic_detect); @@ -4051,11 +4059,13 @@ static int wm8994_component_probe(struct snd_soc_component *component) wm8994->hubs.dcs_readback_mode = 2; break; } + wm8994->hubs.micd_scthr = true; break; case WM8958: wm8994->hubs.dcs_readback_mode = 1; wm8994->hubs.hp_startup_mode = 1; + wm8994->hubs.micd_scthr = true; switch (control->revision) { case 0: diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index fed6ea9b019f70867f1201c8bed2130c1e5d3614..da7fa6f5459e663219f9a2d342303e1735ca11e9 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -1227,6 +1227,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component, snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL, WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB); + if (!hubs->micd_scthr) + return 0; + snd_soc_component_update_bits(component, WM8993_MICBIAS, WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK | WM8993_MICB1_LVL | WM8993_MICB2_LVL, diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h index ee339ad8514d1008e88e4ae127b7e864ba43815e..1433d73e09bf87a71c80e564254fb463d353cea4 100644 --- a/sound/soc/codecs/wm_hubs.h +++ b/sound/soc/codecs/wm_hubs.h @@ -31,6 +31,7 @@ struct wm_hubs_data { int hp_startup_mode; int series_startup; int no_series_update; + bool micd_scthr; bool no_cache_dac_hp_direct; struct list_head dcs_cache; diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c index c22880aea82a24d3d34688fa00796172bd99ef24..7e48c740bf55020ba2d9a837c86e533c61f13d11 100644 --- a/sound/soc/img/img-i2s-in.c +++ b/sound/soc/img/img-i2s-in.c @@ -346,8 +346,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } for (i = 0; i < i2s->active_channels; i++) img_i2s_in_ch_disable(i2s, i); diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c index fc2d1dac63339e84385ad3ffa8228e8259ad4817..798ab579564cb85bf0ae7148271649c8b6fc0fa6 100644 --- a/sound/soc/img/img-i2s-out.c +++ b/sound/soc/img/img-i2s-out.c @@ -350,8 +350,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } img_i2s_out_disable(i2s); @@ -491,8 +493,10 @@ static int img_i2s_out_probe(struct platform_device *pdev) goto err_pm_disable; } ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto err_suspend; + } reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK; img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL); diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c index acc005217be06770459e12d8a8ca94e9d4a9f7f0..f56752662b19932a3b81e3c8b72246fb11bdec0a 100644 --- a/sound/soc/img/img-parallel-out.c +++ b/sound/soc/img/img-parallel-out.c @@ -166,8 +166,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) } ret = pm_runtime_get_sync(prl->dev); - if (ret < 0) + if (ret < 0) { 
+ pm_runtime_put_noidle(prl->dev); return ret; + } reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL); reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set; diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 6868e71e3a3f010df409f9f5d7b8565f51f62c44..0572c3c9645060bc26e7c912d17e752b6cb22d98 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, ret_val = power_up_sst(stream); if (ret_val < 0) - return ret_val; + goto out_power_up; /* Make sure, that the period size is always even */ snd_pcm_hw_constraint_step(substream->runtime, 0, @@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); out_ops: - kfree(stream); mutex_unlock(&sst_lock); +out_power_up: + kfree(stream); return ret_val; } diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c index 27308337ab127a22071369056d471d8c69b1ce81..ba76e37a4b094a7c7bf5e12dedfb05ab55982509 100644 --- a/sound/soc/intel/boards/bxt_rt298.c +++ b/sound/soc/intel/boards/bxt_rt298.c @@ -544,6 +544,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card) /* broxton audio machine driver for SPT + RT298S */ static struct snd_soc_card broxton_rt298 = { .name = "broxton-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, @@ -559,6 +560,7 @@ static struct snd_soc_card broxton_rt298 = { static struct snd_soc_card geminilake_rt298 = { .name = "geminilake-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 0dcd249877c554bb107f2072190b10dcf186b6b5..ec630127ef2f37f2606d804f0e6accd83a062b80 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -588,6 +588,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"), + DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MPMAN MPWIN895CL */ .matches = { diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index c6a58520d377aca17ef7538282362c69c370d4c2..255cc45905b814a60df398a2779fd327f3b7df18 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, "kirkwood-i2s", priv); if (err) - return -EBUSY; + return err; /* * Enable Error interrupts. 
We're only ack'ing them but diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index 7b8baf46d9689f43bf592052a65d7506c97cd42b..5c055d8de8c7da7a3b880babd8257e39d9d5319d 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -111,18 +111,25 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai); - /* These modes are not supported */ - if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) { + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + if (!iface->mclk) { + dev_err(dai->dev, "cpu clock master: mclk missing\n"); + return -ENODEV; + } + break; + + case SND_SOC_DAIFMT_CBM_CFM: + break; + + case SND_SOC_DAIFMT_CBS_CFM: + case SND_SOC_DAIFMT_CBM_CFS: dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); + /* Fall-through */ + default: return -EINVAL; } - /* If the TDM interface is the clock master, it requires mclk */ - if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) { - dev_err(dai->dev, "cpu clock master: mclk missing\n"); - return -ENODEV; - } - iface->fmt = fmt; return 0; } @@ -311,7 +318,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream, if (ret) return ret; - if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) { + if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) == + SND_SOC_DAIFMT_CBS_CFS) { ret = axg_tdm_iface_set_sclk(dai, params); if (ret) return ret; diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index 4b559932adc3377b00a10b50d0d6ef91e2dbdcf0..121460db8eacff3a3f8b7c64f5ec2d146db52c86 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -233,6 +233,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; card->dapm_widgets = apq8016_sbc_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets); data = apq8016_sbc_parse_of(card); diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c index 1543e85629f8037c60a37637a3f3607d7a83ad1e..04f814a0a7d511b58a35e895d313769ee170f5a8 100644 --- a/sound/soc/qcom/apq8096.c +++ b/sound/soc/qcom/apq8096.c @@ -46,6 +46,7 @@ static int apq8096_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index c6b51571be945e5262dbf46966caab4e1615f529..44eee18c658ae7b9348f6baf73fc6443ef36d075 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -968,6 +968,20 @@ static int msm_routing_probe(struct snd_soc_component *c) return 0; } +static unsigned int q6routing_reg_read(struct snd_soc_component *component, + unsigned int reg) +{ + /* default value */ + return 0; +} + +static int q6routing_reg_write(struct snd_soc_component *component, + unsigned int reg, unsigned int val) +{ + /* dummy */ + return 0; +} + static const struct snd_soc_component_driver msm_soc_routing_component = { .ops = &q6pcm_routing_ops, .probe = msm_routing_probe, @@ -976,6 +990,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = { .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets), .dapm_routes = intercon, .num_dapm_routes = ARRAY_SIZE(intercon), + .read = q6routing_reg_read, + .write = q6routing_reg_write, }; static int q6pcm_routing_probe(struct 
platform_device *pdev) diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 2a781d87ee65e1dcd2b798111f384084233ed652..5fdbfa363ab16beea9ed636504f9315cbcf714aa 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -226,6 +226,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev) } card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c index a9fa972466ad155fd4b681bdb530bd953b13e55a..00a3f4c1b6fedfe4c09ca7fda6d6e31a001f35ba 100644 --- a/sound/soc/qcom/storm.c +++ b/sound/soc/qcom/storm.c @@ -99,6 +99,7 @@ static int storm_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = &pdev->dev; + card->owner = THIS_MODULE; ret = snd_soc_of_parse_card_name(card, "qcom,model"); if (ret) { diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 43679aeeb12beadc948d23bc59a174a72b50ecac..88e838ac937dc0f418b3219eee90408e6e83282f 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c @@ -655,8 +655,10 @@ static int tegra30_ahub_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(ahub->regmap_ahub); ret |= regcache_sync(ahub->regmap_apbif); pm_runtime_put(dev); diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index 0b176ea24914ba7923c7de246a780839f1e56a89..bf155c5092f0646ac56beb427b58b07714c538ad 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -551,8 +551,10 @@ static int tegra30_i2s_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(i2s->regmap); pm_runtime_put(dev); diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 28a3ad8b1d74b16bd9c81dae9cf308f950a4d5e2..137e1e8718d6f2237084de6a0fd697ac049f228a 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1828,6 +1828,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi, return 0; } +static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor( + struct usb_host_endpoint *hostep) +{ + unsigned char *extra = hostep->extra; + int extralen = hostep->extralen; + + while (extralen > 3) { + struct usb_ms_endpoint_descriptor *ms_ep = + (struct usb_ms_endpoint_descriptor *)extra; + + if (ms_ep->bLength > 3 && + ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT && + ms_ep->bDescriptorSubtype == UAC_MS_GENERAL) + return ms_ep; + if (!extra[0]) + break; + extralen -= extra[0]; + extra += extra[0]; + } + return NULL; +} + /* * Returns MIDIStreaming device capabilities. 
*/ @@ -1865,11 +1887,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, ep = get_ep_desc(hostep); if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) continue; - ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra; - if (hostep->extralen < 4 || - ms_ep->bLength < 4 || - ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT || - ms_ep->bDescriptorSubtype != UAC_MS_GENERAL) + ms_ep = find_usb_ms_endpoint_descriptor(hostep); + if (!ms_ep) continue; if (usb_endpoint_dir_out(ep)) { if (endpoints[epidx].out_ep) { diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index e533bcca6ae849e9ab1c6097aa450fbc819012dc..54dcec49a78b3f0df4beefdcec920640414fc031 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1701,6 +1701,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer, /* get min/max values */ get_min_max_with_quirks(cval, 0, kctl); + /* skip a bogus volume range */ + if (cval->max <= cval->min) { + usb_audio_dbg(mixer->chip, + "[%d] FU [%s] skipped due to invalid volume\n", + cval->head.id, kctl->id.name); + snd_ctl_free_one(kctl); + return; + } + + if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); if (cval->dBmin < cval->dBmax || !cval->initialized) { diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 21c1135bb89b43e81a89da475e2110b184da5bc2..d1328abd1bc49bd542c68705be5e82f4218b1781 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -195,6 +195,7 @@ static const struct rc_config { { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ }; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 41a5e38b7870b099d39a59ab6bf5c1f6ce346d97..83f72ddf4fda606ec2ea79ba00d5d7c48612f897 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3419,6 +3419,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), .type = QUIRK_SETUP_FMT_AFTER_RESUME } }, +{ + /* + * PIONEER DJ DDJ-RB + * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed + * The feedback for the output is the dummy input. 
+ */ + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 4, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x01, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC| + USB_ENDPOINT_USAGE_IMPLICIT_FB, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = -1 + } + } + } +}, #define ALC1220_VB_DESKTOP(vend, prod) { \ USB_DEVICE(vend, prod), \ @@ -3468,11 +3524,17 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ * they pretend to be 96kHz mono as a workaround for stereo being broken * by that... * - * They also have swapped L-R channels, but that's for userspace to deal - * with. + * They also have an issue with initial stream alignment that causes the + * channels to be swapped and out of phase, which is dealt with in quirks.c. */ { - USB_DEVICE(0x534d, 0x2109), + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_CLASS | + USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x534d, + .idProduct = 0x2109, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .vendor_name = "MacroSilicon", .product_name = "MS2109", diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index e9ec6166acc65224d82d3592c026c8fb86c42d26..87078e5d672b2d2bd18486959a1cc794d46a004d 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1335,12 +1335,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) msleep(20); - /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny - * delay here, otherwise requests like get/set frequency return as - * failed despite actually succeeding. + /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX + * needs a tiny delay here, otherwise requests like get/set + * frequency return as failed despite actually succeeding. 
*/ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || chip->usb_id == USB_ID(0x046d, 0x0a46) || + chip->usb_id == USB_ID(0x046d, 0x0a56) || chip->usb_id == USB_ID(0x0b0e, 0x0349) || chip->usb_id == USB_ID(0x0951, 0x16ad)) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) diff --git a/tools/build/Build.include b/tools/build/Build.include index 9ec01f4454f9f41da8892ad44493273ccf6bd8d6..585486e40995b95e0d4540c2374901af3ad19150 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), # dependencies in the cmd file if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \ @set -e; \ - $(echo-cmd) $(cmd_$(1)) && $(dep-cmd)) + $(echo-cmd) $(cmd_$(1)); \ + $(dep-cmd)) # if_changed - execute command if any prerequisite is newer than # target, or command line has changed diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 42a787856cd87c21622abbb3ce01ead6596d95fe..7c17f17ea2cd2f9b13f2aa294d4a6feea5d2b3fb 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -7,7 +7,7 @@ endif feature_check = $(eval $(feature_check_code)) define feature_check_code - feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) endef feature_set = $(eval $(feature_set_code)) diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index bf8a8ebcca1eb047af212708ab33ef2c0c511ec6..c4845b66b9baaee5b23cfde24fb9abb020c38c2a 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -62,8 +62,6 @@ FILES= \ FILES := $(addprefix $(OUTPUT),$(FILES)) -CC ?= $(CROSS_COMPILE)gcc -CXX ?= $(CROSS_COMPILE)g++ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config LLVM_CONFIG ?= llvm-config diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c index 4bcb234c0fcab7dafa2f0716dee3b044b66b6fd7..3da5462a0c7d3bf10280841363664504dc895e14 100644 --- a/tools/gpio/gpio-hammer.c +++ b/tools/gpio/gpio-hammer.c @@ -138,7 +138,14 @@ int main(int argc, char **argv) device_name = optarg; break; case 'o': - lines[i] = strtoul(optarg, NULL, 10); + /* + * Avoid overflow. 
Do not immediately error, we want to + * be able to accurately report on the amount of times + * '-o' was given to give an accurate error message + */ + if (i < GPIOHANDLES_MAX) + lines[i] = strtoul(optarg, NULL, 10); + i++; break; case '?': @@ -146,6 +153,14 @@ int main(int argc, char **argv) return -1; } } + + if (i >= GPIOHANDLES_MAX) { + fprintf(stderr, + "Only %d occurences of '-o' are allowed, %d were found\n", + GPIOHANDLES_MAX, i + 1); + return -1; + } + nlines = i; if (!device_name || !nlines) { diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index f35eb72739c09e3ad0bd22e279fa4a33119c15f6..a45e7b4f03163a462eab3840fe766d6d9f287a00 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1079,7 +1079,7 @@ union perf_mem_data_src { #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ -#define PERF_MEM_SNOOPX_SHIFT 37 +#define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 382e476629fb1dd0fca8dba6e140dcf9a968eefc..c0fcc8af2a3effbc00e0bfa6afc3c27a119a1725 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -2766,6 +2766,7 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg, if (read_expected(EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 20f67fcf378d540eced6080f41c0c844f7fe8b79..baa92279c137e2e82a976428f43bd4a5e76ae747 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,9 +7,15 @@ ARCH := x86 endif # always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +endif AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index fd3071d83deae90ebe45878afd71e240eea7cbf3..c0ab27368a3455aaf0facbc6d9b7efdc5882859e 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -503,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file) insn->type != INSN_JUMP_UNCONDITIONAL) continue; - if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) + if (insn->offset == FAKE_JUMP_OFFSET) continue; rela = find_rela_by_dest_range(insn->sec, insn->offset, diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 246dee081efda287e39060a55e4bfc8ca7bfde8e..edf2be251788f7401a16262828e58e0786044759 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,6 +33,10 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where 'param1', 'param2', etc are defined as formats for the PMU in /sys/bus/event_source/devices//format/*. 
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index b10a90b6a7181f8968420a875a2b2fc2b3919321..239af8f71f79bd63fa066171c8197d494f209e62 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -39,6 +39,10 @@ report:: - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed event like 'pmu/param1=0x3,param2/' where param1 and param2 are defined as formats for the PMU in /sys/bus/event_source/devices//format/* diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c index 0251dd348124afc4c7a119503c5704d16ba1c3ff..4864fc67d01b5f4fb9d926b9b6b70216920faf27 100644 --- a/tools/perf/bench/mem-functions.c +++ b/tools/perf/bench/mem-functions.c @@ -222,12 +222,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info * return 0; } -static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst) { - u64 cycle_start = 0ULL, cycle_end = 0ULL; - memcpy_t fn = r->fn.memcpy; - int i; - /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */ memset(src, 0, size); @@ -236,6 +232,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo * to not measure page fault overhead: */ fn(dst, src, size); +} + +static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +{ + u64 cycle_start = 0ULL, cycle_end = 0ULL; + memcpy_t fn = r->fn.memcpy; + int i; + + memcpy_prefault(fn, size, src, dst); cycle_start = get_cycles(); for (i = 0; i < nr_loops; ++i) @@ -251,11 +256,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void memcpy_t fn = r->fn.memcpy; int i; - /* - * We prefault the freshly allocated memory range here, - * to not measure page fault overhead: - */ - fn(dst, src, size); + memcpy_prefault(fn, size, src, dst); BUG_ON(gettimeofday(&tv_start, NULL)); for (i = 0; i < nr_loops; ++i) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 6aae10ff954c72847d9648e25e45e7b1d2a7ea1e..adabe9d4dc8667a428ecb5dbbabf132df2233e5a 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -422,7 +422,7 @@ static void process_interval(void) } init_stats(&walltime_nsecs_stats); - update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000); + update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); print_counters(&rs, 0, NULL); } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index d0733251a386e75ac86981303936e1075bc5c898..9caab84c6294d8a62bb06685138a95d64b419d65 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -651,7 +651,9 @@ static void *display_thread(void *arg) delay_msecs = top->delay_secs * MSEC_PER_SEC; set_term_quiet_input(&save); /* trash return*/ - getc(stdin); + clearerr(stdin); + if (poll(&stdin_poll, 1, 0) > 0) + getc(stdin); while (!done) { perf_top__print_sym_table(top); diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 38b5888ef7b38e7c8a876d9e389d0567563ff910..6631970f96832aa971d3c50028de99b4245e4709 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c 
@@ -137,7 +137,7 @@ static char *fixregex(char *s) return s; /* allocate space for a new string */ - fixed = (char *) malloc(len + 1); + fixed = (char *) malloc(len + esc_count + 1); if (!fixed) return NULL; @@ -1064,10 +1064,9 @@ static int process_one_file(const char *fpath, const struct stat *sb, */ int main(int argc, char *argv[]) { - int rc; + int rc, ret = 0; int maxfds; char ldirname[PATH_MAX]; - const char *arch; const char *output_file; const char *start_dirname; @@ -1138,7 +1137,8 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; + goto out_free_mapfile; } else if (rc) { goto empty_map; } @@ -1156,14 +1156,17 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; } - return 0; + + goto out_free_mapfile; empty_map: fclose(eventsfp); create_empty_mapping(output_file); free_arch_std_events(); - return 0; +out_free_mapfile: + free(mapfile); + return ret; } diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c index 6cf00650602ef2f42a6d765bc16ca4855bd40d6f..697423ce3bdfc252f64c09c111ce791bdf9b7214 100644 --- a/tools/perf/tests/bp_signal.c +++ b/tools/perf/tests/bp_signal.c @@ -44,10 +44,13 @@ volatile long the_var; #if defined (__x86_64__) extern void __test_function(volatile long *ptr); asm ( + ".pushsection .text;" ".globl __test_function\n" + ".type __test_function, @function;" "__test_function:\n" "incq (%rdi)\n" - "ret\n"); + "ret\n" + ".popsection\n"); #else static void __test_function(volatile long *ptr) { diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 7bedf8608fdde62b931484c24c28ef8b2fd8fa9b..3e183eef6f8573118270f91d7bc8d379be75e670 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -172,6 +172,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused) ret = 0; } while (0); + perf_pmu__del_formats(&formats); test_format_dir_put(format); return ret; } diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 7cb99b433888b80d1b56b6331748b3db8414c0ca..c2cc42daf924235762a7528b89c858c7ebb9d062 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -14,7 +14,7 @@ add_probe_vfs_getname() { if [ $had_vfs_getname -eq 1 ] ; then line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \ - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string" + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" fi } diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index 22c9fc900c847f749133d9ff028f91619ba348e6..f8c44a85650bef52fdd8c5d5214a7cbcc04616b8 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -91,7 +91,7 @@ EoHEADER # in tools/perf/arch archlist="" for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do - test -d arch/$arch && archlist="$archlist $arch" + test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch" done for arch in x86 $archlist generic; do diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 
f93846edc1e0d463a685f5114e5b798af02a0608..827d844f4efb1f3b1e7145055d3e530d57912567 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -462,7 +462,7 @@ static void set_max_cpu_num(void) /* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -473,7 +473,7 @@ static void set_max_cpu_num(void) /* get the highest present cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -501,7 +501,7 @@ static void set_max_node_num(void) /* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void) return 0; n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); return -1; } @@ -601,7 +601,7 @@ int cpu__setup_cpunode_map(void) continue; n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); continue; } diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 7b5e15cc6b7174a27f8c48d3122bef218b128b4b..ad33b99f5d21e28981ce07f409ec473c8ed07fca 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -87,6 +87,9 @@ struct cs_etm_queue { struct cs_etm_packet *packet; }; +/* RB tree for quick conversion between traceID and metadata pointers */ +static struct intlist *traceid_list; + static int cs_etm__update_queues(struct cs_etm_auxtrace *etm); static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, pid_t tid, u64 time_); diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 37f8d48179caef841128d0d7f7a8c635bb395769..c7ef97b198c773102ad98c088dcc6ccb104070d1 100644 --- a/tools/perf/util/cs-etm.h +++ b/tools/perf/util/cs-etm.h @@ -53,9 +53,6 @@ enum { CS_ETMV4_PRIV_MAX, }; -/* RB tree for quick conversion between traceID and CPUs */ -struct intlist *traceid_list; - #define KiB(x) ((x) * 1024) #define MiB(x) ((x) * 1024 * 1024) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 4fad92213609f3923a12daf74df70caf412765e3..11a2aa80802d5aefd2e2eebaeee834d3a0f24d60 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1290,6 +1290,9 @@ void perf_evsel__exit(struct perf_evsel *evsel) thread_map__put(evsel->threads); zfree(&evsel->group_name); zfree(&evsel->name); + zfree(&evsel->pmu_name); + zfree(&evsel->per_pkg_mask); + zfree(&evsel->metric_events); perf_evsel__object.fini(evsel); } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 4357141c7c924b21ce6d51ef3c79095afda8f9df..6522b6513895c6aefbef3089a76f323cf76c9ece 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1129,6 +1129,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) return 0; if (err == -EAGAIN || intel_pt_fup_with_nlip(decoder, 
&intel_pt_insn, ip, err)) { + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; if (intel_pt_fup_event(decoder)) return 0; return -EAGAIN; @@ -1780,17 +1781,13 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder) } if (decoder->set_fup_mwait) no_tip = true; + if (no_tip) + decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP; + else + decoder->pkt_state = INTEL_PT_STATE_FUP; err = intel_pt_walk_fup(decoder); - if (err != -EAGAIN) { - if (err) - return err; - if (no_tip) - decoder->pkt_state = - INTEL_PT_STATE_FUP_NO_TIP; - else - decoder->pkt_state = INTEL_PT_STATE_FUP; - return 0; - } + if (err != -EAGAIN) + return err; if (no_tip) { no_tip = false; break; @@ -2375,15 +2372,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) err = intel_pt_walk_tip(decoder); break; case INTEL_PT_STATE_FUP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_fup_tip(decoder); - else if (!err) - decoder->pkt_state = INTEL_PT_STATE_FUP; break; case INTEL_PT_STATE_FUP_NO_TIP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_trace(decoder); diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c index c6fd81c025863dfa46aad8c32b70018cc265ff83..81c5a2e438b7d5533ee0f3bc281257c57e761e0e 100644 --- a/tools/perf/util/mem2node.c +++ b/tools/perf/util/mem2node.c @@ -1,5 +1,6 @@ #include #include +#include #include #include "mem2node.h" #include "util.h" @@ -92,7 +93,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env) /* Cut unused entries, due to merging. */ tmp_entries = realloc(entries, sizeof(*entries) * j); - if (tmp_entries) + if (tmp_entries || WARN_ON_ONCE(j == 0)) entries = tmp_entries; for (i = 0; i < j; i++) { diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 8b3dafe3fac3a7fa5b70ab7d075491f6c6e3b2c1..6dcc6e1182a54766b987c0a9435666e7c2c2eb22 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -171,6 +171,7 @@ static int metricgroup__setup_events(struct list_head *groups, if (!evsel) { pr_debug("Cannot resolve %s: %s\n", eg->metric_name, eg->metric_expr); + free(metric_events); continue; } for (i = 0; i < eg->idnum; i++) @@ -178,11 +179,13 @@ static int metricgroup__setup_events(struct list_head *groups, me = metricgroup__lookup(metric_events_list, evsel, true); if (!me) { ret = -ENOMEM; + free(metric_events); break; } expr = malloc(sizeof(struct metric_expr)); if (!expr) { ret = -ENOMEM; + free(metric_events); break; } expr->metric_expr = eg->metric_expr; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 95043cae57740118ee17c71e09e6c8ad1ff13e26..0eff0c3ba9eeba24095b0307383ac6e1310e0857 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1261,7 +1261,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, attr.type = pmu->type; evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats); if (evsel) { - evsel->pmu_name = name; + evsel->pmu_name = name ? strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; return 0; } else { @@ -1302,7 +1302,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel->snapshot = info.snapshot; evsel->metric_expr = info.metric_expr; evsel->metric_name = info.metric_name; - evsel->pmu_name = name; + evsel->pmu_name = name ? 
strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; } @@ -1421,12 +1421,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, * event. That can be used to distinguish the leader from * other members, even they have the same event name. */ - if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) { + if ((leader != evsel) && + !strcmp(leader->pmu_name, evsel->pmu_name)) { is_leader = false; continue; } - /* The name is always alias name */ - WARN_ON(strcmp(leader->name, evsel->name)); /* Store the leader event for each PMU */ leaders[nr_pmu++] = (uintptr_t) evsel; diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index c1acf04c9f7abf0947492b856dd971b09a682e87..c42054f42e7e6f32effe922391c6af0494f46abf 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1282,6 +1282,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) set_bit(b, bits); } +void perf_pmu__del_formats(struct list_head *formats) +{ + struct perf_pmu_format *fmt, *tmp; + + list_for_each_entry_safe(fmt, tmp, formats, list) { + list_del(&fmt->list); + free(fmt->name); + free(fmt); + } +} + static int sub_non_neg(int a, int b) { if (b > a) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 76fecec7b3f919b2d41be0c67d36ce977bdbd8dd..21335425f2e424f57df399048433d839f1401b29 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -79,6 +79,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits); void perf_pmu__set_format(unsigned long *bits, long from, long to); int perf_pmu__format_parse(char *dir, struct list_head *head); +void perf_pmu__del_formats(struct list_head *formats); struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 60169196b94810d38f70e6889b6081fab52ff70c..4da4ec25524638337a2ac5d35b5e5d98bcd6595e 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1351,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, tf.ntevs = 0; ret = debuginfo__find_probes(dbg, &tf.pf); - if (ret < 0) { + if (ret < 0 || tf.ntevs == 0) { for (i = 0; i < tf.ntevs; i++) clear_probe_trace_event(&tf.tevs[i]); zfree(tevs); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 46daa22b86e3b576bbfc079926b9e686cb78f472..85ff4f68adc007a96b3b7f26724d5cff8bcae221 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2690,7 +2690,7 @@ static char *prefix_if_not_in(const char *pre, char *str) return str; if (asprintf(&n, "%s,%s", pre, str) < 0) - return NULL; + n = NULL; free(str); return n; diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index a701a8a48f005281eb66d0d107bce49742647cb2..166c621e02235304fe8bb4581a8cac69854af047 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1421,6 +1421,7 @@ struct kcore_copy_info { u64 first_symbol; u64 last_symbol; u64 first_module; + u64 first_module_symbol; u64 last_module_symbol; size_t phnum; struct list_head phdrs; @@ -1497,6 +1498,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, return 0; if (strchr(name, '[')) { + if (!kci->first_module_symbol || start < kci->first_module_symbol) + kci->first_module_symbol = start; if (start > kci->last_module_symbol) kci->last_module_symbol = start; return 0; @@ -1694,6 +1697,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, kci->etext += 
page_size; } + if (kci->first_module_symbol && + (!kci->first_module || kci->first_module_symbol < kci->first_module)) + kci->first_module = kci->first_module_symbol; + kci->first_module = round_down(kci->first_module, page_size); if (kci->last_module_symbol) { diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py index 2fa3c5757bcb5b4f72cf85a6bf8a6cbad8efbf6f..dbed3d213bf17faf4c4c8bef2f1e796b106f6f02 100755 --- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py +++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py @@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval and generates performance plots. Prerequisites: - Python version 2.7.x + Python version 2.7.x or higher gnuplot 5.0 or higher - gnuplot-py 1.8 + gnuplot-py 1.8 or higher (Most of the distributions have these required packages. They may be called - gnuplot-py, phython-gnuplot. ) + gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... ) HWP (Hardware P-States are disabled) Kernel config for Linux trace is enabled @@ -180,7 +180,7 @@ def plot_pstate_cpu_with_sample(): g_plot('set xlabel "Samples"') g_plot('set ylabel "P-State"') g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -197,7 +197,7 @@ def plot_pstate_cpu(): # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. 
# plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' # - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -211,7 +211,7 @@ def plot_load_cpu(): g_plot('set ylabel "CPU load (percent)"') g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -225,7 +225,7 @@ def plot_frequency_cpu(): g_plot('set ylabel "CPU Frequency (GHz)"') g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -240,7 +240,7 @@ def plot_duration_cpu(): g_plot('set ylabel "Timer Duration (MilliSeconds)"') g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -254,7 +254,7 @@ def plot_scaled_cpu(): g_plot('set ylabel "Scaled Busy (Unitless)"') g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -268,7 +268,7 @@ def plot_boost_cpu(): g_plot('set ylabel "CPU IO Boost (percent)"') g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -282,7 +282,7 @@ def plot_ghz_cpu(): g_plot('set ylabel "TSC Frequency (GHz)"') g_plot('set title 
"{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 9b552c0fc47db8cded0fda2a53e40f9d5d42a036..4e202217fae107ad41e2fffa544b971bb07a9c5d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1017,6 +1017,8 @@ static void __run_parallel(int tasks, void (*fn)(int task, void *data), pid_t pid[tasks]; int i; + fflush(stdout); + for (i = 0; i < tasks; i++) { pid[i] = fork(); if (pid[i] == 0) { diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 075cb0c730149907414f0aa7b1f2957d7e8361e9..90418d79ef6768849767689942071028fd5cca35 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -95,7 +95,7 @@ int cg_read_strcmp(const char *cgroup, const char *control, /* Handle the case of comparing against empty string */ if (!expected) - size = 32; + return -1; else size = strlen(expected) + 1; diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc index 27a54a17da65d60bb3d181fb4662efe7c856be3c..f4e92afab14b2485d66076fd2b002341eb6a0c7a 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc @@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$' ftrace_filter_check 'schedule*' '^schedule.*$' # filter by *mid*end -ftrace_filter_check '*aw*lock' '.*aw.*lock$' +ftrace_filter_check '*pin*lock' '.*pin.*lock$' # filter by start*mid* ftrace_filter_check 'mutex*try*' '^mutex.*try.*' diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 406cc70c571dea0c1bfef19165e8cfa251a2cf39..c539591937a17412e819ea2df7e599f8c503c309 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -125,9 +125,8 @@ static int do_setcpu(int cpu) CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (sched_setaffinity(0, sizeof(mask), &mask)) - error(1, 0, "setaffinity %d", cpu); - - if (cfg_verbose) + fprintf(stderr, "cpu: unable to pin, may increase variance.\n"); + else if (cfg_verbose) fprintf(stderr, "cpu: %u\n", cpu); return 0; diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index bd9b9632c72b0548b576eb8c802097b1e3c1bc79..f496ba3b1cd37cdc05894cd2108bbd35f05784bb 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -364,7 +364,8 @@ static int test_datapath(uint16_t typeflags, int port_off, int fds[2], fds_udp[2][2], ret; fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n", - typeflags, PORT_BASE, PORT_BASE + port_off); + typeflags, (uint16_t)PORT_BASE, + (uint16_t)(PORT_BASE + port_off)); fds[0] = sock_fanout_open(typeflags, 0); fds[1] = sock_fanout_open(typeflags, 0); diff --git 
a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index 7a573fb4c1c4ec10aa560e8dd36e53a27ba7feb7..c6428f1ac22fb4965b0c25171d1ab9009fdb399d 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c @@ -328,8 +328,7 @@ int main(int argc, char **argv) bool all_tests = true; int arg_index = 0; int failures = 0; - int s, t; - char opt; + int s, t, opt; while ((opt = getopt_long(argc, argv, "", long_options, &arg_index)) != -1) { diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c index 87f1f0252299f662f01e811bfa7bf4eeea9016bf..9ec7674697b1e4c61cbbd59147075bef130cbebc 100644 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -108,8 +109,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) { - int pid; - cpu_set_t cpuset; + int pid, ncpus; + cpu_set_t *cpuset; + size_t size; pid = fork(); if (pid == -1) { @@ -120,14 +122,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) if (pid) return; - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); + ncpus = get_nprocs(); + size = CPU_ALLOC_SIZE(ncpus); + cpuset = CPU_ALLOC(ncpus); + if (!cpuset) { + perror("malloc"); + exit(1); + } + CPU_ZERO_S(size, cpuset); + CPU_SET_S(cpu, size, cpuset); - if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { + if (sched_setaffinity(0, size, cpuset)) { perror("sched_setaffinity"); + CPU_FREE(cpuset); exit(1); } + CPU_FREE(cpuset); fn(arg); exit(0); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c index 94110b1dcd3d812d97dcc4313f5acadd67cd613c..031baa43646fb8fb8979ad0354013c9ca27152b9 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c @@ -91,8 +91,6 @@ int back_to_back_ebbs(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c index 7c57a8d79535d6178c27fe0d92ea3b7a778240f2..361e0be9df9ae087592e590a29aab763a44ed663 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c @@ -42,8 +42,6 @@ int cycles(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c index ecf5ee3283a3ea135676cb9b20b106ab310b2fc2..fe7d0dc2a1a26bf890d415093594ddebbb407dea 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c @@ -99,8 +99,6 @@ int cycles_with_freeze(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); printf("EBBs while frozen %d\n", ebbs_while_frozen); diff --git 
a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c index c0faba520b35c7113d19a80efcca05b07ddf9f88..b9b30f974b5eaceb13f17409f5803e9185ea96b9 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c @@ -71,8 +71,6 @@ int cycles_with_mmcr2(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c index 46681fec549b809e2973a23ed10c5014f28365de..2694ae161a84ae5d624c16d0d00abf98361a8d41 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c @@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c index a991d2ea8d0a1e6908278a984dd227ac1001f8a4..174e4f4dae6c06d960c202489d07dbaf1cdf6b6f 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c @@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); FAIL_IF(ebb_state.stats.ebb_count == 0); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c index 2ed7ad33f7a3b619ec9abd7eb996cb8a07aad979..dddb95938304e80d8e0260e9bb5ce4dafc6c03f0 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c @@ -75,7 +75,6 @@ static int test_body(void) ebb_freeze_pmcs(); ebb_global_disable(); - count_pmc(4, sample_period); mtspr(SPRN_PMC4, 0xdead); dump_summary_ebb_state(); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c index 6ff8c8ff27d66cf9aeb4579f95a8598f1e4e8296..035c02273cd49a076ffe5012ad7bc4c11d3f258c 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c @@ -70,13 +70,6 @@ int multi_counter(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - count_pmc(2, sample_period); - count_pmc(3, sample_period); - count_pmc(4, sample_period); - count_pmc(5, sample_period); - count_pmc(6, sample_period); - dump_ebb_state(); for (i = 0; i < 6; i++) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c index 037cb6154f36070a711ecbdc3985eade494d1761..3e9d4ac965c8513d860bbec2525844eaad23276f 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c @@ -61,8 +61,6 @@ static int cycles_child(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_summary_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c index 
c5fa64790c22ee55c3a9a44ebf09e1745e58264b..d90891fe96a327be9b18c68e06129b4d45eafba9 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c @@ -82,8 +82,6 @@ static int test_body(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); if (mmcr0_mismatch) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c index 30e1ac62e8cb4249350c89aa64163e3d4ee3bedd..8ca92b9ee5b01c6faa2e17002d853a131b6d897d 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -76,8 +76,6 @@ int pmc56_overflow(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(2, sample_period); - dump_ebb_state(); printf("PMC5/6 overflow %d\n", pmc56_overflowed); diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index bdbbbe8431e03cb5d4476543ac2297779ae05964..3694613f418f65e614654cda61cdf2f423796238 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -44,7 +44,7 @@ struct shared_info { unsigned long amr2; /* AMR value that ptrace should refuse to write to the child. */ - unsigned long amr3; + unsigned long invalid_amr; /* IAMR value the parent expects to read from the child. */ unsigned long expected_iamr; @@ -57,8 +57,8 @@ struct shared_info { * (even though they're valid ones) because userspace doesn't have * access to those registers. */ - unsigned long new_iamr; - unsigned long new_uamor; + unsigned long invalid_iamr; + unsigned long invalid_uamor; }; static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) @@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) return syscall(__NR_pkey_alloc, flags, init_access_rights); } -static int sys_pkey_free(int pkey) -{ - return syscall(__NR_pkey_free, pkey); -} - static int child(struct shared_info *info) { unsigned long reg; @@ -100,28 +95,32 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3); + /* + * invalid amr value where we try to force write + * things which are denied by a uamor setting. + */ + info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); + /* + * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr + */ if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); else info->expected_iamr &= ~(1ul << pkeyshift(pkey1)); - info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3)); - - info->expected_uamor |= 3ul << pkeyshift(pkey1) | - 3ul << pkeyshift(pkey2); - info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->new_uamor |= 3ul << pkeyshift(pkey1); + /* + * We allocated pkey2 and pkey3 above. Clear the IAMR bits. + */ + info->expected_iamr &= ~(1ul << pkeyshift(pkey2)); + info->expected_iamr &= ~(1ul << pkeyshift(pkey3)); /* - * We won't use pkey3. We just want a plausible but invalid key to test - * whether ptrace will let us write to AMR bits we are not supposed to. - * - * This also tests whether the kernel restores the UAMOR permissions - * after a key is freed. + * Create an IAMR value different from expected value. 
+ * Kernel will reject an IAMR and UAMOR change. */ - sys_pkey_free(pkey3); + info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2)); + info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1)); printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr1, pkey1, pkey2, pkey3); @@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->amr3 = regs[0]; - info->expected_iamr = info->new_iamr = regs[1]; - info->expected_uamor = info->new_uamor = regs[2]; + info->amr1 = info->amr2 = regs[0]; + info->expected_iamr = regs[1]; + info->expected_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); @@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid) return ret; /* Write invalid AMR value in child. */ - ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1); + ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1); PARENT_FAIL_IF(ret, &info->child_sync); - printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3); + printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr); /* Wake up child so that it can verify it didn't change. */ ret = prod_child(&info->child_sync); @@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid) /* Try to write to IAMR. */ regs[0] = info->amr1; - regs[1] = info->new_iamr; + regs[1] = info->invalid_iamr; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2); PARENT_FAIL_IF(!ret, &info->child_sync); @@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid) ptrace_write_running, regs[0], regs[1]); /* Try to write to IAMR and UAMOR. 
*/ - regs[2] = info->new_uamor; + regs[2] = info->invalid_uamor; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3); PARENT_FAIL_IF(!ret, &info->child_sync); diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index aa8fc1e6365bf8e047b3cf4d49a6eded6e5837fd..ba0959d454b3c7010cbc49c6fba3ab7f375b5d04 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -83,28 +84,40 @@ void *get_auxv_entry(int type) int pick_online_cpu(void) { - cpu_set_t mask; - int cpu; + int ncpus, cpu = -1; + cpu_set_t *mask; + size_t size; + + ncpus = get_nprocs_conf(); + size = CPU_ALLOC_SIZE(ncpus); + mask = CPU_ALLOC(ncpus); + if (!mask) { + perror("malloc"); + return -1; + } - CPU_ZERO(&mask); + CPU_ZERO_S(size, mask); - if (sched_getaffinity(0, sizeof(mask), &mask)) { + if (sched_getaffinity(0, size, mask)) { perror("sched_getaffinity"); - return -1; + goto done; } /* We prefer a primary thread, but skip 0 */ - for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = 8; cpu < ncpus; cpu += 8) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; /* Search for anything, but in reverse */ - for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = ncpus - 1; cpu >= 0; cpu--) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; printf("No cpus in affinity mask?!\n"); - return -1; + +done: + CPU_FREE(mask); + return cpu; } bool is_ppc64le(void) diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c index 43fcab367fb0a243c2b96703f3ef91bc323aa806..74e6b3fc2d09e8c50fa1ab02f08c463c543879f0 100644 --- a/tools/testing/selftests/x86/syscall_nt.c +++ b/tools/testing/selftests/x86/syscall_nt.c @@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags) set_eflags(get_eflags() | extraflags); syscall(SYS_getpid); flags = get_eflags(); + set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); if ((flags & extraflags) == extraflags) { printf("[OK]\tThe syscall worked and flags are still set\n"); } else { diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c index 878e0edb2e1b7fe84ba11c33568e7a61e6f259ad..ff0a1c608371828b423ffe94f1dcaccddf34dc13 100644 --- a/virt/kvm/arm/mmio.c +++ b/virt/kvm/arm/mmio.c @@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) bool sign_extend; bool sixty_four; - if (kvm_vcpu_dabt_iss1tw(vcpu)) { + if (kvm_vcpu_abt_iss1tw(vcpu)) { /* page table accesses IO mem: tell guest to fix its TTBR */ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); return 1; diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index a5bc10d30618f2023427874564a8160c0dacf5c9..787f7329d1b7ffad2c2a7caf5b791ed8db6b8c45 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, * destroying the VM), otherwise another faulting VCPU may come in and mess * with things behind our backs. */ -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, + bool may_block) { pgd_t *pgd; phys_addr_t addr = start, end = start + size; @@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) * If the range is too large, release the kvm->mmu_lock * to prevent starvation and lockup detector warnings. 
*/ - if (next != end) + if (may_block && next != end) cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } +static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + __unmap_stage2_range(kvm, start, size, true); +} + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr, phys_addr_t end) { @@ -1276,6 +1282,9 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) { + if (kvm_vcpu_abt_iss1tw(vcpu)) + return true; + if (kvm_vcpu_trap_is_iabt(vcpu)) return false; @@ -1490,7 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, unsigned long flags = 0; write_fault = kvm_is_write_fault(vcpu); - exec_fault = kvm_vcpu_trap_is_iabt(vcpu); + exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); if (fault_status == FSC_PERM && !write_fault && !exec_fault) { @@ -1820,18 +1829,20 @@ static int handle_hva_to_gpa(struct kvm *kvm, static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) { - unmap_stage2_range(kvm, gpa, size); + bool may_block = *(bool *)data; + + __unmap_stage2_range(kvm, gpa, size, may_block); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, bool blockable) { if (!kvm->arch.pgd) return 0; trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable); return 0; } diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index cd75df25fe14060f6578405ae82b1556bea07765..2fc1777da50d2a9683fac94f513eca0f8ed710db 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -187,6 +187,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) break; default: kfree(dist->spis); + dist->spis = NULL; return -EINVAL; } } diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 9295addea7ecf2e235e318118d4092cd83fa9dd5..f139b1c62ca386a3a436ae787aef85f3b24444e3 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -107,14 +107,21 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, * We "cache" the configuration table entries in our struct vgic_irq's. * However we only have those structs for mapped IRQs, so we read in * the respective config data from memory here upon mapping the LPI. + * + * Should any of these fail, behave as if we couldn't create the LPI + * by dropping the refcount and returning the error. */ ret = update_lpi_config(kvm, irq, NULL, false); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + } ret = vgic_v3_lpi_sync_pending_status(kvm, irq); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + } return irq; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1218ea663c6d26ecdfe114dc611bba11db21dd6c..9312c7e750ed33452bed6930866497e1ec23dbf0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -169,6 +169,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) */ if (pfn_valid(pfn)) return PageReserved(pfn_to_page(pfn)) && + !is_zero_pfn(pfn) && !kvm_is_zone_device_pfn(pfn); return true; @@ -410,7 +411,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, * count is also read inside the mmu_lock critical section. 
*/ kvm->mmu_notifier_count++; - need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); + need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable); need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) @@ -3844,7 +3845,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { - int i; + int i, j; struct kvm_io_bus *new_bus, *bus; bus = kvm_get_bus(kvm, bus_idx); @@ -3861,17 +3862,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); - if (!new_bus) { + if (new_bus) { + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); + new_bus->dev_count--; + memcpy(new_bus->range + i, bus->range + i + 1, + (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); + } else { pr_err("kvm: failed to shrink bus, removing it completely\n"); - goto broken; + for (j = 0; j < bus->dev_count; j++) { + if (j == i) + continue; + kvm_iodevice_destructor(bus->range[j].dev); + } } - memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); - new_bus->dev_count--; - memcpy(new_bus->range + i, bus->range + i + 1, - (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); - -broken: rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus);
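
The powerpc selftest hunks above (tools/testing/selftests/powerpc/benchmarks/context_switch.c and tools/testing/selftests/powerpc/utils.c) replace a fixed-size cpu_set_t with a dynamically allocated one, so that affinity calls keep working on systems with more possible CPUs than CPU_SETSIZE (1024). The following is a minimal standalone sketch of that glibc CPU_ALLOC()/CPU_*_S() pattern, not the selftest code itself; the pin_to_cpu() helper name is purely illustrative:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>

    /* Pin the calling thread to one CPU using a dynamically sized mask. */
    static int pin_to_cpu(unsigned long cpu)
    {
            int ncpus = get_nprocs_conf();     /* configured CPUs, may exceed 1024 */
            size_t size = CPU_ALLOC_SIZE(ncpus);
            cpu_set_t *mask = CPU_ALLOC(ncpus);

            if (!mask) {
                    perror("CPU_ALLOC");
                    return -1;
            }

            CPU_ZERO_S(size, mask);
            CPU_SET_S(cpu, size, mask);

            /* Pass the allocated size, not sizeof(cpu_set_t). */
            if (sched_setaffinity(0, size, mask)) {
                    perror("sched_setaffinity");
                    CPU_FREE(mask);
                    return -1;
            }

            CPU_FREE(mask);
            return 0;
    }

    int main(void)
    {
            if (pin_to_cpu(0))
                    return EXIT_FAILURE;
            printf("pinned to cpu 0\n");
            return EXIT_SUCCESS;
    }

The tools/testing/selftests/bpf/test_maps.c hunk adds an fflush(stdout) before its fork() loop for a related buffering reason: when stdout is fully buffered (for example, redirected to a file by a test harness), any pending output is copied into each child and written again when the child calls exit(). A small sketch of the failure mode and the fix, using nothing beyond POSIX fork() and stdio:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            printf("setup done\n");  /* still buffered when stdout is not a terminal */

            fflush(stdout);          /* without this, each child re-flushes the inherited bytes */

            for (int i = 0; i < 3; i++) {
                    if (fork() == 0) {
                            printf("child %d\n", i);
                            exit(0); /* exit() flushes stdio buffers */
                    }
            }
            while (wait(NULL) > 0)
                    ;
            return 0;
    }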