diff --git a/Documentation/cgroups/00-INDEX b/Documentation/cgroup-legacy/00-INDEX similarity index 100% rename from Documentation/cgroups/00-INDEX rename to Documentation/cgroup-legacy/00-INDEX diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroup-legacy/blkio-controller.txt similarity index 81% rename from Documentation/cgroups/blkio-controller.txt rename to Documentation/cgroup-legacy/blkio-controller.txt index 52fa9f353342cc76734f49938f536f37545fee93..4ecc954a3063b3d52ebec507cca73e24be4c82bb 100644 --- a/Documentation/cgroups/blkio-controller.txt +++ b/Documentation/cgroup-legacy/blkio-controller.txt @@ -374,82 +374,3 @@ One can experience an overall throughput drop if you have created multiple groups and put applications in that group which are not driving enough IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle on individual groups and throughput should improve. - -Writeback -========= - -Page cache is dirtied through buffered writes and shared mmaps and -written asynchronously to the backing filesystem by the writeback -mechanism. Writeback sits between the memory and IO domains and -regulates the proportion of dirty memory by balancing dirtying and -write IOs. - -On traditional cgroup hierarchies, relationships between different -controllers cannot be established making it impossible for writeback -to operate accounting for cgroup resource restrictions and all -writeback IOs are attributed to the root cgroup. - -If both the blkio and memory controllers are used on the v2 hierarchy -and the filesystem supports cgroup writeback, writeback operations -correctly follow the resource restrictions imposed by both memory and -blkio controllers. - -Writeback examines both system-wide and per-cgroup dirty memory status -and enforces the more restrictive of the two. 
Also, writeback control -parameters which are absolute values - vm.dirty_bytes and -vm.dirty_background_bytes - are distributed across cgroups according -to their current writeback bandwidth. - -There's a peculiarity stemming from the discrepancy in ownership -granularity between memory controller and writeback. While memory -controller tracks ownership per page, writeback operates on inode -basis. cgroup writeback bridges the gap by tracking ownership by -inode but migrating ownership if too many foreign pages, pages which -don't match the current inode ownership, have been encountered while -writing back the inode. - -This is a conscious design choice as writeback operations are -inherently tied to inodes making strictly following page ownership -complicated and inefficient. The only use case which suffers from -this compromise is multiple cgroups concurrently dirtying disjoint -regions of the same inode, which is an unlikely use case and decided -to be unsupported. Note that as memory controller assigns page -ownership on the first use and doesn't update it until the page is -released, even if cgroup writeback strictly follows page ownership, -multiple cgroups dirtying overlapping areas wouldn't work as expected. -In general, write-sharing an inode across multiple cgroups is not well -supported. - -Filesystem support for cgroup writeback ---------------------------------------- - -A filesystem can make writeback IOs cgroup-aware by updating -address_space_operations->writepage[s]() to annotate bio's using the -following two functions. - -* wbc_init_bio(@wbc, @bio) - - Should be called for each bio carrying writeback data and associates - the bio with the inode's owner cgroup. Can be called anytime - between bio allocation and submission. - -* wbc_account_io(@wbc, @page, @bytes) - - Should be called for each data segment being written out. 
While - this function doesn't care exactly when it's called during the - writeback session, it's the easiest and most natural to call it as - data segments are added to a bio. - -With writeback bio's annotated, cgroup support can be enabled per -super_block by setting MS_CGROUPWB in ->s_flags. This allows for -selective disabling of cgroup writeback support which is helpful when -certain filesystem features, e.g. journaled data mode, are -incompatible. - -wbc_init_bio() binds the specified bio to its cgroup. Depending on -the configuration, the bio may be executed at a lower priority and if -the writeback session is holding shared resources, e.g. a journal -entry, may lead to priority inversion. There is no one easy solution -for the problem. Filesystems can try to work around specific problem -cases by skipping wbc_init_bio() or using bio_associate_blkcg() -directly. diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroup-legacy/cgroups.txt similarity index 98% rename from Documentation/cgroups/cgroups.txt rename to Documentation/cgroup-legacy/cgroups.txt index 2d984e20783be198586267633e65a842ae61559e..c6256ae9885b8ac7445f6d10066ee541ebb388b8 100644 --- a/Documentation/cgroups/cgroups.txt +++ b/Documentation/cgroup-legacy/cgroups.txt @@ -578,15 +578,6 @@ is completely unused; @cgrp->parent is still valid. (Note - can also be called for a newly-created cgroup if an error occurs after this subsystem's create() method has been called for the new cgroup). -int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) -(cgroup_mutex held by caller) - -Called prior to moving a task into a cgroup; if the subsystem -returns an error, this will abort the attach operation. Used -to extend the permission checks - if all subsystems in a cgroup -return 0, the attach will be allowed to proceed, even if the -default permission check (root or same user) fails. 
- int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) (cgroup_mutex held by caller) diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroup-legacy/cpuacct.txt similarity index 100% rename from Documentation/cgroups/cpuacct.txt rename to Documentation/cgroup-legacy/cpuacct.txt diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroup-legacy/cpusets.txt similarity index 100% rename from Documentation/cgroups/cpusets.txt rename to Documentation/cgroup-legacy/cpusets.txt diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroup-legacy/devices.txt similarity index 100% rename from Documentation/cgroups/devices.txt rename to Documentation/cgroup-legacy/devices.txt diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroup-legacy/freezer-subsystem.txt similarity index 100% rename from Documentation/cgroups/freezer-subsystem.txt rename to Documentation/cgroup-legacy/freezer-subsystem.txt diff --git a/Documentation/cgroups/hugetlb.txt b/Documentation/cgroup-legacy/hugetlb.txt similarity index 100% rename from Documentation/cgroups/hugetlb.txt rename to Documentation/cgroup-legacy/hugetlb.txt diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroup-legacy/memcg_test.txt similarity index 100% rename from Documentation/cgroups/memcg_test.txt rename to Documentation/cgroup-legacy/memcg_test.txt diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroup-legacy/memory.txt similarity index 100% rename from Documentation/cgroups/memory.txt rename to Documentation/cgroup-legacy/memory.txt diff --git a/Documentation/cgroups/net_cls.txt b/Documentation/cgroup-legacy/net_cls.txt similarity index 100% rename from Documentation/cgroups/net_cls.txt rename to Documentation/cgroup-legacy/net_cls.txt diff --git a/Documentation/cgroups/net_prio.txt b/Documentation/cgroup-legacy/net_prio.txt similarity index 100% rename from Documentation/cgroups/net_prio.txt rename to 
Documentation/cgroup-legacy/net_prio.txt diff --git a/Documentation/cgroups/pids.txt b/Documentation/cgroup-legacy/pids.txt similarity index 100% rename from Documentation/cgroups/pids.txt rename to Documentation/cgroup-legacy/pids.txt diff --git a/Documentation/cgroup.txt b/Documentation/cgroup.txt new file mode 100644 index 0000000000000000000000000000000000000000..31d1f7bf12a19ee4a658461569b7285e3fc5df19 --- /dev/null +++ b/Documentation/cgroup.txt @@ -0,0 +1,1293 @@ + +Control Group v2 + +October, 2015 Tejun Heo + +This is the authoritative documentation on the design, interface and +conventions of cgroup v2. It describes all userland-visible aspects +of cgroup including core and specific controller behaviors. All +future changes must be reflected in this document. Documentation for +v1 is available under Documentation/cgroup-legacy/. + +CONTENTS + +1. Introduction + 1-1. Terminology + 1-2. What is cgroup? +2. Basic Operations + 2-1. Mounting + 2-2. Organizing Processes + 2-3. [Un]populated Notification + 2-4. Controlling Controllers + 2-4-1. Enabling and Disabling + 2-4-2. Top-down Constraint + 2-4-3. No Internal Process Constraint + 2-5. Delegation + 2-5-1. Model of Delegation + 2-5-2. Delegation Containment + 2-6. Guidelines + 2-6-1. Organize Once and Control + 2-6-2. Avoid Name Collisions +3. Resource Distribution Models + 3-1. Weights + 3-2. Limits + 3-3. Protections + 3-4. Allocations +4. Interface Files + 4-1. Format + 4-2. Conventions + 4-3. Core Interface Files +5. Controllers + 5-1. CPU + 5-1-1. CPU Interface Files + 5-2. Memory + 5-2-1. Memory Interface Files + 5-2-2. Usage Guidelines + 5-2-3. Memory Ownership + 5-3. IO + 5-3-1. IO Interface Files + 5-3-2. Writeback +P. Information on Kernel Programming + P-1. Filesystem Support for Writeback +D. Deprecated v1 Core Features +R. Issues with v1 and Rationales for v2 + R-1. Multiple Hierarchies + R-2. Thread Granularity + R-3. Competition Between Inner Nodes and Threads + R-4. 
Other Interface Issues + R-5. Controller Issues and Remedies + R-5-1. Memory + + +1. Introduction + +1-1. Terminology + +"cgroup" stands for "control group" and is never capitalized. The +singular form is used to designate the whole feature and also as a +qualifier as in "cgroup controllers". When explicitly referring to +multiple individual control groups, the plural form "cgroups" is used. + + +1-2. What is cgroup? + +cgroup is a mechanism to organize processes hierarchically and +distribute system resources along the hierarchy in a controlled and +configurable manner. + +cgroup is largely composed of two parts - the core and controllers. +cgroup core is primarily responsible for hierarchically organizing +processes. A cgroup controller is usually responsible for +distributing a specific type of system resource along the hierarchy +although there are utility controllers which serve purposes other than +resource distribution. + +cgroups form a tree structure and every process in the system belongs +to one and only one cgroup. All threads of a process belong to the +same cgroup. On creation, all processes are put in the cgroup that +the parent process belongs to at the time. A process can be migrated +to another cgroup. Migration of a process doesn't affect already +existing descendant processes. + +Following certain structural constraints, controllers may be enabled or +disabled selectively on a cgroup. All controller behaviors are +hierarchical - if a controller is enabled on a cgroup, it affects all +processes which belong to the cgroups consisting the inclusive +sub-hierarchy of the cgroup. When a controller is enabled on a nested +cgroup, it always restricts the resource distribution further. The +restrictions set closer to the root in the hierarchy can not be +overridden from further away. + + +2. Basic Operations + +2-1. Mounting + +Unlike v1, cgroup v2 has only single hierarchy. The cgroup v2 +hierarchy can be mounted with the following mount command. 
+ + # mount -t cgroup2 none $MOUNT_POINT + +cgroup2 filesystem has the magic number 0x63677270 ("cgrp"). All +controllers which support v2 and are not bound to a v1 hierarchy are +automatically bound to the v2 hierarchy and show up at the root. +Controllers which are not in active use in the v2 hierarchy can be +bound to other hierarchies. This allows mixing v2 hierarchy with the +legacy v1 multiple hierarchies in a fully backward compatible way. + +A controller can be moved across hierarchies only after the controller +is no longer referenced in its current hierarchy. Because per-cgroup +controller states are destroyed asynchronously and controllers may +have lingering references, a controller may not show up immediately on +the v2 hierarchy after the final umount of the previous hierarchy. +Similarly, a controller should be fully disabled to be moved out of +the unified hierarchy and it may take some time for the disabled +controller to become available for other hierarchies; furthermore, due +to inter-controller dependencies, other controllers may need to be +disabled too. + +While useful for development and manual configurations, moving +controllers dynamically between the v2 and other hierarchies is +strongly discouraged for production use. It is recommended to decide +the hierarchies and controller associations before starting using the +controllers after system boot. + + +2-2. Organizing Processes + +Initially, only the root cgroup exists to which all processes belong. +A child cgroup can be created by creating a sub-directory. + + # mkdir $CGROUP_NAME + +A given cgroup may have multiple child cgroups forming a tree +structure. Each cgroup has a read-writable interface file +"cgroup.procs". When read, it lists the PIDs of all processes which +belong to the cgroup one-per-line. The PIDs are not ordered and the +same PID may show up more than once if the process got moved to +another cgroup and then back or the PID got recycled while reading. 
+ +A process can be migrated into a cgroup by writing its PID to the +target cgroup's "cgroup.procs" file. Only one process can be migrated +on a single write(2) call. If a process is composed of multiple +threads, writing the PID of any thread migrates all threads of the +process. + +When a process forks a child process, the new process is born into the +cgroup that the forking process belongs to at the time of the +operation. After exit, a process stays associated with the cgroup +that it belonged to at the time of exit until it's reaped; however, a +zombie process does not appear in "cgroup.procs" and thus can't be +moved to another cgroup. + +A cgroup which doesn't have any children or live processes can be +destroyed by removing the directory. Note that a cgroup which doesn't +have any children and is associated only with zombie processes is +considered empty and can be removed. + + # rmdir $CGROUP_NAME + +"/proc/$PID/cgroup" lists a process's cgroup membership. If legacy +cgroup is in use in the system, this file may contain multiple lines, +one for each hierarchy. The entry for cgroup v2 is always in the +format "0::$PATH". + + # cat /proc/842/cgroup + ... + 0::/test-cgroup/test-cgroup-nested + +If the process becomes a zombie and the cgroup it was associated with +is removed subsequently, " (deleted)" is appended to the path. + + # cat /proc/842/cgroup + ... + 0::/test-cgroup/test-cgroup-nested (deleted) + + +2-3. [Un]populated Notification + +Each non-root cgroup has a "cgroup.events" file which contains +"populated" field indicating whether the cgroup's sub-hierarchy has +live processes in it. Its value is 0 if there is no live process in +the cgroup and its descendants; otherwise, 1. poll and [id]notify +events are triggered when the value changes. This can be used, for +example, to start a clean-up operation after all processes of a given +sub-hierarchy have exited. The populated state updates and +notifications are recursive. 
Consider the following sub-hierarchy +where the numbers in the parentheses represent the numbers of processes +in each cgroup. + + A(4) - B(0) - C(1) + \ D(0) + +A, B and C's "populated" fields would be 1 while D's 0. After the one +process in C exits, B and C's "populated" fields would flip to "0" and +file modified events will be generated on the "cgroup.events" files of +both cgroups. + + +2-4. Controlling Controllers + +2-4-1. Enabling and Disabling + +Each cgroup has a "cgroup.controllers" file which lists all +controllers available for the cgroup to enable. + + # cat cgroup.controllers + cpu io memory + +No controller is enabled by default. Controllers can be enabled and +disabled by writing to the "cgroup.subtree_control" file. + + # echo "+cpu +memory -io" > cgroup.subtree_control + +Only controllers which are listed in "cgroup.controllers" can be +enabled. When multiple operations are specified as above, either they +all succeed or fail. If multiple operations on the same controller +are specified, the last one is effective. + +Enabling a controller in a cgroup indicates that the distribution of +the target resource across its immediate children will be controlled. +Consider the following sub-hierarchy. The enabled controllers are +listed in parentheses. + + A(cpu,memory) - B(memory) - C() + \ D() + +As A has "cpu" and "memory" enabled, A will control the distribution +of CPU cycles and memory to its children, in this case, B. As B has +"memory" enabled but not "CPU", C and D will compete freely on CPU +cycles but their division of memory available to B will be controlled. + +As a controller regulates the distribution of the target resource to +the cgroup's children, enabling it creates the controller's interface +files in the child cgroups. In the above example, enabling "cpu" on B +would create the "cpu." prefixed controller interface files in C and +D. Likewise, disabling "memory" from B would remove the "memory." 
+prefixed controller interface files from C and D. This means that the +controller interface files - anything which doesn't start with +"cgroup." are owned by the parent rather than the cgroup itself. + + +2-4-2. Top-down Constraint + +Resources are distributed top-down and a cgroup can further distribute +a resource only if the resource has been distributed to it from the +parent. This means that all non-root "cgroup.subtree_control" files +can only contain controllers which are enabled in the parent's +"cgroup.subtree_control" file. A controller can be enabled only if +the parent has the controller enabled and a controller can't be +disabled if one or more children have it enabled. + + +2-4-3. No Internal Process Constraint + +Non-root cgroups can only distribute resources to their children when +they don't have any processes of their own. In other words, only +cgroups which don't contain any processes can have controllers enabled +in their "cgroup.subtree_control" files. + +This guarantees that, when a controller is looking at the part of the +hierarchy which has it enabled, processes are always only on the +leaves. This rules out situations where child cgroups compete against +internal processes of the parent. + +The root cgroup is exempt from this restriction. Root contains +processes and anonymous resource consumption which can't be associated +with any other cgroups and requires special treatment from most +controllers. How resource consumption in the root cgroup is governed +is up to each controller. + +Note that the restriction doesn't get in the way if there is no +enabled controller in the cgroup's "cgroup.subtree_control". This is +important as otherwise it wouldn't be possible to create children of a +populated cgroup. To control resource distribution of a cgroup, the +cgroup must create children and transfer all its processes to the +children before enabling controllers in its "cgroup.subtree_control" +file. + + +2-5. Delegation + +2-5-1. 
Model of Delegation + +A cgroup can be delegated to a less privileged user by granting write +access of the directory and its "cgroup.procs" file to the user. Note +that resource control interface files in a given directory control the +distribution of the parent's resources and thus must not be delegated +along with the directory. + +Once delegated, the user can build sub-hierarchy under the directory, +organize processes as it sees fit and further distribute the resources +it received from the parent. The limits and other settings of all +resource controllers are hierarchical and regardless of what happens +in the delegated sub-hierarchy, nothing can escape the resource +restrictions imposed by the parent. + +Currently, cgroup doesn't impose any restrictions on the number of +cgroups in or nesting depth of a delegated sub-hierarchy; however, +this may be limited explicitly in the future. + + +2-5-2. Delegation Containment + +A delegated sub-hierarchy is contained in the sense that processes +can't be moved into or out of the sub-hierarchy by the delegatee. For +a process with a non-root euid to migrate a target process into a +cgroup by writing its PID to the "cgroup.procs" file, the following +conditions must be met. + +- The writer's euid must match either uid or suid of the target process. + +- The writer must have write access to the "cgroup.procs" file. + +- The writer must have write access to the "cgroup.procs" file of the + common ancestor of the source and destination cgroups. + +The above three constraints ensure that while a delegatee may migrate +processes around freely in the delegated sub-hierarchy it can't pull +in from or push out to outside the sub-hierarchy. + +For an example, let's assume cgroups C0 and C1 have been delegated to +user U0 who created C00, C01 under C0 and C10 under C1 as follows and +all processes under C0 and C1 belong to U0. 
+ + ~~~~~~~~~~~~~ - C0 - C00 + ~ cgroup ~ \ C01 + ~ hierarchy ~ + ~~~~~~~~~~~~~ - C1 - C10 + +Let's also say U0 wants to write the PID of a process which is +currently in C10 into "C00/cgroup.procs". U0 has write access to the +file and uid match on the process; however, the common ancestor of the +source cgroup C10 and the destination cgroup C00 is above the points +of delegation and U0 would not have write access to its "cgroup.procs" +files and thus the write will be denied with -EACCES. + + +2-6. Guidelines + +2-6-1. Organize Once and Control + +Migrating a process across cgroups is a relatively expensive operation +and stateful resources such as memory are not moved together with the +process. This is an explicit design decision as there often exist +inherent trade-offs between migration and various hot paths in terms +of synchronization cost. + +As such, migrating processes across cgroups frequently as a means to +apply different resource restrictions is discouraged. A workload +should be assigned to a cgroup according to the system's logical and +resource structure once on start-up. Dynamic adjustments to resource +distribution can be made by changing controller configuration through +the interface files. + + +2-6-2. Avoid Name Collisions + +Interface files for a cgroup and its children cgroups occupy the same +directory and it is possible to create children cgroups which collide +with interface files. + +All cgroup core interface files are prefixed with "cgroup." and each +controller's interface files are prefixed with the controller name and +a dot. A controller's name is composed of lower case alphabets and +'_'s but never begins with an '_' so it can be used as the prefix +character for collision avoidance. Also, interface file names won't +start or end with terms which are often used in categorizing workloads +such as job, service, slice, unit or workload. 
+ +cgroup doesn't do anything to prevent name collisions and it's the +user's responsibility to avoid them. + + +3. Resource Distribution Models + +cgroup controllers implement several resource distribution schemes +depending on the resource type and expected use cases. This section +describes major schemes in use along with their expected behaviors. + + +3-1. Weights + +A parent's resource is distributed by adding up the weights of all +active children and giving each the fraction matching the ratio of its +weight against the sum. As only children which can make use of the +resource at the moment participate in the distribution, this is +work-conserving. Due to the dynamic nature, this model is usually +used for stateless resources. + +All weights are in the range [1, 10000] with the default at 100. This +allows symmetric multiplicative biases in both directions at fine +enough granularity while staying in the intuitive range. + +As long as the weight is in range, all configuration combinations are +valid and there is no reason to reject configuration changes or +process migrations. + +"cpu.weight" proportionally distributes CPU cycles to active children +and is an example of this type. + + +3-2. Limits + +A child can only consume upto the configured amount of the resource. +Limits can be over-committed - the sum of the limits of children can +exceed the amount of resource available to the parent. + +Limits are in the range [0, max] and defaults to "max", which is noop. + +As limits can be over-committed, all configuration combinations are +valid and there is no reason to reject configuration changes or +process migrations. + +"io.max" limits the maximum BPS and/or IOPS that a cgroup can consume +on an IO device and is an example of this type. + + +3-3. Protections + +A cgroup is protected to be allocated upto the configured amount of +the resource if the usages of all its ancestors are under their +protected levels. 
Protections can be hard guarantees or best effort +soft boundaries. Protections can also be over-committed in which case +only upto the amount available to the parent is protected among +children. + +Protections are in the range [0, max] and defaults to 0, which is +noop. + +As protections can be over-committed, all configuration combinations +are valid and there is no reason to reject configuration changes or +process migrations. + +"memory.low" implements best-effort memory protection and is an +example of this type. + + +3-4. Allocations + +A cgroup is exclusively allocated a certain amount of a finite +resource. Allocations can't be over-committed - the sum of the +allocations of children can not exceed the amount of resource +available to the parent. + +Allocations are in the range [0, max] and defaults to 0, which is no +resource. + +As allocations can't be over-committed, some configuration +combinations are invalid and should be rejected. Also, if the +resource is mandatory for execution of processes, process migrations +may be rejected. + +"cpu.rt.max" hard-allocates realtime slices and is an example of this +type. + + +4. Interface Files + +4-1. Format + +All interface files should be in one of the following formats whenever +possible. + + New-line separated values + (when only one value can be written at once) + + VAL0\n + VAL1\n + ... + + Space separated values + (when read-only or multiple values can be written at once) + + VAL0 VAL1 ...\n + + Flat keyed + + KEY0 VAL0\n + KEY1 VAL1\n + ... + + Nested keyed + + KEY0 SUB_KEY0=VAL00 SUB_KEY1=VAL01... + KEY1 SUB_KEY0=VAL10 SUB_KEY1=VAL11... + ... + +For a writable file, the format for writing should generally match +reading; however, controllers may allow omitting later fields or +implement restricted shortcuts for most common use cases. + +For both flat and nested keyed files, only the values for a single key +can be written at a time. 
For nested keyed files, the sub key pairs +may be specified in any order and not all pairs have to be specified. + + +4-2. Conventions + +- Settings for a single feature should be contained in a single file. + +- The root cgroup should be exempt from resource control and thus + shouldn't have resource control interface files. Also, + informational files on the root cgroup which end up showing global + information available elsewhere shouldn't exist. + +- If a controller implements weight based resource distribution, its + interface file should be named "weight" and have the range [1, + 10000] with 100 as the default. The values are chosen to allow + enough and symmetric bias in both directions while keeping it + intuitive (the default is 100%). + +- If a controller implements an absolute resource guarantee and/or + limit, the interface files should be named "min" and "max" + respectively. If a controller implements best effort resource + guarantee and/or limit, the interface files should be named "low" + and "high" respectively. + + In the above four control files, the special token "max" should be + used to represent upward infinity for both reading and writing. + +- If a setting has a configurable default value and keyed specific + overrides, the default entry should be keyed with "default" and + appear as the first entry in the file. + + The default value can be updated by writing either "default $VAL" or + "$VAL". + + When writing to update a specific override, "default" can be used as + the value to indicate removal of the override. Override entries + with "default" as the value must not appear when read. + + For example, a setting which is keyed by major:minor device numbers + with integer values may look like the following. 
+ + # cat cgroup-example-interface-file + default 150 + 8:0 300 + + The default value can be updated by + + # echo 125 > cgroup-example-interface-file + + or + + # echo "default 125" > cgroup-example-interface-file + + An override can be set by + + # echo "8:16 170" > cgroup-example-interface-file + + and cleared by + + # echo "8:0 default" > cgroup-example-interface-file + # cat cgroup-example-interface-file + default 125 + 8:16 170 + +- For events which are not very high frequency, an interface file + "events" should be created which lists event key value pairs. + Whenever a notifiable event happens, file modified event should be + generated on the file. + + +4-3. Core Interface Files + +All cgroup core files are prefixed with "cgroup." + + cgroup.procs + + A read-write new-line separated values file which exists on + all cgroups. + + When read, it lists the PIDs of all processes which belong to + the cgroup one-per-line. The PIDs are not ordered and the + same PID may show up more than once if the process got moved + to another cgroup and then back or the PID got recycled while + reading. + + A PID can be written to migrate the process associated with + the PID to the cgroup. The writer should match all of the + following conditions. + + - Its euid is either root or must match either uid or suid of + the target process. + + - It must have write access to the "cgroup.procs" file. + + - It must have write access to the "cgroup.procs" file of the + common ancestor of the source and destination cgroups. + + When delegating a sub-hierarchy, write access to this file + should be granted along with the containing directory. + + cgroup.controllers + + A read-only space separated values file which exists on all + cgroups. + + It shows space separated list of all controllers available to + the cgroup. The controllers are not ordered. + + cgroup.subtree_control + + A read-write space separated values file which exists on all + cgroups. Starts out empty. 
+ + When read, it shows space separated list of the controllers + which are enabled to control resource distribution from the + cgroup to its children. + + Space separated list of controllers prefixed with '+' or '-' + can be written to enable or disable controllers. A controller + name prefixed with '+' enables the controller and '-' + disables. If a controller appears more than once on the list, + the last one is effective. When multiple enable and disable + operations are specified, either all succeed or all fail. + + cgroup.events + + A read-only flat-keyed file which exists on non-root cgroups. + The following entries are defined. Unless specified + otherwise, a value change in this file generates a file + modified event. + + populated + + 1 if the cgroup or its descendants contains any live + processes; otherwise, 0. + + +5. Controllers + +5-1. CPU + +[NOTE: The interface for the cpu controller hasn't been merged yet] + +The "cpu" controllers regulates distribution of CPU cycles. This +controller implements weight and absolute bandwidth limit models for +normal scheduling policy and absolute bandwidth allocation model for +realtime scheduling policy. + + +5-1-1. CPU Interface Files + +All time durations are in microseconds. + + cpu.stat + + A read-only flat-keyed file which exists on non-root cgroups. + + It reports the following six stats. + + usage_usec + user_usec + system_usec + nr_periods + nr_throttled + throttled_usec + + cpu.weight + + A read-write single value file which exists on non-root + cgroups. The default is "100". + + The weight in the range [1, 10000]. + + cpu.max + + A read-write two value file which exists on non-root cgroups. + The default is "max 100000". + + The maximum bandwidth limit. It's in the following format. + + $MAX $PERIOD + + which indicates that the group may consume upto $MAX in each + $PERIOD duration. "max" for $MAX indicates no limit. If only + one number is written, $MAX is updated. 
+
+  cpu.rt.max
+
+	[NOTE: The semantics of this file is still under discussion and the
+	interface hasn't been merged yet]
+
+	A read-write two value file which exists on all cgroups.
+	The default is "0 100000".
+
+	The maximum realtime runtime allocation. Over-committing
+	configurations are disallowed and process migrations are
+	rejected if not enough bandwidth is available. It's in the
+	following format.
+
+	  $MAX $PERIOD
+
+	which indicates that the group may consume up to $MAX in each
+	$PERIOD duration. If only one number is written, $MAX is
+	updated.
+
+
+5-2. Memory
+
+The "memory" controller regulates distribution of memory. Memory is
+stateful and implements both limit and protection models. Due to the
+intertwining between memory usage and reclaim pressure and the
+stateful nature of memory, the distribution model is relatively
+complex.
+
+While not completely water-tight, all major memory usages by a given
+cgroup are tracked so that the total memory consumption can be
+accounted and controlled to a reasonable extent. Currently, the
+following types of memory usages are tracked.
+
+- Userland memory - page cache and anonymous memory.
+
+- Kernel data structures such as dentries and inodes.
+
+- TCP socket buffers.
+
+The above list may expand in the future for better coverage.
+
+
+5-2-1. Memory Interface Files
+
+All memory amounts are in bytes. If a value which is not aligned to
+PAGE_SIZE is written, the value may be rounded up to the closest
+PAGE_SIZE multiple when read back.
+
+  memory.current
+
+	A read-only single value file which exists on non-root
+	cgroups.
+
+	The total amount of memory currently being used by the cgroup
+	and its descendants.
+
+  memory.low
+
+	A read-write single value file which exists on non-root
+	cgroups. The default is "0".
+
+	Best-effort memory protection. 
If the memory usages of a + cgroup and all its ancestors are below their low boundaries, + the cgroup's memory won't be reclaimed unless memory can be + reclaimed from unprotected cgroups. + + Putting more memory than generally available under this + protection is discouraged. + + memory.high + + A read-write single value file which exists on non-root + cgroups. The default is "max". + + Memory usage throttle limit. This is the main mechanism to + control memory usage of a cgroup. If a cgroup's usage goes + over the high boundary, the processes of the cgroup are + throttled and put under heavy reclaim pressure. + + Going over the high limit never invokes the OOM killer and + under extreme conditions the limit may be breached. + + memory.max + + A read-write single value file which exists on non-root + cgroups. The default is "max". + + Memory usage hard limit. This is the final protection + mechanism. If a cgroup's memory usage reaches this limit and + can't be reduced, the OOM killer is invoked in the cgroup. + Under certain circumstances, the usage may go over the limit + temporarily. + + This is the ultimate protection mechanism. As long as the + high limit is used and monitored properly, this limit's + utility is limited to providing the final safety net. + + memory.events + + A read-only flat-keyed file which exists on non-root cgroups. + The following entries are defined. Unless specified + otherwise, a value change in this file generates a file + modified event. + + low + + The number of times the cgroup is reclaimed due to + high memory pressure even though its usage is under + the low boundary. This usually indicates that the low + boundary is over-committed. + + high + + The number of times processes of the cgroup are + throttled and routed to perform direct memory reclaim + because the high memory boundary was exceeded. 
For a + cgroup whose memory usage is capped by the high limit + rather than global memory pressure, this event's + occurrences are expected. + + max + + The number of times the cgroup's memory usage was + about to go over the max boundary. If direct reclaim + fails to bring it down, the OOM killer is invoked. + + oom + + The number of times the OOM killer has been invoked in + the cgroup. This may not exactly match the number of + processes killed but should generally be close. + + +5-2-2. General Usage + +"memory.high" is the main mechanism to control memory usage. +Over-committing on high limit (sum of high limits > available memory) +and letting global memory pressure to distribute memory according to +usage is a viable strategy. + +Because breach of the high limit doesn't trigger the OOM killer but +throttles the offending cgroup, a management agent has ample +opportunities to monitor and take appropriate actions such as granting +more memory or terminating the workload. + +Determining whether a cgroup has enough memory is not trivial as +memory usage doesn't indicate whether the workload can benefit from +more memory. For example, a workload which writes data received from +network to a file can use all available memory but can also operate as +performant with a small amount of memory. A measure of memory +pressure - how much the workload is being impacted due to lack of +memory - is necessary to determine whether a workload needs more +memory; unfortunately, memory pressure monitoring mechanism isn't +implemented yet. + + +5-2-3. Memory Ownership + +A memory area is charged to the cgroup which instantiated it and stays +charged to the cgroup until the area is released. Migrating a process +to a different cgroup doesn't move the memory usages that it +instantiated while in the previous cgroup to the new cgroup. + +A memory area may be used by processes belonging to different cgroups. 
+
+To which cgroup the area will be charged is non-deterministic; however,
+over time, the memory area is likely to end up in a cgroup which has
+enough memory allowance to avoid high reclaim pressure.
+
+If a cgroup sweeps a considerable amount of memory which is expected
+to be accessed repeatedly by other cgroups, it may make sense to use
+POSIX_FADV_DONTNEED to relinquish the ownership of memory areas
+belonging to the affected files to ensure correct memory ownership.
+
+
+5-3. IO
+
+The "io" controller regulates the distribution of IO resources. This
+controller implements both weight based and absolute bandwidth or IOPS
+limit distribution; however, weight based distribution is available
+only if cfq-iosched is in use and neither scheme is available for
+blk-mq devices.
+
+
+5-3-1. IO Interface Files
+
+  io.stat
+
+	A read-only nested-keyed file which exists on non-root
+	cgroups.
+
+	Lines are keyed by $MAJ:$MIN device numbers and not ordered.
+	The following nested keys are defined.
+
+	  rbytes	Bytes read
+	  wbytes	Bytes written
+	  rios		Number of read IOs
+	  wios		Number of write IOs
+
+	An example read output follows.
+
+	  8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353
+	  8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252
+
+  io.weight
+
+	A read-write flat-keyed file which exists on non-root cgroups.
+	The default is "default 100".
+
+	The first line is the default weight applied to devices
+	without specific override. The rest are overrides keyed by
+	$MAJ:$MIN device numbers and not ordered. The weights are in
+	the range [1, 10000] and specify the relative amount of IO time
+	the cgroup can use in relation to its siblings.
+
+	The default weight can be updated by writing either "default
+	$WEIGHT" or simply "$WEIGHT". Overrides can be set by writing
+	"$MAJ:$MIN $WEIGHT" and unset by writing "$MAJ:$MIN default".
+
+	An example read output follows. 
+ + default 100 + 8:16 200 + 8:0 50 + + io.max + + A read-write nested-keyed file which exists on non-root + cgroups. + + BPS and IOPS based IO limit. Lines are keyed by $MAJ:$MIN + device numbers and not ordered. The following nested keys are + defined. + + rbps Max read bytes per second + wbps Max write bytes per second + riops Max read IO operations per second + wiops Max write IO operations per second + + When writing, any number of nested key-value pairs can be + specified in any order. "max" can be specified as the value + to remove a specific limit. If the same key is specified + multiple times, the outcome is undefined. + + BPS and IOPS are measured in each IO direction and IOs are + delayed if limit is reached. Temporary bursts are allowed. + + Setting read limit at 2M BPS and write at 120 IOPS for 8:16. + + echo "8:16 rbps=2097152 wiops=120" > io.max + + Reading returns the following. + + 8:16 rbps=2097152 wbps=max riops=max wiops=120 + + Write IOPS limit can be removed by writing the following. + + echo "8:16 wiops=max" > io.max + + Reading now returns the following. + + 8:16 rbps=2097152 wbps=max riops=max wiops=max + + +5-3-2. Writeback + +Page cache is dirtied through buffered writes and shared mmaps and +written asynchronously to the backing filesystem by the writeback +mechanism. Writeback sits between the memory and IO domains and +regulates the proportion of dirty memory by balancing dirtying and +write IOs. + +The io controller, in conjunction with the memory controller, +implements control of page cache writeback IOs. The memory controller +defines the memory domain that dirty memory ratio is calculated and +maintained for and the io controller defines the io domain which +writes out dirty pages for the memory domain. Both system-wide and +per-cgroup dirty memory states are examined and the more restrictive +of the two is enforced. + +cgroup writeback requires explicit support from the underlying +filesystem. 
Currently, cgroup writeback is implemented on ext2, ext4 +and btrfs. On other filesystems, all writeback IOs are attributed to +the root cgroup. + +There are inherent differences in memory and writeback management +which affects how cgroup ownership is tracked. Memory is tracked per +page while writeback per inode. For the purpose of writeback, an +inode is assigned to a cgroup and all IO requests to write dirty pages +from the inode are attributed to that cgroup. + +As cgroup ownership for memory is tracked per page, there can be pages +which are associated with different cgroups than the one the inode is +associated with. These are called foreign pages. The writeback +constantly keeps track of foreign pages and, if a particular foreign +cgroup becomes the majority over a certain period of time, switches +the ownership of the inode to that cgroup. + +While this model is enough for most use cases where a given inode is +mostly dirtied by a single cgroup even when the main writing cgroup +changes over time, use cases where multiple cgroups write to a single +inode simultaneously are not supported well. In such circumstances, a +significant portion of IOs are likely to be attributed incorrectly. +As memory controller assigns page ownership on the first use and +doesn't update it until the page is released, even if writeback +strictly follows page ownership, multiple cgroups dirtying overlapping +areas wouldn't work as expected. It's recommended to avoid such usage +patterns. + +The sysctl knobs which affect writeback behavior are applied to cgroup +writeback as follows. + + vm.dirty_background_ratio + vm.dirty_ratio + + These ratios apply the same to cgroup writeback with the + amount of available memory capped by limits imposed by the + memory controller and system-wide clean memory. 
+ + vm.dirty_background_bytes + vm.dirty_bytes + + For cgroup writeback, this is calculated into ratio against + total available memory and applied the same way as + vm.dirty[_background]_ratio. + + +P. Information on Kernel Programming + +This section contains kernel programming information in the areas +where interacting with cgroup is necessary. cgroup core and +controllers are not covered. + + +P-1. Filesystem Support for Writeback + +A filesystem can support cgroup writeback by updating +address_space_operations->writepage[s]() to annotate bio's using the +following two functions. + + wbc_init_bio(@wbc, @bio) + + Should be called for each bio carrying writeback data and + associates the bio with the inode's owner cgroup. Can be + called anytime between bio allocation and submission. + + wbc_account_io(@wbc, @page, @bytes) + + Should be called for each data segment being written out. + While this function doesn't care exactly when it's called + during the writeback session, it's the easiest and most + natural to call it as data segments are added to a bio. + +With writeback bio's annotated, cgroup support can be enabled per +super_block by setting SB_I_CGROUPWB in ->s_iflags. This allows for +selective disabling of cgroup writeback support which is helpful when +certain filesystem features, e.g. journaled data mode, are +incompatible. + +wbc_init_bio() binds the specified bio to its cgroup. Depending on +the configuration, the bio may be executed at a lower priority and if +the writeback session is holding shared resources, e.g. a journal +entry, may lead to priority inversion. There is no one easy solution +for the problem. Filesystems can try to work around specific problem +cases by skipping wbc_init_bio() or using bio_associate_blkcg() +directly. + + +D. Deprecated v1 Core Features + +- Multiple hierarchies including named ones are not supported. + +- All mount options and remounting are not supported. 
+
+- The "tasks" file is removed and "cgroup.procs" is not sorted.
+
+- "cgroup.clone_children" is removed.
+
+- /proc/cgroups is meaningless for v2. Use "cgroup.controllers" file
+  at the root instead.
+
+
+R. Issues with v1 and Rationales for v2
+
+R-1. Multiple Hierarchies
+
+cgroup v1 allowed an arbitrary number of hierarchies and each
+hierarchy could host any number of controllers. While this seemed to
+provide a high level of flexibility, it wasn't useful in practice.
+
+For example, as there is only one instance of each controller, utility
+type controllers such as freezer which can be useful in all
+hierarchies could only be used in one. The issue is exacerbated by
+the fact that controllers couldn't be moved to another hierarchy once
+hierarchies were populated. Another issue was that all controllers
+bound to a hierarchy were forced to have exactly the same view of the
+hierarchy. It wasn't possible to vary the granularity depending on
+the specific controller.
+
+In practice, these issues heavily limited which controllers could be
+put on the same hierarchy and most configurations resorted to putting
+each controller on its own hierarchy. Only closely related ones, such
+as the cpu and cpuacct controllers, made sense to be put on the same
+hierarchy. This often meant that userland ended up managing multiple
+similar hierarchies repeating the same steps on each hierarchy
+whenever a hierarchy management operation was necessary.
+
+Furthermore, support for multiple hierarchies came at a steep cost.
+It greatly complicated cgroup core implementation but more importantly
+the support for multiple hierarchies restricted how cgroup could be
+used in general and what controllers were able to do.
+
+There was no limit on how many hierarchies there might be, which meant
+that a thread's cgroup membership couldn't be described in finite
+length. 
The key might contain any number of entries and was unlimited +in length, which made it highly awkward to manipulate and led to +addition of controllers which existed only to identify membership, +which in turn exacerbated the original problem of proliferating number +of hierarchies. + +Also, as a controller couldn't have any expectation regarding the +topologies of hierarchies other controllers might be on, each +controller had to assume that all other controllers were attached to +completely orthogonal hierarchies. This made it impossible, or at +least very cumbersome, for controllers to cooperate with each other. + +In most use cases, putting controllers on hierarchies which are +completely orthogonal to each other isn't necessary. What usually is +called for is the ability to have differing levels of granularity +depending on the specific controller. In other words, hierarchy may +be collapsed from leaf towards root when viewed from specific +controllers. For example, a given configuration might not care about +how memory is distributed beyond a certain level while still wanting +to control how CPU cycles are distributed. + + +R-2. Thread Granularity + +cgroup v1 allowed threads of a process to belong to different cgroups. +This didn't make sense for some controllers and those controllers +ended up implementing different ways to ignore such situations but +much more importantly it blurred the line between API exposed to +individual applications and system management interface. + +Generally, in-process knowledge is available only to the process +itself; thus, unlike service-level organization of processes, +categorizing threads of a process requires active participation from +the application which owns the target process. + +cgroup v1 had an ambiguously defined delegation model which got abused +in combination with thread granularity. 
cgroups were delegated to +individual applications so that they can create and manage their own +sub-hierarchies and control resource distributions along them. This +effectively raised cgroup to the status of a syscall-like API exposed +to lay programs. + +First of all, cgroup has a fundamentally inadequate interface to be +exposed this way. For a process to access its own knobs, it has to +extract the path on the target hierarchy from /proc/self/cgroup, +construct the path by appending the name of the knob to the path, open +and then read and/or write to it. This is not only extremely clunky +and unusual but also inherently racy. There is no conventional way to +define transaction across the required steps and nothing can guarantee +that the process would actually be operating on its own sub-hierarchy. + +cgroup controllers implemented a number of knobs which would never be +accepted as public APIs because they were just adding control knobs to +system-management pseudo filesystem. cgroup ended up with interface +knobs which were not properly abstracted or refined and directly +revealed kernel internal details. These knobs got exposed to +individual applications through the ill-defined delegation mechanism +effectively abusing cgroup as a shortcut to implementing public APIs +without going through the required scrutiny. + +This was painful for both userland and kernel. Userland ended up with +misbehaving and poorly abstracted interfaces and kernel exposing and +locked into constructs inadvertently. + + +R-3. Competition Between Inner Nodes and Threads + +cgroup v1 allowed threads to be in any cgroups which created an +interesting problem where threads belonging to a parent cgroup and its +children cgroups competed for resources. This was nasty as two +different types of entities competed and there was no obvious way to +settle it. Different controllers did different things. 
+ +The cpu controller considered threads and cgroups as equivalents and +mapped nice levels to cgroup weights. This worked for some cases but +fell flat when children wanted to be allocated specific ratios of CPU +cycles and the number of internal threads fluctuated - the ratios +constantly changed as the number of competing entities fluctuated. +There also were other issues. The mapping from nice level to weight +wasn't obvious or universal, and there were various other knobs which +simply weren't available for threads. + +The io controller implicitly created a hidden leaf node for each +cgroup to host the threads. The hidden leaf had its own copies of all +the knobs with "leaf_" prefixed. While this allowed equivalent +control over internal threads, it was with serious drawbacks. It +always added an extra layer of nesting which wouldn't be necessary +otherwise, made the interface messy and significantly complicated the +implementation. + +The memory controller didn't have a way to control what happened +between internal tasks and child cgroups and the behavior was not +clearly defined. There were attempts to add ad-hoc behaviors and +knobs to tailor the behavior to specific workloads which would have +led to problems extremely difficult to resolve in the long term. + +Multiple controllers struggled with internal tasks and came up with +different ways to deal with it; unfortunately, all the approaches were +severely flawed and, furthermore, the widely different behaviors +made cgroup as a whole highly inconsistent. + +This clearly is a problem which needs to be addressed from cgroup core +in a uniform way. + + +R-4. Other Interface Issues + +cgroup v1 grew without oversight and developed a large number of +idiosyncrasies and inconsistencies. One issue on the cgroup core side +was how an empty cgroup was notified - a userland helper binary was +forked and executed for each event. The event delivery wasn't +recursive or delegatable. 
The limitations of the mechanism also led +to in-kernel event delivery filtering mechanism further complicating +the interface. + +Controller interfaces were problematic too. An extreme example is +controllers completely ignoring hierarchical organization and treating +all cgroups as if they were all located directly under the root +cgroup. Some controllers exposed a large amount of inconsistent +implementation details to userland. + +There also was no consistency across controllers. When a new cgroup +was created, some controllers defaulted to not imposing extra +restrictions while others disallowed any resource usage until +explicitly configured. Configuration knobs for the same type of +control used widely differing naming schemes and formats. Statistics +and information knobs were named arbitrarily and used different +formats and units even in the same controller. + +cgroup v2 establishes common conventions where appropriate and updates +controllers so that they expose minimal and consistent interfaces. + + +R-5. Controller Issues and Remedies + +R-5-1. Memory + +The original lower boundary, the soft limit, is defined as a limit +that is per default unset. As a result, the set of cgroups that +global reclaim prefers is opt-in, rather than opt-out. The costs for +optimizing these mostly negative lookups are so high that the +implementation, despite its enormous size, does not even provide the +basic desirable behavior. First off, the soft limit has no +hierarchical meaning. All configured groups are organized in a global +rbtree and treated like equal peers, regardless where they are located +in the hierarchy. This makes subtree delegation impossible. Second, +the soft limit reclaim pass is so aggressive that it not just +introduces high allocation latencies into the system, but also impacts +system performance due to overreclaim, to the point where the feature +becomes self-defeating. 
+ +The memory.low boundary on the other hand is a top-down allocated +reserve. A cgroup enjoys reclaim protection when it and all its +ancestors are below their low boundaries, which makes delegation of +subtrees possible. Secondly, new cgroups have no reserve per default +and in the common case most cgroups are eligible for the preferred +reclaim pass. This allows the new low boundary to be efficiently +implemented with just a minor addition to the generic reclaim code, +without the need for out-of-band data structures and reclaim passes. +Because the generic reclaim code considers all cgroups except for the +ones running low in the preferred first reclaim pass, overreclaim of +individual groups is eliminated as well, resulting in much better +overall workload performance. + +The original high boundary, the hard limit, is defined as a strict +limit that can not budge, even if the OOM killer has to be called. +But this generally goes against the goal of making the most out of the +available memory. The memory consumption of workloads varies during +runtime, and that requires users to overcommit. But doing that with a +strict upper limit requires either a fairly accurate prediction of the +working set size or adding slack to the limit. Since working set size +estimation is hard and error prone, and getting it wrong results in +OOM kills, most users tend to err on the side of a looser limit and +end up wasting precious resources. + +The memory.high boundary on the other hand can be set much more +conservatively. When hit, it throttles allocations by forcing them +into direct reclaim to work off the excess, but it never invokes the +OOM killer. As a result, a high boundary that is chosen too +aggressively will not terminate the processes, but instead it will +lead to gradual performance degradation. The user can monitor this +and make corrections until the minimal memory footprint that still +gives acceptable performance is found. 
+ +In extreme cases, with many concurrent allocations and a complete +breakdown of reclaim progress within the group, the high boundary can +be exceeded. But even then it's mostly better to satisfy the +allocation from the slack available in other groups or the rest of the +system than killing the group. Otherwise, memory.max is there to +limit this type of spillover and ultimately contain buggy or even +malicious applications. diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt deleted file mode 100644 index 781b1d475bcfc9666d0d3dad80832c74adb48b22..0000000000000000000000000000000000000000 --- a/Documentation/cgroups/unified-hierarchy.txt +++ /dev/null @@ -1,647 +0,0 @@ - -Cgroup unified hierarchy - -April, 2014 Tejun Heo - -This document describes the changes made by unified hierarchy and -their rationales. It will eventually be merged into the main cgroup -documentation. - -CONTENTS - -1. Background -2. Basic Operation - 2-1. Mounting - 2-2. cgroup.subtree_control - 2-3. cgroup.controllers -3. Structural Constraints - 3-1. Top-down - 3-2. No internal tasks -4. Delegation - 4-1. Model of delegation - 4-2. Common ancestor rule -5. Other Changes - 5-1. [Un]populated Notification - 5-2. Other Core Changes - 5-3. Controller File Conventions - 5-3-1. Format - 5-3-2. Control Knobs - 5-4. Per-Controller Changes - 5-4-1. io - 5-4-2. cpuset - 5-4-3. memory -6. Planned Changes - 6-1. CAP for resource control - - -1. Background - -cgroup allows an arbitrary number of hierarchies and each hierarchy -can host any number of controllers. While this seems to provide a -high level of flexibility, it isn't quite useful in practice. - -For example, as there is only one instance of each controller, utility -type controllers such as freezer which can be useful in all -hierarchies can only be used in one. The issue is exacerbated by the -fact that controllers can't be moved around once hierarchies are -populated. 
Another issue is that all controllers bound to a hierarchy -are forced to have exactly the same view of the hierarchy. It isn't -possible to vary the granularity depending on the specific controller. - -In practice, these issues heavily limit which controllers can be put -on the same hierarchy and most configurations resort to putting each -controller on its own hierarchy. Only closely related ones, such as -the cpu and cpuacct controllers, make sense to put on the same -hierarchy. This often means that userland ends up managing multiple -similar hierarchies repeating the same steps on each hierarchy -whenever a hierarchy management operation is necessary. - -Unfortunately, support for multiple hierarchies comes at a steep cost. -Internal implementation in cgroup core proper is dazzlingly -complicated but more importantly the support for multiple hierarchies -restricts how cgroup is used in general and what controllers can do. - -There's no limit on how many hierarchies there may be, which means -that a task's cgroup membership can't be described in finite length. -The key may contain any varying number of entries and is unlimited in -length, which makes it highly awkward to handle and leads to addition -of controllers which exist only to identify membership, which in turn -exacerbates the original problem. - -Also, as a controller can't have any expectation regarding what shape -of hierarchies other controllers would be on, each controller has to -assume that all other controllers are operating on completely -orthogonal hierarchies. This makes it impossible, or at least very -cumbersome, for controllers to cooperate with each other. - -In most use cases, putting controllers on hierarchies which are -completely orthogonal to each other isn't necessary. What usually is -called for is the ability to have differing levels of granularity -depending on the specific controller. 
In other words, hierarchy may -be collapsed from leaf towards root when viewed from specific -controllers. For example, a given configuration might not care about -how memory is distributed beyond a certain level while still wanting -to control how CPU cycles are distributed. - -Unified hierarchy is the next version of cgroup interface. It aims to -address the aforementioned issues by having more structure while -retaining enough flexibility for most use cases. Various other -general and controller-specific interface issues are also addressed in -the process. - - -2. Basic Operation - -2-1. Mounting - -Currently, unified hierarchy can be mounted with the following mount -command. Note that this is still under development and scheduled to -change soon. - - mount -t cgroup -o __DEVEL__sane_behavior cgroup $MOUNT_POINT - -All controllers which support the unified hierarchy and are not bound -to other hierarchies are automatically bound to unified hierarchy and -show up at the root of it. Controllers which are enabled only in the -root of unified hierarchy can be bound to other hierarchies. This -allows mixing unified hierarchy with the traditional multiple -hierarchies in a fully backward compatible way. - -A controller can be moved across hierarchies only after the controller -is no longer referenced in its current hierarchy. Because per-cgroup -controller states are destroyed asynchronously and controllers may -have lingering references, a controller may not show up immediately on -the unified hierarchy after the final umount of the previous -hierarchy. Similarly, a controller should be fully disabled to be -moved out of the unified hierarchy and it may take some time for the -disabled controller to become available for other hierarchies; -furthermore, due to dependencies among controllers, other controllers -may need to be disabled too. 
- -While useful for development and manual configurations, dynamically -moving controllers between the unified and other hierarchies is -strongly discouraged for production use. It is recommended to decide -the hierarchies and controller associations before starting using the -controllers. - - -2-2. cgroup.subtree_control - -All cgroups on unified hierarchy have a "cgroup.subtree_control" file -which governs which controllers are enabled on the children of the -cgroup. Let's assume a hierarchy like the following. - - root - A - B - C - \ D - -root's "cgroup.subtree_control" file determines which controllers are -enabled on A. A's on B. B's on C and D. This coincides with the -fact that controllers on the immediate sub-level are used to -distribute the resources of the parent. In fact, it's natural to -assume that resource control knobs of a child belong to its parent. -Enabling a controller in a "cgroup.subtree_control" file declares that -distribution of the respective resources of the cgroup will be -controlled. Note that this means that controller enable states are -shared among siblings. - -When read, the file contains a space-separated list of currently -enabled controllers. A write to the file should contain a -space-separated list of controllers with '+' or '-' prefixed (without -the quotes). Controllers prefixed with '+' are enabled and '-' -disabled. If a controller is listed multiple times, the last entry -wins. The specific operations are executed atomically - either all -succeed or fail. - - -2-3. cgroup.controllers - -Read-only "cgroup.controllers" file contains a space-separated list of -controllers which can be enabled in the cgroup's -"cgroup.subtree_control" file. - -In the root cgroup, this lists controllers which are not bound to -other hierarchies and the content changes as controllers are bound to -and unbound from other hierarchies. 
- -In non-root cgroups, the content of this file equals that of the -parent's "cgroup.subtree_control" file as only controllers enabled -from the parent can be used in its children. - - -3. Structural Constraints - -3-1. Top-down - -As it doesn't make sense to nest control of an uncontrolled resource, -all non-root "cgroup.subtree_control" files can only contain -controllers which are enabled in the parent's "cgroup.subtree_control" -file. A controller can be enabled only if the parent has the -controller enabled and a controller can't be disabled if one or more -children have it enabled. - - -3-2. No internal tasks - -One long-standing issue that cgroup faces is the competition between -tasks belonging to the parent cgroup and its children cgroups. This -is inherently nasty as two different types of entities compete and -there is no agreed-upon obvious way to handle it. Different -controllers are doing different things. - -The cpu controller considers tasks and cgroups as equivalents and maps -nice levels to cgroup weights. This works for some cases but falls -flat when children should be allocated specific ratios of CPU cycles -and the number of internal tasks fluctuates - the ratios constantly -change as the number of competing entities fluctuates. There also are -other issues. The mapping from nice level to weight isn't obvious or -universal, and there are various other knobs which simply aren't -available for tasks. - -The io controller implicitly creates a hidden leaf node for each -cgroup to host the tasks. The hidden leaf has its own copies of all -the knobs with "leaf_" prefixed. While this allows equivalent control -over internal tasks, it's with serious drawbacks. It always adds an -extra layer of nesting which may not be necessary, makes the interface -messy and significantly complicates the implementation. 
- -The memory controller currently doesn't have a way to control what -happens between internal tasks and child cgroups and the behavior is -not clearly defined. There have been attempts to add ad-hoc behaviors -and knobs to tailor the behavior to specific workloads. Continuing -this direction will lead to problems which will be extremely difficult -to resolve in the long term. - -Multiple controllers struggle with internal tasks and came up with -different ways to deal with it; unfortunately, all the approaches in -use now are severely flawed and, furthermore, the widely different -behaviors make cgroup as whole highly inconsistent. - -It is clear that this is something which needs to be addressed from -cgroup core proper in a uniform way so that controllers don't need to -worry about it and cgroup as a whole shows a consistent and logical -behavior. To achieve that, unified hierarchy enforces the following -structural constraint: - - Except for the root, only cgroups which don't contain any task may - have controllers enabled in their "cgroup.subtree_control" files. - -Combined with other properties, this guarantees that, when a -controller is looking at the part of the hierarchy which has it -enabled, tasks are always only on the leaves. This rules out -situations where child cgroups compete against internal tasks of the -parent. - -There are two things to note. Firstly, the root cgroup is exempt from -the restriction. Root contains tasks and anonymous resource -consumption which can't be associated with any other cgroup and -requires special treatment from most controllers. How resource -consumption in the root cgroup is governed is up to each controller. - -Secondly, the restriction doesn't take effect if there is no enabled -controller in the cgroup's "cgroup.subtree_control" file. This is -important as otherwise it wouldn't be possible to create children of a -populated cgroup. 
To control resource distribution of a cgroup, the -cgroup must create children and transfer all its tasks to the children -before enabling controllers in its "cgroup.subtree_control" file. - - -4. Delegation - -4-1. Model of delegation - -A cgroup can be delegated to a less privileged user by granting write -access of the directory and its "cgroup.procs" file to the user. Note -that the resource control knobs in a given directory concern the -resources of the parent and thus must not be delegated along with the -directory. - -Once delegated, the user can build sub-hierarchy under the directory, -organize processes as it sees fit and further distribute the resources -it got from the parent. The limits and other settings of all resource -controllers are hierarchical and regardless of what happens in the -delegated sub-hierarchy, nothing can escape the resource restrictions -imposed by the parent. - -Currently, cgroup doesn't impose any restrictions on the number of -cgroups in or nesting depth of a delegated sub-hierarchy; however, -this may in the future be limited explicitly. - - -4-2. Common ancestor rule - -On the unified hierarchy, to write to a "cgroup.procs" file, in -addition to the usual write permission to the file and uid match, the -writer must also have write access to the "cgroup.procs" file of the -common ancestor of the source and destination cgroups. This prevents -delegatees from smuggling processes across disjoint sub-hierarchies. - -Let's say cgroups C0 and C1 have been delegated to user U0 who created -C00, C01 under C0 and C10 under C1 as follows. - - ~~~~~~~~~~~~~ - C0 - C00 - ~ cgroup ~ \ C01 - ~ hierarchy ~ - ~~~~~~~~~~~~~ - C1 - C10 - -C0 and C1 are separate entities in terms of resource distribution -regardless of their relative positions in the hierarchy. The -resources the processes under C0 are entitled to are controlled by -C0's ancestors and may be completely different from C1. 
It's clear -that the intention of delegating C0 to U0 is allowing U0 to organize -the processes under C0 and further control the distribution of C0's -resources. - -On traditional hierarchies, if a task has write access to "tasks" or -"cgroup.procs" file of a cgroup and its uid agrees with the target, it -can move the target to the cgroup. In the above example, U0 will not -only be able to move processes in each sub-hierarchy but also across -the two sub-hierarchies, effectively allowing it to violate the -organizational and resource restrictions implied by the hierarchical -structure above C0 and C1. - -On the unified hierarchy, let's say U0 wants to write the pid of a -process which has a matching uid and is currently in C10 into -"C00/cgroup.procs". U0 obviously has write access to the file and -migration permission on the process; however, the common ancestor of -the source cgroup C10 and the destination cgroup C00 is above the -points of delegation and U0 would not have write access to its -"cgroup.procs" and thus be denied with -EACCES. - - -5. Other Changes - -5-1. [Un]populated Notification - -cgroup users often need a way to determine when a cgroup's -subhierarchy becomes empty so that it can be cleaned up. cgroup -currently provides release_agent for it; unfortunately, this mechanism -is riddled with issues. - -- It delivers events by forking and execing a userland binary - specified as the release_agent. This is a long deprecated method of - notification delivery. It's extremely heavy, slow and cumbersome to - integrate with larger infrastructure. - -- There is single monitoring point at the root. There's no way to - delegate management of a subtree. - -- The event isn't recursive. It triggers when a cgroup doesn't have - any tasks or child cgroups. Events for internal nodes trigger only - after all children are removed. This again makes it impossible to - delegate management of a subtree. - -- Events are filtered from the kernel side. 
A "notify_on_release" - file is used to subscribe to or suppress release events. This is - unnecessarily complicated and probably done this way because event - delivery itself was expensive. - -Unified hierarchy implements "populated" field in "cgroup.events" -interface file which can be used to monitor whether the cgroup's -subhierarchy has tasks in it or not. Its value is 0 if there is no -task in the cgroup and its descendants; otherwise, 1. poll and -[id]notify events are triggered when the value changes. - -This is significantly lighter and simpler and trivially allows -delegating management of subhierarchy - subhierarchy monitoring can -block further propagation simply by putting itself or another process -in the subhierarchy and monitor events that it's interested in from -there without interfering with monitoring higher in the tree. - -In unified hierarchy, the release_agent mechanism is no longer -supported and the interface files "release_agent" and -"notify_on_release" do not exist. - - -5-2. Other Core Changes - -- None of the mount options is allowed. - -- remount is disallowed. - -- rename(2) is disallowed. - -- The "tasks" file is removed. Everything should at process - granularity. Use the "cgroup.procs" file instead. - -- The "cgroup.procs" file is not sorted. pids will be unique unless - they got recycled in-between reads. - -- The "cgroup.clone_children" file is removed. - -- /proc/PID/cgroup keeps reporting the cgroup that a zombie belonged - to before exiting. If the cgroup is removed before the zombie is - reaped, " (deleted)" is appeneded to the path. - - -5-3. Controller File Conventions - -5-3-1. Format - -In general, all controller files should be in one of the following -formats whenever possible. - -- Values only files - - VAL0 VAL1...\n - -- Flat keyed files - - KEY0 VAL0\n - KEY1 VAL1\n - ... - -- Nested keyed files - - KEY0 SUB_KEY0=VAL00 SUB_KEY1=VAL01... - KEY1 SUB_KEY0=VAL10 SUB_KEY1=VAL11... - ... 
- -For a writeable file, the format for writing should generally match -reading; however, controllers may allow omitting later fields or -implement restricted shortcuts for most common use cases. - -For both flat and nested keyed files, only the values for a single key -can be written at a time. For nested keyed files, the sub key pairs -may be specified in any order and not all pairs have to be specified. - - -5-3-2. Control Knobs - -- Settings for a single feature should generally be implemented in a - single file. - -- In general, the root cgroup should be exempt from resource control - and thus shouldn't have resource control knobs. - -- If a controller implements ratio based resource distribution, the - control knob should be named "weight" and have the range [1, 10000] - and 100 should be the default value. The values are chosen to allow - enough and symmetric bias in both directions while keeping it - intuitive (the default is 100%). - -- If a controller implements an absolute resource guarantee and/or - limit, the control knobs should be named "min" and "max" - respectively. If a controller implements best effort resource - gurantee and/or limit, the control knobs should be named "low" and - "high" respectively. - - In the above four control files, the special token "max" should be - used to represent upward infinity for both reading and writing. - -- If a setting has configurable default value and specific overrides, - the default settings should be keyed with "default" and appear as - the first entry in the file. Specific entries can use "default" as - its value to indicate inheritance of the default value. - -- For events which are not very high frequency, an interface file - "events" should be created which lists event key value pairs. - Whenever a notifiable event happens, file modified event should be - generated on the file. - - -5-4. Per-Controller Changes - -5-4-1. io - -- blkio is renamed to io. The interface is overhauled anyway. 
The - new name is more in line with the other two major controllers, cpu - and memory, and better suited given that it may be used for cgroup - writeback without involving block layer. - -- Everything including stat is always hierarchical making separate - recursive stat files pointless and, as no internal node can have - tasks, leaf weights are meaningless. The operation model is - simplified and the interface is overhauled accordingly. - - io.stat - - The stat file. The reported stats are from the point where - bio's are issued to request_queue. The stats are counted - independent of which policies are enabled. Each line in the - file follows the following format. More fields may later be - added at the end. - - $MAJ:$MIN rbytes=$RBYTES wbytes=$WBYTES rios=$RIOS wrios=$WIOS - - io.weight - - The weight setting, currently only available and effective if - cfq-iosched is in use for the target device. The weight is - between 1 and 10000 and defaults to 100. The first line - always contains the default weight in the following format to - use when per-device setting is missing. - - default $WEIGHT - - Subsequent lines list per-device weights of the following - format. - - $MAJ:$MIN $WEIGHT - - Writing "$WEIGHT" or "default $WEIGHT" changes the default - setting. Writing "$MAJ:$MIN $WEIGHT" sets per-device weight - while "$MAJ:$MIN default" clears it. - - This file is available only on non-root cgroups. - - io.max - - The maximum bandwidth and/or iops setting, only available if - blk-throttle is enabled. The file is of the following format. - - $MAJ:$MIN rbps=$RBPS wbps=$WBPS riops=$RIOPS wiops=$WIOPS - - ${R|W}BPS are read/write bytes per second and ${R|W}IOPS are - read/write IOs per second. "max" indicates no limit. Writing - to the file follows the same format but the individual - settings may be omitted or specified in any order. - - This file is available only on non-root cgroups. - - -5-4-2. 
cpuset - -- Tasks are kept in empty cpusets after hotplug and take on the masks - of the nearest non-empty ancestor, instead of being moved to it. - -- A task can be moved into an empty cpuset, and again it takes on the - masks of the nearest non-empty ancestor. - - -5-4-3. memory - -- use_hierarchy is on by default and the cgroup file for the flag is - not created. - -- The original lower boundary, the soft limit, is defined as a limit - that is per default unset. As a result, the set of cgroups that - global reclaim prefers is opt-in, rather than opt-out. The costs - for optimizing these mostly negative lookups are so high that the - implementation, despite its enormous size, does not even provide the - basic desirable behavior. First off, the soft limit has no - hierarchical meaning. All configured groups are organized in a - global rbtree and treated like equal peers, regardless where they - are located in the hierarchy. This makes subtree delegation - impossible. Second, the soft limit reclaim pass is so aggressive - that it not just introduces high allocation latencies into the - system, but also impacts system performance due to overreclaim, to - the point where the feature becomes self-defeating. - - The memory.low boundary on the other hand is a top-down allocated - reserve. A cgroup enjoys reclaim protection when it and all its - ancestors are below their low boundaries, which makes delegation of - subtrees possible. Secondly, new cgroups have no reserve per - default and in the common case most cgroups are eligible for the - preferred reclaim pass. This allows the new low boundary to be - efficiently implemented with just a minor addition to the generic - reclaim code, without the need for out-of-band data structures and - reclaim passes. 
Because the generic reclaim code considers all - cgroups except for the ones running low in the preferred first - reclaim pass, overreclaim of individual groups is eliminated as - well, resulting in much better overall workload performance. - -- The original high boundary, the hard limit, is defined as a strict - limit that can not budge, even if the OOM killer has to be called. - But this generally goes against the goal of making the most out of - the available memory. The memory consumption of workloads varies - during runtime, and that requires users to overcommit. But doing - that with a strict upper limit requires either a fairly accurate - prediction of the working set size or adding slack to the limit. - Since working set size estimation is hard and error prone, and - getting it wrong results in OOM kills, most users tend to err on the - side of a looser limit and end up wasting precious resources. - - The memory.high boundary on the other hand can be set much more - conservatively. When hit, it throttles allocations by forcing them - into direct reclaim to work off the excess, but it never invokes the - OOM killer. As a result, a high boundary that is chosen too - aggressively will not terminate the processes, but instead it will - lead to gradual performance degradation. The user can monitor this - and make corrections until the minimal memory footprint that still - gives acceptable performance is found. - - In extreme cases, with many concurrent allocations and a complete - breakdown of reclaim progress within the group, the high boundary - can be exceeded. But even then it's mostly better to satisfy the - allocation from the slack available in other groups or the rest of - the system than killing the group. Otherwise, memory.max is there - to limit this type of spillover and ultimately contain buggy or even - malicious applications. - -- The original control file names are unwieldy and inconsistent in - many different ways. 
For example, the upper boundary hit count is - exported in the memory.failcnt file, but an OOM event count has to - be manually counted by listening to memory.oom_control events, and - lower boundary / soft limit events have to be counted by first - setting a threshold for that value and then counting those events. - Also, usage and limit files encode their units in the filename. - That makes the filenames very long, even though this is not - information that a user needs to be reminded of every time they type - out those names. - - To address these naming issues, as well as to signal clearly that - the new interface carries a new configuration model, the naming - conventions in it necessarily differ from the old interface. - -- The original limit files indicate the state of an unset limit with a - Very High Number, and a configured limit can be unset by echoing -1 - into those files. But that very high number is implementation and - architecture dependent and not very descriptive. And while -1 can - be understood as an underflow into the highest possible value, -2 or - -10M etc. do not work, so it's not consistent. - - memory.low, memory.high, and memory.max will use the string "max" to - indicate and set the highest possible value. - -6. Planned Changes - -6-1. CAP for resource control - -Unified hierarchy will require one of the capabilities(7), which is -yet to be decided, for all resource control related knobs. Process -organization operations - creation of sub-cgroups and migration of -processes in sub-hierarchies may be delegated by changing the -ownership and/or permissions on the cgroup directory and -"cgroup.procs" interface file; however, all operations which affect -resource control - writes to a "cgroup.subtree_control" file or any -controller-specific knobs - will require an explicit CAP privilege. - -This, in part, is to prevent the cgroup interface from being -inadvertently promoted to programmable API used by non-privileged -binaries. 
cgroup exposes various aspects of the system in ways which -aren't properly abstracted for direct consumption by regular programs. -This is an administration interface much closer to sysctl knobs than -system calls. Even the basic access model, being filesystem path -based, isn't suitable for direct consumption. There's no way to -access "my cgroup" in a race-free way or make multiple operations -atomic against migration to another cgroup. - -Another aspect is that, for better or for worse, the cgroup interface -goes through far less scrutiny than regular interfaces for -unprivileged userland. The upside is that cgroup is able to expose -useful features which may not be suitable for general consumption in a -reasonable time frame. It provides a relatively short path between -internal details and userland-visible interface. Of course, this -shortcut comes with high risk. We go through what we go through for -general kernel APIs for good reasons. It may end up leaking internal -details in a way which can exert significant pain by locking the -kernel into a contract that can't be maintained in a reasonable -manner. - -Also, due to the specific nature, cgroup and its controllers don't -tend to attract attention from a wide scope of developers. cgroup's -short history is already fraught with severely mis-designed -interfaces, unnecessary commitments to and exposing of internal -details, broken and dangerous implementations of various features. - -Keeping cgroup as an administration interface is both advantageous for -its role and imperative given its nature. Some of the cgroup features -may make sense for unprivileged access. If deemed justified, those -must be further abstracted and implemented as a different interface, -be it a system call or process-private filesystem, and survive through -the scrutiny that any interface for general consumption is required to -go through. 
- -Requiring CAP is not a complete solution but should serve as a -significant deterrent against spraying cgroup usages in non-privileged -programs. diff --git a/Documentation/devicetree/bindings/goldfish/audio.txt b/Documentation/devicetree/bindings/goldfish/audio.txt new file mode 100644 index 0000000000000000000000000000000000000000..d043fda433bac51e42b99bb464ee7ba9c3a689db --- /dev/null +++ b/Documentation/devicetree/bindings/goldfish/audio.txt @@ -0,0 +1,17 @@ +Android Goldfish Audio + +Android goldfish audio device generated by android emulator. + +Required properties: + +- compatible : should contain "google,goldfish-audio" to match emulator +- reg : +- interrupts : + +Example: + + goldfish_audio@9030000 { + compatible = "google,goldfish-audio"; + reg = <0x9030000 0x100>; + interrupts = <0x4>; + }; diff --git a/Documentation/devicetree/bindings/goldfish/battery.txt b/Documentation/devicetree/bindings/goldfish/battery.txt new file mode 100644 index 0000000000000000000000000000000000000000..4fb613933214adb299a4a57ce719d4de2981083f --- /dev/null +++ b/Documentation/devicetree/bindings/goldfish/battery.txt @@ -0,0 +1,17 @@ +Android Goldfish Battery + +Android goldfish battery device generated by android emulator. + +Required properties: + +- compatible : should contain "google,goldfish-battery" to match emulator +- reg : +- interrupts : + +Example: + + goldfish_battery@9020000 { + compatible = "google,goldfish-battery"; + reg = <0x9020000 0x1000>; + interrupts = <0x3>; + }; diff --git a/Documentation/devicetree/bindings/goldfish/events.txt b/Documentation/devicetree/bindings/goldfish/events.txt new file mode 100644 index 0000000000000000000000000000000000000000..5babf46317a4e46e55d00f57c35e21caba4a6c78 --- /dev/null +++ b/Documentation/devicetree/bindings/goldfish/events.txt @@ -0,0 +1,17 @@ +Android Goldfish Events Keypad + +Android goldfish events keypad device generated by android emulator. 
+ +Required properties: + +- compatible : should contain "google,goldfish-events-keypad" to match emulator +- reg : +- interrupts : + +Example: + + goldfish-events@9040000 { + compatible = "google,goldfish-events-keypad"; + reg = <0x9040000 0x1000>; + interrupts = <0x5>; + }; diff --git a/Documentation/devicetree/bindings/goldfish/tty.txt b/Documentation/devicetree/bindings/goldfish/tty.txt new file mode 100644 index 0000000000000000000000000000000000000000..82648278da774ebba0a78723665cceaeabcb60a4 --- /dev/null +++ b/Documentation/devicetree/bindings/goldfish/tty.txt @@ -0,0 +1,17 @@ +Android Goldfish TTY + +Android goldfish tty device generated by android emulator. + +Required properties: + +- compatible : should contain "google,goldfish-tty" to match emulator +- reg : +- interrupts : + +Example: + + goldfish_tty@1f004000 { + compatible = "google,goldfish-tty"; + reg = <0x1f004000 0x1000>; + interrupts = <0xc>; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt index a9a5fe19ff2a037db14de181280196cce7745d3d..ec9d6568270261707c0fdd26f83daed2c734d11c 100644 --- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt +++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt @@ -12,6 +12,11 @@ Required properties: - vref-supply: The regulator supply ADC reference voltage. - #io-channel-cells: Should be 1, see ../iio-bindings.txt +Optional properties: +- resets: Must contain an entry for each entry in reset-names if need support + this option. See ../reset/reset.txt for details. +- reset-names: Must include the name "saradc-apb". 
+ Example: saradc: saradc@2006c000 { compatible = "rockchip,saradc"; @@ -19,6 +24,8 @@ Example: interrupts = ; clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; clock-names = "saradc", "apb_pclk"; + resets = <&cru SRST_SARADC>; + reset-names = "saradc-apb"; #io-channel-cells = <1>; vref-supply = <&vcc18>; }; diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt deleted file mode 100644 index ed1ddf5970163785f5b01781625f64b10b615947..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt +++ /dev/null @@ -1,180 +0,0 @@ -Qualcomm Technologies PNP Flash LED - -QPNP (Qualcomm Technologies Plug N Play) Flash LED (Light -Emitting Diode) driver is used to provide illumination to -camera sensor when background light is dim to capture good -picture. It can also be used for flashlight/torch application. -It is part of PMIC on Qualcomm Technologies reference platforms. -The PMIC is connected to the host processor via SPMI bus. - -Required properties: -- compatible : should be "qcom,qpnp-flash-led" -- reg : base address and size for flash LED modules - -Optional properties: -- qcom,headroom : headroom to use. Values should be 250, 300, - 400 and 500 in mV. -- qcom,startup-dly : delay before flashing after flash executed. - Values should 10, 32, 64, and 128 in us. -- qcom,clamp-curr : current to clamp at when voltage droop happens. - Values are in integer from 0 to 1000 inclusive, - indicating 0 to 1000 mA. -- qcom,self-check-enabled : boolean type. self fault check enablement -- qcom,thermal-derate-enabled : boolean type. derate enablement when module - temperature reaches threshold -- qcom,thermal-derate-threshold : thermal threshold for derate. Values - should be 95, 105, 115, 125 in C. -- qcom,thermal-derate-rate : derate rate when module temperature - reaches threshold. 
Values should be - "1_PERCENT", "1P25_PERCENT", "2_PERCENT", - "2P5_PERCENT", "5_PERCENT" in string. -- qcom,current-ramp-enabled : boolean type. stepped current ramp enablement -- qcom,ramp-up-step : current ramp up rate. Values should be - "0P2US", "0P4US", "0P8US", "1P6US", "3P3US", - "6P7US", "13P5US", "27US". -- qcom,ramp-dn-step : current ramp down rate. Values should be - "0P2US", "0P4US", "0P8US", "1P6US", "3P3US", - "6P7US", "13P5US", "27US". -- qcom,vph-pwr-droop-enabled : boolean type. VPH power droop enablement. Enablement - allows current clamp when phone power drops below - pre-determined threshold -- qcom,vph-pwr-droop-threshold : VPH power threshold for module to clamp current. - Values are 2500 - 3200 in mV with 100 mV steps. -- qcom,vph-pwr-droop-debounce-time : debounce time for module to confirm a voltage - droop is happening. Values are 0, 10, 32, 64 - in us. -- qcom,pmic-charger-support : Boolean type. This tells if flash utilizes charger boost - support -- qcom,headroom-sense-ch0-enabled: Boolean type. This configures headroom sensing enablement - for LED channel 0 -- qcom,headroom-sense-ch1-enabled: Boolean type. This configures headroom sensing enablement - for LED channel 1 -- qcom,power-detect-enabled : Boolean type. This enables driver to get maximum flash LED - current at current battery level to avoid intensity clamp - when battery voltage is low -- qcom,otst2-moduled-enabled : Boolean type. This enables driver to enable MASK to support - OTST2 connection. -- qcom,follow-otst2-rb-disabled : Boolean type. This allows driver to reset/deset module. - By default, driver resets module. This entry allows driver to - bypass reset module sequence. -- qcom,die-current-derate-enabled: Boolean type. This enables driver to get maximum flash LED - current, based on PMIC die temperature threshold to - avoid significant current derate from hardware. This property - is not needed if PMIC is older than PMI8994v2.0. 
-- qcom,die-temp-vadc : VADC channel source for flash LED. This property is not - needed if PMIC is older than PMI8994v2.0. -- qcom,die-temp-threshold : Integer type array for PMIC die temperature threshold. - Array should have at least one value. Values should be in - celcius. This property is not needed if PMIC is older than - PMI8994v2.0. -- qcom,die-temp-derate-current : Integer type arrray for PMIC die temperature derate - current. Array should have at least one value. Values - should be in mA. This property is not needed if PMIC is older - than PMI8994v2.0. - -Required properties inside child node. Chile node contains settings for each individual LED. -Each LED hardware needs a node for itself and a switch node to control brightness. -For the purpose of turning on/off LED and better regulator control, "led:switch" node -is introduced. "led:switch" acquires several existing properties from other nodes for -operational simplification. For backward compatibility purpose, switch node can be optional: -- label : type of led that will be used, either "flash" or "torch". -- qcom,led-name : name of the LED. Accepted values are "led:flash_0", - "led:flash_1", "led:torch_0", "led:torch_1" -- qcom,default-led-trigger : trigger for the camera flash and torch. Accepted values are - "flash0_trigger", "flash1_trigger", "torch0_trigger", torch1_trigger" -- qcom,id : enumerated ID for each physical LED. Accepted values are "0", - "1", etc.. -- qcom,max-current : maximum current allowed on this LED. Valid values should be - integer from 0 to 1000 inclusive, indicating 0 to 1000 mA. -- qcom,pmic-revid : PMIC revision id source. This property is needed for PMI8996 - revision check. - -Optional properties inside child node: -- qcom,current : default current intensity for LED. Accepted values should be - integer from 0 t 1000 inclusive, indicating 0 to 1000 mA. -- qcom,duration : Duration for flash LED. When duration time expires, hardware will turn off - flash LED. 
Values should be from 10 ms to 1280 ms with 10 ms incremental - step. Not applicable to torch. It is required for LED:SWITCH node to handle - LED used as flash. -- reg : reg ( represents number. eg 0,1,2,..) property is to add support for - multiple power sources. It includes two properties regulator-name and max-voltage. - Required property inside regulator node: - - regulator-name : This denotes this node is a regulator node and which - regulator to use. - Optional property inside regulator node: - - max-voltage : This specifies max voltage of regulator. Some switch - or boost regulator does not need this property. - -Example: - qcom,leds@d300 { - compatible = "qcom,qpnp-flash-led"; - status = "okay"; - reg = <0xd300 0x100>; - label = "flash"; - qcom,headroom = <500>; - qcom,startup-dly = <128>; - qcom,clamp-curr = <200>; - qcom,pmic-charger-support; - qcom,self-check-enabled; - qcom,thermal-derate-enabled; - qcom,thermal-derate-threshold = <80>; - qcom,thermal-derate-rate = "4_PERCENT"; - qcom,current-ramp-enabled; - qcom,ramp_up_step = "27US"; - qcom,ramp_dn_step = "27US"; - qcom,vph-pwr-droop-enabled; - qcom,vph-pwr-droop-threshold = <3200>; - qcom,vph-pwr-droop-debounce-time = <10>; - qcom,headroom-sense-ch0-enabled; - qcom,headroom-sense-ch1-enabled; - qcom,die-current-derate-enabled; - qcom,die-temp-vadc = <&pmi8994_vadc>; - qcom,die-temp-threshold = <85 80 75 70 65>; - qcom,die-temp-derate-current = <400 800 1200 1600 2000>; - qcom,pmic-revid = <&pmi8994_revid>; - - pm8226_flash0: qcom,flash_0 { - label = "flash"; - qcom,led-name = "led:flash_0"; - qcom,default-led-trigger = - "flash0_trigger"; - qcom,max-current = <1000>; - qcom,id = <0>; - qcom,duration = <1280>; - qcom,current = <625>; - }; - - pm8226_torch: qcom,torch_0 { - label = "torch"; - qcom,led-name = "led:torch_0"; - qcom,default-led-trigger = - "torch0_trigger"; - boost-supply = <&pm8226_chg_boost>; - qcom,max-current = <200>; - qcom,id = <0>; - qcom,current = <120>; - qcom,max-current = 
<200>; - reg0 { - regulator-name = - "pm8226_chg_boost"; - max-voltage = <3600000>; - }; - }; - - pm8226_switch: qcom,switch { - lable = "switch"; - qcom,led-name = "led:switch"; - qcom,default-led-trigger = - "switch_trigger"; - qcom,id = <2>; - qcom,current = <625>; - qcom,duration = <1280>; - qcom,max-current = <1000>; - reg0 { - regulator-name = - "pm8226_chg_boost"; - max-voltage = <3600000>; - }; - }; - }; - diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt index 6ca0ac31a58198e3c42f721eb4eace8bebf08e3c..c26e9fb456e337fc6b16c469d19265c0d5378c38 100644 --- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt +++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt @@ -126,7 +126,7 @@ Optional properties: internal persist1 = 0x400 internal cmd queue = 0x800 - qcom,pm-qos-latency-us = The latency used to vote for QOS power manager. This -value is typically max(latencies of every cluster at all power levels) + 1 + value is typically max(latencies of every cluster at all power levels) + 1 - qcom,max-secure-instances = An int containing max number of concurrent secure instances supported, accounting for venus and system wide limitations like memory, performance etc. @@ -135,6 +135,12 @@ value is typically max(latencies of every cluster at all power levels) + 1 - qcom,power-conf = Indicates the value at which or beyond, a video session is configured in low power mode to have power benefits. Value is defined interms of HxW of the video session beyond which power benefit is desired. +- qcom,cx-ipeak-data : phandle of cx_ipeak device node and bit position on + the cx register where venus is supposed to vote +- qcom,clock-freq-threshold : Operating threshold frequency of venus which + video driver uses to check against the frequency voted. Whenever venus + clock frequency crosses this mark, driver intimates cx ipeak driver on + supported targets. 
[Second level nodes] Context Banks diff --git a/Documentation/devicetree/bindings/platform/msm/msm_mhi_uci.txt b/Documentation/devicetree/bindings/platform/msm/msm_mhi_uci.txt new file mode 100644 index 0000000000000000000000000000000000000000..5c255b19ea5a8231911f2b809f1a4dcbfc1dc81b --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/msm_mhi_uci.txt @@ -0,0 +1,55 @@ +MSM MHI UCI interface device + +MHI userspace control interface (UCI) enables userspace software clients to +communicate with device using MHI protocol. + +============== +Node Structure +============== + +Main node properties: + +- compatible + Usage: required + Value type: + Definition: "qcom,mhi-uci" + +- qcom,mhi-uci-channels + Usage: required + Value type: Array of + Definition: Array tuples which define the channel configuration + parameters. Each tuple is of length 2, 1st value + represent channel, and 2nd value represent maximum + payload supported. Maximum payload supported is 64 + bytes. Number of tuples must be even value. Max # of + tuples is 46. + +- qcom,mhi-uci-ctrlchan + Usage: optional + Value type: + Definition: Channel that will be handling flow control (DTR/RTS) signals. 
+ +======= +Example +======= +qcom,mhi-uci@0 { + compatible = "qcom,mhi-uci"; + qcom,mhi-uci-channels = <0 0x1000>, + <1 0x1000>, + <2 0x1000>, + <3 0xffff>, + <10 0x1000>, + <11 0x1000>, + <14 0x1000>, + <15 0x1000>, + <16 0x1000>, + <17 0x1000>, + <18 0x1000>, + <19 0x1000>, + <24 0x1000>, + <25 0x1000>, + <32 0x1000>, + <33 0x1000>; + qcom,mhi-uci-ctrlchan = <18>; + status = "ok"; +}; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt index af80de7f5f1fbe0e357b43a9c88505ad5e2845c6..9638888ebc9eb22d736817615a7efbb4facaabd7 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt @@ -278,6 +278,14 @@ First Level Node - FG Gen3 device is specified, then ESR to Rslow scaling factors will be updated to account it for an accurate ESR. +- qcom,fg-esr-clamp-mohms + Usage: optional + Value type: + Definition: Equivalent series resistance (ESR) in milliohms. If this + is specified, then ESR will be clamped to this value when + ESR is found to be dropping below this. Default value is + 20. + - qcom,fg-esr-filter-switch-temp Usage: optional Value type: diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt deleted file mode 100644 index f6a7a1ba300528d27ca3d386a184043851187172..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt +++ /dev/null @@ -1,275 +0,0 @@ -QTI's QPNP PMIC Fuel Gauge Device - -QPNP PMIC FG provides interface to clients to read properties related -to the battery. Its main function is to retrieve the State of Charge (SOC), -a 0-100 percentage representing the amount of charge left in the battery. - -There are two required peripherals in the FG driver, both implemented as -subnodes in the example. 
These peripherals must not be disabled if the FG -device is to enabled: - -- qcom,fg-soc : The main FG device. Supports battery fuel gauge controls and - sensors. -- qcom,fg-batt : The FG battery device supports interrupts and controls with - respect to the state of the connected battery.For example: the - peripheral informs the driver if the battery has been identified - by the fuel gauge based on a given battery resistance range. - -Optionally ADC nodes can be added -- qcom,revid-tp-rev: A subnode with a register address for the TP_REV register - in the REVID peripheral. This is used to apply workarounds that - may depend on the trim program. -- qcom,fg-adc-vbat : A subnode with a register address for the FG_ADC_USR - peripheral which is used mainly for battery current limiting (BCL). - This node maps out the VBAT reading register which allows to have - a +/- 32 mV accurate reading of VBAT. -- qcom,fg-adc-ibat : A subnode with a register address for the FG_ADC_USR - peripheral which is used mainly for battery current limiting (BCL). - This node maps out the IBAT current reading register which allows - to have a +/- 32 mA accurate reading of IBAT. - -Parent node required properties: -- compatible : should be "qcom,qpnp-fg" for the FG driver. -- qcom,pmic-revid : Should specify the phandle of PMIC - revid module. This is used to identify - the PMIC subtype. - -Parent node optional properties: -- qcom,warm-bat-decidegc: Warm battery temperature in decidegC. -- qcom,cool-bat-decidegc: Cool battery temperature in decidegC. -- qcom,hot-bat-decidegc: Hot battery temperature in decidegC. -- qcom,cold-bat-decidegc: Cold battery temperature in decidegC. -- qcom,cold-hot-jeita-hysteresis: A tuple of 2. Index[0] is cold - hysteresis and index[1] is hot - hysterisis(in decidegC). -- qcom,ext-sense-type: Current sense channel used by the FG. - Set this to use external rsense. -- qcom,thermal-coefficients: Byte array of thermal coefficients for - reading battery thermistor. 
This should - be exactly 6 bytes in length. - Example: [01 02 03 04 05 06] -- qcom,resume-soc: soc to resume charging in percentage. -- qcom,resume-soc-raw: soc to resume charging in the scale of - [0-255]. This overrides qcom,resume-soc - if defined. -- qcom,hold-soc-while-full: A boolean property that when defined - holds SOC at 100% when the battery is - full. -- qcom,bcl-lm-threshold-ma: BCL LPM to MPM mode transition threshold - in milliAmpere. -- qcom,bcl-mh-threshold-ma: BCL MPM to HPM mode transition threshold - in milliAmpere. -- qcom,use-otp-profile: Specify this flag to avoid RAM loading - any battery profile. -- qcom,sw-rbias-control: Boolean property which defines whether - the Rbias needs to be controlled by - software. If this is not set, it will - be controlled by hardware (default). -- qcom,fg-iterm-ma: Battery current at which the fuel gauge - will try to scale 100% towards. When - the charge current goes above this, the - SoC should be at 100%. -- qcom,fg-chg-iterm-ma: Battery current at which the fuel gauge - will issue end of charge if the charger - is configured to use the fuel gauge - ADCs for end of charge detection. This - property is in milliamps and should be - positive (e.g. 100mA to terminate at - -100mA). -- qcom,irq-volt-empty-mv: The voltage threshold that the empty - soc interrupt will be triggered. When - the empty soc interrupt fires, battery - soc will be pulled to 0 and the - userspace will be notified via the - power supply framework. The userspace - will read 0% soc and immediately - shutdown. -- qcom,fg-cutoff-voltage-mv: The voltage where the fuel gauge will - steer the SOC to be zero. For example, - if the cutoff voltage is set to 3400mv, - the fuel gauge will try to count SoC so - that the battery SoC will be 0 when it - is 3400mV. 
-- qcom,fg-vbat-estimate-diff-mv: If the estimated voltage based on SoC - and battery current/resistance differs - from the actual voltage by more than - this amount, the fuel gauge will - redo the first SoC estimate when the - driver probes. -- qcom,fg-delta-soc: How many percent the monotonic SoC must - change before a new delta_soc interrupt - is asserted. If this value is raised - above 3-4, some period workarounds may - not function well, so it's best to - leave this at 1 or 2%. -- qcom,fg-vbatt-low-threshold: Voltage (in mV) which upon set will be - used for configuring the low battery - voltage threshold. Interrupt will be - asserted and handled based upon - this. If this property is not specified, - low battery voltage threshold will be - configured to 4200 mV. -- qcom,cycle-counter-en: Boolean property which enables the cycle - counter feature. If this property is - present, then the following properties - to specify low and high soc thresholds - should be defined. -- qcom,capacity-learning-on: A boolean property to have the fuel - gauge driver attempt to learn the - battery capacity when charging. Takes - precedence over capacity-estimation-on. -- qcom,capacity-learning-feedback: A boolean property to have the fuel - gauge driver to feedback the learned - capacity into the capacity learning - algorithm. This has to be used only if - the property "qcom,capacity-learning-on" - is specified. -- qcom,cl-max-increment-deciperc: The maximum percent that the capacity - can rise as the result of a single - charge cycle. This property corresponds - to .1% increments. -- qcom,cl-max-decrement-deciperc: The maximum percent that the capacity - can fall as the result of a single - charge cycle. This property corresponds - to .1% decrements. -- qcom,cl-max-temp-decidegc: Above this temperature, capacity - learning will be canceled. -- qcom,cl-mix-temp-decidegc: Below this temperature, capacity - learning will be canceled. 
-- qcom,cl-max-start-soc: The battery soc has to be below this - value at the start of a charge cycle - for capacity learning to be run. -- qcom,cl-vbat-est-thr-uv: The maximum difference between the - battery voltage shadow and the current - predicted voltage in uV to initiate - capacity learning. -- qcom,capacity-estimation-on: A boolean property to have the fuel - gauge driver attempt to estimate the - battery capacity using battery - resistance. -- qcom,aging-eval-current-ma: Current used to evaluate battery aging. - This value should be around the steady - state current drawn from the battery - when the phone is low on battery. -- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for configuring - constant charge (CC) to constant - voltage (CV) setpoint in FG upon - which the battery EOC status will - be determined. This value should be - 10 mV less than the float voltage - configured in the charger. - This property should only be specified - if "qcom,autoadjust-vfloat" property is - specified in the charger driver to - ensure a proper operation. -- qcom,bad-battery-detection-enable: A boolean property to enable the fuel - gauge driver to detect the damaged battery - when the safety-timer expires by using the - coulomb count. -- qcom,fg-therm-delay-us: The time in microseconds to delay battery - thermistor biasing. -- qcom,esr-pulse-tuning-en: A boolean property to enable ESR pulse - tuning feature. If this is enabled, - ESR pulse extraction will be disabled - when state of charge (SOC) is less than - 2%. It will be enabled back when SOC - gets above 2%. In addition, for SOC - between 2% and 5%, ESR pulse timing - settings will be different from default. - Once SOC crosses 5%, ESR pulse timings - will be restored back to default. - -qcom,fg-soc node required properties: -- reg : offset and length of the PMIC peripheral register map. -- interrupts : the interrupt mappings. - The format should be - . 
-- interrupt-names : names for the mapped fg soc interrupts - The following interrupts are required: - 0: high-soc - 1: low-soc - 2: full-soc - 3: empty-soc - 4: delta-soc - 5: first-est-done - 6: sw-fallbk-ocv - 7: sw-fallbk-new-batt - -qcom,fg-memif node required properties: -- reg : offset and length of the PMIC peripheral register map. -- interrupts : the interrupt mappings. - The format should be - . -- interrupt-names : names for the mapped fg adc interrupts - The following interrupts are required: - 0: mem-avail - -Example: -pmi8994_fg: qcom,fg { - compatible = "qcom,qpnp-fg"; - #address-cells = <1>; - #size-cells = <1>; - status = "disabled"; - qcom,pmic-revid = <&pmi8994_revid>; - - qcom,fg-soc@4000 { - reg = <0x4000 0x100>; - interrupts = <0x2 0x40 0x0>, - <0x2 0x40 0x1>, - <0x2 0x40 0x2>, - <0x2 0x40 0x3>, - <0x2 0x40 0x4>, - <0x2 0x40 0x5>, - <0x2 0x40 0x6>, - <0x2 0x40 0x7>; - - interrupt-names = "high-soc", - "low-soc", - "full-soc", - "empty-soc", - "delta-soc", - "first-est-done", - "sw-fallbk-ocv", - "sw-fallbk-new-batt"; - }; - - qcom,fg-batt@4100 { - reg = <0x4100 0x100>; - interrupts = <0x2 0x41 0x0>, - <0x2 0x41 0x1>, - <0x2 0x41 0x2>, - <0x2 0x41 0x3>, - <0x2 0x41 0x4>, - <0x2 0x41 0x5>, - <0x2 0x41 0x6>, - <0x2 0x41 0x7>; - - interrupt-names = "soft-cold", - "soft-hot", - "vbatt-low", - "batt-ided", - "batt-id-req", - "batt-unknown", - "batt-missing", - "batt-match"; - }; - - qcom,fg-adc-vbat@4254 { - reg = <0x4254 0x1>; - }; - - qcom,fg-adc-ibat@4255 { - reg = <0x4255 0x1>; - }; - - qcom,fg-memif@4400 { - reg = <0x4400 0x100>; - interrupts = <0x2 0x44 0x0>, - <0x2 0x44 0x1>; - - interrupt-names = "mem-avail", - "data-rcvry-sug"; - - qcom,cold-hot-jeita-hysteresis = <30 50>; - }; -}; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt deleted file mode 100644 index 
efd64cd90878e1bf3b10d0cf089675079cd7af29..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt +++ /dev/null @@ -1,394 +0,0 @@ -QPNP SMB Battery Charger - -QPNP SMB Charger is a single-cell switching mode battery charger. It can charge -the battery and power the system via the USB and AC adapter input. - -The QPNP SMB Charger interfaces via the SPMI bus. - -There are six different peripherals adding the following functionality. -Each of these peripherals are implemented as subnodes in the example at the -end of this file. - -- qcom,chgr: Supports charging control and status - reporting. -- qcom,bat-if: Battery status reporting such as presence, - temperature reporting and voltage collapse - protection. -- qcom,usb-chgpth: USB charge path detection and input current - limiting configuration. -- qcom,dc-chgpth: DC charge path detection and input current - limiting configuration. -- qcom,chg-misc: Miscellaneous features such as watchdog timers - and SYSOK pin control -- qcom,chg-otg: OTG configuration control. - -Parent node required properties: -- compatible: Must be "qcom,qpnp-smbcharger" -- #address-cells: Must be <1> -- #size-cells: Must be <1> -- qcom,pmic-revid: Should specify the phandle of PMIC - revid module. This is used to identify - the PMIC subtype. - - - -Sub node required properties: -- reg: The SPMI address for this peripheral -- interrupts: Specifies the interrupt associated with the peripheral. -- interrupt-names: Specifies the interrupt names for the peripheral. Every - available interrupt needs to have an associated name - with it to indentify its purpose. - - The following lists each subnode and their corresponding - required interrupt names: - - qcom,chgr: - - chg-tcc-thr: Triggers on charge completion. - - chg-taper-thr: Triggers on the taper charge - transtion. - - chg-inhibit: Notifies on battery voltage - being too high to resume - charging. 
- - chg-p2f-thr: Triggers on transitioning from - precharge to fastcharge. - - chg-rechg-thr: Triggers on battery voltage - falling below the resume - threshold. - - qcom,bat-if: - - batt-hot: Triggers on battery temperature - hitting the hot threshold. - Charging stops. - - batt-warm: Triggers on battery temperature - hitting the warm threshold. - Charging current is reduced. - - batt-cool: Triggers on battery temperature - hitting the cool threshold. - Charging current is reduced - - batt-cold: Triggers on battery temperature - hitting the cold threshold. - Charging stops. - - batt-missing: Battery missing status - interrupt. - - batt-low: Triggers on battery voltage - falling across a low threshold. - - qcom,usb-chgpth: - - usbin-uv: USB input voltage falls below a - valid threshold. - - usbin-src-det: USB automatic source detection - finishes. - - qcom,dc-chgpth: - - dcin-uv: DC input voltage falls below a - valid threshold. - - qcom,chgr-misc: - - wdog-timeout-mins: Charger watchdog timer - interrupt. - - temp-shutdown: Triggers when charger goes - overtemp and causes a shutdown. - - power-ok: Triggers when the charger - switcher turns on or off. - -Regulator Subnodes: -- qcom,smbcharger-boost-otg A subnode for a regulator device that turns on - the charger boost for OTG operation. -- qcom,smbcharger-external-otg A subnode for a regulator device that switches - off charging and the USB input charge path - in order to allow an external regulator to - operate. This can be used in place of the - qcom,smbcharger-boost-otg if an external boost - is available. - -Regulator Sub node required properties: -- regulator-name A name string for the regulator in question - -Optional Properties: -- qcom,battery-psy-name The name of the main battery power supply that - the charger will register. Failing to define - this property will default the name to - "battery". -- qcom,bms-psy-name The psy name to use for reporting battery - capacity. 
If left unspecified the capacity uses - a preprogrammed default value of 50. -- qcom,float-voltage-mv Float Voltage in mV - the maximum voltage up - to which the battery is charged. Supported - range 3600mV to 4500mV -- qcom,float-voltage-comp Specifies the JEITA float voltage compensation. - Value ranges from 0 to 63. -- qcom,fastchg-current-ma Specifies the fast charge current in mA. Supported - range is from 300mA to 3000mA. -- qcom,fastchg-current-comp Specifies the fast charge current compensation in - mA. Supported values are 250, 700, 900 and 1200mA. -- qcom,charging-timeout-mins Maximum duration in minutes that a single - charge cycle may last. Supported values are: - 0, 192, 384, 768, and 1536. A value of 0 - means that no charge cycle timeout is used and - charging can continue indefinitely. -- qcom,precharging-timeout-mins Maximum duration in minutes that a single - precharge cycle may last. Supported values - are: 0, 24, 48, 96, 192. A value of 0 means - that no precharge cycle timeout is used and - charging can continue indefinitely. Note that - the qcom,charging-timeout-mins property must - be specified in order for this to take effect. -- qcom,dc-psy-type The type of charger connected to the DC path. - Can be "Mains", "Wireless" or "Wipower" -- qcom,dc-psy-ma The current in mA dc path can support. Must be - specified if dc-psy-type is specified. Valid - range 300mA to 2000mA. -- qcom,dcin-vadc The phandle to pmi8994 voltage adc. The ADC is - used to get notifications when the DCIN voltage - crosses a programmed min/max threshold. This is - used to make configurations for optimized power - draw for Wipower. -- qcom,wipower-div2-ilim-map -- qcom,wipower-pt-ilim-map -- qcom,wipower-default-ilim-map - Array of 5 elements to indicate the voltage ranges and their corresponding - current limits. 
The 5 elements with index [0..4] are: - [0] => voltage_low in uV - [1] => voltage_high in uV - [2] => current limit for pass through in mA - [3] => current limit for div2 mode dcin low voltage in mA - [4] => current limit for div2 mode dcin high voltage in mA - The div2 and pt tables indicate the current limits - to use when Wipower is operating in divide_by_2 mode - and pass through mode respectively. - The default table is used when the voltage ranges - are beyond the ones specified in the mapping table. - Note that if dcin-vadc or any of these mapping - tables are not specified, dynamic dcin input - is disabled. -- qcom,charging-disabled Set this if charging should be disabled in the - build by default. -- qcom,resume-delta-mv Specifies the minimum voltage drop in - millivolts below the float voltage that is - required in order to initiate a new charging - cycle. Supported values are: 50, 100, 200 and - 300mV. -- qcom,chg-inhibit-en Boolean that indicates whether the charge inhibit - feature needs to be enabled. If this is not set, - charge inhibit feature is disabled by default. -- qcom,chg-inhibit-fg Indicates if the recharge threshold source has - to be Fuel gauge ADC. If this is not set, it - will be analog sensor by default. -- qcom,bmd-algo-disabled Indicates if the battery missing detection - algorithm is disabled. If this node is present - SMB uses the THERM pin for battery missing - detection. -- qcom,charge-unknown-battery Boolean that indicates whether an unknown - battery without a matching profile will be - charged. If this is not set, if the fuel gauge - does not recognize the battery based on its - battery ID, the charger will not start - charging. -- qcom,bmd-pin-src A string that indicates the source pin for the - battery missind detection. 
This can be either: - - "bpd_none" - battery is considered always present - - "bpd_id" - battery id pin is used - - "bpd_thm" - battery therm pin is used - - "bpd_thm_id" - both pins are used (battery is - considered missing if either pin is - floating). -- qcom,iterm-ma Specifies the termination current to indicate - end-of-charge. Possible values in mA: - 50, 100, 150, 200, 250, 300, 500, 600. -- qcom,iterm-disabled Disables the termination current feature. This - is a boolean property. -- otg-parent-supply A phandle to an external boost regulator for - OTG if it exists. -- qcom,thermal-mitigation: Array of input current limit values for - different system thermal mitigation levels. - This should be a flat array that denotates the - maximum charge current in mA for each thermal - level. -- qcom,rparasitics-uohm: The parasitic resistance of the board following - the line from the battery connectors through - vph_power. This is used to calculate maximum - available current of the battery. -- qcom,vled-max-uv: The maximum input voltage of the flash leds. - This is used to calculate maximum available - current of the battery. -- qcom,autoadjust-vfloat A boolean property that when set, makes the - driver automatically readjust vfloat using the - fuel gauge ADC readings to make charging more - accurate. -- qcom,jeita-temp-hard-limit property when present will enable or disable - the jeita temperature hard limit based on the - value 1 or 0. Specify 0 if the jeita temp hard - limit needs to be disabled. If it is not present, - jeita temperature hard limit will be based on what - the bootloader had set earlier. -- qcom,low-volt-dcin: A boolean property which upon set will enable the - AICL deglitch configuration dynamically. This needs - to be set if the DCIN supply is going to be less - than or equal to 5V. 
-- qcom,force-aicl-rerun: A boolean property which upon set will enable the - AICL rerun by default along with the deglitch time - configured to long interval (20 ms). Also, specifying - this property will not adjust the AICL deglitch time - dynamically for handling the battery over-voltage - oscillations when the charger is headroom limited. -- qcom,aicl-rerun-period-s If force-aicl-rerun is on, this property dictates - how often aicl is reran in seconds. Possible values - are 45, 90, 180, and 360. -- qcom,ibat-ocp-threshold-ua Maximum current before the battery will trigger - overcurrent protection. Use the recommended - battery pack value minus some margin. -- qcom,soft-vfloat-comp-disabled Set this property when the battery is - powered via external source and could - go above the float voltage. -- qcom,parallel-usb-min-current-ma Minimum current drawn by the primary - charger before enabling the parallel - charger if one exists. Do not define - this property if no parallel chargers - exist. -- qcom,parallel-usb-9v-min-current-ma Minimum current drawn by the primary - charger before enabling the parallel - charger if one exists. This property - applies only for 9V chargers. -- qcom,parallel-allowed-lowering-ma Acceptable current drop from the initial limit - to keep parallel charger activated. If the - charger current reduces beyond this threshold - parallel charger is disabled. Must be specified - if parallel charger is used. -- qcom,parallel-main-chg-fcc-percent Percentage of the fast charge current allotted to the - main charger when parallel charging is enabled and - operational. If this property is not defined, the - driver defaults to a 50%/50% split between the main - and parallel charger. -- qcom,parallel-main-chg-icl-percent Percentage of the input current allotted to the - main charger when parallel charging is enabled and - operational. If this property is not defined, the - driver defaults to a 60%/40% split between the main - and parallel charger. 
-- qcom,battery-data Points to the phandle of node which - contains the battery-profiles supported - by the charger/FG. -- qcom,chg-led-support A bool property to support the charger led feature. -- qcom,chg-led-sw-controls A bool property to allow the software to control - the charger led without a valid charger. -- qcom,skip-usb-notification A boolean property to be used when usb gets present - and type from other means. Especially true on - liquid hardware, where usb presence is detected based on GPIO. -- qcom,skip-usb-suspend-for-fake-battery A boolean property to skip - suspending USB path for fake - battery. -- qcom,vchg_sns-vadc Phandle of the VADC node. -- qcom,vchg-adc-channel-id The ADC channel to which the VCHG is routed. - -Example: - qcom,qpnp-smbcharger { - compatible = "qcom,qpnp-smbcharger"; - #address-cells = <1>; - #size-cells = <1>; - - qcom,iterm-ma = <100>; - qcom,float-voltage-mv = <4200>; - qcom,resume-delta-mv = <100>; - qcom,bmd-pin-src = "bpd_thm_id"; - qcom,dc-psy-type = "Mains"; - qcom,dc-psy-ma = <1500>; - qcom,bms-psy-name = "bms"; - qcom,battery-psy-name = "battery"; - qcom,thermal-mitigation = <1500 700 600 325>; - qcom,vchg_sns-vadc = <&pmi8950_vadc>; - qcom,vchg-adc-channel-id = <3>; - - qcom,chgr@1000 { - reg = <0x1000 0x100>; - interrupts = <0x2 0x10 0x0>, - <0x2 0x10 0x1>, - <0x2 0x10 0x2>, - <0x2 0x10 0x3>, - <0x2 0x10 0x4>, - <0x2 0x10 0x5>, - <0x2 0x10 0x6>, - <0x2 0x10 0x7>; - - interrupt-names = "chg-error", - "chg-inhibit", - "chg-prechg-sft", - "chg-complete-chg-sft", - "chg-p2f-thr", - "chg-rechg-thr", - "chg-taper-thr", - "chg-tcc-thr"; - }; - - qcom,otg@1100 { - reg = <0x1100 0x100>; - }; - - qcom,bat-if@1200 { - reg = <0x1200 0x100>; - interrupts = <0x2 0x12 0x0>, - <0x2 0x12 0x1>, - <0x2 0x12 0x2>, - <0x2 0x12 0x3>, - <0x2 0x12 0x4>, - <0x2 0x12 0x5>, - <0x2 0x12 0x6>, - <0x2 0x12 0x7>; - - interrupt-names = "batt-hot", - "batt-warm", - "batt-cold", - "batt-cool", - "batt-ov", - "batt-low", - "batt-missing", - 
"batt-term-missing"; - }; - - qcom,usb-chgpth@1300 { - reg = <0x1300 0x100>; - interrupts = <0x2 0x13 0x0>, - <0x2 0x13 0x1>, - <0x2 0x13 0x2>, - <0x2 0x13 0x3>, - <0x2 0x13 0x4>, - <0x2 0x13 0x5>, - <0x2 0x13 0x6>; - - interrupt-names = "usbin-uv", - "usbin-ov", - "usbin-src-det", - "otg-fail", - "otg-oc", - "aicl-done", - "usbid-change"; - }; - - qcom,dc-chgpth@1400 { - reg = <0x1400 0x100>; - interrupts = <0x2 0x14 0x0>, - <0x2 0x14 0x1>; - - interrupt-names = "dcin-uv", - "dcin-ov"; - }; - - qcom,chgr-misc@1600 { - reg = <0x1600 0x100>; - interrupts = <0x2 0x16 0x0>, - <0x2 0x16 0x1>, - <0x2 0x16 0x2>, - <0x2 0x16 0x3>, - <0x2 0x16 0x4>, - <0x2 0x16 0x5>; - - interrupt-names = "power-ok", - "temp-shutdown", - "wdog-timeout", - "flash-fail", - "otst2", - "otst3"; - }; - }; diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt index d00bfd8624a500eb797aac8273f8fc5f1df3c785..e0381c28773de5ab972a8368012760cbf8ca1b29 100644 --- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt @@ -81,9 +81,9 @@ pm8916: l14, l15, l16, l17, l18 pm8941: - s1, s2, s3, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, - l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2, lvs3, - mvs1, mvs2 + s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, + l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2, lvs3, + 5vs1, 5vs2 The content of each sub-node is defined by the standard binding for regulators - see regulator.txt - with additional custom properties described below: diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 628e08f958f635c35ba69ed79dfd3ff7197f8276..45d680644dfe981d13f59bec657be2f6524424f5 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1381,7 +1381,14 
@@ bytes respectively. Such letter suffixes can also be entirely omitted. i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX controllers i8042.notimeout [HW] Ignore timeout condition signalled by controller - i8042.reset [HW] Reset the controller during init and cleanup + i8042.reset [HW] Reset the controller during init, cleanup and + suspend-to-ram transitions, only during s2r + transitions, or never reset + Format: { 1 | Y | y | 0 | N | n } + 1, Y, y: always reset controller + 0, N, n: don't ever reset controller + Default: only on s2r transitions on x86; most other + architectures force reset to be always executed i8042.unlock [HW] Unlock (ignore) the keylock i8042.kbdreset [HW] Reset device connected to KBD port diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c index aaeafa18d99bb70bee8cf5bd1d39829b17d6d3d4..c99a75968c011ef802e184f91255842c0b672608 100644 --- a/Documentation/mic/mpssd/mpssd.c +++ b/Documentation/mic/mpssd/mpssd.c @@ -1538,9 +1538,9 @@ set_cmdline(struct mic_info *mic) len = snprintf(buffer, PATH_MAX, "clocksource=tsc highres=off nohz=off "); - len += snprintf(buffer + len, PATH_MAX, + len += snprintf(buffer + len, PATH_MAX - len, "cpufreq_on;corec6_off;pc3_off;pc6_off "); - len += snprintf(buffer + len, PATH_MAX, + len += snprintf(buffer + len, PATH_MAX - len, "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0", mic->id + 1); diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt index 4976389e432d4dd5207d65ad7c37d407c00d9d87..dd15a699ee1cdbb25221481d2d9de1865f775b16 100644 --- a/Documentation/pinctrl.txt +++ b/Documentation/pinctrl.txt @@ -831,7 +831,7 @@ separate memory range only intended for GPIO driving, and the register range dealing with pin config and pin multiplexing get placed into a different memory range and a separate section of the data sheet. 
-A flag "strict" in struct pinctrl_desc is available to check and deny +A flag "strict" in struct pinmux_ops is available to check and deny simultaneous access to the same pin from GPIO and pin multiplexing consumers on hardware of this type. The pinctrl driver should set this flag accordingly. diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index cf2f9e9dfe4d6357693eb4ce98d798d73c26e627..fa16fb2302a58c1dcdf127a7a4a71f2afb739cc5 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -357,6 +357,26 @@ of ftrace. Here is a list of some of the key files: to correlate events across hypervisor/guest if tb_offset is known. + mono: This uses the fast monotonic clock (CLOCK_MONOTONIC) + which is monotonic and is subject to NTP rate adjustments. + + mono_raw: + This is the raw monotonic clock (CLOCK_MONOTONIC_RAW) + which is montonic but is not subject to any rate adjustments + and ticks at the same rate as the hardware clocksource. + + boot: This is the boot clock (CLOCK_BOOTTIME) and is based on the + fast monotonic clock, but also accounts for time spent in + suspend. Since the clock access is designed for use in + tracing in the suspend path, some side effects are possible + if clock is accessed after the suspend time is accounted before + the fast mono clock is updated. In this case, the clock update + appears to happen slightly sooner than it normally would have. + Also on 32-bit systems, its possible that the 64-bit boot offset + sees a partial update. These effects are rare and post + processing should be able to handle them. See comments on + ktime_get_boot_fast_ns function for more information. + To set a clock, simply echo the clock name into this file. 
echo global > trace_clock diff --git a/Makefile b/Makefile index 11ab8feb7fc9a41bc05fa5f33dc0889cf836b970..cf7babcabaa7810da397d0b7b089e0d24400c468 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 21 +SUBLEVEL = 38 EXTRAVERSION = NAME = Blurry Fish Butt @@ -128,6 +128,10 @@ _all: # Cancel implicit rules on top Makefile $(CURDIR)/Makefile Makefile: ; +ifneq ($(words $(subst :, ,$(CURDIR))), 1) + $(error main directory cannot contain spaces nor colons) +endif + ifneq ($(KBUILD_OUTPUT),) # Invoke a second make in the output directory, passing relevant variables # check that the output directory actually exists @@ -395,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -std=gnu89 + -std=gnu89 $(call cc-option,-fno-PIE) + KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := -KBUILD_AFLAGS := -D__ASSEMBLY__ +KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds @@ -499,6 +504,12 @@ ifeq ($(KBUILD_EXTMOD),) endif endif endif +# install and module_install need also be processed one by one +ifneq ($(filter install,$(MAKECMDGOALS)),) + ifneq ($(filter modules_install,$(MAKECMDGOALS)),) + mixed-targets := 1 + endif +endif ifeq ($(mixed-targets),1) # =========================================================================== @@ -610,11 +621,17 @@ ARCH_CFLAGS := include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) +KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) +KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) +KBUILD_CFLAGS += -Os else +ifdef CONFIG_PROFILE_ALL_BRANCHES KBUILD_CFLAGS += -O2 +else 
+KBUILD_CFLAGS += -O2 +endif endif # Tell gcc to never replace conditional load with a non-conditional one @@ -1265,7 +1282,7 @@ help: @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH' @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)' @echo ' dir/ - Build all files in dir and below' - @echo ' dir/file.[oisS] - Build specified target only' + @echo ' dir/file.[ois] - Build specified target only' @echo ' dir/file.lst - Build specified mixed source/assembly target only' @echo ' (requires a recent binutils and recent build (System.map))' @echo ' dir/file.ko - Build module including final link' @@ -1505,11 +1522,11 @@ image_name: # Clear a bunch of variables before executing the submake tools/: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ tools/%: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $* + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $* # Single targets # --------------------------------------------------------------------------- diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg index 34fa64a669aca71025c4f601a474c44fb2ca4275..8531a7a79e336d534ecb9fc05d6f952e6d9ce94b 100644 --- a/android/configs/android-base.cfg +++ b/android/configs/android-base.cfg @@ -24,6 +24,7 @@ CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y CONFIG_EMBEDDED=y CONFIG_FB=y +CONFIG_HARDENED_USERCOPY=y CONFIG_HIGH_RES_TIMERS=y CONFIG_INET6_AH=y CONFIG_INET6_ESP=y @@ -139,6 +140,7 @@ CONFIG_PPP_MPPE=y CONFIG_PREEMPT=y CONFIG_PROFILING=y CONFIG_QUOTA=y +CONFIG_RANDOMIZE_BASE=y CONFIG_RTC_CLASS=y CONFIG_RT_GROUP_SCHED=y CONFIG_SECCOMP=y diff --git 
a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg index 3465a848d74da4e4d0945e64bd2c4080f639b77a..3fd0b13488a1cbf9325cf9b4c912e6e97eac1c74 100644 --- a/android/configs/android-recommended.cfg +++ b/android/configs/android-recommended.cfg @@ -1,4 +1,5 @@ # KEEP ALPHABETICALLY SORTED +# CONFIG_AIO is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_LEGACY_PTYS is not set diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 9b0d40093c9a377ff142ffe2241c44b152e41ffd..c0ddbbf73400eb8bf5cd123f6891b35fa604cc6e 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) return __cu_len; } -extern inline long -__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) -{ - if (__access_ok((unsigned long)validate, len, get_fs())) - len = __copy_tofrom_user_nocheck(to, from, len); - return len; -} - #define __copy_to_user(to, from, n) \ ({ \ __chk_user_ptr(to); \ @@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user - extern inline long copy_to_user(void __user *to, const void *from, long n) { - return __copy_tofrom_user((__force void *)to, from, n, to); + if (likely(__access_ok((unsigned long)to, n, get_fs()))) + n = __copy_tofrom_user_nocheck((__force void *)to, from, n); + return n; } extern inline long copy_from_user(void *to, const void __user *from, long n) { - return __copy_tofrom_user(to, (__force void *)from, n, from); + if (likely(__access_ok((unsigned long)from, n, get_fs()))) + n = __copy_tofrom_user_nocheck(to, (__force void *)from, n); + else + memset(to, 0, n); + return n; } extern void __do_clear_user(void); diff --git a/arch/arc/include/asm/delay.h 
b/arch/arc/include/asm/delay.h index 08e7e2a16ac176a597ceb21c3b0f399b6ad98ea6..a36e8601114d2ca2970f257f9f8d7e5c03ec08a2 100644 --- a/arch/arc/include/asm/delay.h +++ b/arch/arc/include/asm/delay.h @@ -22,10 +22,11 @@ static inline void __delay(unsigned long loops) { __asm__ __volatile__( - " lp 1f \n" - " nop \n" - "1: \n" - : "+l"(loops)); + " mov lp_count, %0 \n" + " lp 1f \n" + " nop \n" + "1: \n" + : : "r"(loops)); } extern void __bad_udelay(void); diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 3cab04255ae0a4e9db1599ae348d6f1360acd970..e5fec320f158e51ba6519dd7ee3ef71725943d8c 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -277,8 +277,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) -#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \ - pgprot_val(prot))) +#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) /* diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d1da6032b715a7fea35d71fcedeeeb8cdfe590e4..d4d8df706efa738ae115ddf68b37dad2d8d271c9 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -83,7 +83,10 @@ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ - "3: mov %0, %3\n" \ + "3: # return -EFAULT\n" \ + " mov %0, %3\n" \ + " # zero out dst ptr\n" \ + " mov %1, 0\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ @@ -101,7 +104,11 @@ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ - "3: mov %0, %3\n" \ + "3: # return -EFAULT\n" \ + " mov %0, %3\n" \ + " # zero out dst ptr\n" \ + " mov %1, 0\n" \ + " mov %R1, 0\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 
004b7f0bc76cc58c6988547df1bb8705cd36004d..257b8699efde4f7557b35d8eec2252b7db4903fd 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) struct user_regs_struct uregs; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); - if (!err) - set_current_blocked(&set); - err |= __copy_from_user(&uregs.scratch, &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); + if (err) + return err; + set_current_blocked(&set); regs->bta = uregs.scratch.bta; regs->lp_start = uregs.scratch.lp_start; regs->lp_end = uregs.scratch.lp_end; @@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) regs->r0 = uregs.scratch.r0; regs->sp = uregs.scratch.sp; - return err; + return 0; } static inline int is_do_ss_needed(unsigned int magic) diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index dfad287f1db1c6b55b86faacc0b40d2472636795..dbedc576e4cad870e5682750740bc13aa81c9d92 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -130,14 +130,17 @@ static cycle_t arc_counter_read(struct clocksource *cs) cycle_t full; } stamp; - - __asm__ __volatile( - "1: \n" - " lr %0, [AUX_RTC_LOW] \n" - " lr %1, [AUX_RTC_HIGH] \n" - " lr %2, [AUX_RTC_CTRL] \n" - " bbit0.nt %2, 31, 1b \n" - : "=r" (stamp.low), "=r" (stamp.high), "=r" (status)); + /* + * hardware has an internal state machine which tracks readout of + * low/high and updates the CTRL.status if + * - interrupt/exception taken between the two reads + * - high increments after low has been read + */ + do { + stamp.low = read_aux_reg(AUX_RTC_LOW); + stamp.high = read_aux_reg(AUX_RTC_HIGH); + status = read_aux_reg(AUX_RTC_CTRL); + } while (!(status & _BITUL(31))); return stamp.full; } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 56961334bb7e94468f8552f1bab92732699d2367..1886a65eee878ac9167d6a437474ebc54921f09b 100644 --- 
a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -39,6 +39,7 @@ config ARM select HAVE_ARCH_HARDENED_USERCOPY select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) select HAVE_ARCH_TRACEHOOK + select HAVE_ARM_SMCCC if CPU_V7 select HAVE_BPF_JIT select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING @@ -1462,8 +1463,7 @@ config BIG_LITTLE config BL_SWITCHER bool "big.LITTLE switcher support" - depends on BIG_LITTLE && MCPM && HOTPLUG_CPU - select ARM_CPU_SUSPEND + depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC select CPU_PM help The big.LITTLE "switcher" provides the core functionality to @@ -1521,7 +1521,7 @@ config HOTPLUG_CPU config ARM_PSCI bool "Support for the ARM Power State Coordination Interface (PSCI)" - depends on CPU_V7 + depends on HAVE_ARM_SMCCC select ARM_PSCI_FW help Say Y here if you want Linux to communicate with system firmware @@ -2227,7 +2227,8 @@ config ARCH_SUSPEND_POSSIBLE def_bool y config ARM_CPU_SUSPEND - def_bool PM_SLEEP + def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW + depends on ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE bool diff --git a/arch/arm/boot/dts/armada-390.dtsi b/arch/arm/boot/dts/armada-390.dtsi index 094e39c660390440501dce7e64dfdda4b8268a37..6cd18d8aaac795606f06862a107c7b06ec700bce 100644 --- a/arch/arm/boot/dts/armada-390.dtsi +++ b/arch/arm/boot/dts/armada-390.dtsi @@ -47,6 +47,8 @@ #include "armada-39x.dtsi" / { + compatible = "marvell,armada390"; + soc { internal-regs { pinctrl@18000 { @@ -54,4 +56,5 @@ reg = <0x18000 0x20>; }; }; + }; }; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 2b6cc8bf3c5cce97349f2385e5dfba1009b5df0a..e6af41c4bbc129cef7dc82b74ed8ef180f5e20ef 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -221,7 +221,7 @@ clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>, <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>, <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>, - <&clks 
IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>, + <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>; clock-names = "core", "rxtx0", "rxtx1", "rxtx2", diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index bfa5edde179c30d38d55cb80dd462dc6c3ee8084..2c1e7f09205f5120cb222deb98ec223b539e5e82 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -113,7 +113,7 @@ partition@e0000 { label = "u-boot environment"; - reg = <0xe0000 0x100000>; + reg = <0xe0000 0x20000>; }; partition@100000 { diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi index a29ad16cc9bbddec399cef41a00106bc636a14a0..64c5af30c1d0df0047bd62024d48d50fe4e218c1 100644 --- a/arch/arm/boot/dts/omap3-overo-base.dtsi +++ b/arch/arm/boot/dts/omap3-overo-base.dtsi @@ -223,7 +223,9 @@ }; &gpmc { - ranges = <0 0 0x00000000 0x20000000>; + ranges = <0 0 0x30000000 0x1000000>, /* CS0 */ + <4 0 0x2b000000 0x1000000>, /* CS4 */ + <5 0 0x2c000000 0x1000000>; /* CS5 */ nand@0,0 { linux,mtd-name= "micron,mt29c4g96maz"; diff --git a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi index 17b82f82638a97e5e6157fc0c9c16d156de962eb..64047788216b5b7e8a2e828d6261e144f69c6b7b 100644 --- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi @@ -55,8 +55,6 @@ #include "omap-gpmc-smsc9221.dtsi" &gpmc { - ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */ - ethernet@gpmc { reg = <5 0 0xff>; interrupt-parent = <&gpio6>; diff --git a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi index 9e24b6a1d07b8e4d02b6d0704858680015f0069b..1b304e2f1bd2fd4fd5be23dafcd0f364991fd131 100644 --- a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi @@ -27,8 +27,6 @@ #include 
"omap-gpmc-smsc9221.dtsi" &gpmc { - ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */ - ethernet@gpmc { reg = <5 0 0xff>; interrupt-parent = <&gpio6>; diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi index 334109e14613d6eb3a406f69eb56dce2f9f445c4..82e98ee3023ada85319d96c22da2ac985216b04f 100644 --- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi @@ -15,9 +15,6 @@ #include "omap-gpmc-smsc9221.dtsi" &gpmc { - ranges = <4 0 0x2b000000 0x1000000>, /* CS4 */ - <5 0 0x2c000000 0x1000000>; /* CS5 */ - smsc1: ethernet@gpmc { reg = <5 0 0xff>; interrupt-parent = <&gpio6>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index a4c1762b53ea3712a46e5f8a04cff5783d93b14d..e00d50ef678fa7635b516a3ad07964bc43c27a1a 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -5,6 +5,7 @@ #include #include #include +#include #include / { model = "Qualcomm APQ8064"; @@ -354,22 +355,50 @@ compatible = "qcom,pm8921-gpio"; reg = <0x150>; - interrupts = <192 1>, <193 1>, <194 1>, - <195 1>, <196 1>, <197 1>, - <198 1>, <199 1>, <200 1>, - <201 1>, <202 1>, <203 1>, - <204 1>, <205 1>, <206 1>, - <207 1>, <208 1>, <209 1>, - <210 1>, <211 1>, <212 1>, - <213 1>, <214 1>, <215 1>, - <216 1>, <217 1>, <218 1>, - <219 1>, <220 1>, <221 1>, - <222 1>, <223 1>, <224 1>, - <225 1>, <226 1>, <227 1>, - <228 1>, <229 1>, <230 1>, - <231 1>, <232 1>, <233 1>, - <234 1>, <235 1>; - + interrupts = <192 IRQ_TYPE_NONE>, + <193 IRQ_TYPE_NONE>, + <194 IRQ_TYPE_NONE>, + <195 IRQ_TYPE_NONE>, + <196 IRQ_TYPE_NONE>, + <197 IRQ_TYPE_NONE>, + <198 IRQ_TYPE_NONE>, + <199 IRQ_TYPE_NONE>, + <200 IRQ_TYPE_NONE>, + <201 IRQ_TYPE_NONE>, + <202 IRQ_TYPE_NONE>, + <203 IRQ_TYPE_NONE>, + <204 IRQ_TYPE_NONE>, + <205 IRQ_TYPE_NONE>, + <206 IRQ_TYPE_NONE>, + <207 IRQ_TYPE_NONE>, + <208 IRQ_TYPE_NONE>, + <209 IRQ_TYPE_NONE>, + <210 
IRQ_TYPE_NONE>, + <211 IRQ_TYPE_NONE>, + <212 IRQ_TYPE_NONE>, + <213 IRQ_TYPE_NONE>, + <214 IRQ_TYPE_NONE>, + <215 IRQ_TYPE_NONE>, + <216 IRQ_TYPE_NONE>, + <217 IRQ_TYPE_NONE>, + <218 IRQ_TYPE_NONE>, + <219 IRQ_TYPE_NONE>, + <220 IRQ_TYPE_NONE>, + <221 IRQ_TYPE_NONE>, + <222 IRQ_TYPE_NONE>, + <223 IRQ_TYPE_NONE>, + <224 IRQ_TYPE_NONE>, + <225 IRQ_TYPE_NONE>, + <226 IRQ_TYPE_NONE>, + <227 IRQ_TYPE_NONE>, + <228 IRQ_TYPE_NONE>, + <229 IRQ_TYPE_NONE>, + <230 IRQ_TYPE_NONE>, + <231 IRQ_TYPE_NONE>, + <232 IRQ_TYPE_NONE>, + <233 IRQ_TYPE_NONE>, + <234 IRQ_TYPE_NONE>, + <235 IRQ_TYPE_NONE>; gpio-controller; #gpio-cells = <2>; @@ -381,9 +410,18 @@ gpio-controller; #gpio-cells = <2>; interrupts = - <128 1>, <129 1>, <130 1>, <131 1>, - <132 1>, <133 1>, <134 1>, <135 1>, - <136 1>, <137 1>, <138 1>, <139 1>; + <128 IRQ_TYPE_NONE>, + <129 IRQ_TYPE_NONE>, + <130 IRQ_TYPE_NONE>, + <131 IRQ_TYPE_NONE>, + <132 IRQ_TYPE_NONE>, + <133 IRQ_TYPE_NONE>, + <134 IRQ_TYPE_NONE>, + <135 IRQ_TYPE_NONE>, + <136 IRQ_TYPE_NONE>, + <137 IRQ_TYPE_NONE>, + <138 IRQ_TYPE_NONE>, + <139 IRQ_TYPE_NONE>; }; rtc@11d { diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi index b8b0f7e26dd995f3cf0b5d51463714cbf20ce88a..9c4890a84a3704c872fb14f8cfc02314fe39f281 100644 --- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi @@ -445,6 +445,9 @@ "msg-tx-failed", "msg-tx-discarded", "msg-rx-discarded"; + + qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */ + <9000 3000>; /* 9V @ 3A */ }; pm660_adc_tm: vadc@3400 { diff --git a/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi b/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi index 05a8816677edb051f663a86c0f356a3a76324c78..57e8d650569378174475a971d430838d33920f04 100644 --- a/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi @@ -26,7 +26,6 @@ qcom,never-unload-fw; qcom,sw-power-collapse; qcom,max-secure-instances = <5>; - qcom,debug-timeout; qcom,reg-presets = <0x80124 
0x00000003>, <0x80550 0x01111111>, diff --git a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-qrd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..1db8503b020055a1ca1fde0de79b9cb8a110b57d --- /dev/null +++ b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-qrd.dtsi @@ -0,0 +1,428 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + led_flash0: qcom,camera-flash { + cell-index = <0>; + compatible = "qcom,camera-flash"; + qcom,flash-source = <&pm660l_flash0 &pm660l_flash1>; + qcom,torch-source = <&pm660l_torch0 &pm660l_torch1>; + qcom,switch-source = <&pm660l_switch0>; + status = "ok"; + }; + + cam_avdd_gpio_regulator: cam_avdd_fixed_regulator { + compatible = "regulator-fixed"; + regulator-name = "cam_vadd_gpio_regulator"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + enable-active-high; + gpio = <&tlmm 51 0>; + vin-supply = <&pm660l_bob>; + }; + + cam_vaf_gpio_regulator: cam_vaf_fixed_regulator { + compatible = "regulator-fixed"; + regulator-name = "cam_vaf_gpio_regulator"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + enable-active-high; + gpio = <&tlmm 50 0>; + vin-supply = <&pm660l_bob>; + }; +}; + +&tlmm { + cam_sensor_rear_active: cam_sensor_rear_active { + /* RESET */ + mux { + pins = "gpio46"; + function = "gpio"; + }; + + config { + pins = "gpio46"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + 
+ cam_sensor_rear_suspend: cam_sensor_rear_suspend { + /* RESET */ + mux { + pins = "gpio46"; + function = "gpio"; + }; + + config { + pins = "gpio46"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_rear2_active: cam_sensor_rear2_active { + /* RESET */ + mux { + pins = "gpio48"; + function = "gpio"; + }; + + config { + pins = "gpio48"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_rear2_suspend: cam_sensor_rear2_suspend { + /* RESET */ + mux { + pins = "gpio48"; + function = "gpio"; + }; + + config { + pins = "gpio48"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_front_active: cam_sensor_front_active { + /* RESET */ + mux { + pins = "gpio47"; + function = "gpio"; + }; + + config { + pins = "gpio47"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_front_suspend: cam_sensor_front_suspend { + /* RESET */ + mux { + pins = "gpio47"; + function = "gpio"; + }; + + config { + pins = "gpio47"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; +}; + +&cci { + actuator0: qcom,actuator@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,actuator"; + qcom,cci-master = <0>; + cam_vaf-supply = <&cam_vaf_gpio_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <2800000>; + qcom,cam-vreg-max-voltage = <2800000>; + qcom,cam-vreg-op-mode = <0>; + }; + + actuator1: qcom,actuator@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,actuator"; + qcom,cci-master = <1>; + cam_vaf-supply = <&cam_vaf_gpio_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <2800000>; + qcom,cam-vreg-max-voltage = <2800000>; + qcom,cam-vreg-op-mode = <0>; + }; + + ois0: qcom,ois { + cell-index = <0>; + compatible = "qcom,ois"; + qcom,cci-master = <0>; + cam_vaf-supply = <&cam_vaf_gpio_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + 
qcom,cam-vreg-min-voltage = <2800000>; + qcom,cam-vreg-max-voltage = <2800000>; + qcom,cam-vreg-op-mode = <0>; + status = "disabled"; + }; + + eeprom0: qcom,eeprom@0 { + cell-index = <0>; + reg = <0>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm660_l11>; + cam_vana-supply = <&cam_avdd_gpio_regulator>; + cam_vdig-supply = <&pm660_s5>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <1780000 0 1350000>; + qcom,cam-vreg-max-voltage = <1950000 0 1350000>; + qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend>; + gpios = <&tlmm 32 0>, + <&tlmm 46 0>, + <&pm660l_gpios 4 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 1>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET0", + "CAM_VDIG"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss MCLK0_CLK_SRC>, + <&clock_mmss MMSS_CAMSS_MCLK0_CLK>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + eeprom1: qcom,eeprom@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm660_l11>; + cam_vana-supply = <&cam_avdd_gpio_regulator>; + cam_vdig-supply = <&pm660_s5>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <1780000 0 1350000>; + qcom,cam-vreg-max-voltage = <1950000 0 1350000>; + qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 34 0>, + <&tlmm 48 0>, + <&pm660l_gpios 4 0>; + qcom,gpio-reset = <1>; + 
qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 1>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1", + "CAM_VDIG"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss MCLK2_CLK_SRC>, + <&clock_mmss MMSS_CAMSS_MCLK2_CLK>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + eeprom2: qcom,eeprom@2 { + cell-index = <2>; + reg = <0x2>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm660_l11>; + cam_vana-supply = <&cam_avdd_gpio_regulator>; + cam_vdig-supply = <&pm660_s5>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <1780000 0 1352000>; + qcom,cam-vreg-max-voltage = <1950000 0 1352000>; + qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 33 0>, + <&tlmm 47 0>, + <&pm660l_gpios 3 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2", + "CAM_VDIG"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss MCLK1_CLK_SRC>, + <&clock_mmss MMSS_CAMSS_MCLK1_CLK>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@0 { + cell-index = <0>; + compatible = "qcom,camera"; + reg = <0x0>; + qcom,special-support-sensors = "imx362_gt24c64a"; + qcom,csiphy-sd-index = <0>; + qcom,csid-sd-index = <0>; + qcom,mount-angle = <270>; + qcom,led-flash-src = <&led_flash0>; + qcom,actuator-src = <&actuator0>; + qcom,ois-src = <&ois0>; + qcom,eeprom-src = <&eeprom0>; + cam_vio-supply = <&pm660_l11>; + cam_vana-supply = 
<&cam_avdd_gpio_regulator>; + cam_vdig-supply = <&pm660_s5>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <1780000 0 1350000>; + qcom,cam-vreg-max-voltage = <1950000 0 1350000>; + qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend>; + gpios = <&tlmm 32 0>, + <&tlmm 46 0>, + <&pm660l_gpios 4 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 1>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET0", + "CAM_VDIG"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss MCLK0_CLK_SRC>, + <&clock_mmss MMSS_CAMSS_MCLK0_CLK>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@1 { + cell-index = <1>; + compatible = "qcom,camera"; + reg = <0x1>; + qcom,csiphy-sd-index = <1>; + qcom,csid-sd-index = <1>; + qcom,mount-angle = <270>; + qcom,actuator-src = <&actuator1>; + qcom,eeprom-src = <&eeprom1>; + cam_vio-supply = <&pm660_l11>; + cam_vana-supply = <&cam_avdd_gpio_regulator>; + cam_vdig-supply = <&pm660_s5>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <1780000 0 1350000>; + qcom,cam-vreg-max-voltage = <1950000 0 1350000>; + qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 34 0>, + <&tlmm 48 0>, + <&pm660l_gpios 4 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 1>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK1", + 
"CAM_RESET1", + "CAM_VDIG"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss MCLK2_CLK_SRC>, + <&clock_mmss MMSS_CAMSS_MCLK2_CLK>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@2 { + cell-index = <2>; + compatible = "qcom,camera"; + reg = <0x02>; + qcom,csiphy-sd-index = <2>; + qcom,csid-sd-index = <2>; + qcom,mount-angle = <270>; + qcom,eeprom-src = <&eeprom2>; + cam_vio-supply = <&pm660_l11>; + cam_vana-supply = <&cam_avdd_gpio_regulator>; + cam_vdig-supply = <&pm660_s5>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <1780000 0 1350000>; + qcom,cam-vreg-max-voltage = <1950000 0 1350000>; + qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 33 0>, + <&tlmm 47 0>, + <&pm660l_gpios 3 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2", + "CAM_VDIG"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss MCLK1_CLK_SRC>, + <&clock_mmss MMSS_CAMSS_MCLK1_CLK>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; +}; + +&pm660l_gpios { + gpio@c300 { /* GPIO4 -CAMERA SENSOR 0 VDIG*/ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <0>; /* VIN1 GPIO_LV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "ok"; + }; + + gpio@c200 { /* GPIO3 -CAMERA SENSOR 2 VDIG*/ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <0>; /* VIN1 GPIO_LV */ 
+ qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "ok"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi index 4b33381a5449253a22e608c32d5f2da5f3da8c78..a5e33514976fc952cbb74a55cd713269e1dbed95 100644 --- a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi @@ -46,6 +46,38 @@ status = "ok"; }; +&pm660_gpios { + /* GPIO 4 (NFC_CLK_REQ) */ + gpio@c300 { + qcom,mode = <0>; + qcom,vin-sel = <1>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + status = "okay"; + }; +}; + +&i2c_6 { /* BLSP1 QUP6 (NFC) */ + status = "okay"; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 28 0x00>; + qcom,nq-ven = <&tlmm 29 0x00>; + qcom,nq-firm = <&tlmm 30 0x00>; + qcom,nq-clkreq = <&pm660_gpios 4 0x00>; + qcom,nq-esepwr = <&tlmm 31 0x00>; + interrupt-parent = <&tlmm>; + qcom,clk-src = "BBCLK3"; + interrupts = <28 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; + clocks = <&clock_rpmcc RPM_LN_BB_CLK3_PIN>; + clock-names = "ref_clk"; + }; +}; &soc { qcom,msm-ssc-sensors { compatible = "qcom,msm-ssc-sensors"; diff --git a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi index d13e1dbfe82420b5abbe94eafbea8f1249324189..be36f1b8a6cb48ddcb4fc356581bb8f9bf983b65 100644 --- a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi @@ -47,6 +47,39 @@ status = "ok"; }; +&pm660_gpios { + /* GPIO 4 (NFC_CLK_REQ) */ + gpio@c300 { + qcom,mode = <0>; + qcom,vin-sel = <1>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + status = "okay"; + }; +}; + +&i2c_6 { /* BLSP1 QUP6 (NFC) */ + status = "okay"; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 28 0x00>; + qcom,nq-ven = <&tlmm 29 0x00>; + 
qcom,nq-firm = <&tlmm 30 0x00>; + qcom,nq-clkreq = <&pm660_gpios 4 0x00>; + qcom,nq-esepwr = <&tlmm 31 0x00>; + interrupt-parent = <&tlmm>; + qcom,clk-src = "BBCLK3"; + interrupts = <28 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; + clocks = <&clock_rpmcc RPM_LN_BB_CLK3_PIN>; + clock-names = "ref_clk"; + }; +}; + &mem_client_3_size { qcom,peripheral-size = <0x500000>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi index 52df459cedfbf50e331eee4d17ac6532dca5229c..8763f57f5ae89f85e88969050236a8ad0b0250d8 100644 --- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi @@ -12,6 +12,125 @@ #include "msm-pm660a.dtsi" #include "sdm660-pinctrl.dtsi" +#include "sdm630-camera-sensor-qrd.dtsi" + +&i2c_2 { + status = "okay"; + smb138x: qcom,smb138x@8 { + compatible = "qcom,i2c-pmic"; + reg = <0x8>; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&tlmm>; + interrupts = <21 0x0>; + interrupt_names = "smb138x"; + interrupt-controller; + #interrupt-cells = <3>; + qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>; + + smb138x_revid: qcom,revid@100 { + compatible = "qcom,qpnp-revid"; + reg = <0x100 0x100>; + }; + + smb138x_tadc: qcom,tadc@3600 { + compatible = "qcom,tadc"; + reg = <0x3600 0x100>; + #address-cells = <1>; + #size-cells = <0>; + #io-channel-cells = <1>; + interrupt-parent = <&smb138x>; + interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "eoc"; + + batt_temp@0 { + reg = <0>; + qcom,rbias = <68100>; + qcom,rtherm-at-25degc = <68000>; + qcom,beta-coefficient = <3450>; + }; + + skin_temp@1 { + reg = <1>; + qcom,rbias = <33000>; + qcom,rtherm-at-25degc = <68000>; + qcom,beta-coefficient = <3450>; + }; + + die_temp@2 { + reg = <2>; + qcom,scale = <(-1306)>; + qcom,offset = <397904>; + }; + + batt_i@3 { + reg = <3>; + 
qcom,channel = <3>; + qcom,scale = <(-20000000)>; + }; + + batt_v@4 { + reg = <4>; + qcom,scale = <5000000>; + }; + + input_i@5 { + reg = <5>; + qcom,scale = <14285714>; + }; + + input_v@6 { + reg = <6>; + qcom,scale = <25000000>; + }; + + otg_i@7 { + reg = <7>; + qcom,scale = <5714286>; + }; + }; + + smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 { + compatible = "qcom,smb138x-parallel-slave"; + qcom,pmic-revid = <&smb138x_revid>; + reg = <0x1000 0x700>; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&smb138x>; + io-channels = + <&smb138x_tadc 1>, + <&smb138x_tadc 2>, + <&smb138x_tadc 3>, + <&smb138x_tadc 14>, + <&smb138x_tadc 15>, + <&smb138x_tadc 16>, + <&smb138x_tadc 17>; + io-channel-names = + "connector_temp", + "charger_temp", + "batt_i", + "connector_temp_thr1", + "connector_temp_thr2", + "connector_temp_thr3", + "charger_temp_max"; + + qcom,chgr@1000 { + reg = <0x1000 0x100>; + interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "chg-state-change"; + }; + + qcom,chgr-misc@1600 { + reg = <0x1600 0x100>; + interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>, + <0x16 0x6 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog-bark", + "temperature-change"; + }; + }; + }; +}; + / { qrd_batterydata: qcom,battery-data { qcom,batt-id-range-pct = <15>; @@ -133,6 +252,37 @@ qcom,src-sel = <0>; qcom,out-strength = <1>; }; + + /* GPIO 4 (NFC_CLK_REQ) */ + gpio@c300 { + qcom,mode = <0>; + qcom,vin-sel = <1>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + status = "okay"; + }; +}; + +&i2c_6 { /* BLSP1 QUP6 (NFC) */ + status = "okay"; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 28 0x00>; + qcom,nq-ven = <&tlmm 29 0x00>; + qcom,nq-firm = <&tlmm 30 0x00>; + qcom,nq-clkreq = <&pm660_gpios 4 0x00>; + qcom,nq-esepwr = <&tlmm 31 0x00>; + interrupt-parent = <&tlmm>; + qcom,clk-src = "BBCLK3"; + interrupts = <28 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = 
<&nfc_int_active &nfc_enable_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; + clocks = <&clock_rpmcc RPM_LN_BB_CLK3_PIN>; + clock-names = "ref_clk"; + }; }; &soc { diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi index f44d59e021d2be749de5e9c0deccf99f2968e0ca..0425a338c51dc263cf50ec5aff825ec8885ac589 100644 --- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi @@ -145,7 +145,7 @@ cell-index = <1>; reg = <0x1>; compatible = "qcom,actuator"; - qcom,cci-master = <0>; + qcom,cci-master = <1>; cam_vaf-supply = <&cam_vaf_gpio_regulator>; qcom,cam-vreg-name = "cam_vaf"; qcom,cam-vreg-min-voltage = <2800000>; diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi index d4461395891a019656b35d348fc33bc490b48454..a9de658a064dc1d449b7bc3d0d86fb019355562f 100644 --- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi @@ -474,16 +474,34 @@ qcom,msm-bus,name = "sdhc1"; qcom,msm-bus,num-cases = <9>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */ - <78 512 1046 3200>, /* 400 KB/s*/ - <78 512 52286 160000>, /* 20 MB/s */ - <78 512 65360 200000>, /* 25 MB/s */ - <78 512 130718 400000>, /* 50 MB/s */ - <78 512 130718 400000>, /* 100 MB/s */ - <78 512 261438 800000>, /* 200 MB/s */ - <78 512 261438 800000>, /* 400 MB/s */ - <78 512 1338562 4096000>; /* Max. 
bandwidth */ + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <78 512 0 0>, <1 606 0 0>, + /* 400 KB/s*/ + <78 512 1046 3200>, + <1 606 1046 3200>, + /* 20 MB/s */ + <78 512 52286 160000>, + <1 606 52286 160000>, + /* 25 MB/s */ + <78 512 65360 200000>, + <1 606 65360 200000>, + /* 50 MB/s */ + <78 512 130718 400000>, + <1 606 130718 400000>, + /* 100 MB/s */ + <78 512 130718 400000>, + <1 606 130718 400000>, + /* 200 MB/s */ + <78 512 261438 800000>, + <1 606 261438 800000>, + /* 400 MB/s */ + <78 512 261438 800000>, + <1 606 261438 800000>, + /* Max. bandwidth */ + <78 512 1338562 4096000>, + <1 606 1338562 4096000>; qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 400000000 4294967295>; @@ -516,15 +534,31 @@ qcom,msm-bus,name = "sdhc2"; qcom,msm-bus,num-cases = <8>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */ - <81 512 1046 3200>, /* 400 KB/s */ - <81 512 52286 160000>, /* 20 MB/s */ - <81 512 65360 200000>, /* 25 MB/s */ - <81 512 130718 400000>, /* 50 MB/s */ - <81 512 261438 800000>, /* 100 MB/s */ - <81 512 261438 800000>, /* 200 MB/s */ - <81 512 1338562 4096000>; /* Max. bandwidth */ + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <81 512 0 0>, <1 608 0 0>, + /* 400 KB/s */ + <81 512 1046 3200>, + <1 608 1046 3200>, + /* 20 MB/s */ + <81 512 52286 160000>, + <1 608 52286 160000>, + /* 25 MB/s */ + <81 512 65360 200000>, + <1 608 65360 200000>, + /* 50 MB/s */ + <81 512 130718 400000>, + <1 608 130718 400000>, + /* 100 MB/s */ + <81 512 261438 800000>, + <1 608 261438 800000>, + /* 200 MB/s */ + <81 512 261438 800000>, + <1 608 261438 800000>, + /* Max. 
bandwidth */ + <81 512 1338562 4096000>, + <1 608 1338562 4096000>; qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>; diff --git a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi index c6e2f431a7c97840739962269f50be41d0c04d80..57565134788c6bdedbd98a93c186a87b9a6ee3db 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi @@ -404,13 +404,14 @@ nfc_enable_active: nfc_enable_active { /* active state */ mux { - /* 29: NFC ENABLE 31:ESE Enable */ - pins = "gpio29", "gpio31"; + /* 29: NFC ENABLE 30:FW DNLD */ + /* 31:ESE Enable */ + pins = "gpio29", "gpio30", "gpio31"; function = "gpio"; }; config { - pins = "gpio29", "gpio31"; + pins = "gpio29", "gpio30", "gpio31"; drive-strength = <2>; /* 2 MA */ bias-pull-up; }; @@ -419,13 +420,14 @@ nfc_enable_suspend: nfc_enable_suspend { /* sleep state */ mux { - /* 29: NFC ENABLE 31:ESE Enable */ - pins = "gpio29", "gpio31"; + /* 29: NFC ENABLE 30:FW DNLD */ + /* 31:ESE Enable */ + pins = "gpio29", "gpio30", "gpio31"; function = "gpio"; }; config { - pins = "gpio29", "gpio31"; + pins = "gpio29", "gpio30", "gpio31"; drive-strength = <2>; /* 2 MA */ bias-disable; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi index b2f6ac722dfc331afd93fae3828e44062f8a0021..65741e0df3ba2bd4333d21e430393ce8b8d6b139 100644 --- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi @@ -235,7 +235,6 @@ &pm660_fg { qcom,battery-data = <&qrd_batterydata>; qcom,fg-jeita-thresholds = <0 5 55 55>; - qcom,fg-cutoff-voltage = <3700>; }; &i2c_2 { diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi index 2d44c69ea8275f0e1139a06aca82dd8526a9b462..c144492895947e14b9dbea17114b016f0eee7264 100644 --- a/arch/arm/boot/dts/qcom/sdm660.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660.dtsi @@ -1329,16 +1329,34 @@ qcom,msm-bus,name = 
"sdhc1"; qcom,msm-bus,num-cases = <9>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */ - <78 512 1046 3200>, /* 400 KB/s*/ - <78 512 52286 160000>, /* 20 MB/s */ - <78 512 65360 200000>, /* 25 MB/s */ - <78 512 130718 400000>, /* 50 MB/s */ - <78 512 130718 400000>, /* 100 MB/s */ - <78 512 261438 800000>, /* 200 MB/s */ - <78 512 261438 800000>, /* 400 MB/s */ - <78 512 1338562 4096000>; /* Max. bandwidth */ + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <78 512 0 0>, <1 606 0 0>, + /* 400 KB/s*/ + <78 512 1046 3200>, + <1 606 1046 3200>, + /* 20 MB/s */ + <78 512 52286 160000>, + <1 606 52286 160000>, + /* 25 MB/s */ + <78 512 65360 200000>, + <1 606 65360 200000>, + /* 50 MB/s */ + <78 512 130718 400000>, + <1 606 130718 400000>, + /* 100 MB/s */ + <78 512 130718 400000>, + <1 606 130718 400000>, + /* 200 MB/s */ + <78 512 261438 800000>, + <1 606 261438 800000>, + /* 400 MB/s */ + <78 512 261438 800000>, + <1 606 261438 800000>, + /* Max. bandwidth */ + <78 512 1338562 4096000>, + <1 606 1338562 4096000>; qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 400000000 4294967295>; @@ -1364,15 +1382,31 @@ qcom,msm-bus,name = "sdhc2"; qcom,msm-bus,num-cases = <8>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */ - <81 512 1046 3200>, /* 400 KB/s */ - <81 512 52286 160000>, /* 20 MB/s */ - <81 512 65360 200000>, /* 25 MB/s */ - <81 512 130718 400000>, /* 50 MB/s */ - <81 512 261438 800000>, /* 100 MB/s */ - <81 512 261438 800000>, /* 200 MB/s */ - <81 512 1338562 4096000>; /* Max. 
bandwidth */ + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <81 512 0 0>, <1 608 0 0>, + /* 400 KB/s */ + <81 512 1046 3200>, + <1 608 1046 3200>, + /* 20 MB/s */ + <81 512 52286 160000>, + <1 608 52286 160000>, + /* 25 MB/s */ + <81 512 65360 200000>, + <1 608 65360 200000>, + /* 50 MB/s */ + <81 512 130718 400000>, + <1 608 130718 400000>, + /* 100 MB/s */ + <81 512 261438 800000>, + <1 608 261438 800000>, + /* 200 MB/s */ + <81 512 261438 800000>, + <1 608 261438 800000>, + /* Max. bandwidth */ + <81 512 1338562 4096000>, + <1 608 1338562 4096000>; qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>; diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi index 81f81214cdf9580a0cb19ce01d8bfc5e01417252..bbf95375cf99876cf5a465b955046aca4419076d 100644 --- a/arch/arm/boot/dts/stih407-family.dtsi +++ b/arch/arm/boot/dts/stih407-family.dtsi @@ -497,8 +497,9 @@ interrupt-names = "mmcirq"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mmc0>; - clock-names = "mmc"; - clocks = <&clk_s_c0_flexgen CLK_MMC_0>; + clock-names = "mmc", "icn"; + clocks = <&clk_s_c0_flexgen CLK_MMC_0>, + <&clk_s_c0_flexgen CLK_RX_ICN_HVA>; bus-width = <8>; non-removable; }; @@ -512,8 +513,9 @@ interrupt-names = "mmcirq"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sd1>; - clock-names = "mmc"; - clocks = <&clk_s_c0_flexgen CLK_MMC_1>; + clock-names = "mmc", "icn"; + clocks = <&clk_s_c0_flexgen CLK_MMC_1>, + <&clk_s_c0_flexgen CLK_RX_ICN_HVA>; resets = <&softreset STIH407_MMC1_SOFTRESET>; bus-width = <4>; }; diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 18ed1ad10d32bbb251746e3011c541f9cfcbc861..40318869c733668a15e3d318ff291c3221336125 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi @@ -41,7 +41,8 @@ compatible = "st,st-ohci-300x"; reg = <0x9a03c00 0x100>; interrupts = ; - clocks = <&clk_s_c0_flexgen 
CLK_TX_ICN_DISP_0>; + clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>, + <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>; resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>, <&softreset STIH407_USB2_PORT0_SOFTRESET>; reset-names = "power", "softreset"; @@ -57,7 +58,8 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb0>; - clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; + clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>, + <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>; resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>, <&softreset STIH407_USB2_PORT0_SOFTRESET>; reset-names = "power", "softreset"; @@ -71,7 +73,8 @@ compatible = "st,st-ohci-300x"; reg = <0x9a83c00 0x100>; interrupts = ; - clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; + clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>, + <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>; resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>, <&softreset STIH407_USB2_PORT1_SOFTRESET>; reset-names = "power", "softreset"; @@ -87,7 +90,8 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb1>; - clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; + clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>, + <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>; resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>, <&softreset STIH407_USB2_PORT1_SOFTRESET>; reset-names = "power", "softreset"; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index d910d3a6c41c573c83e6c20898cabf072a79ae9b..84bdba480d5af40c9df19899a6247bdf0ea19b9e 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -83,7 +83,7 @@ trips { cpu_alert0: cpu_alert0 { /* milliCelsius */ - temperature = <850000>; + temperature = <85000>; hysteresis = <2000>; type = "passive"; }; diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 3d224941b5419e88a25aae2ae70632dba047606c..a3a9ad4dc3c66719413dc72ec07425023f48b800 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -869,9 +869,9 @@ struct 
sa1111_save_data { #ifdef CONFIG_PM -static int sa1111_suspend(struct platform_device *dev, pm_message_t state) +static int sa1111_suspend_noirq(struct device *dev) { - struct sa1111 *sachip = platform_get_drvdata(dev); + struct sa1111 *sachip = dev_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags; unsigned int val; @@ -934,9 +934,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state) * restored by their respective drivers, and must be called * via LDM after this function. */ -static int sa1111_resume(struct platform_device *dev) +static int sa1111_resume_noirq(struct device *dev) { - struct sa1111 *sachip = platform_get_drvdata(dev); + struct sa1111 *sachip = dev_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags, id; void __iomem *base; @@ -952,7 +952,7 @@ static int sa1111_resume(struct platform_device *dev) id = sa1111_readl(sachip->base + SA1111_SKID); if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { __sa1111_remove(sachip); - platform_set_drvdata(dev, NULL); + dev_set_drvdata(dev, NULL); kfree(save); return 0; } @@ -1003,8 +1003,8 @@ static int sa1111_resume(struct platform_device *dev) } #else -#define sa1111_suspend NULL -#define sa1111_resume NULL +#define sa1111_suspend_noirq NULL +#define sa1111_resume_noirq NULL #endif static int sa1111_probe(struct platform_device *pdev) @@ -1038,6 +1038,11 @@ static int sa1111_remove(struct platform_device *pdev) return 0; } +static struct dev_pm_ops sa1111_pm_ops = { + .suspend_noirq = sa1111_suspend_noirq, + .resume_noirq = sa1111_resume_noirq, +}; + /* * Not sure if this should be on the system bus or not yet. 
* We really want some way to register a system device at @@ -1050,10 +1055,9 @@ static int sa1111_remove(struct platform_device *pdev) static struct platform_driver sa1111_device_driver = { .probe = sa1111_probe, .remove = sa1111_remove, - .suspend = sa1111_suspend, - .resume = sa1111_resume, .driver = { .name = "sa1111", + .pm = &sa1111_pm_ops, }, }; diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..35a90af941a4fbb9193cba380f3ab4ed89d659e3 --- /dev/null +++ b/arch/arm/configs/ranchu_defconfig @@ -0,0 +1,315 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_ARCH_MMAP_RND_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_VIRT=y +CONFIG_ARM_KERNMEM_PERMS=y +CONFIG_SMP=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_VFP=y +CONFIG_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y 
+CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y 
+CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y 
+CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y 
+CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_VIRTUALIZATION=y diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig index a6a831aba9dcb1938af2babc79b39da4ec1ba5e1..ba22d7864d1c239e4bf32472e3f3220ab80030a9 100644 --- a/arch/arm/configs/sdm660_defconfig +++ b/arch/arm/configs/sdm660_defconfig @@ -233,6 +233,7 @@ CONFIG_NFC_NQ=y CONFIG_IPC_ROUTER=y CONFIG_IPC_ROUTER_SECURITY=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index b445a5d56f4342b71540e40c1ce298e0ad786688..593da7ffb449de02acf399a922ea1105225b7afc 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } - if (nbytes) { + if (walk.nbytes % AES_BLOCK_SIZE) { u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; u8 __aligned(8) tail[AES_BLOCK_SIZE]; diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 03a39fe2924648f91c45224c84d1a883e433a6dc..9d9ba9acdddcd3622131270679a7f72e2fa0901a 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -226,6 +226,27 @@ static int ghash_async_digest(struct ahash_request *req) } } +static int ghash_async_import(struct 
ahash_request *req, const void *in) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); + + desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm); + desc->flags = req->base.flags; + + return crypto_shash_import(desc, in); +} + +static int ghash_async_export(struct ahash_request *req, void *out) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); + + return crypto_shash_export(desc, out); +} + static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -274,7 +295,10 @@ static struct ahash_alg ghash_async_alg = { .final = ghash_async_final, .setkey = ghash_async_setkey, .digest = ghash_async_digest, + .import = ghash_async_import, + .export = ghash_async_export, .halg.digestsize = GHASH_DIGEST_SIZE, + .halg.statesize = sizeof(struct ghash_desc_ctx), .halg.base = { .cra_name = "ghash", .cra_driver_name = "ghash-ce", diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h index 0f842492490280b9d0b50fd1b25935a51b5a4c9d..3848259bebf85786d39d4212d74f2e1646b98a44 100644 --- a/arch/arm/include/asm/cpuidle.h +++ b/arch/arm/include/asm/cpuidle.h @@ -30,7 +30,7 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct device_node; struct cpuidle_ops { - int (*suspend)(int cpu, unsigned long arg); + int (*suspend)(unsigned long arg); int (*init)(struct device_node *, int cpu); }; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 05ff03aead43d1157031c5736210a90f2ac36370..c8bfa1aabd6af469576aec79d665ef8ddec51412 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -120,7 +120,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) /* The ARM override for 
dma_max_pfn() */ static inline unsigned long dma_max_pfn(struct device *dev) { - return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask); + return dma_to_pfn(dev, *dev->dma_mask); } #define dma_max_pfn(dev) dma_max_pfn(dev) diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h index f4882553fbb0125927776c0f3a032625937649c6..85a34cc8316ace1b906347fc1de1e79adbd0cb02 100644 --- a/arch/arm/include/asm/floppy.h +++ b/arch/arm/include/asm/floppy.h @@ -17,7 +17,7 @@ #define fd_outb(val,port) \ do { \ - if ((port) == FD_DOR) \ + if ((port) == (u32)FD_DOR) \ fd_setdor((val)); \ else \ outb((val),(port)); \ diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index dc641ddf0784304434635080b5732ee215ab588f..e22089fb44dc86b7ed2fdb175bc6ec7b47ee4001 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -19,6 +19,7 @@ #ifndef __ARM_KVM_ARM_H__ #define __ARM_KVM_ARM_H__ +#include #include /* Hyp Configuration Register (HCR) bits */ @@ -132,10 +133,9 @@ * space. 
*/ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) -#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) -#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) +#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30)) /* Virtualization Translation Control Register (VTCR) bits */ #define VTCR_SH0 (3 << 12) @@ -162,17 +162,17 @@ #define VTTBR_X (5 - KVM_T0SZ) #endif #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) -#define VTTBR_VMID_SHIFT (48LLU) -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) +#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT _AC(48, ULL) +#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) /* Hyp Syndrome Register (HSR) bits */ #define HSR_EC_SHIFT (26) -#define HSR_EC (0x3fU << HSR_EC_SHIFT) -#define HSR_IL (1U << 25) +#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT) +#define HSR_IL (_AC(1, UL) << 25) #define HSR_ISS (HSR_IL - 1) #define HSR_ISV_SHIFT (24) -#define HSR_ISV (1U << HSR_ISV_SHIFT) +#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT) #define HSR_SRT_SHIFT (16) #define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT) #define HSR_FSC (0x3f) @@ -180,9 +180,9 @@ #define HSR_SSE (1 << 21) #define HSR_WNR (1 << 6) #define HSR_CV_SHIFT (24) -#define HSR_CV (1U << HSR_CV_SHIFT) +#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT) #define HSR_COND_SHIFT (20) -#define HSR_COND (0xfU << HSR_COND_SHIFT) +#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT) #define FSC_FAULT (0x04) #define FSC_ACCESS (0x08) @@ -210,13 +210,13 @@ #define HSR_EC_DABT (0x24) #define HSR_EC_DABT_HYP (0x25) -#define HSR_WFI_IS_WFE (1U << 0) +#define HSR_WFI_IS_WFE (_AC(1, UL) << 0) -#define HSR_HVC_IMM_MASK ((1UL << 16) - 1) +#define 
HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1) -#define HSR_DABT_S1PTW (1U << 7) -#define HSR_DABT_CM (1U << 8) -#define HSR_DABT_EA (1U << 9) +#define HSR_DABT_S1PTW (_AC(1, UL) << 7) +#define HSR_DABT_CM (_AC(1, UL) << 8) +#define HSR_DABT_EA (_AC(1, UL) << 9) #define kvm_arm_exception_type \ {0, "RESET" }, \ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 6692982c9b575db476bc12a37e2d69b27c00d9fb..bedaf65c0ff96fa3f7d13938f59ed1faa9c94033 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -214,6 +214,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + +static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr, + phys_addr_t phys_idmap_start) +{ + /* + * TODO + * kvm_call_reset(boot_pgd_ptr, phys_idmap_start); + */ +} + static inline int kvm_arch_dev_ioctl_check_extension(long ext) { return 0; @@ -226,7 +239,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 405aa18833073b88b52103363b54d534d9c69263..c7ba9a42e85720dcbf78dae2ef3698f8e7b0c851 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -66,6 +66,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_mmu_get_boot_httbr(void); phys_addr_t kvm_get_idmap_vector(void); +phys_addr_t kvm_get_idmap_start(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); @@ -279,6 +280,11 @@ static inline void __kvm_extend_hypmap(pgd_t 
*boot_hyp_pgd, pgd_t *merged_hyp_pgd, unsigned long hyp_idmap_start) { } +static inline unsigned int kvm_get_vmid_bits(void) +{ + return 8; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index fd929b5ded9e2eb1cb313af62451d0efdd4e2ede..9459ca85bd20c6301a490d2ff9a96446abd550c2 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -250,6 +250,7 @@ PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING); PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY); PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY); +PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY); PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 51622ba7c4a66a91494ecc49be4520fd4b14df88..d3c0c23703b6f90cd2863323462dfbb8da9f13e6 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -121,7 +121,6 @@ extern unsigned long profile_pc(struct pt_regs *regs); #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) extern int regs_query_register_offset(const char *name); -extern const char *regs_query_register_name(unsigned int offset); extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr); extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 4371f45c578401c7f233e565dbb5fd36d9d59d52..d4ceaf5f299b8d03d6e638a3ea5f72fb5b60a93e 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void) { return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } + +static inline bool is_kernel_in_hyp_mode(void) +{ + return false; +} + +/* The section containing the hypervisor text */ +extern char 
__hyp_text_start[]; +extern char __hyp_text_end[]; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index af9e59bf3831b9fd648d095c2070209b1e97677d..80856def246518e05015591f73d3bd0b4993bf8b 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -88,8 +88,9 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o ifeq ($(CONFIG_ARM_PSCI),y) -obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif +obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o + extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index f89811fb9a55f3a490c3633ef99ef52745c58129..7e45f69a0ddc9d0ef3b04c145195cf1b4ad7d84b 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc); EXPORT_SYMBOL(__pv_phys_pfn_offset); EXPORT_SYMBOL(__pv_offset); #endif + +#ifdef CONFIG_HAVE_ARM_SMCCC +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); +#endif diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c index 318da33465f413f8c207eb2dd9b3766e53ab5ad3..703926e7007b4e0000b689006722780a02588e04 100644 --- a/arch/arm/kernel/cpuidle.c +++ b/arch/arm/kernel/cpuidle.c @@ -56,7 +56,7 @@ int arm_cpuidle_suspend(int index) int cpu = smp_processor_id(); if (cpuidle_ops[cpu].suspend) - ret = cpuidle_ops[cpu].suspend(cpu, index); + ret = cpuidle_ops[cpu].suspend(index); return ret; } diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index e94422d0405c8669f92b90115839207434f7c421..da3cafbd682b8e6f418da379c2a80898af3f89fd 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -87,6 +87,7 @@ void __init arm_dt_init_cpu_maps(void) return; for_each_child_of_node(cpus, cpu) { + int prop_bytes; u32 hwid; const __be32 *cell; @@ -107,10 +108,15 @@ void __init arm_dt_init_cpu_maps(void) } hwid = 
of_read_number(cell, of_n_addr_cells(cpu)); /* - * 8 MSBs must be set to 0 in the DT since the reg property + * Bits n:24 must be set to 0 in the DT since the reg property * defines the MPIDR[23:0]. */ - if (hwid & ~MPIDR_HWID_BITMASK) { + do { + hwid = be32_to_cpu(*cell++); + prop_bytes -= sizeof(*cell); + } while (!hwid && prop_bytes > 0); + + if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) { of_node_put(cpu); return; } diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S deleted file mode 100644 index a78e9e1e206dee03c7f16e67a30eb9a74159e9ab..0000000000000000000000000000000000000000 --- a/arch/arm/kernel/psci-call.S +++ /dev/null @@ -1,31 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2015 ARM Limited - * - * Author: Mark Rutland - */ - -#include - -#include -#include - -/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */ -ENTRY(__invoke_psci_fn_hvc) - __HVC(0) - bx lr -ENDPROC(__invoke_psci_fn_hvc) - -/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */ -ENTRY(__invoke_psci_fn_smc) - __SMC(0) - bx lr -ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S new file mode 100644 index 0000000000000000000000000000000000000000..2e48b674aab190563d321902be9919167d8f80cd --- /dev/null +++ b/arch/arm/kernel/smccc-call.S @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#include +#include +#include + + /* + * Wrap c macros in asm macros to delay expansion until after the + * SMCCC asm macro is expanded. 
+ */ + .macro SMCCC_SMC + __SMC(0) + .endm + + .macro SMCCC_HVC + __HVC(0) + .endm + + .macro SMCCC instr +UNWIND( .fnstart) + mov r12, sp + push {r4-r7} +UNWIND( .save {r4-r7}) + ldm r12, {r4-r7} + \instr + pop {r4-r7} + ldr r12, [sp, #(4 * 4)] + stm r12, {r0-r3} + bx lr +UNWIND( .fnend) + .endm + +/* + * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_smc) + SMCCC SMCCC_SMC +ENDPROC(arm_smccc_smc) + +/* + * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_hvc) + SMCCC SMCCC_HVC +ENDPROC(arm_smccc_hvc) diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 70e6d557c75f696e537fe2a48bb4cc55721a6f8c..4cddf20cdb824394d5368bb5cd10a4eadff2d06c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -16,7 +16,6 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -#include #include #include #include @@ -44,6 +43,7 @@ #include #include #include +#include #ifdef REQUIRES_VIRT __asm__(".arch_extension virt"); @@ -58,9 +58,14 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); /* The VMID used in the VTTBR */ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); -static u8 kvm_next_vmid; +static u32 kvm_next_vmid; +static unsigned int kvm_vmid_bits __read_mostly; static DEFINE_SPINLOCK(kvm_vmid_lock); +static bool vgic_present; + +static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); + static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) { BUG_ON(preemptible()); @@ -85,11 +90,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) return &kvm_arm_running_vcpu; } -int kvm_arch_hardware_enable(void) -{ - return 0; -} - int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; @@ -132,7 +132,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.vmid_gen = 0; /* The maximum number of VCPUs is limited by the host's GIC model */ - kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus(); + kvm->arch.max_vcpus = vgic_present ? 
+ kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS; return ret; out_free_stage2_pgd: @@ -155,8 +156,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { int i; - kvm_free_stage2_pgd(kvm); - for (i = 0; i < KVM_MAX_VCPUS; ++i) { if (kvm->vcpus[i]) { kvm_arch_vcpu_free(kvm->vcpus[i]); @@ -172,6 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) int r; switch (ext) { case KVM_CAP_IRQCHIP: + r = vgic_present; + break; case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_USER_MEMORY: @@ -433,11 +434,12 @@ static void update_vttbr(struct kvm *kvm) kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); kvm->arch.vmid = kvm_next_vmid; kvm_next_vmid++; + kvm_next_vmid &= (1 << kvm_vmid_bits) - 1; /* update vttbr to be used with the new vmid */ pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm)); BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); - vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); kvm->arch.vttbr = pgd_phys | vmid; spin_unlock(&kvm_vmid_lock); @@ -913,6 +915,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, switch (dev_id) { case KVM_ARM_DEVICE_VGIC_V2: + if (!vgic_present) + return -ENXIO; return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); default: return -ENODEV; @@ -927,6 +931,8 @@ long kvm_arch_vm_ioctl(struct file *filp, switch (ioctl) { case KVM_CREATE_IRQCHIP: { + if (!vgic_present) + return -ENXIO; return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); } case KVM_ARM_SET_DEVICE_ADDR: { @@ -972,40 +978,96 @@ static void cpu_init_hyp_mode(void *dummy) vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector); __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); + __cpu_init_stage2(); kvm_arm_init_debug(); } -static int hyp_init_cpu_notify(struct notifier_block *self, - unsigned long action, void *cpu) +static void cpu_hyp_reinit(void) { - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: + if 
(is_kernel_in_hyp_mode()) { + /* + * __cpu_init_stage2() is safe to call even if the PM + * event was cancelled before the CPU was reset. + */ + __cpu_init_stage2(); + } else { if (__hyp_get_vectors() == hyp_default_vectors) cpu_init_hyp_mode(NULL); - break; } +} + +static void cpu_hyp_reset(void) +{ + phys_addr_t boot_pgd_ptr; + phys_addr_t phys_idmap_start; + + if (!is_kernel_in_hyp_mode()) { + boot_pgd_ptr = kvm_mmu_get_boot_httbr(); + phys_idmap_start = kvm_get_idmap_start(); - return NOTIFY_OK; + __cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start); + } } -static struct notifier_block hyp_init_cpu_nb = { - .notifier_call = hyp_init_cpu_notify, -}; +static void _kvm_arch_hardware_enable(void *discard) +{ + if (!__this_cpu_read(kvm_arm_hardware_enabled)) { + cpu_hyp_reinit(); + __this_cpu_write(kvm_arm_hardware_enabled, 1); + } +} + +int kvm_arch_hardware_enable(void) +{ + _kvm_arch_hardware_enable(NULL); + return 0; +} + +static void _kvm_arch_hardware_disable(void *discard) +{ + if (__this_cpu_read(kvm_arm_hardware_enabled)) { + cpu_hyp_reset(); + __this_cpu_write(kvm_arm_hardware_enabled, 0); + } +} + +void kvm_arch_hardware_disable(void) +{ + _kvm_arch_hardware_disable(NULL); +} #ifdef CONFIG_CPU_PM static int hyp_init_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { - if (cmd == CPU_PM_EXIT && - __hyp_get_vectors() == hyp_default_vectors) { - cpu_init_hyp_mode(NULL); + /* + * kvm_arm_hardware_enabled is left with its old value over + * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should + * re-enable hyp. + */ + switch (cmd) { + case CPU_PM_ENTER: + if (__this_cpu_read(kvm_arm_hardware_enabled)) + /* + * don't update kvm_arm_hardware_enabled here + * so that the hardware will be re-enabled + * when we resume. See below. + */ + cpu_hyp_reset(); + return NOTIFY_OK; - } + case CPU_PM_EXIT: + if (__this_cpu_read(kvm_arm_hardware_enabled)) + /* The hardware was enabled before suspend. 
*/ + cpu_hyp_reinit(); - return NOTIFY_DONE; + return NOTIFY_OK; + + default: + return NOTIFY_DONE; + } } static struct notifier_block hyp_init_cpu_pm_nb = { @@ -1022,6 +1084,91 @@ static inline void hyp_cpu_pm_init(void) } #endif +static void teardown_common_resources(void) +{ + free_percpu(kvm_host_cpu_state); +} + +static int init_common_resources(void) +{ + kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); + if (!kvm_host_cpu_state) { + kvm_err("Cannot allocate host CPU state\n"); + return -ENOMEM; + } + + return 0; +} + +static int init_subsystems(void) +{ + int err = 0; + + /* + * Enable hardware so that subsystem initialisation can access EL2. + */ + on_each_cpu(_kvm_arch_hardware_enable, NULL, 1); + + /* + * Register CPU lower-power notifier + */ + hyp_cpu_pm_init(); + + /* + * Init HYP view of VGIC + */ + err = kvm_vgic_hyp_init(); + switch (err) { + case 0: + vgic_present = true; + break; + case -ENODEV: + case -ENXIO: + vgic_present = false; + err = 0; + break; + default: + goto out; + } + + /* + * Init HYP architected timer support + */ + err = kvm_timer_hyp_init(); + if (err) + goto out; + + kvm_perf_init(); + kvm_coproc_table_init(); + +out: + on_each_cpu(_kvm_arch_hardware_disable, NULL, 1); + + return err; +} + +static void teardown_hyp_mode(void) +{ + int cpu; + + if (is_kernel_in_hyp_mode()) + return; + + free_hyp_pgds(); + for_each_possible_cpu(cpu) + free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); +} + +static int init_vhe_mode(void) +{ + /* set size of VMID supported by CPU */ + kvm_vmid_bits = kvm_get_vmid_bits(); + kvm_info("%d-bit VMID\n", kvm_vmid_bits); + + kvm_info("VHE mode initialized successfully\n"); + return 0; +} + /** * Inits Hyp-mode on all online CPUs */ @@ -1052,7 +1199,7 @@ static int init_hyp_mode(void) stack_page = __get_free_page(GFP_KERNEL); if (!stack_page) { err = -ENOMEM; - goto out_free_stack_pages; + goto out_err; } per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; @@ -1065,7 +1212,14 @@ static int 
init_hyp_mode(void) kvm_ksym_ref(__kvm_hyp_code_end)); if (err) { kvm_err("Cannot map world-switch code\n"); - goto out_free_mappings; + goto out_err; + } + + err = create_hyp_mappings(kvm_ksym_ref(__start_rodata), + kvm_ksym_ref(__end_rodata)); + if (err) { + kvm_err("Cannot map rodata section\n"); + goto out_err; } /* @@ -1077,20 +1231,10 @@ static int init_hyp_mode(void) if (err) { kvm_err("Cannot map hyp stack\n"); - goto out_free_mappings; + goto out_err; } } - /* - * Map the host CPU structures - */ - kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); - if (!kvm_host_cpu_state) { - err = -ENOMEM; - kvm_err("Cannot allocate host CPU state\n"); - goto out_free_mappings; - } - for_each_possible_cpu(cpu) { kvm_cpu_context_t *cpu_ctxt; @@ -1099,46 +1243,24 @@ static int init_hyp_mode(void) if (err) { kvm_err("Cannot map host CPU state: %d\n", err); - goto out_free_context; + goto out_err; } } - /* - * Execute the init code on each CPU. - */ - on_each_cpu(cpu_init_hyp_mode, NULL, 1); - - /* - * Init HYP view of VGIC - */ - err = kvm_vgic_hyp_init(); - if (err) - goto out_free_context; - - /* - * Init HYP architected timer support - */ - err = kvm_timer_hyp_init(); - if (err) - goto out_free_context; - #ifndef CONFIG_HOTPLUG_CPU free_boot_hyp_pgd(); #endif - kvm_perf_init(); + /* set size of VMID supported by CPU */ + kvm_vmid_bits = kvm_get_vmid_bits(); + kvm_info("%d-bit VMID\n", kvm_vmid_bits); kvm_info("Hyp mode initialized successfully\n"); return 0; -out_free_context: - free_percpu(kvm_host_cpu_state); -out_free_mappings: - free_hyp_pgds(); -out_free_stack_pages: - for_each_possible_cpu(cpu) - free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); + out_err: + teardown_hyp_mode(); kvm_err("error initializing Hyp mode: %d\n", err); return err; } @@ -1182,26 +1304,27 @@ int kvm_arch_init(void *opaque) } } - cpu_notifier_register_begin(); - - err = init_hyp_mode(); + err = init_common_resources(); if (err) - goto out_err; + return err; - err = 
__register_cpu_notifier(&hyp_init_cpu_nb); - if (err) { - kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); + if (is_kernel_in_hyp_mode()) + err = init_vhe_mode(); + else + err = init_hyp_mode(); + if (err) goto out_err; - } - - cpu_notifier_register_done(); - hyp_cpu_pm_init(); + err = init_subsystems(); + if (err) + goto out_hyp; - kvm_coproc_table_init(); return 0; + +out_hyp: + teardown_hyp_mode(); out_err: - cpu_notifier_register_done(); + teardown_common_resources(); return err; } diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index d6c005283678fe5061a50cc8f5efd1febcc0f27b..dc99159857b4ae70d7d3785b75a1c3f0f8639906 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) return vbar; } +/* + * Switch to an exception mode, updating both CPSR and SPSR. Follow + * the logic described in AArch32.EnterMode() from the ARMv8 ARM. + */ +static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) +{ + unsigned long cpsr = *vcpu_cpsr(vcpu); + u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; + + *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; + + switch (mode) { + case FIQ_MODE: + *vcpu_cpsr(vcpu) |= PSR_F_BIT; + /* Fall through */ + case ABT_MODE: + case IRQ_MODE: + *vcpu_cpsr(vcpu) |= PSR_A_BIT; + /* Fall through */ + default: + *vcpu_cpsr(vcpu) |= PSR_I_BIT; + } + + *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); + + if (sctlr & SCTLR_TE) + *vcpu_cpsr(vcpu) |= PSR_T_BIT; + if (sctlr & SCTLR_EE) + *vcpu_cpsr(vcpu) |= PSR_E_BIT; + + /* Note: These now point to the mode banked copies */ + *vcpu_spsr(vcpu) = cpsr; +} + /** * kvm_inject_undefined - inject an undefined exception into the guest * @vcpu: The VCPU to receive the undefined exception @@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) */ void kvm_inject_undefined(struct kvm_vcpu *vcpu) { - unsigned long new_lr_value; - unsigned long new_spsr_value; unsigned long cpsr 
= *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; bool is_thumb = (cpsr & PSR_T_BIT); u32 vect_offset = 4; u32 return_offset = (is_thumb) ? 2 : 4; - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) - return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to UND banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; + kvm_update_psr(vcpu, UND_MODE); + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset; /* Branch to exception vector */ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; @@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) */ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) { - unsigned long new_lr_value; - unsigned long new_spsr_value; unsigned long cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; bool is_thumb = (cpsr & PSR_T_BIT); u32 vect_offset; u32 return_offset = (is_thumb) ? 
4 : 0; bool is_lpae; - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) + return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to ABT banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; + kvm_update_psr(vcpu, ABT_MODE); + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; if (is_pabt) vect_offset = 12; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 12d727fae0a7592d3d4ca947ea0507d2fb01c63d..767872411d97bbafddd9d4ee7c255db61a36c5c6 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "trace.h" @@ -598,6 +599,9 @@ int create_hyp_mappings(void *from, void *to) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + start = start & PAGE_MASK; end = PAGE_ALIGN(end); @@ -630,6 +634,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + /* Check for a valid kernel IO mapping */ if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) return -EINVAL; @@ -656,9 +663,9 @@ static void *kvm_alloc_hwpgd(void) * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. * @kvm: The KVM struct pointer for the VM. * - * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can - * support either full 40-bit input addresses or limited to 32-bit input - * addresses). Clears the allocated pages. 
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full + * 40-bit input addresses or limited to 32-bit input addresses). Clears the + * allocated pages. * * Note we don't need locking here as this is only called when the VM is * created, which can only be done once. @@ -1648,6 +1655,11 @@ phys_addr_t kvm_get_idmap_vector(void) return hyp_idmap_vector; } +phys_addr_t kvm_get_idmap_start(void) +{ + return hyp_idmap_start; +} + int kvm_mmu_init(void) { int err; @@ -1852,6 +1864,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) void kvm_arch_flush_shadow_all(struct kvm *kvm) { + kvm_free_stage2_pgd(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 4470376af5f815fd5f8f2d12c5e7d18306675bb4..a19d20f23e716678625c0963a52b0c00cf0b7b4e 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode) val &= ~BM_CLPCR_SBYOS; if (cpu_is_imx6sl()) val |= BM_CLPCR_BYPASS_PMIC_READY; - if (cpu_is_imx6sl() || cpu_is_imx6sx()) + if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul()) val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; else val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS; @@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode) val |= 0x3 << BP_CLPCR_STBY_COUNT; val |= BM_CLPCR_VSTBY; val |= BM_CLPCR_SBYOS; - if (cpu_is_imx6sl()) + if (cpu_is_imx6sl() || cpu_is_imx6sx()) val |= BM_CLPCR_BYPASS_PMIC_READY; if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul()) val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index 907a452b78ea240b07d061993d4355b0dbde01a4..b31ad596be795454817242daefddf29179feef3d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -1474,6 +1474,7 @@ static 
void omap_hwmod_am43xx_rst(void) { RSTCTRL(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTCTRL_OFFSET); RSTCTRL(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTCTRL_OFFSET); + RSTST(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTST_OFFSET); RSTST(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTST_OFFSET); } diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index aff78d5198d21cad56f0986814ea161485374de3..131f8967589b723cbcb374159c756203b96d7e47 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -723,8 +723,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { * display serial interface controller */ +static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | + SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = { .name = "dsi", + .sysc = &omap3xxx_dsi_sysc, }; static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = { diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h index 7c34c44eb0ae531b17a29f8bea872b59bde04c3d..babb5db5a3a407a4d0ba36bdd71dede87a240864 100644 --- a/arch/arm/mach-omap2/prcm43xx.h +++ b/arch/arm/mach-omap2/prcm43xx.h @@ -39,6 +39,7 @@ /* RM RSTST offsets */ #define AM43XX_RM_GFX_RSTST_OFFSET 0x0014 +#define AM43XX_RM_PER_RSTST_OFFSET 0x0014 #define AM43XX_RM_WKUP_RSTST_OFFSET 0x0014 /* CM instances */ diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index f6d02e4cbcda4e06954c1c960a54b945e28fe58b..5c87dff5d46ee04f9fe2060152f1ca12ab3f1d86 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c @@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = { }; static struct smc91x_platdata smc91x_platdata = { 
- .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT, + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | + SMC91X_USE_DMA | SMC91X_NOWAIT, }; static struct platform_device smc91x_device = { diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c index 2385052b0ce1326d9f1ae79959d16a9fea459361..e362f865fcd28dafa070c5bb3873e2cf54dbbc8c 100644 --- a/arch/arm/mach-pxa/pxa_cplds_irqs.c +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c @@ -41,30 +41,35 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d) unsigned long pending; unsigned int bit; - pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask; - for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) - generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit)); + do { + pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask; + for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) { + generic_handle_irq(irq_find_mapping(fpga->irqdomain, + bit)); + } + } while (pending); return IRQ_HANDLED; } -static void cplds_irq_mask_ack(struct irq_data *d) +static void cplds_irq_mask(struct irq_data *d) { struct cplds *fpga = irq_data_get_irq_chip_data(d); unsigned int cplds_irq = irqd_to_hwirq(d); - unsigned int set, bit = BIT(cplds_irq); + unsigned int bit = BIT(cplds_irq); fpga->irq_mask &= ~bit; writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); - set = readl(fpga->base + FPGA_IRQ_SET_CLR); - writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR); } static void cplds_irq_unmask(struct irq_data *d) { struct cplds *fpga = irq_data_get_irq_chip_data(d); unsigned int cplds_irq = irqd_to_hwirq(d); - unsigned int bit = BIT(cplds_irq); + unsigned int set, bit = BIT(cplds_irq); + + set = readl(fpga->base + FPGA_IRQ_SET_CLR); + writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR); fpga->irq_mask |= bit; writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); @@ -72,7 +77,8 @@ static void cplds_irq_unmask(struct irq_data *d) static struct irq_chip cplds_irq_chip = { .name = "pxa_cplds", - 
.irq_mask_ack = cplds_irq_mask_ack, + .irq_ack = cplds_irq_mask, + .irq_mask = cplds_irq_mask, .irq_unmask = cplds_irq_unmask, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c index 13b1d4586d7dedc7cee9d2754550a6d6eff28baf..9001312710f73eee85467b3f7ad255a3f79514ce 100644 --- a/arch/arm/mach-pxa/xcep.c +++ b/arch/arm/mach-pxa/xcep.c @@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = { }; static struct smc91x_platdata xcep_smc91x_info = { - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | + SMC91X_NOWAIT | SMC91X_USE_DMA, }; static struct platform_device smc91x_device = { diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 44575edc44b13d01c1f6db65245651f8be1c0074..cf0a7c2359f072d49aca95af6b4073da42ec7180 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -95,7 +95,8 @@ static struct smsc911x_platform_config smsc911x_config = { }; static struct smc91x_platdata smc91x_platdata = { - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | + SMC91X_NOWAIT, }; static struct platform_device realview_eth_device = { diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c index cbf53bb9c814dc328bb209ad3ee580da7f2f6a6f..0db46895c82a4729d40b6c94c62482cd00ae1c9b 100644 --- a/arch/arm/mach-sa1100/clock.c +++ b/arch/arm/mach-sa1100/clock.c @@ -125,6 +125,8 @@ static unsigned long clk_36864_get_rate(struct clk *clk) } static struct clkops clk_36864_ops = { + .enable = clk_cpu_enable, + .disable = clk_cpu_disable, .get_rate = clk_36864_get_rate, }; @@ -140,9 +142,8 @@ static struct clk_lookup sa11xx_clkregs[] = { CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864), }; -static int __init sa11xx_clk_init(void) +int __init sa11xx_clk_init(void) { clkdev_add_table(sa11xx_clkregs, 
ARRAY_SIZE(sa11xx_clkregs)); return 0; } -core_initcall(sa11xx_clk_init); diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 345e63f4eb71f53d0038adc005c8a0f052714b2b..3e09beddb6e8f4e84e0863691c7d33c91bae404a 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -34,6 +34,7 @@ #include #include +#include #include "generic.h" #include @@ -95,6 +96,8 @@ static void sa1100_power_off(void) void sa11x0_restart(enum reboot_mode mode, const char *cmd) { + clear_reset_status(RESET_STATUS_ALL); + if (mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); @@ -388,6 +391,7 @@ void __init sa1100_init_irq(void) sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start); sa1100_init_gpio(); + sa11xx_clk_init(); } /* diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h index 0d92e119b36b13d28c2ab8036c21f23963a54ff8..68199b603ff717f7b5dfb514abd62c3b21704d1c 100644 --- a/arch/arm/mach-sa1100/generic.h +++ b/arch/arm/mach-sa1100/generic.h @@ -44,3 +44,5 @@ int sa11x0_pm_init(void); #else static inline int sa11x0_pm_init(void) { return 0; } #endif + +int sa11xx_clk_init(void); diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 1525d7b5f1b74b6d06ac1276a56a45c23781e060..88149f85bc49dfa7a62c6b395bdf7a3e3f770e7b 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c @@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = { }; static struct smc91x_platdata smc91x_platdata = { - .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, + .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT, }; static struct platform_device smc91x_device = { diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index 62437b57813eab3c1cff4fc275386b37378dc9d8..73e3adbc133096eca9cdf652d37ff110f5e35850 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ 
b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -41,39 +41,26 @@ #define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */ -static void __iomem *irqc; - -static const u8 da9063_mask_regs[] = { - DA9063_REG_IRQ_MASK_A, - DA9063_REG_IRQ_MASK_B, - DA9063_REG_IRQ_MASK_C, - DA9063_REG_IRQ_MASK_D, -}; - -/* DA9210 System Control and Event Registers */ +/* start of DA9210 System Control and Event Registers */ #define DA9210_REG_MASK_A 0x54 -#define DA9210_REG_MASK_B 0x55 - -static const u8 da9210_mask_regs[] = { - DA9210_REG_MASK_A, - DA9210_REG_MASK_B, -}; - -static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[], - unsigned int nregs) -{ - unsigned int i; - dev_info(&client->dev, "Masking %s interrupt sources\n", client->name); +static void __iomem *irqc; - for (i = 0; i < nregs; i++) { - int error = i2c_smbus_write_byte_data(client, regs[i], ~0); - if (error) { - dev_err(&client->dev, "i2c error %d\n", error); - return; - } - } -} +/* first byte sets the memory pointer, following are consecutive reg values */ +static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff }; +static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff }; + +static struct i2c_msg da9xxx_msgs[2] = { + { + .addr = 0x58, + .len = ARRAY_SIZE(da9063_irq_clr), + .buf = da9063_irq_clr, + }, { + .addr = 0x68, + .len = ARRAY_SIZE(da9210_irq_clr), + .buf = da9210_irq_clr, + }, +}; static int regulator_quirk_notify(struct notifier_block *nb, unsigned long action, void *data) @@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb, client = to_i2c_client(dev); dev_dbg(dev, "Detected %s\n", client->name); - if ((client->addr == 0x58 && !strcmp(client->name, "da9063"))) - da9xxx_mask_irqs(client, da9063_mask_regs, - ARRAY_SIZE(da9063_mask_regs)); - else if (client->addr == 0x68 && !strcmp(client->name, "da9210")) - da9xxx_mask_irqs(client, da9210_mask_regs, - ARRAY_SIZE(da9210_mask_regs)); + if ((client->addr == 0x58 && 
!strcmp(client->name, "da9063")) || + (client->addr == 0x68 && !strcmp(client->name, "da9210"))) { + int ret; + + dev_info(&client->dev, "clearing da9063/da9210 interrupts\n"); + ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs)); + if (ret != ARRAY_SIZE(da9xxx_msgs)) + dev_err(&client->dev, "i2c error %d\n", ret); + } mon = ioread32(irqc + IRQC_MONITOR); if (mon & REGULATOR_IRQ_MASK) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f8d50d8c5379e98332e10056a89134d0487ea8c0..e0536945f835d5f3d29d93a0ccb600c5496f7356 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -82,9 +82,12 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE select HAVE_SYSCALL_TRACEPOINTS select IOMMU_DMA if (IOMMU_SUPPORT && !ARCH_QCOM) + select HAVE_KPROBES + select HAVE_KRETPROBES if HAVE_KPROBES select IRQ_DOMAIN select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA @@ -99,6 +102,7 @@ config ARM64 select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select HAVE_CONTEXT_TRACKING + select HAVE_ARM_SMCCC help ARM 64-bit (AArch64) Linux support. @@ -807,6 +811,14 @@ config SETEND_EMULATION If unsure, say Y endif +config ARM64_SW_TTBR0_PAN + bool "Emulate Priviledged Access Never using TTBR0_EL1 switching" + help + Enabling this option prevents the kernel from accessing + user-space memory directly by pointing TTBR0_EL1 to a reserved + zeroed area and reserved ASID. The user access routines + restore the valid TTBR0_EL1 temporarily. 
+ menu "ARMv8.1 architectural features" config ARM64_HW_AFDBM @@ -893,7 +905,7 @@ config RELOCATABLE config RANDOMIZE_BASE bool "Randomize the address of the kernel image" - select ARM64_MODULE_PLTS + select ARM64_MODULE_PLTS if MODULES select RELOCATABLE help Randomizes the virtual address at which the kernel image is @@ -1053,6 +1065,14 @@ menu "Power management options" source "kernel/power/Kconfig" +config ARCH_HIBERNATION_POSSIBLE + def_bool y + depends on CPU_PM + +config ARCH_HIBERNATION_HEADER + def_bool y + depends on HIBERNATION + config ARCH_SUSPEND_POSSIBLE def_bool y diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 01c86385fbb67ab3e5990dbc138f4849b892edcc..365e1300465169e03124ff0706aea4acdca5fb19 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -226,3 +226,6 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y CONFIG_CRYPTO_CRC32_ARM64=y +CONFIG_HIBERNATION=y +CONFIG_KPROBES=y +CONFIG_CORESIGHT=y diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig index 03c5bc89b6f5d88197b05e479e800882edd06e58..32b44c8c3d183dfe0c2796eb1940747f9bb6bc88 100644 --- a/arch/arm64/configs/msm-perf_defconfig +++ b/arch/arm64/configs/msm-perf_defconfig @@ -327,8 +327,6 @@ CONFIG_POWER_RESET_QCOM=y CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y -CONFIG_QPNP_SMBCHARGER=y -CONFIG_QPNP_FG=y CONFIG_SMB135X_CHARGER=y CONFIG_SMB1351_USB_CHARGER=y CONFIG_MSM_BCL_CTL=y @@ -461,7 +459,7 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_LEDS_QPNP=y -CONFIG_LEDS_QPNP_FLASH=y +CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_TRIGGERS=y CONFIG_SWITCH=y diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig index b4ddd9a2bed51ca10e621c124ff0298efe1af585..988f1fbf8ea36d15be4a36d71265340da51a5c05 100644 --- a/arch/arm64/configs/msm_defconfig +++ 
b/arch/arm64/configs/msm_defconfig @@ -314,8 +314,6 @@ CONFIG_POWER_RESET_QCOM=y CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y -CONFIG_QPNP_SMBCHARGER=y -CONFIG_QPNP_FG=y CONFIG_SMB135X_CHARGER=y CONFIG_SMB1351_USB_CHARGER=y CONFIG_MSM_BCL_CTL=y @@ -449,7 +447,7 @@ CONFIG_MMC_SPI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_EXYNOS=y CONFIG_LEDS_QPNP=y -CONFIG_LEDS_QPNP_FLASH=y +CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_SYSCON=y CONFIG_LEDS_TRIGGERS=y diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 5c07a8b8035476f9620412a4fa6a39037e3d106d..91224fc7ae6832f2569bfe4fd3eeb2307a8c4fd3 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -479,6 +479,11 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_SWITCH=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_QPNP=y +CONFIG_ESOC=y +CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y CONFIG_DMADEVICES=y CONFIG_QCOM_SPS_DMA=y CONFIG_UIO=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index f5b163eaec0b09d9b6e3a4e1aa0ce9613fee8e29..d5f30e9a8b1633f488d8629eccb118c5c9d6393c 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -489,6 +489,13 @@ CONFIG_EDAC_CORTEX_ARM64=y CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_QPNP=y +CONFIG_ESOC=y +CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +CONFIG_ESOC_DEBUG=y +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y +CONFIG_ESOC_MDM_DBG_ENG=y CONFIG_DMADEVICES=y CONFIG_QCOM_SPS_DMA=y CONFIG_UIO=y diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig index 27055fc4b9d51b1beac3df82c7dc90c6eac1865e..7e762a4d207b198e0618e55136f5b7967be0cbba 100644 --- a/arch/arm64/configs/msmcortex_mediabox_defconfig +++ b/arch/arm64/configs/msmcortex_mediabox_defconfig @@ -406,7 +406,6 @@ 
CONFIG_DVB_MPQ=m CONFIG_DVB_MPQ_DEMUX=m CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX=y CONFIG_TSPP=m -CONFIG_QCOM_KGSL=y CONFIG_DRM=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LOGO=y diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..00eb346e0928cf2531b6910e4a16c98dbb0a77e5 --- /dev/null +++ b/arch/arm64/configs/ranchu64_defconfig @@ -0,0 +1,311 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_MMAP_RND_BITS=24 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_NR_CPUS=4 +CONFIG_PREEMPT=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y 
+CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y 
+CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMC91X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_BATTERY_GOLDFISH=y +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y 
+CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_MISC_FILESYSTEMS is not set 
+CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_FTRACE is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_DEBUG_RODATA=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 05d9e16c0dfdd81a8a6ffe141e5c6f5cc96b3e7e..6a51dfccfe71a1846c66e8b2b53580b489a3d90c 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } - if (nbytes) { + if (walk.nbytes % AES_BLOCK_SIZE) { u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; u8 __aligned(8) tail[AES_BLOCK_SIZE]; diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index beccbdefa106a4ba0dd7d98537427a37d2c26517..8746ff6abd7782210abe56c56b549e0c4e67ac68 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -95,13 +95,11 @@ void apply_alternatives(void *start, size_t length); * The code that follows this macro will be assembled and linked as * normal. There are no restrictions on this code. */ -.macro alternative_if_not cap, enable = 1 - .if \enable +.macro alternative_if_not cap .pushsection .altinstructions, "a" altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f .popsection 661: - .endif .endm /* @@ -118,27 +116,27 @@ void apply_alternatives(void *start, size_t length); * alternative sequence it is defined in (branches into an * alternative sequence are not fixed up). 
*/ -.macro alternative_else, enable = 1 - .if \enable +.macro alternative_else 662: .pushsection .altinstr_replacement, "ax" 663: - .endif .endm /* * Complete an alternative code sequence. */ -.macro alternative_endif, enable = 1 - .if \enable +.macro alternative_endif 664: .popsection .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) - .endif .endm #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) +.macro user_alt, label, oldinstr, newinstr, cond +9999: alternative_insn "\oldinstr", "\newinstr", \cond + _ASM_EXTABLE 9999b, \label +.endm /* * Generate the assembly for UAO alternatives with exception table entries. diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index f8856a45085252d6eb74540dbb852e8837046dfd..dfc8416d889619607065b5fde0bbb3c81ed0f28b 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -1,5 +1,5 @@ /* - * Based on arch/arm/include/asm/assembler.h + * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S * * Copyright (C) 1996-2000 Russell King * Copyright (C) 2012 ARM Ltd. @@ -23,6 +23,10 @@ #ifndef __ASM_ASSEMBLER_H #define __ASM_ASSEMBLER_H +#include +#include +#include +#include #include #include @@ -49,6 +53,15 @@ msr daifclr, #2 .endm + .macro save_and_disable_irq, flags + mrs \flags, daif + msr daifset, #2 + .endm + + .macro restore_irq, flags + msr daif, \flags + .endm + /* * Save/disable and restore interrupts. */ @@ -223,6 +236,111 @@ lr .req x30 // link register add \reg, \reg, \tmp .endm +/* + * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) + */ + .macro vma_vm_mm, rd, rn + ldr \rd, [\rn, #VMA_VM_MM] + .endm + +/* + * mmid - get context id from mm pointer (mm->context.id) + */ + .macro mmid, rd, rn + ldr \rd, [\rn, #MM_CONTEXT_ID] + .endm + +/* + * dcache_line_size - get the minimum D-cache line size from the CTR register. 
+ */ + .macro dcache_line_size, reg, tmp + mrs \tmp, ctr_el0 // read CTR + ubfm \tmp, \tmp, #16, #19 // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * icache_line_size - get the minimum I-cache line size from the CTR register. + */ + .macro icache_line_size, reg, tmp + mrs \tmp, ctr_el0 // read CTR + and \tmp, \tmp, #0xf // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map + */ + .macro tcr_set_idmap_t0sz, valreg, tmpreg +#ifndef CONFIG_ARM64_VA_BITS_48 + ldr_l \tmpreg, idmap_t0sz + bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH +#endif + .endm + +/* + * Macro to perform a data cache maintenance for the interval + * [kaddr, kaddr + size) + * + * op: operation passed to dc instruction + * domain: domain used in dsb instruciton + * kaddr: starting virtual address of the region + * size: size of the region + * Corrupts: kaddr, size, tmp1, tmp2 + */ + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 + dcache_line_size \tmp1, \tmp2 + add \size, \kaddr, \size + sub \tmp2, \tmp1, #1 + bic \kaddr, \kaddr, \tmp2 +9998: + .if (\op == cvau || \op == cvac) +alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE + dc \op, \kaddr +alternative_else + dc civac, \kaddr +alternative_endif + .else + dc \op, \kaddr + .endif + add \kaddr, \kaddr, \tmp1 + cmp \kaddr, \size + b.lo 9998b + dsb \domain + .endm + +/* + * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present + */ + .macro reset_pmuserenr_el0, tmpreg + mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer + sbfx \tmpreg, \tmpreg, #8, #4 + cmp \tmpreg, #1 // Skip if no PMU present + b.lt 9000f + msr pmuserenr_el0, xzr // Disable PMU access from EL0 +9000: + .endm + +/* + * copy_page - copy src to dest using temp registers t1-t8 + */ + .macro copy_page dest:req src:req t1:req t2:req 
t3:req t4:req t5:req t6:req t7:req t8:req +9998: ldp \t1, \t2, [\src] + ldp \t3, \t4, [\src, #16] + ldp \t5, \t6, [\src, #32] + ldp \t7, \t8, [\src, #48] + add \src, \src, #64 + stnp \t1, \t2, [\dest] + stnp \t3, \t4, [\dest, #16] + stnp \t5, \t6, [\dest, #32] + stnp \t7, \t8, [\dest, #48] + add \dest, \dest, #64 + tst \src, #(PAGE_SIZE - 1) + b.ne 9998b + .endm + /* * Annotate a function as position independent, i.e., safe to be called before * the kernel virtual mapping is activated. @@ -265,4 +383,28 @@ lr .req x30 // link register movk \reg, :abs_g0_nc:\val .endm +/* + * Return the current thread_info. + */ + .macro get_thread_info, rd + mrs \rd, sp_el0 + .endm + +/* + * Errata workaround post TTBR0_EL1 update. + */ + .macro post_ttbr0_update_workaround +#ifdef CONFIG_CAVIUM_ERRATUM_27456 +alternative_if_not ARM64_WORKAROUND_CAVIUM_27456 + nop + nop + nop +alternative_else + ic iallu + dsb nsh + isb +alternative_endif +#endif + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index fb1013f9bb86432b1212f6752ec2b26b5f19cef0..29f8d7fd627d9da003f78892a6b2215e4c8fe8a4 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -34,9 +34,9 @@ #define ARM64_HAS_UAO 9 #define ARM64_ALT_PAN_NOT_UAO 10 -#define ARM64_NCAPS 11 -#define ARM64_WORKAROUND_CAVIUM_27456 12 - +#define ARM64_WORKAROUND_CAVIUM_27456 11 +#define ARM64_HAS_VIRT_HOST_EXTN 12 +#define ARM64_NCAPS 13 #ifndef __ASSEMBLY__ @@ -83,7 +83,7 @@ struct arm64_cpu_capabilities { const char *desc; u16 capability; bool (*matches)(const struct arm64_cpu_capabilities *); - void (*enable)(void *); /* Called on all active CPUs */ + int (*enable)(void *); /* Called on all active CPUs */ union { struct { /* To be used for erratum handling only */ u32 midr_model; @@ -191,6 +191,12 @@ static inline bool system_supports_mixed_endian_el0(void) return 
id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1)); } +static inline bool system_uses_ttbr0_pan(void) +{ + return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && + !cpus_have_cap(ARM64_HAS_PAN); +} + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 2fcb9b7c876c06d67c220f1eefcd0b61ef715a1c..4b6b3f72a2158a06e07f36529912c81c0b5fb43b 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -66,6 +66,11 @@ #define CACHE_FLUSH_IS_SAFE 1 +/* kprobes BRK opcodes with ESR encoding */ +#define BRK64_ESR_MASK 0xFFFF +#define BRK64_ESR_KPROBES 0x0004 +#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5)) + /* AArch32 */ #define DBG_ESR_EVT_BKPT 0x4 #define DBG_ESR_EVT_VECC 0x5 diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index ef572206f1c3eb6658618b40568e662f7e1865fc..932f5a56d1a60d47ffc7b8c05ce2e67ea7374e7a 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -1,8 +1,11 @@ #ifndef _ASM_EFI_H #define _ASM_EFI_H +#include #include +#include #include +#include #ifdef CONFIG_EFI extern void efi_init(void); @@ -10,6 +13,8 @@ extern void efi_init(void); #define efi_init() #endif +int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); + #define efi_call_virt(f, ...) \ ({ \ efi_##f##_t *__f; \ @@ -63,6 +68,34 @@ extern void efi_init(void); * Services are enabled and the EFI_RUNTIME_SERVICES bit set. */ +static inline void efi_set_pgd(struct mm_struct *mm) +{ + __switch_mm(mm); + + if (system_uses_ttbr0_pan()) { + if (mm != current->active_mm) { + /* + * Update the current thread's saved ttbr0 since it is + * restored as part of a return from exception. Set + * the hardware TTBR0_EL1 using cpu_switch_mm() + * directly to enable potential errata workarounds. 
+ */ + update_saved_ttbr0(current, mm); + cpu_switch_mm(mm->pgd, mm); + } else { + /* + * Defer the switch to the current thread's TTBR0_EL1 + * until uaccess_enable(). Restore the current + * thread's saved ttbr0 corresponding to its active_mm + * (if different from init_mm). + */ + cpu_set_reserved_ttbr0(); + if (current->active_mm != &init_mm) + update_saved_ttbr0(current, current->active_mm); + } + } +} + void efi_virtmap_load(void); void efi_virtmap_unload(void); diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 77eeb2cc648fd79d28cfc452c7373b6ebe04071b..f772e15c47663f0aafa1b287e8b9ac6aa3a55d2c 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -74,6 +74,7 @@ #define ESR_ELx_EC_SHIFT (26) #define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT) +#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT) #define ESR_ELx_IL (UL(1) << 25) #define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index f2585cdd32c29832566718e99d7b5fd9c61d2322..71dfa3b4231364363e08bd474ea474cc7fffd89c 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -27,9 +27,9 @@ #include #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ +do { \ + uaccess_enable(); \ asm volatile( \ - ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ - CONFIG_ARM64_PAN) \ " prfm pstl1strm, %2\n" \ "1: ldxr %w1, %2\n" \ insn "\n" \ @@ -44,11 +44,11 @@ " .popsection\n" \ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ - ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ - CONFIG_ARM64_PAN) \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ - : "memory") + : "memory"); \ + uaccess_disable(); \ +} while (0) static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) @@ -118,8 +118,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (!access_ok(VERIFY_WRITE, 
uaddr, sizeof(u32))) return -EFAULT; + uaccess_enable(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" -ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN) " prfm pstl1strm, %2\n" "1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" @@ -134,10 +134,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN) " .popsection\n" _ASM_EXTABLE(1b, 4b) _ASM_EXTABLE(2b, 4b) -ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) : "r" (oldval), "r" (newval), "Ir" (-EFAULT) : "memory"); + uaccess_disable(); *uval = val; return ret; diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 30e50eb54a6795aa9c504a2d22dfabd8b8db195a..1dbaa901d7e5d022f19f5bc3ee5e18fe6fa47a73 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -120,6 +120,29 @@ enum aarch64_insn_register { AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */ }; +enum aarch64_insn_special_register { + AARCH64_INSN_SPCLREG_SPSR_EL1 = 0xC200, + AARCH64_INSN_SPCLREG_ELR_EL1 = 0xC201, + AARCH64_INSN_SPCLREG_SP_EL0 = 0xC208, + AARCH64_INSN_SPCLREG_SPSEL = 0xC210, + AARCH64_INSN_SPCLREG_CURRENTEL = 0xC212, + AARCH64_INSN_SPCLREG_DAIF = 0xDA11, + AARCH64_INSN_SPCLREG_NZCV = 0xDA10, + AARCH64_INSN_SPCLREG_FPCR = 0xDA20, + AARCH64_INSN_SPCLREG_DSPSR_EL0 = 0xDA28, + AARCH64_INSN_SPCLREG_DLR_EL0 = 0xDA29, + AARCH64_INSN_SPCLREG_SPSR_EL2 = 0xE200, + AARCH64_INSN_SPCLREG_ELR_EL2 = 0xE201, + AARCH64_INSN_SPCLREG_SP_EL1 = 0xE208, + AARCH64_INSN_SPCLREG_SPSR_INQ = 0xE218, + AARCH64_INSN_SPCLREG_SPSR_ABT = 0xE219, + AARCH64_INSN_SPCLREG_SPSR_UND = 0xE21A, + AARCH64_INSN_SPCLREG_SPSR_FIQ = 0xE21B, + AARCH64_INSN_SPCLREG_SPSR_EL3 = 0xF200, + AARCH64_INSN_SPCLREG_ELR_EL3 = 0xF201, + AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210 +}; + enum aarch64_insn_variant { AARCH64_INSN_VARIANT_32BIT, AARCH64_INSN_VARIANT_64BIT @@ -223,8 +246,15 @@ static __always_inline bool 
aarch64_insn_is_##abbr(u32 code) \ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ { return (val); } +__AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000) +__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) +__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000) +__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) +__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000) +__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000) +__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000) __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) __AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) @@ -273,10 +303,15 @@ __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) +__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000) __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) +__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0) +__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000) +__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F) +__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) #undef __AARCH64_INSN_FUNCS @@ -286,6 +321,8 @@ bool aarch64_insn_is_branch_imm(u32 insn); int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_write(void *addr, u32 insn); enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); +bool aarch64_insn_uses_literal(u32 insn); +bool aarch64_insn_is_branch(u32 insn); u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 insn, u64 imm); @@ -367,9 +404,13 @@ 
bool aarch32_insn_is_wide(u32 insn); #define A32_RT_OFFSET 12 #define A32_RT2_OFFSET 0 +u32 aarch64_insn_extract_system_reg(u32 insn); u32 aarch32_insn_extract_reg_num(u32 insn, int offset); u32 aarch32_insn_mcr_extract_opc2(u32 insn); u32 aarch32_insn_mcr_extract_crm(u32 insn); + +typedef bool (pstate_check_t)(unsigned long); +extern pstate_check_t * const aarch32_opcode_cond_checks[16]; #endif /* __ASSEMBLY__ */ #endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 5c6375d8528bb8ddd313bfa2911f7a0d77819028..7803343e5881fbd7b2f635b25082d3e91d2583f8 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -19,6 +19,8 @@ #ifndef __ASM_KERNEL_PGTABLE_H #define __ASM_KERNEL_PGTABLE_H +#include +#include /* * The linear mapping and the start of memory are both 2M aligned (per @@ -53,6 +55,12 @@ #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE) +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +#define RESERVED_TTBR0_SIZE (PAGE_SIZE) +#else +#define RESERVED_TTBR0_SIZE (0) +#endif + /* Initial memory map size */ #if ARM64_SWAPPER_USES_SECTION_MAPS #define SWAPPER_BLOCK_SHIFT SECTION_SHIFT @@ -86,10 +94,24 @@ * (64k granule), or a multiple that can be mapped using contiguous bits * in the page tables: 32 * PMD_SIZE (16k granule) */ -#ifdef CONFIG_ARM64_64K_PAGES -#define ARM64_MEMSTART_ALIGN SZ_512M +#if defined(CONFIG_ARM64_4K_PAGES) +#define ARM64_MEMSTART_SHIFT PUD_SHIFT +#elif defined(CONFIG_ARM64_16K_PAGES) +#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5) +#else +#define ARM64_MEMSTART_SHIFT PMD_SHIFT +#endif + +/* + * sparsemem vmemmap imposes an additional requirement on the alignment of + * memstart_addr, due to the fact that the base of the vmemmap region + * has a direct correspondence, and needs to appear sufficiently aligned + * in the virtual address space. 
+ */ +#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS +#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS) #else -#define ARM64_MEMSTART_ALIGN SZ_1G +#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT) #endif #endif /* __ASM_KERNEL_PGTABLE_H */ diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h new file mode 100644 index 0000000000000000000000000000000000000000..1737aecfcc5e462c78e6c47ab06ffe2e2fc12c60 --- /dev/null +++ b/arch/arm64/include/asm/kprobes.h @@ -0,0 +1,60 @@ +/* + * arch/arm64/include/asm/kprobes.h + * + * Copyright (C) 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef _ARM_KPROBES_H +#define _ARM_KPROBES_H + +#include +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 1 + +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +#include + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* Single step context for kprobe */ +struct kprobe_step_ctx { + unsigned long ss_pending; + unsigned long match_addr; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_irqflag; + struct prev_kprobe prev_kprobe; + struct kprobe_step_ctx ss_ctx; + struct pt_regs jprobe_saved_regs; +}; + +void arch_remove_kprobe(struct kprobe *); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); +int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); +int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr); +int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr); +void kretprobe_trampoline(void); +void __kprobes *trampoline_probe_handler(struct pt_regs *regs); + +#endif /* _ARM_KPROBES_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 2d960f8588b0639f80c272f833c3796fd7c286b6..8b709f53f87423ef55f2422204439dc4f63cd86f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -83,17 +83,6 @@ #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) -/* Hyp System Control Register (SCTLR_EL2) bits */ -#define SCTLR_EL2_EE (1 << 25) -#define SCTLR_EL2_WXN (1 << 19) -#define SCTLR_EL2_I (1 << 12) -#define SCTLR_EL2_SA (1 << 3) -#define SCTLR_EL2_C (1 << 2) -#define SCTLR_EL2_A (1 << 1) -#define SCTLR_EL2_M 1 -#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \ - SCTLR_EL2_SA | SCTLR_EL2_I) - /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) #define TCR_EL2_TBI (1 << 20) @@ -123,6 +112,7 @@ #define VTCR_EL2_SL0_LVL1 (1 << 6) 
#define VTCR_EL2_T0SZ_MASK 0x3f #define VTCR_EL2_T0SZ_40B 24 +#define VTCR_EL2_VS 19 /* * We configure the Stage-2 page tables to always restrict the IPA space to be @@ -167,7 +157,7 @@ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) #define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) #define VTTBR_VMID_SHIFT (UL(48)) -#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) +#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) /* Hyp System Trap Register */ #define HSTR_EL2_T(x) (1 << x) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 419bc6661b5c44198f03a7f2b80c59a3f17e8fb3..36a30c80032d46b8bd51b0aaee3eb2f5c11599e5 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -20,84 +20,10 @@ #include -/* - * 0 is reserved as an invalid value. - * Order *must* be kept in sync with the hyp switch code. - */ -#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */ -#define CSSELR_EL1 2 /* Cache Size Selection Register */ -#define SCTLR_EL1 3 /* System Control Register */ -#define ACTLR_EL1 4 /* Auxiliary Control Register */ -#define CPACR_EL1 5 /* Coprocessor Access Control */ -#define TTBR0_EL1 6 /* Translation Table Base Register 0 */ -#define TTBR1_EL1 7 /* Translation Table Base Register 1 */ -#define TCR_EL1 8 /* Translation Control Register */ -#define ESR_EL1 9 /* Exception Syndrome Register */ -#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */ -#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */ -#define FAR_EL1 12 /* Fault Address Register */ -#define MAIR_EL1 13 /* Memory Attribute Indirection Register */ -#define VBAR_EL1 14 /* Vector Base Address Register */ -#define CONTEXTIDR_EL1 15 /* Context ID Register */ -#define TPIDR_EL0 16 /* Thread ID, User R/W */ -#define TPIDRRO_EL0 17 /* Thread ID, User R/O */ -#define TPIDR_EL1 18 /* Thread ID, Privileged */ -#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection 
Register */ -#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ -#define PAR_EL1 21 /* Physical Address Register */ -#define MDSCR_EL1 22 /* Monitor Debug System Control Register */ -#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */ - -/* 32bit specific registers. Keep them at the end of the range */ -#define DACR32_EL2 24 /* Domain Access Control Register */ -#define IFSR32_EL2 25 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ -#define NR_SYS_REGS 28 - -/* 32bit mapping */ -#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ -#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */ -#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */ -#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */ -#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */ -#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */ -#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */ -#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ -#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ -#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. 
*/ -#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ -#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ -#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ -#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */ -#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ -#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ -#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ -#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ -#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ -#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ -#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ -#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ -#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */ -#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ -#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ -#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ -#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ -#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ -#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ - -#define cp14_DBGDSCRext (MDSCR_EL1 * 2) -#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2) -#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2) -#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1) -#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) -#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) -#define cp14_DBGDCCINT (MDCCINT_EL1 * 2) - -#define NR_COPRO_REGS (NR_SYS_REGS * 2) - #define ARM_EXCEPTION_IRQ 0 #define ARM_EXCEPTION_TRAP 1 +/* The hyp-stub will return this for any kvm_call_hyp() call */ +#define ARM_EXCEPTION_HYP_GONE 2 #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) @@ -105,11 +31,27 @@ #define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset) #ifndef __ASSEMBLY__ +#if 
__GNUC__ > 4 +#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR) +#else +/* + * GCC versions 4.9 and older will fold the constant below into the addend of + * the reference to 'sym' above if kvm_ksym_shift is declared static or if the + * constant is used directly. However, since we use the small code model for + * the core kernel, the reference to 'sym' will be emitted as a adrp/add pair, + * with a +/- 4 GB range, resulting in linker relocation errors if the shift + * is sufficiently large. So prevent the compiler from folding the shift into + * the addend, by making the shift a variable with external linkage. + */ +__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR; +#endif + struct kvm; struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; +extern char __kvm_hyp_reset[]; extern char __kvm_hyp_vector[]; diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 25a40213bd9b87cb6802ecfd4204dc66c41e5850..3066328cd86b69a91274e0cb841059b428666140 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -26,7 +26,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 90c6368ad7c859bbc101d88f5273d33380c8d35a..3be7a7b52d809fa6eee7a3f1b4d2ec516b5697f4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -25,7 +25,6 @@ #include #include #include -#include #include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -45,6 +44,7 @@ int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_arch_dev_ioctl_check_extension(long ext); +phys_addr_t kvm_hyp_reset_entry(void); struct kvm_arch { /* The VMID generation used for the virt. memory system */ @@ -85,6 +85,86 @@ struct kvm_vcpu_fault_info { u64 hpfar_el2; /* Hyp IPA Fault Address Register */ }; +/* + * 0 is reserved as an invalid value. 
+ * Order should be kept in sync with the save/restore code. + */ +enum vcpu_sysreg { + __INVALID_SYSREG__, + MPIDR_EL1, /* MultiProcessor Affinity Register */ + CSSELR_EL1, /* Cache Size Selection Register */ + SCTLR_EL1, /* System Control Register */ + ACTLR_EL1, /* Auxiliary Control Register */ + CPACR_EL1, /* Coprocessor Access Control */ + TTBR0_EL1, /* Translation Table Base Register 0 */ + TTBR1_EL1, /* Translation Table Base Register 1 */ + TCR_EL1, /* Translation Control Register */ + ESR_EL1, /* Exception Syndrome Register */ + AFSR0_EL1, /* Auxilary Fault Status Register 0 */ + AFSR1_EL1, /* Auxilary Fault Status Register 1 */ + FAR_EL1, /* Fault Address Register */ + MAIR_EL1, /* Memory Attribute Indirection Register */ + VBAR_EL1, /* Vector Base Address Register */ + CONTEXTIDR_EL1, /* Context ID Register */ + TPIDR_EL0, /* Thread ID, User R/W */ + TPIDRRO_EL0, /* Thread ID, User R/O */ + TPIDR_EL1, /* Thread ID, Privileged */ + AMAIR_EL1, /* Aux Memory Attribute Indirection Register */ + CNTKCTL_EL1, /* Timer Control Register (EL1) */ + PAR_EL1, /* Physical Address Register */ + MDSCR_EL1, /* Monitor Debug System Control Register */ + MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ + + /* 32bit specific registers. Keep them at the end of the range */ + DACR32_EL2, /* Domain Access Control Register */ + IFSR32_EL2, /* Instruction Fault Status Register */ + FPEXC32_EL2, /* Floating-Point Exception Control Register */ + DBGVCR32_EL2, /* Debug Vector Catch Register */ + + NR_SYS_REGS /* Nothing after this line! 
*/ +}; + +/* 32bit mapping */ +#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ +#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */ +#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */ +#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */ +#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */ +#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */ +#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */ +#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ +#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ +#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */ +#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ +#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ +#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ +#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */ +#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ +#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ +#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ +#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ +#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ +#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ +#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ +#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ +#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */ +#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ +#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ +#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ +#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ +#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ + 
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2) +#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2) +#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2) +#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1) +#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) +#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) +#define cp14_DBGDCCINT (MDCCINT_EL1 * 2) + +#define NR_COPRO_REGS (NR_SYS_REGS * 2) + struct kvm_cpu_context { struct kvm_regs gp_regs; union { @@ -247,7 +327,21 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } -static inline void kvm_arch_hardware_disable(void) {} +static inline void __cpu_init_stage2(void) +{ +} + +static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr, + phys_addr_t phys_idmap_start) +{ + /* + * Call reset code, and switch back to stub hyp vectors. + * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation. + */ + __kvm_call_hyp((void *)kvm_hyp_reset_entry(), + boot_pgd_ptr, phys_idmap_start); +} + static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h index 889c908ee631b526594b5dfc32ef5dfde15480df..fe612a9625766b5fff3698e2d1014d7376744e4f 100644 --- a/arch/arm64/include/asm/kvm_mmio.h +++ b/arch/arm64/include/asm/kvm_mmio.h @@ -19,7 +19,6 @@ #define __ARM64_KVM_MMIO_H__ #include -#include #include /* diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 61505676d0853bb65710fc9ad7746db8f58e4658..342a5ac2f3da238da956eae467a37558a672c07a 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -20,6 +20,7 @@ #include #include +#include /* * As we only have the TTBR0_EL2 register, we cannot express @@ -98,6 +99,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_mmu_get_boot_httbr(void); phys_addr_t 
kvm_get_idmap_vector(void); +phys_addr_t kvm_get_idmap_start(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); @@ -158,7 +160,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT) #endif #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) -#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) #define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) @@ -302,5 +303,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd, merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE); } +static inline unsigned int kvm_get_vmid_bits(void) +{ + int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1); + + return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 12f8a00fb3f1767a645a04358dcaca08fd4f6b43..ae11e8fdbfd2b29d7e326683cb49cd6a325ef39f 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -71,6 +71,9 @@ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) +#define KERNEL_START _text +#define KERNEL_END _end + /* * The size of the KASAN shadow region. This should be 1/8th of the * size of the entire kernel virtual address space. 
@@ -193,7 +196,11 @@ static inline void *phys_to_virt(phys_addr_t x) #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET) +#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \ + _virt_addr_valid(kaddr)) #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 21d5a6e458918f3df0dce97aae7ca484f9067789..5d5baf8096b947cd01e4a81b05204fc4179a838f 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -118,7 +119,7 @@ static inline void cpu_uninstall_idmap(void) local_flush_tlb_all(); cpu_set_default_tcr_t0sz(); - if (mm != &init_mm) + if (mm != &init_mm && !system_uses_ttbr0_pan()) cpu_switch_mm(mm->pgd, mm); } @@ -178,20 +179,26 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -/* - * This is the actual mm switch as far as the scheduler - * is concerned. No registers are touched. We avoid - * calling the CPU specific function when the mm hasn't - * actually changed. 
- */ -static inline void -switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +static inline void update_saved_ttbr0(struct task_struct *tsk, + struct mm_struct *mm) { - unsigned int cpu = smp_processor_id(); + if (system_uses_ttbr0_pan()) { + BUG_ON(mm->pgd == swapper_pg_dir); + task_thread_info(tsk)->ttbr0 = + virt_to_phys(mm->pgd) | ASID(mm) << 48; + } +} +#else +static inline void update_saved_ttbr0(struct task_struct *tsk, + struct mm_struct *mm) +{ +} +#endif - if (prev == next) - return; +static inline void __switch_mm(struct mm_struct *next) +{ + unsigned int cpu = smp_processor_id(); /* * init_mm.pgd does not contain any user mappings and it is always @@ -205,7 +212,23 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, check_and_switch_context(next, cpu); } +static inline void +switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + if (prev != next) + __switch_mm(next); + + /* + * Update the saved TTBR0_EL1 of the scheduled-in task as the previous + * value may have not been initialised yet (activate_mm caller) or the + * ASID has changed since the last run (following the context switch + * of another thread of the same process). 
+ */ + update_saved_ttbr0(tsk, next); +} + #define deactivate_mm(tsk,mm) do { } while (0) -#define activate_mm(prev,next) switch_mm(prev, next, NULL) +#define activate_mm(prev,next) switch_mm(prev, next, current) #endif diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 9b2f5a9d019df493fa6021ee3ca6b4779401d8c4..fbafd0ad16df768fa7966a769014db5020294c41 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -19,6 +19,8 @@ #ifndef __ASM_PAGE_H #define __ASM_PAGE_H +#include + /* PAGE_SHIFT determines the page size */ /* CONT_SHIFT determines the number of pages which can be tracked together */ #ifdef CONFIG_ARM64_64K_PAGES diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 0a456bef8c792d91a30a4c2caf24cb6fb96546e2..8a336852eeba051535b47821cd510bc709967586 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \ \ switch (size) { \ case 1: \ - do { \ - asm ("//__per_cpu_" #op "_1\n" \ - "ldxrb %w[ret], %[ptr]\n" \ + asm ("//__per_cpu_" #op "_1\n" \ + "1: ldxrb %w[ret], %[ptr]\n" \ #asm_op " %w[ret], %w[ret], %w[val]\n" \ - "stxrb %w[loop], %w[ret], %[ptr]\n" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u8 *)ptr) \ - : [val] "Ir" (val)); \ - } while (loop); \ + " stxrb %w[loop], %w[ret], %[ptr]\n" \ + " cbnz %w[loop], 1b" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u8 *)ptr) \ + : [val] "Ir" (val)); \ break; \ case 2: \ - do { \ - asm ("//__per_cpu_" #op "_2\n" \ - "ldxrh %w[ret], %[ptr]\n" \ + asm ("//__per_cpu_" #op "_2\n" \ + "1: ldxrh %w[ret], %[ptr]\n" \ #asm_op " %w[ret], %w[ret], %w[val]\n" \ - "stxrh %w[loop], %w[ret], %[ptr]\n" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u16 *)ptr) \ - : [val] "Ir" (val)); \ - } while (loop); \ + " stxrh %w[loop], %w[ret], %[ptr]\n" \ + " cbnz %w[loop], 1b" \ + : [loop] "=&r" (loop), 
[ret] "=&r" (ret), \ + [ptr] "+Q"(*(u16 *)ptr) \ + : [val] "Ir" (val)); \ break; \ case 4: \ - do { \ - asm ("//__per_cpu_" #op "_4\n" \ - "ldxr %w[ret], %[ptr]\n" \ + asm ("//__per_cpu_" #op "_4\n" \ + "1: ldxr %w[ret], %[ptr]\n" \ #asm_op " %w[ret], %w[ret], %w[val]\n" \ - "stxr %w[loop], %w[ret], %[ptr]\n" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u32 *)ptr) \ - : [val] "Ir" (val)); \ - } while (loop); \ + " stxr %w[loop], %w[ret], %[ptr]\n" \ + " cbnz %w[loop], 1b" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u32 *)ptr) \ + : [val] "Ir" (val)); \ break; \ case 8: \ - do { \ - asm ("//__per_cpu_" #op "_8\n" \ - "ldxr %[ret], %[ptr]\n" \ + asm ("//__per_cpu_" #op "_8\n" \ + "1: ldxr %[ret], %[ptr]\n" \ #asm_op " %[ret], %[ret], %[val]\n" \ - "stxr %w[loop], %[ret], %[ptr]\n" \ - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ - [ptr] "+Q"(*(u64 *)ptr) \ - : [val] "Ir" (val)); \ - } while (loop); \ + " stxr %w[loop], %[ret], %[ptr]\n" \ + " cbnz %w[loop], 1b" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u64 *)ptr) \ + : [val] "Ir" (val)); \ break; \ default: \ BUILD_BUG(); \ @@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, switch (size) { case 1: - do { - asm ("//__percpu_xchg_1\n" - "ldxrb %w[ret], %[ptr]\n" - "stxrb %w[loop], %w[val], %[ptr]\n" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u8 *)ptr) - : [val] "r" (val)); - } while (loop); + asm ("//__percpu_xchg_1\n" + "1: ldxrb %w[ret], %[ptr]\n" + " stxrb %w[loop], %w[val], %[ptr]\n" + " cbnz %w[loop], 1b" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u8 *)ptr) + : [val] "r" (val)); break; case 2: - do { - asm ("//__percpu_xchg_2\n" - "ldxrh %w[ret], %[ptr]\n" - "stxrh %w[loop], %w[val], %[ptr]\n" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u16 *)ptr) - : [val] "r" (val)); - } while (loop); + asm ("//__percpu_xchg_2\n" + "1: ldxrh %w[ret], %[ptr]\n" + " stxrh %w[loop], %w[val], 
%[ptr]\n" + " cbnz %w[loop], 1b" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u16 *)ptr) + : [val] "r" (val)); break; case 4: - do { - asm ("//__percpu_xchg_4\n" - "ldxr %w[ret], %[ptr]\n" - "stxr %w[loop], %w[val], %[ptr]\n" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u32 *)ptr) - : [val] "r" (val)); - } while (loop); + asm ("//__percpu_xchg_4\n" + "1: ldxr %w[ret], %[ptr]\n" + " stxr %w[loop], %w[val], %[ptr]\n" + " cbnz %w[loop], 1b" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u32 *)ptr) + : [val] "r" (val)); break; case 8: - do { - asm ("//__percpu_xchg_8\n" - "ldxr %[ret], %[ptr]\n" - "stxr %w[loop], %[val], %[ptr]\n" - : [loop] "=&r"(loop), [ret] "=&r"(ret), - [ptr] "+Q"(*(u64 *)ptr) - : [val] "r" (val)); - } while (loop); + asm ("//__percpu_xchg_8\n" + "1: ldxr %[ret], %[ptr]\n" + " stxr %w[loop], %[val], %[ptr]\n" + " cbnz %w[loop], 1b" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u64 *)ptr) + : [val] "r" (val)); break; default: BUILD_BUG(); diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h new file mode 100644 index 0000000000000000000000000000000000000000..5af574d632fa496a89d63d6c0d44bedfce586e71 --- /dev/null +++ b/arch/arm64/include/asm/probes.h @@ -0,0 +1,35 @@ +/* + * arch/arm64/include/asm/probes.h + * + * Copyright (C) 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#ifndef _ARM_PROBES_H +#define _ARM_PROBES_H + +#include + +struct kprobe; +struct arch_specific_insn; + +typedef u32 kprobe_opcode_t; +typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *); + +/* architecture specific copy of original instruction */ +struct arch_specific_insn { + kprobe_opcode_t *insn; + pstate_check_t *pstate_cc; + kprobes_handler_t *handler; + /* restore address after step xol */ + unsigned long restore; +}; + +#endif diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index c97d01eb219f3648f9a2e6e414e3594eb302514e..a0053d23b35a89fa5f2fb62fc5ee5fb5be20b41e 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -193,7 +193,7 @@ static inline void spin_lock_prefetch(const void *ptr) #endif -void cpu_enable_pan(void *__unused); -void cpu_enable_uao(void *__unused); +int cpu_enable_pan(void *__unused); +int cpu_enable_uao(void *__unused); #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 7f94755089e200afbd4c015316da4517fe113bcd..5eedfd83acc772ae0324e41ffc965093829f1a6f 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -21,6 +21,8 @@ #include +#define _PSR_PAN_BIT 22 + /* Current Exception Level values, as contained in CurrentEL */ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) @@ -121,6 +123,8 @@ struct pt_regs { u64 unused; // maintain 16 byte alignment }; +#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate) + #define arch_has_single_step() (1) #ifdef CONFIG_COMPAT @@ -146,9 +150,57 @@ struct pt_regs { #define fast_interrupts_enabled(regs) \ (!((regs)->pstate & PSR_F_BIT)) -#define user_stack_pointer(regs) \ +#define GET_USP(regs) \ (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) +#define SET_USP(ptregs, value) \ + (!compat_user_mode(regs) ? 
((regs)->sp = value) : ((regs)->compat_sp = value)) + +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + u64 val = 0; + + offset >>= 3; + switch (offset) { + case 0 ... 30: + val = regs->regs[offset]; + break; + case offsetof(struct pt_regs, sp) >> 3: + val = regs->sp; + break; + case offsetof(struct pt_regs, pc) >> 3: + val = regs->pc; + break; + case offsetof(struct pt_regs, pstate) >> 3: + val = regs->pstate; + break; + default: + val = 0; + } + + return val; +} + +/* Valid only for Kernel mode traps. 
*/ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->sp; +} + static inline unsigned long regs_return_value(struct pt_regs *regs) { return regs->regs[0]; @@ -158,8 +210,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) struct task_struct; int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); -#define instruction_pointer(regs) ((unsigned long)(regs)->pc) +#define GET_IP(regs) ((unsigned long)(regs)->pc) +#define SET_IP(regs, value) ((regs)->pc = ((u64) (value))) + +#define GET_FP(ptregs) ((unsigned long)(ptregs)->regs[29]) +#define SET_FP(ptregs, value) ((ptregs)->regs[29] = ((u64) (value))) + +#include +#undef profile_pc extern unsigned long profile_pc(struct pt_regs *regs); #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 7b369923b2eb22b8fd8912962be156a9f52a7f7f..19afa01911dd4224797d3b8c3b47535e7cc6b772 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -37,13 +37,17 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) "2: ldaxr %w0, %2\n" " eor %w1, %w0, %w0, ror #16\n" " cbnz %w1, 1b\n" + /* Serialise against any concurrent lockers */ ARM64_LSE_ATOMIC_INSN( /* LL/SC */ " stxr %w1, %w0, %2\n" -" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */ - /* LSE atomics */ " nop\n" -" nop\n") +" nop\n", + /* LSE atomics */ +" mov %w1, %w0\n" +" cas %w0, %w0, %2\n" +" eor %w1, %w1, %w0\n") +" cbnz %w1, 2b\n" : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) : : "memory"); @@ -330,4 +334,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() +/* + * Accesses appearing in program order before a spin_lock() operation + * can be reordered with accesses inside the critical section, by virtue + * of arch_spin_lock being constructed using acquire semantics. 
+ * + * In cases where this is problematic (e.g. try_to_wake_up), an + * smp_mb__before_spinlock() can restore the required ordering. + */ +#define smp_mb__before_spinlock() smp_mb() + #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 59a5b0f1e81c3274f3c1c5c0ffb4ce4014ce35c2..024d623f662e588c845f47abd752bfba2470ed2f 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -1,7 +1,8 @@ #ifndef __ASM_SUSPEND_H #define __ASM_SUSPEND_H -#define NR_CTX_REGS 11 +#define NR_CTX_REGS 10 +#define NR_CALLEE_SAVED_REGS 12 /* * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on @@ -16,11 +17,34 @@ struct cpu_suspend_ctx { u64 sp; } __aligned(16); -struct sleep_save_sp { - phys_addr_t *save_ptr_stash; - phys_addr_t save_ptr_stash_phys; +/* + * Memory to save the cpu state is allocated on the stack by + * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter(). + * This data must survive until cpu_resume() is called. + * + * This struct desribes the size and the layout of the saved cpu state. + * The layout of the callee_saved_regs is defined by the implementation + * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed + * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it + * returns, and the data would be subsequently corrupted by the call to the + * finisher. 
+ */ +struct sleep_stack_data { + struct cpu_suspend_ctx system_regs; + unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS]; }; +extern unsigned long *sleep_save_stash; + extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); extern void cpu_resume(void); +int __cpu_suspend_enter(struct sleep_stack_data *state); +void __cpu_suspend_exit(void); +void _cpu_resume(void); + +int swsusp_arch_suspend(void); +int swsusp_arch_resume(void); +int arch_hibernation_header_save(void *addr, unsigned int max_size); +int arch_hibernation_header_restore(void *addr); + #endif diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b9fd8ec790336569ee8fdedb4573e25b3045c29b..0961a24e8d4891bdf976f7cf1d39a39cd31cc229 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -20,6 +20,8 @@ #ifndef __ASM_SYSREG_H #define __ASM_SYSREG_H +#include + #include /* @@ -84,10 +86,21 @@ #define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\ (!!x)<<8 | 0x1f) -/* SCTLR_EL1 */ -#define SCTLR_EL1_CP15BEN (0x1 << 5) -#define SCTLR_EL1_SED (0x1 << 8) -#define SCTLR_EL1_SPAN (0x1 << 23) +/* Common SCTLR_ELx flags. */ +#define SCTLR_ELx_EE (1 << 25) +#define SCTLR_ELx_I (1 << 12) +#define SCTLR_ELx_SA (1 << 3) +#define SCTLR_ELx_C (1 << 2) +#define SCTLR_ELx_A (1 << 1) +#define SCTLR_ELx_M 1 + +#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ + SCTLR_ELx_SA | SCTLR_ELx_I) + +/* SCTLR_EL1 specific flags. 
*/ +#define SCTLR_EL1_SPAN (1 << 23) +#define SCTLR_EL1_SED (1 << 8) +#define SCTLR_EL1_CP15BEN (1 << 5) /* id_aa64isar0 */ @@ -215,6 +228,8 @@ #else +#include + asm( " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" " .equ .L__reg_num_x\\num, \\num\n" @@ -239,6 +254,23 @@ static inline void config_sctlr_el1(u32 clear, u32 set) val |= set; asm volatile("msr sctlr_el1, %0" : : "r" (val)); } + +/* + * Unlike read_cpuid, calls to read_sysreg are never expected to be + * optimized away or replaced with synthetic values. + */ +#define read_sysreg(r) ({ \ + u64 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +#define write_sysreg(v, r) do { \ + u64 __val = (u64)v; \ + asm volatile("msr " __stringify(r) ", %0" \ + : : "r" (__val)); \ +} while (0) + #endif #endif /* __ASM_SYSREG_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 4a9e82e4f72484fffce6959cfcc56831b85dad46..4b4292147584d96d645180b0f1cf5db420baef06 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -47,6 +47,9 @@ typedef unsigned long mm_segment_t; struct thread_info { unsigned long flags; /* low level flags */ mm_segment_t addr_limit; /* address limit */ +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + u64 ttbr0; /* saved TTBR0_EL1 */ +#endif struct task_struct *task; /* main task structure */ int preempt_count; /* 0 => preemptable, <0 => bug */ int cpu; /* cpu */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index c3d445b42351e1a529d94c4ba44e9c4d37544821..c37c064d7cddd62ab5c3334ff89e3077db3ffa91 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -18,6 +18,8 @@ #ifndef __ASM_UACCESS_H #define __ASM_UACCESS_H +#ifndef __ASSEMBLY__ + /* * User space memory access functions */ @@ -26,6 +28,7 @@ #include #include +#include #include #include #include @@ -123,6 +126,85 @@ static 
inline void set_fs(mm_segment_t fs) " .long (" #from " - .), (" #to " - .)\n" \ " .popsection\n" +/* + * User access enabling/disabling. + */ +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +static inline void uaccess_ttbr0_disable(void) +{ + unsigned long ttbr; + + /* reserved_ttbr0 placed at the end of swapper_pg_dir */ + ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; + write_sysreg(ttbr, ttbr0_el1); + isb(); +} + +static inline void uaccess_ttbr0_enable(void) +{ + unsigned long flags; + + /* + * Disable interrupts to avoid preemption between reading the 'ttbr0' + * variable and the MSR. A context switch could trigger an ASID + * roll-over and an update of 'ttbr0'. + */ + local_irq_save(flags); + write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); + isb(); + local_irq_restore(flags); +} +#else +static inline void uaccess_ttbr0_disable(void) +{ +} + +static inline void uaccess_ttbr0_enable(void) +{ +} +#endif + +#define __uaccess_disable(alt) \ +do { \ + if (system_uses_ttbr0_pan()) \ + uaccess_ttbr0_disable(); \ + else \ + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ + CONFIG_ARM64_PAN)); \ +} while (0) + +#define __uaccess_enable(alt) \ +do { \ + if (system_uses_ttbr0_pan()) \ + uaccess_ttbr0_enable(); \ + else \ + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ + CONFIG_ARM64_PAN)); \ +} while (0) + +static inline void uaccess_disable(void) +{ + __uaccess_disable(ARM64_HAS_PAN); +} + +static inline void uaccess_enable(void) +{ + __uaccess_enable(ARM64_HAS_PAN); +} + +/* + * These functions are no-ops when UAO is present. 
+ */ +static inline void uaccess_disable_not_uao(void) +{ + __uaccess_disable(ARM64_ALT_PAN_NOT_UAO); +} + +static inline void uaccess_enable_not_uao(void) +{ + __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); +} + /* * The "__xxx" versions of the user access functions do not verify the address * space - it must have been done previously with a separate "access_ok()" @@ -150,8 +232,7 @@ static inline void set_fs(mm_segment_t fs) do { \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ - asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\ - CONFIG_ARM64_PAN)); \ + uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ @@ -172,9 +253,8 @@ do { \ default: \ BUILD_BUG(); \ } \ + uaccess_disable_not_uao(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ - asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\ - CONFIG_ARM64_PAN)); \ } while (0) #define __get_user(x, ptr) \ @@ -219,8 +299,7 @@ do { \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ - asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\ - CONFIG_ARM64_PAN)); \ + uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ @@ -241,8 +320,7 @@ do { \ default: \ BUILD_BUG(); \ } \ - asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\ - CONFIG_ARM64_PAN)); \ + uaccess_disable_not_uao(); \ } while (0) #define __put_user(x, ptr) \ @@ -327,4 +405,73 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); +#else /* __ASSEMBLY__ */ + +#include +#include +#include + +/* + * User access enabling/disabling macros. 
+ */ + .macro uaccess_ttbr0_disable, tmp1 + mrs \tmp1, ttbr1_el1 // swapper_pg_dir + add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir + msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 + isb + .endm + + .macro uaccess_ttbr0_enable, tmp1 + get_thread_info \tmp1 + ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1 + msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 + isb + .endm + +/* + * These macros are no-ops when UAO is present. + */ + .macro uaccess_disable_not_uao, tmp1 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +alternative_if_not ARM64_HAS_PAN + uaccess_ttbr0_disable \tmp1 +alternative_else + nop + nop + nop + nop +alternative_endif +#endif +alternative_if_not ARM64_ALT_PAN_NOT_UAO + nop +alternative_else + SET_PSTATE_PAN(1) +alternative_endif + .endm + + .macro uaccess_enable_not_uao, tmp1, tmp2 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN +alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp2 // avoid preemption + uaccess_ttbr0_enable \tmp1 + restore_irq \tmp2 +alternative_else + nop + nop + nop + nop + nop + nop + nop +alternative_endif +#endif +alternative_if_not ARM64_ALT_PAN_NOT_UAO + nop +alternative_else + SET_PSTATE_PAN(0) +alternative_endif + .endm + +#endif /* __ASSEMBLY__ */ + #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 7a5df5252dd736e4038e04e40510351820056183..06e6a5238c4c47b41a9567cf7c004c7dae78b888 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -18,11 +18,29 @@ #ifndef __ASM__VIRT_H #define __ASM__VIRT_H +/* + * The arm64 hcall implementation uses x0 to specify the hcall type. A value + * less than 0xfff indicates a special hcall, such as get/set vector. + * Any other value is used as a pointer to the function to call. + */ + +/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */ +#define HVC_GET_VECTORS 0 + +/* + * HVC_SET_VECTORS - Set the value of the vbar_el2 register. 
+ * + * @x1: Physical address of the new vector table. + */ +#define HVC_SET_VECTORS 1 + #define BOOT_CPU_MODE_EL1 (0xe11) #define BOOT_CPU_MODE_EL2 (0xe12) #ifndef __ASSEMBLY__ +#include + /* * __boot_cpu_mode records what mode CPUs were booted in. * A correctly-implemented bootloader must start all CPUs in the same mode: @@ -50,6 +68,14 @@ static inline bool is_hyp_mode_mismatched(void) return __boot_cpu_mode[0] != __boot_cpu_mode[1]; } +static inline bool is_kernel_in_hyp_mode(void) +{ + u64 el; + + asm("mrs %0, CurrentEL" : "=r" (el)); + return el == CurrentEL_EL2; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[]; diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index f6e0269de4c73e1719a699e3534a2208166241e4..d3cfa681654f146ced345599cbd9ed40b8dc09aa 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -14,10 +14,10 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + hyp-stub.o psci.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ - smp.o smp_spin_table.o topology.o + smp.o smp_spin_table.o topology.o smccc-call.o extra-$(CONFIG_EFI) := efi-entry.o @@ -26,8 +26,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,objcopy) arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ - sys_compat.o entry32.o \ - ../../arm/kernel/opcodes.o + sys_compat.o entry32.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o @@ -44,10 +43,12 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o 
arm64-obj-$(CONFIG_ACPI) += acpi.o -arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o +arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o +arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o +arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o -obj-y += $(arm64-obj-y) vdso/ +obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) head-y := head.o extra-y += $(head-y) vmlinux.lds diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 65919bfe6cf5f40e5c59d2afd72590e9f748b98b..546ce8979b3a1de32290da426cc18a67bb1efe1b 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include #include @@ -68,8 +70,13 @@ EXPORT_SYMBOL(test_and_change_bit); #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); +NOKPROBE_SYMBOL(_mcount); #endif /* caching functions */ EXPORT_SYMBOL(__dma_inv_range); EXPORT_SYMBOL(__dma_clean_range); EXPORT_SYMBOL(__dma_flush_range); + + /* arm-smccc */ +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index c37202c0c838d01a71d56b05d114cd0b419ff480..a0a0f2b20608bad8b9e92e23102c0a93913dc388 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -281,9 +281,9 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) * Error-checking SWP macros implemented using ldxr{b}/stxr{b} */ #define __user_swpX_asm(data, addr, res, temp, B) \ +do { \ + uaccess_enable(); \ __asm__ __volatile__( \ - ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ - CONFIG_ARM64_PAN) \ "0: ldxr"B" %w2, [%3]\n" \ "1: stxr"B" %w0, %w1, [%3]\n" \ " cbz %w0, 2f\n" \ @@ -299,11 +299,11 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) " .popsection" \ _ASM_EXTABLE(0b, 4b) \ 
_ASM_EXTABLE(1b, 4b) \ - ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ - CONFIG_ARM64_PAN) \ : "=&r" (res), "+r" (data), "=&r" (temp) \ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ - : "memory") + : "memory"); \ + uaccess_disable(); \ +} while (0) #define __user_swp_asm(data, addr, res, temp) \ __user_swpX_asm(data, addr, res, temp, "") @@ -366,6 +366,21 @@ static int emulate_swpX(unsigned int address, unsigned int *data, return res; } +#define ARM_OPCODE_CONDITION_UNCOND 0xf + +static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr) +{ + u32 cc_bits = opcode >> 28; + + if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) { + if ((*aarch32_opcode_cond_checks[cc_bits])(psr)) + return ARM_OPCODE_CONDTEST_PASS; + else + return ARM_OPCODE_CONDTEST_FAIL; + } + return ARM_OPCODE_CONDTEST_UNCOND; +} + /* * swp_handler logs the id of calling process, dissects the instruction, sanity * checks the memory location, calls emulate_swpX for the actual operation and @@ -380,7 +395,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr) type = instr & TYPE_SWPB; - switch (arm_check_condition(instr, regs->pstate)) { + switch (aarch32_check_condition(instr, regs->pstate)) { case ARM_OPCODE_CONDTEST_PASS: break; case ARM_OPCODE_CONDTEST_FAIL: @@ -461,7 +476,7 @@ static int cp15barrier_handler(struct pt_regs *regs, u32 instr) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); - switch (arm_check_condition(instr, regs->pstate)) { + switch (aarch32_check_condition(instr, regs->pstate)) { case ARM_OPCODE_CONDTEST_PASS: break; case ARM_OPCODE_CONDTEST_FAIL: diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 087cf9a65359b5fac32a0fcb4c5fa2804ffd73bc..dac70c160289569782878ac82c3525f37afd71b5 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -22,12 +22,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include int main(void) { @@ -36,6 
+38,9 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0)); +#endif DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); @@ -49,6 +54,17 @@ int main(void) DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); + DEFINE(S_X8, offsetof(struct pt_regs, regs[8])); + DEFINE(S_X10, offsetof(struct pt_regs, regs[10])); + DEFINE(S_X12, offsetof(struct pt_regs, regs[12])); + DEFINE(S_X14, offsetof(struct pt_regs, regs[14])); + DEFINE(S_X16, offsetof(struct pt_regs, regs[16])); + DEFINE(S_X18, offsetof(struct pt_regs, regs[18])); + DEFINE(S_X20, offsetof(struct pt_regs, regs[20])); + DEFINE(S_X22, offsetof(struct pt_regs, regs[22])); + DEFINE(S_X24, offsetof(struct pt_regs, regs[24])); + DEFINE(S_X26, offsetof(struct pt_regs, regs[26])); + DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); #ifdef CONFIG_COMPAT @@ -109,58 +125,25 @@ int main(void) DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); - DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1)); - DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1)); - DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr)); - DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs)); + DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); DEFINE(VCPU_HPFAR_EL2, 
offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); - DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags)); - DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr)); - DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr)); - DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr)); - DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr)); - DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr)); - DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); - DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2)); - DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); - DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state)); - DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); - DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); - DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); - DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); - DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); - DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); - DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); - DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); - DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); - DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); - DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); - DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre)); - DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr)); - DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr)); - 
DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr)); - DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr)); - DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr)); - DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r)); - DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r)); - DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr)); - DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); - DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); - DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); - DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); - DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); - DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); + DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs)); + DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs)); #endif + DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0)); + DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); + BLANK(); + DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); + DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); + DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); return 0; } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7566cad9fa1da5a882ada85b5801a8ae33f4da0c..cdf1dca6413385f7ac2bbaa1faeff8198b8d4d92 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -19,13 +19,16 @@ #define pr_fmt(fmt) "CPU features: " fmt #include +#include #include +#include #include 
#include #include #include #include #include +#include unsigned long elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); @@ -43,6 +46,7 @@ unsigned int compat_elf_hwcap2 __read_mostly; #endif DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); +EXPORT_SYMBOL(cpu_hwcaps); #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ @@ -646,6 +650,11 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry) return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max); } +static bool runs_at_el2(const struct arm64_cpu_capabilities *entry) +{ + return is_kernel_in_hyp_mode(); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -698,6 +707,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = cpufeature_pan_not_uao, }, #endif /* CONFIG_ARM64_PAN */ + { + .desc = "Virtualization Host Extensions", + .capability = ARM64_HAS_VIRT_HOST_EXTN, + .matches = runs_at_el2, + }, {}, }; @@ -812,7 +826,13 @@ enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) for (i = 0; caps[i].matches; i++) if (caps[i].enable && cpus_have_cap(caps[i].capability)) - on_each_cpu(caps[i].enable, NULL, true); + /* + * Use stop_machine() as it schedules the work allowing + * us to modify PSTATE, instead of on_each_cpu() which + * uses an IPI, giving us a PSTATE that disappears when + * we return. 
+ */ + stop_machine(caps[i].enable, NULL, cpu_online_mask); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index e51f27ac13fd230b758f8405c8d801d2313b6798..6de6d9f43b959fd1cbb45a16bdc33bba524fccd6 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -48,6 +49,7 @@ static void mdscr_write(u32 mdscr) asm volatile("msr mdscr_el1, %0" :: "r" (mdscr)); local_dbg_restore(flags); } +NOKPROBE_SYMBOL(mdscr_write); static u32 mdscr_read(void) { @@ -55,6 +57,7 @@ static u32 mdscr_read(void) asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr)); return mdscr; } +NOKPROBE_SYMBOL(mdscr_read); /* * Allow root to disable self-hosted debug from userspace. @@ -103,6 +106,7 @@ void enable_debug_monitors(enum dbg_active_el el) mdscr_write(mdscr); } } +NOKPROBE_SYMBOL(enable_debug_monitors); void disable_debug_monitors(enum dbg_active_el el) { @@ -123,6 +127,7 @@ void disable_debug_monitors(enum dbg_active_el el) mdscr_write(mdscr); } } +NOKPROBE_SYMBOL(disable_debug_monitors); /* * OS lock clearing. 
@@ -173,6 +178,7 @@ static void set_regs_spsr_ss(struct pt_regs *regs) spsr |= DBG_SPSR_SS; regs->pstate = spsr; } +NOKPROBE_SYMBOL(set_regs_spsr_ss); static void clear_regs_spsr_ss(struct pt_regs *regs) { @@ -182,6 +188,7 @@ static void clear_regs_spsr_ss(struct pt_regs *regs) spsr &= ~DBG_SPSR_SS; regs->pstate = spsr; } +NOKPROBE_SYMBOL(clear_regs_spsr_ss); /* EL1 Single Step Handler hooks */ static LIST_HEAD(step_hook); @@ -225,6 +232,7 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr) return retval; } +NOKPROBE_SYMBOL(call_step_hook); static int single_step_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) @@ -253,6 +261,10 @@ static int single_step_handler(unsigned long addr, unsigned int esr, */ user_rewind_single_step(current); } else { +#ifdef CONFIG_KPROBES + if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED) + return 0; +#endif if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED) return 0; @@ -266,6 +278,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr, return 0; } +NOKPROBE_SYMBOL(single_step_handler); /* * Breakpoint handler is re-entrant as another breakpoint can @@ -303,6 +316,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) return fn ? 
fn(regs, esr) : DBG_HOOK_ERROR; } +NOKPROBE_SYMBOL(call_break_hook); static int brk_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) @@ -318,13 +332,21 @@ static int brk_handler(unsigned long addr, unsigned int esr, }; force_sig_info(SIGTRAP, &info, current); - } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { - pr_warning("Unexpected kernel BRK exception at EL1\n"); + } +#ifdef CONFIG_KPROBES + else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) { + if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED) + return -EFAULT; + } +#endif + else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { + pr_warn("Unexpected kernel BRK exception at EL1\n"); return -EFAULT; } return 0; } +NOKPROBE_SYMBOL(brk_handler); int aarch32_break_handler(struct pt_regs *regs) { @@ -369,6 +391,7 @@ int aarch32_break_handler(struct pt_regs *regs) force_sig_info(SIGTRAP, &info, current); return 0; } +NOKPROBE_SYMBOL(aarch32_break_handler); static int __init debug_traps_init(void) { @@ -390,6 +413,7 @@ void user_rewind_single_step(struct task_struct *task) if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) set_regs_spsr_ss(task_pt_regs(task)); } +NOKPROBE_SYMBOL(user_rewind_single_step); void user_fastforward_single_step(struct task_struct *task) { @@ -405,6 +429,7 @@ void kernel_enable_single_step(struct pt_regs *regs) mdscr_write(mdscr_read() | DBG_MDSCR_SS); enable_debug_monitors(DBG_ACTIVE_EL1); } +NOKPROBE_SYMBOL(kernel_enable_single_step); void kernel_disable_single_step(void) { @@ -412,21 +437,27 @@ void kernel_disable_single_step(void) mdscr_write(mdscr_read() & ~DBG_MDSCR_SS); disable_debug_monitors(DBG_ACTIVE_EL1); } +NOKPROBE_SYMBOL(kernel_disable_single_step); int kernel_active_single_step(void) { WARN_ON(!irqs_disabled()); return mdscr_read() & DBG_MDSCR_SS; } +NOKPROBE_SYMBOL(kernel_active_single_step); /* ptrace API */ void user_enable_single_step(struct task_struct *task) { - set_ti_thread_flag(task_thread_info(task), 
TIF_SINGLESTEP); - set_regs_spsr_ss(task_pt_regs(task)); + struct thread_info *ti = task_thread_info(task); + + if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP)) + set_regs_spsr_ss(task_pt_regs(task)); } +NOKPROBE_SYMBOL(user_enable_single_step); void user_disable_single_step(struct task_struct *task) { clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); } +NOKPROBE_SYMBOL(user_disable_single_step); diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 4eeb17198cfaf598fd403b1e4040c77f5ee3068a..b6abc852f2a142123150662bc6dd6bf5c3de62af 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -11,317 +11,34 @@ * */ -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include -#include #include -#include -#include -#include -#include -struct efi_memory_map memmap; - -static u64 efi_system_table; - -static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss; - -static struct mm_struct efi_mm = { - .mm_rb = RB_ROOT, - .pgd = efi_pgd, - .mm_users = ATOMIC_INIT(2), - .mm_count = ATOMIC_INIT(1), - .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), - .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), - .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), -}; - -static int __init is_normal_ram(efi_memory_desc_t *md) -{ - if (md->attribute & EFI_MEMORY_WB) - return 1; - return 0; -} - -/* - * Translate a EFI virtual address into a physical address: this is necessary, - * as some data members of the EFI system table are virtually remapped after - * SetVirtualAddressMap() has been called. 
- */ -static phys_addr_t efi_to_phys(unsigned long addr) +int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) { - efi_memory_desc_t *md; - - for_each_efi_memory_desc(&memmap, md) { - if (!(md->attribute & EFI_MEMORY_RUNTIME)) - continue; - if (md->virt_addr == 0) - /* no virtual mapping has been installed by the stub */ - break; - if (md->virt_addr <= addr && - (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) - return md->phys_addr + addr - md->virt_addr; - } - return addr; -} - -static int __init uefi_init(void) -{ - efi_char16_t *c16; - void *config_tables; - u64 table_size; - char vendor[100] = "unknown"; - int i, retval; - - efi.systab = early_memremap(efi_system_table, - sizeof(efi_system_table_t)); - if (efi.systab == NULL) { - pr_warn("Unable to map EFI system table.\n"); - return -ENOMEM; - } - - set_bit(EFI_BOOT, &efi.flags); - set_bit(EFI_64BIT, &efi.flags); + pteval_t prot_val; /* - * Verify the EFI Table + * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be + * executable, everything else can be mapped with the XN bits + * set. 
*/ - if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { - pr_err("System table signature incorrect\n"); - retval = -EINVAL; - goto out; - } - if ((efi.systab->hdr.revision >> 16) < 2) - pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", - efi.systab->hdr.revision >> 16, - efi.systab->hdr.revision & 0xffff); - - /* Show what we know for posterity */ - c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), - sizeof(vendor) * sizeof(efi_char16_t)); - if (c16) { - for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) - vendor[i] = c16[i]; - vendor[i] = '\0'; - early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); - } - - pr_info("EFI v%u.%.02u by %s\n", - efi.systab->hdr.revision >> 16, - efi.systab->hdr.revision & 0xffff, vendor); - - table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; - config_tables = early_memremap(efi_to_phys(efi.systab->tables), - table_size); - if (config_tables == NULL) { - pr_warn("Unable to map EFI config table array.\n"); - retval = -ENOMEM; - goto out; - } - retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, - sizeof(efi_config_table_64_t), NULL); - - early_memunmap(config_tables, table_size); -out: - early_memunmap(efi.systab, sizeof(efi_system_table_t)); - return retval; -} - -/* - * Return true for RAM regions we want to permanently reserve. 
- */ -static __init int is_reserve_region(efi_memory_desc_t *md) -{ - switch (md->type) { - case EFI_LOADER_CODE: - case EFI_LOADER_DATA: - case EFI_BOOT_SERVICES_CODE: - case EFI_BOOT_SERVICES_DATA: - case EFI_CONVENTIONAL_MEMORY: - case EFI_PERSISTENT_MEMORY: - return 0; - default: - break; - } - return is_normal_ram(md); -} - -static __init void reserve_regions(void) -{ - efi_memory_desc_t *md; - u64 paddr, npages, size; - - if (efi_enabled(EFI_DBG)) - pr_info("Processing EFI memory map:\n"); - - for_each_efi_memory_desc(&memmap, md) { - paddr = md->phys_addr; - npages = md->num_pages; - - if (efi_enabled(EFI_DBG)) { - char buf[64]; - - pr_info(" 0x%012llx-0x%012llx %s", - paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, - efi_md_typeattr_format(buf, sizeof(buf), md)); - } - - memrange_efi_to_native(&paddr, &npages); - size = npages << PAGE_SHIFT; - - if (is_normal_ram(md)) - early_init_dt_add_memory_arch(paddr, size); - - if (is_reserve_region(md)) { - memblock_reserve(paddr, size); - if (efi_enabled(EFI_DBG)) - pr_cont("*"); - } - - if (efi_enabled(EFI_DBG)) - pr_cont("\n"); - } - - set_bit(EFI_MEMMAP, &efi.flags); -} - -void __init efi_init(void) -{ - struct efi_fdt_params params; - - /* Grab UEFI information placed in FDT by stub */ - if (!efi_get_fdt_params(¶ms)) - return; - - efi_system_table = params.system_table; - - memblock_reserve(params.mmap & PAGE_MASK, - PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); - memmap.phys_map = params.mmap; - memmap.map = early_memremap(params.mmap, params.mmap_size); - if (memmap.map == NULL) { - /* - * If we are booting via UEFI, the UEFI memory map is the only - * description of memory we have, so there is little point in - * proceeding if we cannot access it. 
- */ - panic("Unable to map EFI memory map.\n"); - } - memmap.map_end = memmap.map + params.mmap_size; - memmap.desc_size = params.desc_size; - memmap.desc_version = params.desc_ver; - - if (uefi_init() < 0) - return; - - reserve_regions(); - early_memunmap(memmap.map, params.mmap_size); -} - -static bool __init efi_virtmap_init(void) -{ - efi_memory_desc_t *md; - - init_new_context(NULL, &efi_mm); - - for_each_efi_memory_desc(&memmap, md) { - pgprot_t prot; - - if (!(md->attribute & EFI_MEMORY_RUNTIME)) - continue; - if (md->virt_addr == 0) - return false; - - pr_info(" EFI remap 0x%016llx => %p\n", - md->phys_addr, (void *)md->virt_addr); - - /* - * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be - * executable, everything else can be mapped with the XN bits - * set. - */ - if (!is_normal_ram(md)) - prot = __pgprot(PROT_DEVICE_nGnRE); - else if (md->type == EFI_RUNTIME_SERVICES_CODE || - !PAGE_ALIGNED(md->phys_addr)) - prot = PAGE_KERNEL_EXEC; - else - prot = PAGE_KERNEL; - - create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, - md->num_pages << EFI_PAGE_SHIFT, - __pgprot(pgprot_val(prot) | PTE_NG)); - } - return true; -} - -/* - * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., - * non-early mapping of the UEFI system table and virtual mappings for all - * EFI_MEMORY_RUNTIME regions. 
- */ -static int __init arm64_enable_runtime_services(void) -{ - u64 mapsize; - - if (!efi_enabled(EFI_BOOT)) { - pr_info("EFI services will not be available.\n"); - return 0; - } - - if (efi_runtime_disabled()) { - pr_info("EFI runtime services will be disabled.\n"); - return 0; - } - - pr_info("Remapping and enabling EFI services.\n"); - - mapsize = memmap.map_end - memmap.map; - memmap.map = (__force void *)ioremap_cache(memmap.phys_map, - mapsize); - if (!memmap.map) { - pr_err("Failed to remap EFI memory map\n"); - return -ENOMEM; - } - memmap.map_end = memmap.map + mapsize; - efi.memmap = &memmap; - - efi.systab = (__force void *)ioremap_cache(efi_system_table, - sizeof(efi_system_table_t)); - if (!efi.systab) { - pr_err("Failed to remap EFI System Table\n"); - return -ENOMEM; - } - set_bit(EFI_SYSTEM_TABLES, &efi.flags); - - if (!efi_virtmap_init()) { - pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); - return -ENOMEM; - } - - /* Set up runtime services function pointers */ - efi_native_runtime_setup(); - set_bit(EFI_RUNTIME_SERVICES, &efi.flags); - - efi.runtime_version = efi.systab->hdr.revision; - + if ((md->attribute & EFI_MEMORY_WB) == 0) + prot_val = PROT_DEVICE_nGnRE; + else if (md->type == EFI_RUNTIME_SERVICES_CODE || + !PAGE_ALIGNED(md->phys_addr)) + prot_val = pgprot_val(PAGE_KERNEL_EXEC); + else + prot_val = pgprot_val(PAGE_KERNEL); + + create_pgd_mapping(mm, md->phys_addr, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, + __pgprot(prot_val | PTE_NG)); return 0; } -early_initcall(arm64_enable_runtime_services); static int __init arm64_dmi_init(void) { @@ -337,23 +54,6 @@ static int __init arm64_dmi_init(void) } core_initcall(arm64_dmi_init); -static void efi_set_pgd(struct mm_struct *mm) -{ - switch_mm(NULL, mm, NULL); -} - -void efi_virtmap_load(void) -{ - preempt_disable(); - efi_set_pgd(&efi_mm); -} - -void efi_virtmap_unload(void) -{ - efi_set_pgd(current->active_mm); - preempt_enable(); -} - /* * 
UpdateCapsule() depends on the system being shutdown via * ResetSystem(). diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index d311c635a00eada9561a119409d2fbf65bba4a2b..7f4e5cba0b13c1e4028fb6157512710ac28b6f8a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -29,7 +29,9 @@ #include #include #include +#include #include +#include #include /* @@ -109,6 +111,34 @@ mrs x22, elr_el1 mrs x23, spsr_el1 stp lr, x21, [sp, #S_LR] + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Set the TTBR0 PAN bit in SPSR. When the exception is taken from + * EL0, there is no need to check the state of TTBR0_EL1 since + * accesses are always enabled. + * Note that the meaning of this bit differs from the ARMv8.1 PAN + * feature as all TTBR0_EL1 accesses are disabled, not just those to + * user mappings. + */ +alternative_if_not ARM64_HAS_PAN + nop +alternative_else + b 1f // skip TTBR0 PAN +alternative_endif + + .if \el != 0 + mrs x21, ttbr0_el1 + tst x21, #0xffff << 48 // Check for the reserved ASID + orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR + b.eq 1f // TTBR0 access already disabled + and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR + .endif + + uaccess_ttbr0_disable x21 +1: +#endif + stp x22, x23, [sp, #S_PC] /* @@ -147,6 +177,42 @@ ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 ct_user_enter + .endif + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR + * PAN bit checking. + */ +alternative_if_not ARM64_HAS_PAN + nop +alternative_else + b 2f // skip TTBR0 PAN +alternative_endif + + .if \el != 0 + tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled + .endif + + uaccess_ttbr0_enable x0 + + .if \el == 0 + /* + * Enable errata workarounds only if returning to user. 
The only + * workaround currently required for TTBR0_EL1 changes are for the + * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache + * corruption). + */ + post_ttbr0_update_workaround + .endif +1: + .if \el != 0 + and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit + .endif +2: +#endif + + .if \el == 0 ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 #ifdef CONFIG_ARM64_ERRATUM_845719 @@ -168,6 +234,7 @@ alternative_else alternative_endif #endif .endif + msr elr_el1, x21 // set up the return data msr spsr_el1, x22 ldp x0, x1, [sp, #16 * 0] @@ -190,10 +257,6 @@ alternative_endif eret // return to kernel .endm - .macro get_thread_info, rd - mrs \rd, sp_el0 - .endm - .macro irq_stack_entry mov x19, sp // preserve the original sp @@ -258,6 +321,7 @@ tsk .req x28 // current thread_info /* * Exception vectors. */ + .pushsection ".entry.text", "ax" .align 11 ENTRY(vectors) @@ -293,7 +357,7 @@ END(vectors) * Invalid mode handlers */ .macro inv_entry, el, reason, regsize = 64 - kernel_entry el, \regsize + kernel_entry \el, \regsize mov x0, sp mov x1, #\reason mrs x2, esr_el1 @@ -355,6 +419,8 @@ el1_sync: lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 b.eq el1_da + cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1 + b.eq el1_ia cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el1_undef cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception @@ -366,6 +432,11 @@ el1_sync: cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 b.ge el1_dbg b el1_inv + +el1_ia: + /* + * Fall through to the Data abort case + */ el1_da: /* * Data abort handling @@ -551,7 +622,7 @@ el0_ia: enable_dbg_and_irq ct_user_exit mov x0, x26 - orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts + mov x1, x25 mov x2, sp bl do_mem_abort b ret_to_user @@ -820,6 +891,8 @@ __ni_sys_trace: bl do_ni_syscall b __sys_trace_return + .popsection // .entry.text + /* * Special 
system call wrappers. */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 9890d04a96cb8a4eeeef14f392ded2f92a0311ad..8cfd5ab377434b2ccafd5b8d7c4701386faf8a95 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -50,9 +50,6 @@ #error TEXT_OFFSET must be less than 2MB #endif -#define KERNEL_START _text -#define KERNEL_END _end - /* * Kernel startup entry point. * --------------------------- @@ -321,14 +318,14 @@ __create_page_tables: * dirty cache lines being evicted. */ mov x0, x25 - add x1, x26, #SWAPPER_DIR_SIZE + add x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE bl __inval_cache_range /* * Clear the idmap and swapper page tables. */ mov x0, x25 - add x6, x26, #SWAPPER_DIR_SIZE + add x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE 1: stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 @@ -407,7 +404,7 @@ __create_page_tables: * tables again to remove any speculatively loaded cache lines. */ mov x0, x25 - add x1, x26, #SWAPPER_DIR_SIZE + add x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE dmb sy bl __inval_cache_range @@ -551,8 +548,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems b.lt 4f // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps ubfx x0, x0, #11, #5 // to EL2 and allow access to - msr mdcr_el2, x0 // all PMU counters from EL1 4: + csel x0, xzr, x0, lt // all PMU counters from EL1 + msr mdcr_el2, x0 // (if they exist) /* Stage-2 translation */ msr vttbr_el2, xzr @@ -665,7 +663,7 @@ ENDPROC(__secondary_switched) * If it isn't, park the CPU */ .section ".idmap.text", "ax" -__enable_mmu: +ENTRY(__enable_mmu) mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value mrs x1, ID_AA64MMFR0_EL1 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 @@ -697,6 +695,9 @@ __enable_mmu: isb bl __create_page_tables // recreate kernel mapping + tlbi vmalle1 // Remove any stale TLB entries + dsb nsh + msr sctlr_el1, x19 // re-enable the MMU isb ic iallu // flush instructions 
fetched diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S new file mode 100644 index 0000000000000000000000000000000000000000..46f29b6560ecf9b221da669afda75a881dc5af5a --- /dev/null +++ b/arch/arm64/kernel/hibernate-asm.S @@ -0,0 +1,176 @@ +/* + * Hibernate low-level support + * + * Copyright (C) 2016 ARM Ltd. + * Author: James Morse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * To prevent the possibility of old and new partial table walks being visible + * in the tlb, switch the ttbr to a zero page when we invalidate the old + * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i + * Even switching to our copied tables will cause a changed output address at + * each stage of the walk. + */ +.macro break_before_make_ttbr_switch zero_page, page_table + msr ttbr1_el1, \zero_page + isb + tlbi vmalle1is + dsb ish + msr ttbr1_el1, \page_table + isb +.endm + + +/* + * Resume from hibernate + * + * Loads temporary page tables then restores the memory image. + * Finally branches to cpu_resume() to restore the state saved by + * swsusp_arch_suspend(). + * + * Because this code has to be copied to a 'safe' page, it can't call out to + * other functions by PC-relative address. Also remember that it may be + * mid-way through over-writing other functions. 
For this reason it contains + * code from flush_icache_range() and uses the copy_page() macro. + * + * This 'safe' page is mapped via ttbr0, and executed from there. This function + * switches to a copy of the linear map in ttbr1, performs the restore, then + * switches ttbr1 to the original kernel's swapper_pg_dir. + * + * All of memory gets written to, including code. We need to clean the kernel + * text to the Point of Coherence (PoC) before secondary cores can be booted. + * Because the kernel modules and executable pages mapped to user space are + * also written as data, we clean all pages we touch to the Point of + * Unification (PoU). + * + * x0: physical address of temporary page tables + * x1: physical address of swapper page tables + * x2: address of cpu_resume + * x3: linear map address of restore_pblist in the current kernel + * x4: physical address of __hyp_stub_vectors, or 0 + * x5: physical address of a zero page that remains zero after resume + */ +.pushsection ".hibernate_exit.text", "ax" +ENTRY(swsusp_arch_suspend_exit) + /* + * We execute from ttbr0, change ttbr1 to our copied linear map tables + * with a break-before-make via the zero page + */ + break_before_make_ttbr_switch x5, x0 + + mov x21, x1 + mov x30, x2 + mov x24, x4 + mov x25, x5 + + /* walk the restore_pblist and use copy_page() to over-write memory */ + mov x19, x3 + +1: ldr x10, [x19, #HIBERN_PBE_ORIG] + mov x0, x10 + ldr x1, [x19, #HIBERN_PBE_ADDR] + + copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 + + add x1, x10, #PAGE_SIZE + /* Clean the copied page to PoU - based on flush_icache_range() */ + dcache_line_size x2, x3 + sub x3, x2, #1 + bic x4, x10, x3 +2: dc cvau, x4 /* clean D line / unified line */ + add x4, x4, x2 + cmp x4, x1 + b.lo 2b + + ldr x19, [x19, #HIBERN_PBE_NEXT] + cbnz x19, 1b + dsb ish /* wait for PoU cleaning to finish */ + + /* switch to the restored kernels page tables */ + break_before_make_ttbr_switch x25, x21 + + ic ialluis + dsb ish + isb + + cbz x24, 3f 
/* Do we need to re-initialise EL2? */ + hvc #0 +3: ret + + .ltorg +ENDPROC(swsusp_arch_suspend_exit) + +/* + * Restore the hyp stub. + * This must be done before the hibernate page is unmapped by _cpu_resume(), + * but happens before any of the hyp-stub's code is cleaned to PoC. + * + * x24: The physical address of __hyp_stub_vectors + */ +el1_sync: + msr vbar_el2, x24 + eret +ENDPROC(el1_sync) + +.macro invalid_vector label +\label: + b \label +ENDPROC(\label) +.endm + + invalid_vector el2_sync_invalid + invalid_vector el2_irq_invalid + invalid_vector el2_fiq_invalid + invalid_vector el2_error_invalid + invalid_vector el1_sync_invalid + invalid_vector el1_irq_invalid + invalid_vector el1_fiq_invalid + invalid_vector el1_error_invalid + +/* el2 vectors - switch el2 here while we restore the memory image. */ + .align 11 +ENTRY(hibernate_el2_vectors) + ventry el2_sync_invalid // Synchronous EL2t + ventry el2_irq_invalid // IRQ EL2t + ventry el2_fiq_invalid // FIQ EL2t + ventry el2_error_invalid // Error EL2t + + ventry el2_sync_invalid // Synchronous EL2h + ventry el2_irq_invalid // IRQ EL2h + ventry el2_fiq_invalid // FIQ EL2h + ventry el2_error_invalid // Error EL2h + + ventry el1_sync // Synchronous 64-bit EL1 + ventry el1_irq_invalid // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync_invalid // Synchronous 32-bit EL1 + ventry el1_irq_invalid // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +END(hibernate_el2_vectors) + +.popsection diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c new file mode 100644 index 0000000000000000000000000000000000000000..f8df75d740f4891a018aa84e02cb7c2988f3ff84 --- /dev/null +++ b/arch/arm64/kernel/hibernate.c @@ -0,0 +1,487 @@ +/*: + * Hibernate support specific for ARM64 + * + * Derived from work on ARM hibernation support by: + * + * Ubuntu project, hibernation support for 
mach-dove + * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) + * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) + * https://lkml.org/lkml/2010/6/18/4 + * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html + * https://patchwork.kernel.org/patch/96442/ + * + * Copyright (C) 2006 Rafael J. Wysocki + * + * License terms: GNU General Public License (GPL) version 2 + */ +#define pr_fmt(x) "hibernate: " x +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Hibernate core relies on this value being 0 on resume, and marks it + * __nosavedata assuming it will keep the resume kernel's '0' value. This + * doesn't happen with either KASLR. + * + * defined as "__visible int in_suspend __nosavedata" in + * kernel/power/hibernate.c + */ +extern int in_suspend; + +/* Find a symbols alias in the linear map */ +#define LMADDR(x) phys_to_virt(virt_to_phys(x)) + +/* Do we need to reset el2? */ +#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) + +/* + * Start/end of the hibernate exit code, this must be copied to a 'safe' + * location in memory, and executed from there. + */ +extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; + +/* temporary el2 vectors in the __hibernate_exit_text section. */ +extern char hibernate_el2_vectors[]; + +/* hyp-stub vectors, used to restore el2 during resume from hibernate. */ +extern char __hyp_stub_vectors[]; + +/* + * Values that may not change over hibernate/resume. We put the build number + * and date in here so that we guarantee not to resume with a different + * kernel. + */ +struct arch_hibernate_hdr_invariants { + char uts_version[__NEW_UTS_LEN + 1]; +}; + +/* These values need to be know across a hibernate/restore. 
*/ +static struct arch_hibernate_hdr { + struct arch_hibernate_hdr_invariants invariants; + + /* These are needed to find the relocated kernel if built with kaslr */ + phys_addr_t ttbr1_el1; + void (*reenter_kernel)(void); + + /* + * We need to know where the __hyp_stub_vectors are after restore to + * re-configure el2. + */ + phys_addr_t __hyp_stub_vectors; +} resume_hdr; + +static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) +{ + memset(i, 0, sizeof(*i)); + memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version)); +} + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin); + unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1); + + return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); +} + +void notrace save_processor_state(void) +{ + WARN_ON(num_online_cpus() != 1); +} + +void notrace restore_processor_state(void) +{ +} + +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct arch_hibernate_hdr *hdr = addr; + + if (max_size < sizeof(*hdr)) + return -EOVERFLOW; + + arch_hdr_invariants(&hdr->invariants); + hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir); + hdr->reenter_kernel = _cpu_resume; + + /* We can't use __hyp_get_vectors() because kvm may still be loaded */ + if (el2_reset_needed()) + hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors); + else + hdr->__hyp_stub_vectors = 0; + + return 0; +} +EXPORT_SYMBOL(arch_hibernation_header_save); + +int arch_hibernation_header_restore(void *addr) +{ + struct arch_hibernate_hdr_invariants invariants; + struct arch_hibernate_hdr *hdr = addr; + + arch_hdr_invariants(&invariants); + if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) { + pr_crit("Hibernate image not generated by this kernel!\n"); + return -EINVAL; + } + + resume_hdr = *hdr; + + return 0; +} +EXPORT_SYMBOL(arch_hibernation_header_restore); + +/* + * Copies length bytes, starting at src_start into an new 
page, + * perform cache maintentance, then maps it at the specified address low + * address as executable. + * + * This is used by hibernate to copy the code it needs to execute when + * overwriting the kernel text. This function generates a new set of page + * tables, which it loads into ttbr0. + * + * Length is provided as we probably only want 4K of data, even on a 64K + * page system. + */ +static int create_safe_exec_page(void *src_start, size_t length, + unsigned long dst_addr, + phys_addr_t *phys_dst_addr, + void *(*allocator)(gfp_t mask), + gfp_t mask) +{ + int rc = 0; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned long dst = (unsigned long)allocator(mask); + + if (!dst) { + rc = -ENOMEM; + goto out; + } + + memcpy((void *)dst, src_start, length); + flush_icache_range(dst, dst + length); + + pgd = pgd_offset_raw(allocator(mask), dst_addr); + if (pgd_none(*pgd)) { + pud = allocator(mask); + if (!pud) { + rc = -ENOMEM; + goto out; + } + pgd_populate(&init_mm, pgd, pud); + } + + pud = pud_offset(pgd, dst_addr); + if (pud_none(*pud)) { + pmd = allocator(mask); + if (!pmd) { + rc = -ENOMEM; + goto out; + } + pud_populate(&init_mm, pud, pmd); + } + + pmd = pmd_offset(pud, dst_addr); + if (pmd_none(*pmd)) { + pte = allocator(mask); + if (!pte) { + rc = -ENOMEM; + goto out; + } + pmd_populate_kernel(&init_mm, pmd, pte); + } + + pte = pte_offset_kernel(pmd, dst_addr); + set_pte(pte, __pte(virt_to_phys((void *)dst) | + pgprot_val(PAGE_KERNEL_EXEC))); + + /* Load our new page tables */ + asm volatile("msr ttbr0_el1, %0;" + "isb;" + "tlbi vmalle1is;" + "dsb ish;" + "isb" : : "r"(virt_to_phys(pgd))); + + *phys_dst_addr = virt_to_phys((void *)dst); + +out: + return rc; +} + + +int swsusp_arch_suspend(void) +{ + int ret = 0; + unsigned long flags; + struct sleep_stack_data state; + + local_dbg_save(flags); + + if (__cpu_suspend_enter(&state)) { + ret = swsusp_save(); + } else { + /* Clean kernel to PoC for secondary core startup */ + 
__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START); + + /* + * Tell the hibernation core that we've just restored + * the memory + */ + in_suspend = 0; + + __cpu_suspend_exit(); + } + + local_dbg_restore(flags); + + return ret; +} + +static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, + unsigned long end) +{ + pte_t *src_pte; + pte_t *dst_pte; + unsigned long addr = start; + + dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pte) + return -ENOMEM; + pmd_populate_kernel(&init_mm, dst_pmd, dst_pte); + dst_pte = pte_offset_kernel(dst_pmd, start); + + src_pte = pte_offset_kernel(src_pmd, start); + do { + if (!pte_none(*src_pte)) + /* + * Resume will overwrite areas that may be marked + * read only (code, rodata). Clear the RDONLY bit from + * the temporary mappings we use during restore. + */ + set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY)); + } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); + + return 0; +} + +static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start, + unsigned long end) +{ + pmd_t *src_pmd; + pmd_t *dst_pmd; + unsigned long next; + unsigned long addr = start; + + if (pud_none(*dst_pud)) { + dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pmd) + return -ENOMEM; + pud_populate(&init_mm, dst_pud, dst_pmd); + } + dst_pmd = pmd_offset(dst_pud, start); + + src_pmd = pmd_offset(src_pud, start); + do { + next = pmd_addr_end(addr, end); + if (pmd_none(*src_pmd)) + continue; + if (pmd_table(*src_pmd)) { + if (copy_pte(dst_pmd, src_pmd, addr, next)) + return -ENOMEM; + } else { + set_pmd(dst_pmd, + __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY)); + } + } while (dst_pmd++, src_pmd++, addr = next, addr != end); + + return 0; +} + +static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start, + unsigned long end) +{ + pud_t *dst_pud; + pud_t *src_pud; + unsigned long next; + unsigned long addr = start; + + if (pgd_none(*dst_pgd)) { + dst_pud = (pud_t 
*)get_safe_page(GFP_ATOMIC); + if (!dst_pud) + return -ENOMEM; + pgd_populate(&init_mm, dst_pgd, dst_pud); + } + dst_pud = pud_offset(dst_pgd, start); + + src_pud = pud_offset(src_pgd, start); + do { + next = pud_addr_end(addr, end); + if (pud_none(*src_pud)) + continue; + if (pud_table(*(src_pud))) { + if (copy_pmd(dst_pud, src_pud, addr, next)) + return -ENOMEM; + } else { + set_pud(dst_pud, + __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY)); + } + } while (dst_pud++, src_pud++, addr = next, addr != end); + + return 0; +} + +static int copy_page_tables(pgd_t *dst_pgd, unsigned long start, + unsigned long end) +{ + unsigned long next; + unsigned long addr = start; + pgd_t *src_pgd = pgd_offset_k(start); + + dst_pgd = pgd_offset_raw(dst_pgd, start); + do { + next = pgd_addr_end(addr, end); + if (pgd_none(*src_pgd)) + continue; + if (copy_pud(dst_pgd, src_pgd, addr, next)) + return -ENOMEM; + } while (dst_pgd++, src_pgd++, addr = next, addr != end); + + return 0; +} + +/* + * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit(). + * + * Memory allocated by get_safe_page() will be dealt with by the hibernate code, + * we don't need to free it here. + */ +int swsusp_arch_resume(void) +{ + int rc = 0; + void *zero_page; + size_t exit_size; + pgd_t *tmp_pg_dir; + void *lm_restore_pblist; + phys_addr_t phys_hibernate_exit; + void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *, + void *, phys_addr_t, phys_addr_t); + + /* + * Locate the exit code in the bottom-but-one page, so that *NULL + * still has disastrous affects. + */ + hibernate_exit = (void *)PAGE_SIZE; + exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start; + /* + * Copy swsusp_arch_suspend_exit() to a safe page. This will generate + * a new set of ttbr0 page tables and load them. 
+ */ + rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, + (unsigned long)hibernate_exit, + &phys_hibernate_exit, + (void *)get_safe_page, GFP_ATOMIC); + if (rc) { + pr_err("Failed to create safe executable page for hibernate_exit code."); + goto out; + } + + /* + * The hibernate exit text contains a set of el2 vectors, that will + * be executed at el2 with the mmu off in order to reload hyp-stub. + */ + __flush_dcache_area(hibernate_exit, exit_size); + + /* + * Restoring the memory image will overwrite the ttbr1 page tables. + * Create a second copy of just the linear map, and use this when + * restoring. + */ + tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); + if (!tmp_pg_dir) { + pr_err("Failed to allocate memory for temporary page tables."); + rc = -ENOMEM; + goto out; + } + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); + if (rc) + goto out; + + /* + * Since we only copied the linear map, we need to find restore_pblist's + * linear map address. + */ + lm_restore_pblist = LMADDR(restore_pblist); + + /* + * KASLR will cause the el2 vectors to be in a different location in + * the resumed kernel. Load hibernate's temporary copy into el2. + * + * We can skip this step if we booted at EL1, or are running with VHE. + */ + if (el2_reset_needed()) { + phys_addr_t el2_vectors = phys_hibernate_exit; /* base */ + el2_vectors += hibernate_el2_vectors - + __hibernate_exit_text_start; /* offset */ + + __hyp_set_vectors(el2_vectors); + } + + /* + * We need a zero page that is zero before & after resume in order to + * to break before make on the ttbr1 page tables. 
+ */ + zero_page = (void *)get_safe_page(GFP_ATOMIC); + + hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, + resume_hdr.reenter_kernel, lm_restore_pblist, + resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); + +out: + return rc; +} + +static int check_boot_cpu_online_pm_callback(struct notifier_block *nb, + unsigned long action, void *ptr) +{ + if (action == PM_HIBERNATION_PREPARE && + cpumask_first(cpu_online_mask) != 0) { + pr_warn("CPU0 is offline.\n"); + return notifier_from_errno(-ENODEV); + } + + return NOTIFY_OK; +} + +static int __init check_boot_cpu_online_init(void) +{ + /* + * Set this pm_notifier callback with a lower priority than + * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be + * called earlier to disable cpu hotplug before the cpu online check. + */ + pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX); + + return 0; +} +core_initcall(check_boot_cpu_online_init); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index b45c95d34b8323e74992e0a4a56e6da0e1257c60..367a954f9937979c574a805bab2e3cbad706d202 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,7 @@ static u64 read_wb_reg(int reg, int n) return val; } +NOKPROBE_SYMBOL(read_wb_reg); static void write_wb_reg(int reg, int n, u64 val) { @@ -140,6 +142,7 @@ static void write_wb_reg(int reg, int n, u64 val) } isb(); } +NOKPROBE_SYMBOL(write_wb_reg); /* * Convert a breakpoint privilege level to the corresponding exception @@ -157,6 +160,7 @@ static enum dbg_active_el debug_exception_level(int privilege) return -EINVAL; } } +NOKPROBE_SYMBOL(debug_exception_level); enum hw_breakpoint_ops { HW_BREAKPOINT_INSTALL, @@ -575,6 +579,7 @@ static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable) write_wb_reg(reg, i, ctrl); } } +NOKPROBE_SYMBOL(toggle_bp_registers); /* * Debug 
exception handlers. @@ -654,6 +659,7 @@ unlock: return 0; } +NOKPROBE_SYMBOL(breakpoint_handler); static int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) @@ -756,6 +762,7 @@ unlock: return 0; } +NOKPROBE_SYMBOL(watchpoint_handler); /* * Handle single-step exception. @@ -813,6 +820,7 @@ int reinstall_suspended_bps(struct pt_regs *regs) return !handled_exception; } +NOKPROBE_SYMBOL(reinstall_suspended_bps); /* * Context-switcher for restoring suspended breakpoints. diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index a272f335c289dcb5f52144c815edf6938757a218..8727f44907725445efd25735b9638672a2599cb0 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -22,6 +22,8 @@ #include #include +#include +#include #include #include @@ -53,15 +55,26 @@ ENDPROC(__hyp_stub_vectors) .align 11 el1_sync: - mrs x1, esr_el2 - lsr x1, x1, #26 - cmp x1, #0x16 - b.ne 2f // Not an HVC trap - cbz x0, 1f - msr vbar_el2, x0 // Set vbar_el2 - b 2f -1: mrs x0, vbar_el2 // Return vbar_el2 -2: eret + mrs x30, esr_el2 + lsr x30, x30, #ESR_ELx_EC_SHIFT + + cmp x30, #ESR_ELx_EC_HVC64 + b.ne 9f // Not an HVC trap + + cmp x0, #HVC_GET_VECTORS + b.ne 1f + mrs x0, vbar_el2 + b 9f + +1: cmp x0, #HVC_SET_VECTORS + b.ne 2f + msr vbar_el2, x1 + b 9f + + /* Someone called kvm_call_hyp() against the hyp-stub... */ +2: mov x0, #ARM_EXCEPTION_HYP_GONE + +9: eret ENDPROC(el1_sync) .macro invalid_vector label @@ -101,10 +114,18 @@ ENDPROC(\label) */ ENTRY(__hyp_get_vectors) - mov x0, xzr - // fall through -ENTRY(__hyp_set_vectors) + str lr, [sp, #-16]! + mov x0, #HVC_GET_VECTORS hvc #0 + ldr lr, [sp], #16 ret ENDPROC(__hyp_get_vectors) + +ENTRY(__hyp_set_vectors) + str lr, [sp, #-16]! 
+ mov x1, x0 + mov x0, #HVC_SET_VECTORS + hvc #0 + ldr lr, [sp], #16 + ret ENDPROC(__hyp_set_vectors) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 62152a48e65f6ad5b3f1aad8f452b7f123cd07ac..5f72243e5ba70a6a15ee531f06839c5f1ec1e516 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #define AARCH64_INSN_SF_BIT BIT(31) @@ -163,6 +164,32 @@ static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) aarch64_insn_is_nop(insn); } +bool __kprobes aarch64_insn_uses_literal(u32 insn) +{ + /* ldr/ldrsw (literal), prfm */ + + return aarch64_insn_is_ldr_lit(insn) || + aarch64_insn_is_ldrsw_lit(insn) || + aarch64_insn_is_adr_adrp(insn) || + aarch64_insn_is_prfm_lit(insn); +} + +bool __kprobes aarch64_insn_is_branch(u32 insn) +{ + /* b, bl, cb*, tb*, b.cond, br, blr */ + + return aarch64_insn_is_b(insn) || + aarch64_insn_is_bl(insn) || + aarch64_insn_is_cbz(insn) || + aarch64_insn_is_cbnz(insn) || + aarch64_insn_is_tbz(insn) || + aarch64_insn_is_tbnz(insn) || + aarch64_insn_is_ret(insn) || + aarch64_insn_is_br(insn) || + aarch64_insn_is_blr(insn) || + aarch64_insn_is_bcond(insn); +} + /* * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a * Section B2.6.5 "Concurrent modification and execution of instructions": @@ -1176,6 +1203,14 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset) BUG(); } +/* + * Extract the Op/CR data from a msr/mrs instruction. 
+ */ +u32 aarch64_insn_extract_system_reg(u32 insn) +{ + return (insn & 0x1FFFE0) >> 5; +} + bool aarch32_insn_is_wide(u32 insn) { return insn >= 0xe800; @@ -1201,3 +1236,101 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn) { return insn & CRM_MASK; } + +static bool __kprobes __check_eq(unsigned long pstate) +{ + return (pstate & PSR_Z_BIT) != 0; +} + +static bool __kprobes __check_ne(unsigned long pstate) +{ + return (pstate & PSR_Z_BIT) == 0; +} + +static bool __kprobes __check_cs(unsigned long pstate) +{ + return (pstate & PSR_C_BIT) != 0; +} + +static bool __kprobes __check_cc(unsigned long pstate) +{ + return (pstate & PSR_C_BIT) == 0; +} + +static bool __kprobes __check_mi(unsigned long pstate) +{ + return (pstate & PSR_N_BIT) != 0; +} + +static bool __kprobes __check_pl(unsigned long pstate) +{ + return (pstate & PSR_N_BIT) == 0; +} + +static bool __kprobes __check_vs(unsigned long pstate) +{ + return (pstate & PSR_V_BIT) != 0; +} + +static bool __kprobes __check_vc(unsigned long pstate) +{ + return (pstate & PSR_V_BIT) == 0; +} + +static bool __kprobes __check_hi(unsigned long pstate) +{ + pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return (pstate & PSR_C_BIT) != 0; +} + +static bool __kprobes __check_ls(unsigned long pstate) +{ + pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return (pstate & PSR_C_BIT) == 0; +} + +static bool __kprobes __check_ge(unsigned long pstate) +{ + pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return (pstate & PSR_N_BIT) == 0; +} + +static bool __kprobes __check_lt(unsigned long pstate) +{ + pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return (pstate & PSR_N_BIT) != 0; +} + +static bool __kprobes __check_gt(unsigned long pstate) +{ + /*PSR_N_BIT ^= PSR_V_BIT */ + unsigned long temp = pstate ^ (pstate << 3); + + temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ + return (temp & PSR_N_BIT) == 0; +} + +static bool __kprobes __check_le(unsigned long pstate) +{ + /*PSR_N_BIT ^= PSR_V_BIT */ + 
unsigned long temp = pstate ^ (pstate << 3); + + temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ + return (temp & PSR_N_BIT) != 0; +} + +static bool __kprobes __check_al(unsigned long pstate) +{ + return true; +} + +/* + * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that + * it behaves identically to 0b1110 ("al"). + */ +pstate_check_t * const aarch32_opcode_cond_checks[16] = { + __check_eq, __check_ne, __check_cs, __check_cc, + __check_mi, __check_pl, __check_vs, __check_vc, + __check_hi, __check_ls, __check_ge, __check_lt, + __check_gt, __check_le, __check_al, __check_al +}; diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index bcac81e600b9af09341cffdd80d83880626f77b2..814d0c51b2f91b2e2e696ad81ec8066f3ed0d5be 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -22,6 +22,7 @@ #include #include #include +#include #include struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { @@ -218,6 +219,7 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; } +NOKPROBE_SYMBOL(kgdb_brk_fn) static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) { @@ -226,12 +228,14 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) return 0; } +NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) { kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; } +NOKPROBE_SYMBOL(kgdb_step_brk_fn); static struct break_hook kgdb_brkpt_hook = { .esr_mask = 0xffffffff, diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ce06312e3d349d862dbe10d7113fea9973ed1698 --- /dev/null +++ b/arch/arm64/kernel/probes/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ + kprobes_trampoline.o \ + simulate-insn.o diff --git a/arch/arm64/kernel/probes/decode-insn.c 
b/arch/arm64/kernel/probes/decode-insn.c new file mode 100644 index 0000000000000000000000000000000000000000..f7931d900bca8fe171a578c3a7e63f5e8a92c931 --- /dev/null +++ b/arch/arm64/kernel/probes/decode-insn.c @@ -0,0 +1,174 @@ +/* + * arch/arm64/kernel/probes/decode-insn.c + * + * Copyright (C) 2013 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include "decode-insn.h" +#include "simulate-insn.h" + +static bool __kprobes aarch64_insn_is_steppable(u32 insn) +{ + /* + * Branch instructions will write a new value into the PC which is + * likely to be relative to the XOL address and therefore invalid. + * Deliberate generation of an exception during stepping is also not + * currently safe. Lastly, MSR instructions can do any number of nasty + * things we can't handle during single-stepping. + */ + if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) { + if (aarch64_insn_is_branch(insn) || + aarch64_insn_is_msr_imm(insn) || + aarch64_insn_is_msr_reg(insn) || + aarch64_insn_is_exception(insn) || + aarch64_insn_is_eret(insn)) + return false; + + /* + * The MRS instruction may not return a correct value when + * executing in the single-stepping environment. We do make one + * exception, for reading the DAIF bits. + */ + if (aarch64_insn_is_mrs(insn)) + return aarch64_insn_extract_system_reg(insn) + != AARCH64_INSN_SPCLREG_DAIF; + + /* + * The HINT instruction is is problematic when single-stepping, + * except for the NOP case. 
+ */ + if (aarch64_insn_is_hint(insn)) + return aarch64_insn_is_nop(insn); + + return true; + } + + /* + * Instructions which load PC relative literals are not going to work + * when executed from an XOL slot. Instructions doing an exclusive + * load/store are not going to complete successfully when single-step + * exception handling happens in the middle of the sequence. + */ + if (aarch64_insn_uses_literal(insn) || + aarch64_insn_is_exclusive(insn)) + return false; + + return true; +} + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD If instruction is supported and uses instruction slot, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. + */ +static enum kprobe_insn __kprobes +arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + /* + * Instructions reading or modifying the PC won't work from the XOL + * slot. + */ + if (aarch64_insn_is_steppable(insn)) + return INSN_GOOD; + + if (aarch64_insn_is_bcond(insn)) { + asi->handler = simulate_b_cond; + } else if (aarch64_insn_is_cbz(insn) || + aarch64_insn_is_cbnz(insn)) { + asi->handler = simulate_cbz_cbnz; + } else if (aarch64_insn_is_tbz(insn) || + aarch64_insn_is_tbnz(insn)) { + asi->handler = simulate_tbz_tbnz; + } else if (aarch64_insn_is_adr_adrp(insn)) { + asi->handler = simulate_adr_adrp; + } else if (aarch64_insn_is_b(insn) || + aarch64_insn_is_bl(insn)) { + asi->handler = simulate_b_bl; + } else if (aarch64_insn_is_br(insn) || + aarch64_insn_is_blr(insn) || + aarch64_insn_is_ret(insn)) { + asi->handler = simulate_br_blr_ret; + } else if (aarch64_insn_is_ldr_lit(insn)) { + asi->handler = simulate_ldr_literal; + } else if (aarch64_insn_is_ldrsw_lit(insn)) { + asi->handler = simulate_ldrsw_literal; + } else { + /* + * Instruction cannot be stepped out-of-line and we don't + * (yet) simulate it. 
+ */ + return INSN_REJECTED; + } + + return INSN_GOOD_NO_SLOT; +} + +static bool __kprobes +is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) +{ + while (scan_start > scan_end) { + /* + * atomic region starts from exclusive load and ends with + * exclusive store. + */ + if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start))) + return false; + else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start))) + return true; + scan_start--; + } + + return false; +} + +enum kprobe_insn __kprobes +arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) +{ + enum kprobe_insn decoded; + kprobe_opcode_t insn = le32_to_cpu(*addr); + kprobe_opcode_t *scan_start = addr - 1; + kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; +#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) + struct module *mod; +#endif + + if (addr >= (kprobe_opcode_t *)_text && + scan_end < (kprobe_opcode_t *)_text) + scan_end = (kprobe_opcode_t *)_text; +#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) + else { + preempt_disable(); + mod = __module_address((unsigned long)addr); + if (mod && within_module_init((unsigned long)addr, mod) && + !within_module_init((unsigned long)scan_end, mod)) + scan_end = (kprobe_opcode_t *)mod->module_init; + else if (mod && within_module_core((unsigned long)addr, mod) && + !within_module_core((unsigned long)scan_end, mod)) + scan_end = (kprobe_opcode_t *)mod->module_core; + preempt_enable(); + } +#endif + decoded = arm_probe_decode_insn(insn, asi); + + if (decoded == INSN_REJECTED || + is_probed_address_atomic(scan_start, scan_end)) + return INSN_REJECTED; + + return decoded; +} diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h new file mode 100644 index 0000000000000000000000000000000000000000..d438289646a6f5a40030127c0b100209adbbf3b0 --- /dev/null +++ b/arch/arm64/kernel/probes/decode-insn.h @@ -0,0 +1,35 @@ +/* + * arch/arm64/kernel/probes/decode-insn.h + * + * 
Copyright (C) 2013 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _ARM_KERNEL_KPROBES_ARM64_H +#define _ARM_KERNEL_KPROBES_ARM64_H + +/* + * ARM strongly recommends a limit of 128 bytes between LoadExcl and + * StoreExcl instructions in a single thread of execution. So keep the + * max atomic context size as 32. + */ +#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t)) + +enum kprobe_insn { + INSN_REJECTED, + INSN_GOOD_NO_SLOT, + INSN_GOOD, +}; + +enum kprobe_insn __kprobes +arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi); + +#endif /* _ARM_KERNEL_KPROBES_ARM64_H */ diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c new file mode 100644 index 0000000000000000000000000000000000000000..1ee93c7c5a75b081dcba584b4be4d1233c60ae6a --- /dev/null +++ b/arch/arm64/kernel/probes/kprobes.c @@ -0,0 +1,657 @@ +/* + * arch/arm64/kernel/probes/kprobes.c + * + * Kprobes support for ARM64 + * + * Copyright (C) 2013 Linaro Limited. + * Author: Sandeepa Prabhu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "decode-insn.h" + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); + +static void __kprobes arch_prepare_ss_slot(struct kprobe *p) +{ + /* prepare insn slot */ + p->ainsn.insn[0] = cpu_to_le32(p->opcode); + + flush_icache_range((uintptr_t) (p->ainsn.insn), + (uintptr_t) (p->ainsn.insn) + + MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + + /* + * Needs restoring of return address after stepping xol. + */ + p->ainsn.restore = (unsigned long) p->addr + + sizeof(kprobe_opcode_t); +} + +static void __kprobes arch_prepare_simulate(struct kprobe *p) +{ + /* This instructions is not executed xol. No need to adjust the PC */ + p->ainsn.restore = 0; +} + +static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (p->ainsn.handler) + p->ainsn.handler((u32)p->opcode, (long)p->addr, regs); + + /* single step simulated, now go for post processing */ + post_kprobe_handler(kcb, regs); +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + unsigned long probe_addr = (unsigned long)p->addr; + extern char __start_rodata[]; + extern char __end_rodata[]; + + if (probe_addr & 0x3) + return -EINVAL; + + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + if (in_exception_text(probe_addr)) + return -EINVAL; + if (probe_addr >= (unsigned long) __start_rodata && + probe_addr <= (unsigned long) __end_rodata) + return -EINVAL; + + /* decode instruction */ + switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) { + case INSN_REJECTED: /* insn not supported */ + return -EINVAL; + + case INSN_GOOD_NO_SLOT: /* insn need simulation */ + p->ainsn.insn = NULL; + break; + + case 
INSN_GOOD: /* instruction uses slot */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) + return -ENOMEM; + break; + }; + + /* prepare the instruction */ + if (p->ainsn.insn) + arch_prepare_ss_slot(p); + else + arch_prepare_simulate(p); + + return 0; +} + +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +{ + void *addrs[1]; + u32 insns[1]; + + addrs[0] = (void *)addr; + insns[0] = (u32)opcode; + + return aarch64_insn_patch_text(addrs, insns, 1); +} + +/* arm kprobe: install breakpoint in text */ +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, BRK64_OPCODE_KPROBES); +} + +/* disarm kprobe: remove breakpoint from text */ +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, p->opcode); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +/* + * The D-flag (Debug mask) is set (masked) upon debug exception entry. + * Kprobes needs to clear (unmask) D-flag -ONLY- in case of recursive + * probe i.e. when probe hit from kprobe handler context upon + * executing the pre/post handlers. In this case we return with + * D-flag clear so that single-stepping can be carried-out. + * + * Leave D-flag set in all other cases. 
+ */ +static void __kprobes +spsr_set_debug_flag(struct pt_regs *regs, int mask) +{ + unsigned long spsr = regs->pstate; + + if (mask) + spsr |= PSR_D_BIT; + else + spsr &= ~PSR_D_BIT; + + regs->pstate = spsr; +} + +/* + * Interrupts need to be disabled before single-step mode is set, and not + * reenabled until after single-step mode ends. + * Without disabling interrupt on local CPU, there is a chance of + * interrupt occurrence in the period of exception return and start of + * out-of-line single-step, that result in wrongly single stepping + * into the interrupt handler. + */ +static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + kcb->saved_irqflag = regs->pstate; + regs->pstate |= PSR_I_BIT; +} + +static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + if (kcb->saved_irqflag & PSR_I_BIT) + regs->pstate |= PSR_I_BIT; + else + regs->pstate &= ~PSR_I_BIT; +} + +static void __kprobes +set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr) +{ + kcb->ss_ctx.ss_pending = true; + kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t); +} + +static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) +{ + kcb->ss_ctx.ss_pending = false; + kcb->ss_ctx.match_addr = 0; +} + +static void __kprobes setup_singlestep(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + unsigned long slot; + + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + + if (p->ainsn.insn) { + /* prepare for single stepping */ + slot = (unsigned long)p->ainsn.insn; + + set_ss_context(kcb, slot); /* mark pending ss */ + + if (kcb->kprobe_status == KPROBE_REENTER) + spsr_set_debug_flag(regs, 0); + else + WARN_ON(regs->pstate & PSR_D_BIT); + + /* IRQs and single stepping do not mix well. 
*/ + kprobes_save_local_irqflag(kcb, regs); + kernel_enable_single_step(regs); + instruction_pointer_set(regs, slot); + } else { + /* insn simulation */ + arch_simulate_insn(p, regs); + } +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + + return 1; +} + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + + if (!cur) + return; + + /* return addr restore if non-branching insn */ + if (cur->ainsn.restore != 0) + instruction_pointer_set(regs, cur->ainsn.restore); + + /* restore back original saved kprobe variables and continue */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + return; + } + /* call post handler */ + kcb->kprobe_status = KPROBE_HIT_SSDONE; + if (cur->post_handler) { + /* post_handler can hit breakpoint and single step + * again, so we enable D-flag for recursive exception. + */ + cur->post_handler(cur, regs, 0); + } + + reset_current_kprobe(); +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the ip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. 
+ */ + instruction_pointer_set(regs, (unsigned long) cur->addr); + if (!instruction_pointer(regs)) + BUG(); + + kernel_disable_single_step(); + if (kcb->kprobe_status == KPROBE_REENTER) + spsr_set_debug_flag(regs, 1); + + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accounting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, fsr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + } + return 0; +} + +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +static void __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p, *cur_kprobe; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + kcb = get_kprobe_ctlblk(); + cur_kprobe = kprobe_running(); + + p = get_kprobe((kprobe_opcode_t *) addr); + + if (p) { + if (cur_kprobe) { + if (reenter_kprobe(p, regs, kcb)) + return; + } else { + /* Probe hit */ + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it prepped + * for calling the break_handler below on re-entry, + * so get out doing nothing more here. 
+ * + * pre_handler can hit a breakpoint and can step thru + * before return, keep PSTATE D-flag enabled until + * pre_handler return back. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) { + setup_singlestep(p, regs, kcb, 0); + return; + } + } + } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) == + BRK64_OPCODE_KPROBES) && cur_kprobe) { + /* We probably hit a jprobe. Call its break handler. */ + if (cur_kprobe->break_handler && + cur_kprobe->break_handler(cur_kprobe, regs)) { + setup_singlestep(cur_kprobe, regs, kcb, 0); + return; + } + } + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + * Return back to original instruction, and continue. + */ +} + +static int __kprobes +kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr) +{ + if ((kcb->ss_ctx.ss_pending) + && (kcb->ss_ctx.match_addr == addr)) { + clear_ss_context(kcb); /* clear pending ss */ + return DBG_HOOK_HANDLED; + } + /* not ours, kprobes should ignore it */ + return DBG_HOOK_ERROR; +} + +int __kprobes +kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + int retval; + + /* return error if this is not our step */ + retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); + + if (retval == DBG_HOOK_HANDLED) { + kprobes_restore_local_irqflag(kcb, regs); + kernel_disable_single_step(); + + if (kcb->kprobe_status == KPROBE_REENTER) + spsr_set_debug_flag(regs, 1); + + post_kprobe_handler(kcb, regs); + } + + return retval; +} + +int __kprobes +kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) +{ + kprobe_handler(regs); + return DBG_HOOK_HANDLED; +} + +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct jprobe *jp = container_of(p, struct jprobe, kp); + struct kprobe_ctlblk *kcb = 
get_kprobe_ctlblk(); + + kcb->jprobe_saved_regs = *regs; + /* + * Since we can't be sure where in the stack frame "stacked" + * pass-by-value arguments are stored we just don't try to + * duplicate any of the stack. Do not use jprobes on functions that + * use more than 64 bytes (after padding each to an 8 byte boundary) + * of arguments, or pass individual arguments larger than 16 bytes. + */ + + instruction_pointer_set(regs, (unsigned long) jp->entry); + preempt_disable(); + pause_graph_tracing(); + return 1; +} + +void __kprobes jprobe_return(void) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + /* + * Jprobe handler return by entering break exception, + * encoded same as kprobe, but with following conditions + * -a special PC to identify it from the other kprobes. + * -restore stack addr to original saved pt_regs + */ + asm volatile(" mov sp, %0 \n" + "jprobe_return_break: brk %1 \n" + : + : "r" (kcb->jprobe_saved_regs.sp), + "I" (BRK64_ESR_KPROBES) + : "memory"); + + unreachable(); +} + +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + long stack_addr = kcb->jprobe_saved_regs.sp; + long orig_sp = kernel_stack_pointer(regs); + struct jprobe *jp = container_of(p, struct jprobe, kp); + extern const char jprobe_return_break[]; + + if (instruction_pointer(regs) != (u64) jprobe_return_break) + return 0; + + if (orig_sp != stack_addr) { + struct pt_regs *saved_regs = + (struct pt_regs *)kcb->jprobe_saved_regs.sp; + pr_err("current sp %lx does not match saved sp %lx\n", + orig_sp, stack_addr); + pr_err("Saved registers for jprobe %p\n", jp); + show_regs(saved_regs); + pr_err("Current registers\n"); + show_regs(regs); + BUG(); + } + unpause_graph_tracing(); + *regs = kcb->jprobe_saved_regs; + preempt_enable_no_resched(); + return 1; +} + +bool arch_within_kprobe_blacklist(unsigned long addr) +{ + extern char __idmap_text_start[], __idmap_text_end[]; + + if ((addr >= (unsigned 
long)__kprobes_text_start && + addr < (unsigned long)__kprobes_text_end) || + (addr >= (unsigned long)__entry_text_start && + addr < (unsigned long)__entry_text_end) || + (addr >= (unsigned long)__idmap_text_start && + addr < (unsigned long)__idmap_text_end) || + !!search_exception_tables(addr)) + return true; + + + return false; +} + +void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = + (unsigned long)&kretprobe_trampoline; + kprobe_opcode_t *correct_ret_addr = NULL; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + + /* + * It is possible to have multiple instances associated with a given + * task either because multiple functions in the call path have + * return probes installed on them, and/or more than one + * return probe was registered for a target function. + * + * We can handle this because: + * - instances are always pushed into the head of the list + * - when multiple return probes are registered for the same + * function, the (chronologically) first instance's ret_addr + * will be the real return address, and all the rest will + * point to kretprobe_trampoline. + */ + hlist_for_each_entry_safe(ri, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + orig_ret_address = (unsigned long)ri->ret_addr; + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. 
Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + + correct_ret_addr = ri->ret_addr; + hlist_for_each_entry_safe(ri, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + orig_ret_address = (unsigned long)ri->ret_addr; + if (ri->rp && ri->rp->handler) { + __this_cpu_write(current_kprobe, &ri->rp->kp); + get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); + __this_cpu_write(current_kprobe, NULL); + } + + recycle_rp_inst(ri, &empty_rp); + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_hash_unlock(current, &flags); + + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + return (void *)orig_ret_address; +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; + + /* replace return addr (x30) with trampoline */ + regs->regs[30] = (long)&kretprobe_trampoline; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + +int __init arch_init_kprobes(void) +{ + return 0; +} diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S new file mode 100644 index 0000000000000000000000000000000000000000..5d6e7f14638c679b25579b3b54ebfe35fc22802a --- /dev/null +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S @@ -0,0 +1,81 @@ +/* + * trampoline entry and return code for kretprobes. 
+ */ + +#include +#include +#include + + .text + + .macro save_all_base_regs + stp x0, x1, [sp, #S_X0] + stp x2, x3, [sp, #S_X2] + stp x4, x5, [sp, #S_X4] + stp x6, x7, [sp, #S_X6] + stp x8, x9, [sp, #S_X8] + stp x10, x11, [sp, #S_X10] + stp x12, x13, [sp, #S_X12] + stp x14, x15, [sp, #S_X14] + stp x16, x17, [sp, #S_X16] + stp x18, x19, [sp, #S_X18] + stp x20, x21, [sp, #S_X20] + stp x22, x23, [sp, #S_X22] + stp x24, x25, [sp, #S_X24] + stp x26, x27, [sp, #S_X26] + stp x28, x29, [sp, #S_X28] + add x0, sp, #S_FRAME_SIZE + stp lr, x0, [sp, #S_LR] + /* + * Construct a useful saved PSTATE + */ + mrs x0, nzcv + mrs x1, daif + orr x0, x0, x1 + mrs x1, CurrentEL + orr x0, x0, x1 + mrs x1, SPSel + orr x0, x0, x1 + stp xzr, x0, [sp, #S_PC] + .endm + + .macro restore_all_base_regs + ldr x0, [sp, #S_PSTATE] + and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) + msr nzcv, x0 + ldp x0, x1, [sp, #S_X0] + ldp x2, x3, [sp, #S_X2] + ldp x4, x5, [sp, #S_X4] + ldp x6, x7, [sp, #S_X6] + ldp x8, x9, [sp, #S_X8] + ldp x10, x11, [sp, #S_X10] + ldp x12, x13, [sp, #S_X12] + ldp x14, x15, [sp, #S_X14] + ldp x16, x17, [sp, #S_X16] + ldp x18, x19, [sp, #S_X18] + ldp x20, x21, [sp, #S_X20] + ldp x22, x23, [sp, #S_X22] + ldp x24, x25, [sp, #S_X24] + ldp x26, x27, [sp, #S_X26] + ldp x28, x29, [sp, #S_X28] + .endm + +ENTRY(kretprobe_trampoline) + sub sp, sp, #S_FRAME_SIZE + + save_all_base_regs + + mov x0, sp + bl trampoline_probe_handler + /* + * Replace trampoline address in lr with actual orig_ret_addr return + * address. 
+ */ + mov lr, x0 + + restore_all_base_regs + + add sp, sp, #S_FRAME_SIZE + ret + +ENDPROC(kretprobe_trampoline) diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c new file mode 100644 index 0000000000000000000000000000000000000000..8977ce9d009d052f4c22c8be1be9217d30171bfd --- /dev/null +++ b/arch/arm64/kernel/probes/simulate-insn.c @@ -0,0 +1,217 @@ +/* + * arch/arm64/kernel/probes/simulate-insn.c + * + * Copyright (C) 2013 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include + +#include "simulate-insn.h" + +#define sign_extend(x, signbit) \ + ((x) | (0 - ((x) & (1 << (signbit))))) + +#define bbl_displacement(insn) \ + sign_extend(((insn) & 0x3ffffff) << 2, 27) + +#define bcond_displacement(insn) \ + sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) + +#define cbz_displacement(insn) \ + sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) + +#define tbz_displacement(insn) \ + sign_extend(((insn >> 5) & 0x3fff) << 2, 15) + +#define ldr_displacement(insn) \ + sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) + +static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val) +{ + if (reg < 31) + regs->regs[reg] = val; +} + +static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val) +{ + if (reg < 31) + regs->regs[reg] = lower_32_bits(val); +} + +static inline u64 get_x_reg(struct pt_regs *regs, int reg) +{ + if (reg < 31) + return regs->regs[reg]; + else + return 0; +} + +static inline u32 get_w_reg(struct pt_regs *regs, int reg) +{ + if (reg < 31) + return lower_32_bits(regs->regs[reg]); + 
else + return 0; +} + +static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + + return (opcode & (1 << 31)) ? + (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0); +} + +static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + + return (opcode & (1 << 31)) ? + (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0); +} + +static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f); + + return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0; +} + +static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs) +{ + int xn = opcode & 0x1f; + int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f); + + return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0; +} + +/* + * instruction simulation functions + */ +void __kprobes +simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs) +{ + long imm, xn, val; + + xn = opcode & 0x1f; + imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3); + imm = sign_extend(imm, 20); + if (opcode & 0x80000000) + val = (imm<<12) + (addr & 0xfffffffffffff000); + else + val = imm + addr; + + set_x_reg(regs, xn, val); + + instruction_pointer_set(regs, instruction_pointer(regs) + 4); +} + +void __kprobes +simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = bbl_displacement(opcode); + + /* Link register is x30 */ + if (opcode & (1 << 31)) + set_x_reg(regs, 30, addr + 4); + + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = 4; + + if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff)) + disp = bcond_displacement(opcode); + + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs) +{ + int xn = (opcode >> 5) & 0x1f; + + /* update 
pc first in case we're doing a "blr lr" */ + instruction_pointer_set(regs, get_x_reg(regs, xn)); + + /* Link register is x30 */ + if (((opcode >> 21) & 0x3) == 1) + set_x_reg(regs, 30, addr + 4); +} + +void __kprobes +simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = 4; + + if (opcode & (1 << 24)) { + if (check_cbnz(opcode, regs)) + disp = cbz_displacement(opcode); + } else { + if (check_cbz(opcode, regs)) + disp = cbz_displacement(opcode); + } + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs) +{ + int disp = 4; + + if (opcode & (1 << 24)) { + if (check_tbnz(opcode, regs)) + disp = tbz_displacement(opcode); + } else { + if (check_tbz(opcode, regs)) + disp = tbz_displacement(opcode); + } + instruction_pointer_set(regs, addr + disp); +} + +void __kprobes +simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs) +{ + u64 *load_addr; + int xn = opcode & 0x1f; + int disp; + + disp = ldr_displacement(opcode); + load_addr = (u64 *) (addr + disp); + + if (opcode & (1 << 30)) /* x0-x30 */ + set_x_reg(regs, xn, *load_addr); + else /* w0-w30 */ + set_w_reg(regs, xn, *load_addr); + + instruction_pointer_set(regs, instruction_pointer(regs) + 4); +} + +void __kprobes +simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs) +{ + s32 *load_addr; + int xn = opcode & 0x1f; + int disp; + + disp = ldr_displacement(opcode); + load_addr = (s32 *) (addr + disp); + + set_x_reg(regs, xn, *load_addr); + + instruction_pointer_set(regs, instruction_pointer(regs) + 4); +} diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h new file mode 100644 index 0000000000000000000000000000000000000000..050bde683c2df94f32e7b51e985ea453ba6b61e9 --- /dev/null +++ b/arch/arm64/kernel/probes/simulate-insn.h @@ -0,0 +1,28 @@ +/* + * arch/arm64/kernel/probes/simulate-insn.h + * + * Copyright (C) 2013 Linaro Limited + * + * This program is 
free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H +#define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H + +void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs); +void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs); +void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs); +void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs); +void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs); +void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs); +void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs); +void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs); + +#endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */ diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index daa5734794211e435d3ec4b81f911d7649cef739..b9e7c42cd8eb3c9d58dcb8f20fc0e9d02c30b205 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -20,7 +20,6 @@ #include #include #include -#include #include @@ -28,73 +27,6 @@ #include #include #include -#include - -static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); - -static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu) -{ - int i, ret, count = 0; - u32 *psci_states; - struct device_node *state_node, *cpu_node; - - cpu_node = of_get_cpu_node(cpu, NULL); - if (!cpu_node) - return -ENODEV; - - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out - */ - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - - /* Count idle states */ - while 
((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - count))) { - count++; - of_node_put(state_node); - } - - if (!count) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); - if (!psci_states) - return -ENOMEM; - - for (i = 0; i < count; i++) { - u32 state; - - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); - - ret = of_property_read_u32(state_node, - "arm,psci-suspend-param", - &state); - if (ret) { - pr_warn(" * %s missing arm,psci-suspend-param property\n", - state_node->full_name); - of_node_put(state_node); - goto free_mem; - } - - of_node_put(state_node); - pr_debug("psci-power-state %#x index %d\n", state, i); - if (!psci_power_state_is_valid(state)) { - pr_warn("Invalid PSCI power state %#x\n", state); - ret = -EINVAL; - goto free_mem; - } - psci_states[i] = state; - } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; - return 0; - -free_mem: - kfree(psci_states); - return ret; -} static int __init cpu_psci_cpu_init(unsigned int cpu) { @@ -179,34 +111,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu) } #endif -static int psci_suspend_finisher(unsigned long state_id) -{ - return psci_ops.cpu_suspend(state_id, virt_to_phys(cpu_resume)); -} - -static int __maybe_unused cpu_psci_cpu_suspend(unsigned long state_id) -{ - int ret; - /* - * idle state id 0 corresponds to wfi, should never be called - * from the cpu_suspend operations - */ - if (WARN_ON_ONCE(!state_id)) - return -EINVAL; - - if (!psci_power_state_loses_context(state_id)) - ret = psci_ops.cpu_suspend(state_id, 0); - else - ret = cpu_suspend(state_id, psci_suspend_finisher); - - return ret; -} - const struct cpu_operations cpu_psci_ops = { .name = "psci", #ifdef CONFIG_CPU_IDLE - .cpu_init_idle = cpu_psci_cpu_init_idle, - .cpu_suspend = cpu_psci_cpu_suspend, + .cpu_init_idle = psci_cpu_init_idle, + .cpu_suspend = psci_cpu_suspend_enter, #endif .cpu_init = cpu_psci_cpu_init, 
.cpu_prepare = cpu_psci_cpu_prepare, diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index fc779ec6f051fcacf52c793f37961a75db832fab..df4707380e6656722ccd2901413d387bc79cf965 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -49,6 +49,106 @@ #define CREATE_TRACE_POINTS #include +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} +#define GPR_OFFSET_NAME(r) \ + {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} + +static const struct pt_regs_offset regoffset_table[] = { + GPR_OFFSET_NAME(0), + GPR_OFFSET_NAME(1), + GPR_OFFSET_NAME(2), + GPR_OFFSET_NAME(3), + GPR_OFFSET_NAME(4), + GPR_OFFSET_NAME(5), + GPR_OFFSET_NAME(6), + GPR_OFFSET_NAME(7), + GPR_OFFSET_NAME(8), + GPR_OFFSET_NAME(9), + GPR_OFFSET_NAME(10), + GPR_OFFSET_NAME(11), + GPR_OFFSET_NAME(12), + GPR_OFFSET_NAME(13), + GPR_OFFSET_NAME(14), + GPR_OFFSET_NAME(15), + GPR_OFFSET_NAME(16), + GPR_OFFSET_NAME(17), + GPR_OFFSET_NAME(18), + GPR_OFFSET_NAME(19), + GPR_OFFSET_NAME(20), + GPR_OFFSET_NAME(21), + GPR_OFFSET_NAME(22), + GPR_OFFSET_NAME(23), + GPR_OFFSET_NAME(24), + GPR_OFFSET_NAME(25), + GPR_OFFSET_NAME(26), + GPR_OFFSET_NAME(27), + GPR_OFFSET_NAME(28), + GPR_OFFSET_NAME(29), + GPR_OFFSET_NAME(30), + {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])}, + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(pstate), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. 
If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + /* * TODO: does not yet catch signals sent when the child dies. * in exit.c or in signal.c. 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 326c53b7fe0b94497e34137659c015ec2dd15fa1..30af178c640cacc103229bb3e21e51dc97ab1dad 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -183,7 +183,6 @@ static void __init smp_build_mpidr_hash(void) */ if (mpidr_hash_size() > 4 * num_possible_cpus()) pr_warn("Large number of MPIDR hash buckets detected\n"); - __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash)); } static void __init setup_machine_fdt(phys_addr_t dt_phys) @@ -361,6 +360,15 @@ void __init setup_arch(char **cmdline_p) smp_init_cpus(); smp_build_mpidr_hash(); +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Make sure init_thread_info.ttbr0 always generates translation + * faults in case uaccess_enable() is inadvertently called by the init + * thread. + */ + init_thread_info.ttbr0 = virt_to_phys(empty_zero_page); +#endif + #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index e33fe33876ab3804f2c6dcd6c5458e576596ef24..c2bf5a58039f1b7f749912bc18f58bf5007e80fb 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -49,39 +49,32 @@ orr \dst, \dst, \mask // dst|=(aff3>>rs3) .endm /* - * Save CPU state for a suspend and execute the suspend finisher. - * On success it will return 0 through cpu_resume - ie through a CPU - * soft/hard reboot from the reset vector. - * On failure it returns the suspend finisher return value or force - * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher - * is not allowed to return, if it does this must be considered failure). - * It saves callee registers, and allocates space on the kernel stack - * to save the CPU specific registers + some other data for resume. + * Save CPU state in the provided sleep_stack_data area, and publish its + * location for cpu_resume()'s use in sleep_save_stash. 
* - * x0 = suspend finisher argument - * x1 = suspend finisher function pointer + * cpu_resume() will restore this saved state, and return. Because the + * link-register is saved and restored, it will appear to return from this + * function. So that the caller can tell the suspend/resume paths apart, + * __cpu_suspend_enter() will always return a non-zero value, whereas the + * path through cpu_resume() will return 0. + * + * x0 = struct sleep_stack_data area */ ENTRY(__cpu_suspend_enter) - stp x29, lr, [sp, #-96]! - stp x19, x20, [sp,#16] - stp x21, x22, [sp,#32] - stp x23, x24, [sp,#48] - stp x25, x26, [sp,#64] - stp x27, x28, [sp,#80] - /* - * Stash suspend finisher and its argument in x20 and x19 - */ - mov x19, x0 - mov x20, x1 + stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS] + stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16] + stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32] + stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48] + stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64] + stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80] + + /* save the sp in cpu_suspend_ctx */ mov x2, sp - sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx - mov x0, sp - /* - * x0 now points to struct cpu_suspend_ctx allocated on the stack - */ - str x2, [x0, #CPU_CTX_SP] - ldr x1, =sleep_save_sp - ldr x1, [x1, #SLEEP_SAVE_SP_VIRT] + str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP] + + /* find the mpidr_hash */ + ldr x1, =sleep_save_stash + ldr x1, [x1] mrs x7, mpidr_el1 ldr x9, =mpidr_hash ldr x10, [x9, #MPIDR_HASH_MASK] @@ -93,70 +86,28 @@ ENTRY(__cpu_suspend_enter) ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 add x1, x1, x8, lsl #3 - bl __cpu_suspend_save - /* - * Grab suspend finisher in x20 and its argument in x19 - */ - mov x0, x19 - mov x1, x20 - /* - * We are ready for power down, fire off the suspend finisher - * in x1, with argument in x0 - */ - blr x1 - /* - * Never gets here, unless suspend finisher 
fails. - * Successful cpu_suspend should return from cpu_resume, returning - * through this code path is considered an error - * If the return value is set to 0 force x0 = -EOPNOTSUPP - * to make sure a proper error condition is propagated - */ - cmp x0, #0 - mov x3, #-EOPNOTSUPP - csel x0, x3, x0, eq - add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer - ldp x19, x20, [sp, #16] - ldp x21, x22, [sp, #32] - ldp x23, x24, [sp, #48] - ldp x25, x26, [sp, #64] - ldp x27, x28, [sp, #80] - ldp x29, lr, [sp], #96 + + str x0, [x1] + add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS + stp x29, lr, [sp, #-16]! + bl cpu_do_suspend + ldp x29, lr, [sp], #16 + mov x0, #1 ret ENDPROC(__cpu_suspend_enter) .ltorg -/* - * x0 must contain the sctlr value retrieved from restored context - */ - .pushsection ".idmap.text", "ax" -ENTRY(cpu_resume_mmu) - ldr x3, =cpu_resume_after_mmu - msr sctlr_el1, x0 // restore sctlr_el1 - isb - /* - * Invalidate the local I-cache so that any instructions fetched - * speculatively from the PoC are discarded, since they may have - * been dynamically patched at the PoU. 
- */ - ic iallu - dsb nsh - isb - br x3 // global jump to virtual address -ENDPROC(cpu_resume_mmu) - .popsection -cpu_resume_after_mmu: - mov x0, #0 // return zero on success - ldp x19, x20, [sp, #16] - ldp x21, x22, [sp, #32] - ldp x23, x24, [sp, #48] - ldp x25, x26, [sp, #64] - ldp x27, x28, [sp, #80] - ldp x29, lr, [sp], #96 - ret -ENDPROC(cpu_resume_after_mmu) - ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly + /* enable the MMU early - so we can access sleep_save_stash by va */ + adr_l lr, __enable_mmu /* __cpu_setup will return here */ + ldr x27, =_cpu_resume /* __enable_mmu will branch here */ + adrp x25, idmap_pg_dir + adrp x26, swapper_pg_dir + b __cpu_setup +ENDPROC(cpu_resume) + +ENTRY(_cpu_resume) mrs x1, mpidr_el1 adrp x8, mpidr_hash add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address @@ -166,20 +117,27 @@ ENTRY(cpu_resume) ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 /* x7 contains hash index, let's use it to grab context pointer */ - ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS + ldr_l x0, sleep_save_stash ldr x0, [x0, x7, lsl #3] + add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS + add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS /* load sp from context */ ldr x2, [x0, #CPU_CTX_SP] - /* load physical address of identity map page table in x1 */ - adrp x1, idmap_pg_dir mov sp, x2 /* save thread_info */ and x2, x2, #~(THREAD_SIZE - 1) msr sp_el0, x2 /* - * cpu_do_resume expects x0 to contain context physical address - * pointer and x1 to contain physical address of 1:1 page tables + * cpu_do_resume expects x0 to contain context address pointer */ - bl cpu_do_resume // PC relative jump, MMU off - b cpu_resume_mmu // Resume MMU, never returns -ENDPROC(cpu_resume) + bl cpu_do_resume + + ldp x19, x20, [x29, #16] + ldp x21, x22, [x29, #32] + ldp x23, x24, [x29, #48] + ldp x25, x26, [x29, #64] + ldp x27, x28, [x29, #80] + ldp x29, lr, [x29] + mov x0, #0 + ret +ENDPROC(_cpu_resume) diff --git 
a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S new file mode 100644 index 0000000000000000000000000000000000000000..ae0496fa423555a768b1d382a4e159001ecabe6b --- /dev/null +++ b/arch/arm64/kernel/smccc-call.S @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License Version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include + + .macro SMCCC instr + .cfi_startproc + \instr #0 + ldr x4, [sp] + stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] + stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] + ret + .cfi_endproc + .endm + +/* + * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_smc) + SMCCC smc +ENDPROC(arm_smccc_smc) + +/* + * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_hvc) + SMCCC hvc +ENDPROC(arm_smccc_hvc) diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 66055392f445ef47a7fb3749ca6024df1ea185c9..5a0b1088c17c0fa0d9998ee03a67791d111b2e75 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -1,7 +1,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -10,30 +12,11 @@ #include #include -extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long)); /* - * This is called by __cpu_suspend_enter() to save the 
state, and do whatever - * flushing is required to ensure that when the CPU goes to sleep we have - * the necessary data available when the caches are not searched. - * - * ptr: CPU context virtual address - * save_ptr: address of the location where the context physical address - * must be saved + * This is allocated by cpu_suspend_init(), and used to store a pointer to + * the 'struct sleep_stack_data' the contains a particular CPUs state. */ -void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, - phys_addr_t *save_ptr) -{ - *save_ptr = virt_to_phys(ptr); - - cpu_do_suspend(ptr); - /* - * Only flush the context that must be retrieved with the MMU - * off. VA primitives ensure the flush is applied to all - * cache levels so context is pushed to DRAM. - */ - __flush_dcache_area(ptr, sizeof(*ptr)); - __flush_dcache_area(save_ptr, sizeof(*save_ptr)); -} +unsigned long *sleep_save_stash; /* * This hook is provided so that cpu_suspend code can restore HW @@ -51,6 +34,30 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) hw_breakpoint_restore = hw_bp_restore; } +void notrace __cpu_suspend_exit(void) +{ + /* + * We are resuming from reset with the idmap active in TTBR0_EL1. + * We must uninstall the idmap and restore the expected MMU + * state before we can possibly return to userspace. + */ + cpu_uninstall_idmap(); + + /* + * Restore per-cpu offset before any kernel + * subsystem relying on it has a chance to run. + */ + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + + /* + * Restore HW breakpoint registers to sane values + * before debug exceptions are possibly reenabled + * through local_dbg_restore. 
+ */ + if (hw_breakpoint_restore) + hw_breakpoint_restore(NULL); +} + /* * cpu_suspend * @@ -60,8 +67,9 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) */ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { - int ret; + int ret = 0; unsigned long flags; + struct sleep_stack_data state; /* * From this point debug exceptions are disabled to prevent @@ -77,34 +85,26 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) */ pause_graph_tracing(); - /* - * mm context saved on the stack, it will be restored when - * the cpu comes out of reset through the identity mapped - * page tables, so that the thread address space is properly - * set-up on function return. - */ - ret = __cpu_suspend_enter(arg, fn); - if (ret == 0) { - /* - * We are resuming from reset with the idmap active in TTBR0_EL1. - * We must uninstall the idmap and restore the expected MMU - * state before we can possibly return to userspace. - */ - cpu_uninstall_idmap(); + if (__cpu_suspend_enter(&state)) { + /* Call the suspend finisher */ + ret = fn(arg); /* - * Restore per-cpu offset before any kernel - * subsystem relying on it has a chance to run. + * PSTATE was not saved over suspend/resume, re-enable any + * detected features that might not have been set correctly. */ - set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, + CONFIG_ARM64_PAN)); /* * Restore HW breakpoint registers to sane values * before debug exceptions are possibly reenabled * through local_dbg_restore. 
*/ - if (hw_breakpoint_restore) - hw_breakpoint_restore(NULL); + if (!ret) + ret = -EOPNOTSUPP; + } else { + __cpu_suspend_exit(); } unpause_graph_tracing(); @@ -119,22 +119,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return ret; } -struct sleep_save_sp sleep_save_sp; - static int __init cpu_suspend_init(void) { - void *ctx_ptr; - /* ctx_ptr is an array of physical addresses */ - ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL); + sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash), + GFP_KERNEL); - if (WARN_ON(!ctx_ptr)) + if (WARN_ON(!sleep_save_stash)) return -ENOMEM; - sleep_save_sp.save_ptr_stash = ctx_ptr; - sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); - __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); - return 0; } early_initcall(cpu_suspend_init); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 48b75ece4c1773d7496f19f3c5cf7b68152b3165..d621d1de0ac219df6079bbec92cae1319be2dfa8 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -68,8 +68,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, /* * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. + * to safely read from kernel space. */ fs = get_fs(); set_fs(KERNEL_DS); @@ -115,21 +114,12 @@ static void dump_backtrace_entry(unsigned long where) print_ip_sym(where); } -static void dump_instr(const char *lvl, struct pt_regs *regs) +static void __dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); - mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. 
- */ - fs = get_fs(); - set_fs(KERNEL_DS); - for (i = -4; i < 1; i++) { unsigned int val, bad; @@ -143,8 +133,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) } } printk("%sCode: %s\n", lvl, str); +} - set_fs(fs); +static void dump_instr(const char *lvl, struct pt_regs *regs) +{ + if (!user_mode(regs)) { + mm_segment_t fs = get_fs(); + set_fs(KERNEL_DS); + __dump_instr(lvl, regs); + set_fs(fs); + } else { + __dump_instr(lvl, regs); + } } static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) @@ -505,7 +505,7 @@ static const char *esr_class_str[] = { const char *esr_get_class_string(u32 esr) { - return esr_class_str[esr >> ESR_ELx_EC_SHIFT]; + return esr_class_str[ESR_ELx_EC(esr)]; } /* diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 76ff67d44fb519739b960b3103987267103e4cc2..b94ad40ecf7babb2463bce786e191d0871ff0cd8 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -47,6 +47,16 @@ jiffies = jiffies_64; *(.idmap.text) \ VMLINUX_SYMBOL(__idmap_text_end) = .; +#ifdef CONFIG_HIBERNATION +#define HIBERNATE_TEXT \ + . = ALIGN(SZ_4K); \ + VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\ + *(.hibernate_exit.text) \ + VMLINUX_SYMBOL(__hibernate_exit_text_end) = .; +#else +#define HIBERNATE_TEXT +#endif + /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -109,11 +119,14 @@ SECTIONS *(.exception.text) __exception_text_end = .; IRQENTRY_TEXT + ENTRY_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT + KPROBES_TEXT HYPERVISOR_TEXT IDMAP_TEXT + HIBERNATE_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -182,6 +195,11 @@ SECTIONS swapper_pg_dir = .; . += SWAPPER_DIR_SIZE; +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + reserved_ttbr0 = .; + . 
+= RESERVED_TTBR0_SIZE; +#endif + _end = .; STABS_DEBUG @@ -197,6 +215,10 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "HYP init code too big or misaligned") ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "ID map text too big or misaligned") +#ifdef CONFIG_HIBERNATION +ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) + <= SZ_4K, "Hibernate exit text too big or misaligned") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 1949fe5f54246a3f14983753570f4e4d0f978e76..caee9ee8e12af1eef4c13ebd1f750d7a3b5ee7f2 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -10,6 +10,7 @@ KVM=../../../virt/kvm ARM=../../../arch/arm/kvm obj-$(CONFIG_KVM_ARM_HOST) += kvm.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp/ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o @@ -22,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 3039f080e2d5820ed4446aac0871dee42ffa5ad2..e5ee8880d5d9b2014ea6859ea455daa33beb2ebd 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 
15f0477b0d2adc53d86573b1733d2fa7f368bbd9..25006a7a5316924d472f0d676b933e8007ca8dfe 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -182,6 +183,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, exit_handler = kvm_get_exit_handler(vcpu); return exit_handler(vcpu, run); + case ARM_EXCEPTION_HYP_GONE: + /* + * EL2 has been reset to the hyp-stub. This happens when a guest + * is pre-empted by kvm_reboot()'s shutdown call. + */ + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + return 0; default: kvm_pr_unimpl("Unsupported exception type: %d", exception_index); diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 84c338f017b2121b0000484bbca981cbabc49abb..d87635e678b7edf2ea18a6a038d2db1dba802d8c 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -21,6 +21,7 @@ #include #include #include +#include .text .pushsection .hyp.idmap.text, "ax" @@ -96,6 +97,14 @@ __do_hyp_init: ldr x4, =VTCR_EL2_FLAGS bfi x4, x5, #16, #3 + /* + * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in + * VTCR_EL2. + */ + mrs x5, ID_AA64MMFR1_EL1 + ubfx x5, x5, #5, #1 + lsl x5, x5, #VTCR_EL2_VS + orr x4, x4, x5 msr vtcr_el2, x4 @@ -108,8 +117,8 @@ __do_hyp_init: dsb sy mrs x4, sctlr_el2 - and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 - ldr x5, =SCTLR_EL2_FLAGS + and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2 + ldr x5, =SCTLR_ELx_FLAGS orr x4, x4, x5 msr sctlr_el2, x4 isb @@ -143,6 +152,44 @@ merged: eret ENDPROC(__kvm_hyp_init) + /* + * x0: HYP boot pgd + * x1: HYP phys_idmap_start + */ +ENTRY(__kvm_hyp_reset) + /* We're in trampoline code in VA, switch back to boot page tables */ + msr ttbr0_el2, x0 + isb + + /* Ensure the PA branch doesn't find a stale tlb entry or stale code. 
*/ + ic iallu + tlbi alle2 + dsb sy + isb + + /* Branch into PA space */ + adr x0, 1f + bfi x1, x0, #0, #PAGE_SHIFT + br x1 + + /* We're now in idmap, disable MMU */ +1: mrs x0, sctlr_el2 + ldr x1, =SCTLR_ELx_FLAGS + bic x0, x0, x1 // Clear SCTL_M and etc + msr sctlr_el2, x0 + isb + + /* Invalidate the old TLBs */ + tlbi alle2 + dsb sy + + /* Install stub vectors */ + adr_l x0, __hyp_stub_vectors + msr vbar_el2, x0 + + eret +ENDPROC(__kvm_hyp_reset) + .ltorg .popsection diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 309e3479dc2c48fb47ca28a44b81e9d06b73e7bf..7ce9315651518cf88e47d4a08670b9088bfbcfa1 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -18,909 +18,8 @@ #include #include -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) -#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) -#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x) -#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x) - - .text - .pushsection .hyp.text, "ax" - .align PAGE_SHIFT - -.macro save_common_regs - // x2: base address for cpu context - // x3: tmp register - - add x3, x2, #CPU_XREG_OFFSET(19) - stp x19, x20, [x3] - stp x21, x22, [x3, #16] - stp x23, x24, [x3, #32] - stp x25, x26, [x3, #48] - stp x27, x28, [x3, #64] - stp x29, lr, [x3, #80] - - mrs x19, sp_el0 - mrs x20, elr_el2 // pc before entering el2 - mrs x21, spsr_el2 // pstate before entering el2 - - stp x19, x20, [x3, #96] - str x21, [x3, #112] - - mrs x22, sp_el1 - mrs x23, elr_el1 - mrs x24, spsr_el1 - - str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] - str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] - str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] -.endm - -.macro restore_common_regs - // x2: base address for cpu context - // x3: tmp register - - ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] - ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] - ldr x24, [x2, 
#CPU_SPSR_OFFSET(KVM_SPSR_EL1)] - - msr sp_el1, x22 - msr elr_el1, x23 - msr spsr_el1, x24 - - add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0 - ldp x19, x20, [x3] - ldr x21, [x3, #16] - - msr sp_el0, x19 - msr elr_el2, x20 // pc on return from el2 - msr spsr_el2, x21 // pstate on return from el2 - - add x3, x2, #CPU_XREG_OFFSET(19) - ldp x19, x20, [x3] - ldp x21, x22, [x3, #16] - ldp x23, x24, [x3, #32] - ldp x25, x26, [x3, #48] - ldp x27, x28, [x3, #64] - ldp x29, lr, [x3, #80] -.endm - -.macro save_host_regs - save_common_regs -.endm - -.macro restore_host_regs - restore_common_regs -.endm - -.macro save_fpsimd - // x2: cpu context address - // x3, x4: tmp regs - add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) - fpsimd_save x3, 4 -.endm - -.macro restore_fpsimd - // x2: cpu context address - // x3, x4: tmp regs - add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) - fpsimd_restore x3, 4 -.endm - -.macro save_guest_regs - // x0 is the vcpu address - // x1 is the return code, do not corrupt! - // x2 is the cpu context - // x3 is a tmp register - // Guest's x0-x3 are on the stack - - // Compute base to save registers - add x3, x2, #CPU_XREG_OFFSET(4) - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - stp x8, x9, [x3, #32] - stp x10, x11, [x3, #48] - stp x12, x13, [x3, #64] - stp x14, x15, [x3, #80] - stp x16, x17, [x3, #96] - str x18, [x3, #112] - - pop x6, x7 // x2, x3 - pop x4, x5 // x0, x1 - - add x3, x2, #CPU_XREG_OFFSET(0) - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - - save_common_regs -.endm - -.macro restore_guest_regs - // x0 is the vcpu address. 
- // x2 is the cpu context - // x3 is a tmp register - - // Prepare x0-x3 for later restore - add x3, x2, #CPU_XREG_OFFSET(0) - ldp x4, x5, [x3] - ldp x6, x7, [x3, #16] - push x4, x5 // Push x0-x3 on the stack - push x6, x7 - - // x4-x18 - ldp x4, x5, [x3, #32] - ldp x6, x7, [x3, #48] - ldp x8, x9, [x3, #64] - ldp x10, x11, [x3, #80] - ldp x12, x13, [x3, #96] - ldp x14, x15, [x3, #112] - ldp x16, x17, [x3, #128] - ldr x18, [x3, #144] - - // x19-x29, lr, sp*, elr*, spsr* - restore_common_regs - - // Last bits of the 64bit state - pop x2, x3 - pop x0, x1 - - // Do not touch any register after this! -.endm - -/* - * Macros to perform system register save/restore. - * - * Ordering here is absolutely critical, and must be kept consistent - * in {save,restore}_sysregs, {save,restore}_guest_32bit_state, - * and in kvm_asm.h. - * - * In other words, don't touch any of these unless you know what - * you are doing. - */ -.macro save_sysregs - // x2: base address for cpu context - // x3: tmp register - - add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) - - mrs x4, vmpidr_el2 - mrs x5, csselr_el1 - mrs x6, sctlr_el1 - mrs x7, actlr_el1 - mrs x8, cpacr_el1 - mrs x9, ttbr0_el1 - mrs x10, ttbr1_el1 - mrs x11, tcr_el1 - mrs x12, esr_el1 - mrs x13, afsr0_el1 - mrs x14, afsr1_el1 - mrs x15, far_el1 - mrs x16, mair_el1 - mrs x17, vbar_el1 - mrs x18, contextidr_el1 - mrs x19, tpidr_el0 - mrs x20, tpidrro_el0 - mrs x21, tpidr_el1 - mrs x22, amair_el1 - mrs x23, cntkctl_el1 - mrs x24, par_el1 - mrs x25, mdscr_el1 - - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - stp x8, x9, [x3, #32] - stp x10, x11, [x3, #48] - stp x12, x13, [x3, #64] - stp x14, x15, [x3, #80] - stp x16, x17, [x3, #96] - stp x18, x19, [x3, #112] - stp x20, x21, [x3, #128] - stp x22, x23, [x3, #144] - stp x24, x25, [x3, #160] -.endm - -.macro save_debug type - // x4: pointer to register set - // x5: number of registers to skip - // x6..x22 trashed - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - mrs x21, \type\()15_el1 - 
mrs x20, \type\()14_el1 - mrs x19, \type\()13_el1 - mrs x18, \type\()12_el1 - mrs x17, \type\()11_el1 - mrs x16, \type\()10_el1 - mrs x15, \type\()9_el1 - mrs x14, \type\()8_el1 - mrs x13, \type\()7_el1 - mrs x12, \type\()6_el1 - mrs x11, \type\()5_el1 - mrs x10, \type\()4_el1 - mrs x9, \type\()3_el1 - mrs x8, \type\()2_el1 - mrs x7, \type\()1_el1 - mrs x6, \type\()0_el1 - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - str x21, [x4, #(15 * 8)] - str x20, [x4, #(14 * 8)] - str x19, [x4, #(13 * 8)] - str x18, [x4, #(12 * 8)] - str x17, [x4, #(11 * 8)] - str x16, [x4, #(10 * 8)] - str x15, [x4, #(9 * 8)] - str x14, [x4, #(8 * 8)] - str x13, [x4, #(7 * 8)] - str x12, [x4, #(6 * 8)] - str x11, [x4, #(5 * 8)] - str x10, [x4, #(4 * 8)] - str x9, [x4, #(3 * 8)] - str x8, [x4, #(2 * 8)] - str x7, [x4, #(1 * 8)] - str x6, [x4, #(0 * 8)] -.endm - -.macro restore_sysregs - // x2: base address for cpu context - // x3: tmp register - - add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) - - ldp x4, x5, [x3] - ldp x6, x7, [x3, #16] - ldp x8, x9, [x3, #32] - ldp x10, x11, [x3, #48] - ldp x12, x13, [x3, #64] - ldp x14, x15, [x3, #80] - ldp x16, x17, [x3, #96] - ldp x18, x19, [x3, #112] - ldp x20, x21, [x3, #128] - ldp x22, x23, [x3, #144] - ldp x24, x25, [x3, #160] - - msr vmpidr_el2, x4 - msr csselr_el1, x5 - msr sctlr_el1, x6 - msr actlr_el1, x7 - msr cpacr_el1, x8 - msr ttbr0_el1, x9 - msr ttbr1_el1, x10 - msr tcr_el1, x11 - msr esr_el1, x12 - msr afsr0_el1, x13 - msr afsr1_el1, x14 - msr far_el1, x15 - msr mair_el1, x16 - msr vbar_el1, x17 - msr contextidr_el1, x18 - msr tpidr_el0, x19 - msr tpidrro_el0, x20 - msr tpidr_el1, x21 - msr amair_el1, x22 - msr cntkctl_el1, x23 - msr par_el1, x24 - msr mdscr_el1, x25 -.endm - -.macro restore_debug type - // x4: pointer to register set - // x5: number of registers to skip - // x6..x22 trashed - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - ldr x21, [x4, #(15 * 8)] - ldr x20, [x4, #(14 * 8)] - ldr x19, [x4, #(13 * 8)] - ldr 
x18, [x4, #(12 * 8)] - ldr x17, [x4, #(11 * 8)] - ldr x16, [x4, #(10 * 8)] - ldr x15, [x4, #(9 * 8)] - ldr x14, [x4, #(8 * 8)] - ldr x13, [x4, #(7 * 8)] - ldr x12, [x4, #(6 * 8)] - ldr x11, [x4, #(5 * 8)] - ldr x10, [x4, #(4 * 8)] - ldr x9, [x4, #(3 * 8)] - ldr x8, [x4, #(2 * 8)] - ldr x7, [x4, #(1 * 8)] - ldr x6, [x4, #(0 * 8)] - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - msr \type\()15_el1, x21 - msr \type\()14_el1, x20 - msr \type\()13_el1, x19 - msr \type\()12_el1, x18 - msr \type\()11_el1, x17 - msr \type\()10_el1, x16 - msr \type\()9_el1, x15 - msr \type\()8_el1, x14 - msr \type\()7_el1, x13 - msr \type\()6_el1, x12 - msr \type\()5_el1, x11 - msr \type\()4_el1, x10 - msr \type\()3_el1, x9 - msr \type\()2_el1, x8 - msr \type\()1_el1, x7 - msr \type\()0_el1, x6 -.endm - -.macro skip_32bit_state tmp, target - // Skip 32bit state if not needed - mrs \tmp, hcr_el2 - tbnz \tmp, #HCR_RW_SHIFT, \target -.endm - -.macro skip_tee_state tmp, target - // Skip ThumbEE state if not needed - mrs \tmp, id_pfr0_el1 - tbz \tmp, #12, \target -.endm - -.macro skip_debug_state tmp, target - ldr \tmp, [x0, #VCPU_DEBUG_FLAGS] - tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target -.endm - -/* - * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled) - */ -.macro skip_fpsimd_state tmp, target - mrs \tmp, cptr_el2 - tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target -.endm - -.macro compute_debug_state target - // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY - // is set, we do a full save/restore cycle and disable trapping. 
- add x25, x0, #VCPU_CONTEXT - - // Check the state of MDSCR_EL1 - ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)] - and x26, x25, #DBG_MDSCR_KDE - and x25, x25, #DBG_MDSCR_MDE - adds xzr, x25, x26 - b.eq 9998f // Nothing to see there - - // If any interesting bits was set, we must set the flag - mov x26, #KVM_ARM64_DEBUG_DIRTY - str x26, [x0, #VCPU_DEBUG_FLAGS] - b 9999f // Don't skip restore - -9998: - // Otherwise load the flags from memory in case we recently - // trapped - skip_debug_state x25, \target -9999: -.endm - -.macro save_guest_32bit_state - skip_32bit_state x3, 1f - - add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) - mrs x4, spsr_abt - mrs x5, spsr_und - mrs x6, spsr_irq - mrs x7, spsr_fiq - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - - add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) - mrs x4, dacr32_el2 - mrs x5, ifsr32_el2 - stp x4, x5, [x3] - - skip_fpsimd_state x8, 2f - mrs x6, fpexc32_el2 - str x6, [x3, #16] -2: - skip_debug_state x8, 1f - mrs x7, dbgvcr32_el2 - str x7, [x3, #24] -1: -.endm - -.macro restore_guest_32bit_state - skip_32bit_state x3, 1f - - add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) - ldp x4, x5, [x3] - ldp x6, x7, [x3, #16] - msr spsr_abt, x4 - msr spsr_und, x5 - msr spsr_irq, x6 - msr spsr_fiq, x7 - - add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) - ldp x4, x5, [x3] - msr dacr32_el2, x4 - msr ifsr32_el2, x5 - - skip_debug_state x8, 1f - ldr x7, [x3, #24] - msr dbgvcr32_el2, x7 -1: -.endm - -.macro activate_traps - ldr x2, [x0, #VCPU_HCR_EL2] - - /* - * We are about to set CPTR_EL2.TFP to trap all floating point - * register accesses to EL2, however, the ARM ARM clearly states that - * traps are only taken to EL2 if the operation would not otherwise - * trap to EL1. Therefore, always make sure that for 32-bit guests, - * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit. 
- */ - tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state - mov x3, #(1 << 30) - msr fpexc32_el2, x3 - isb -99: - msr hcr_el2, x2 - mov x2, #CPTR_EL2_TTA - orr x2, x2, #CPTR_EL2_TFP - msr cptr_el2, x2 - - mov x2, #(1 << 15) // Trap CP15 Cr=15 - msr hstr_el2, x2 - - // Monitor Debug Config - see kvm_arm_setup_debug() - ldr x2, [x0, #VCPU_MDCR_EL2] - msr mdcr_el2, x2 -.endm - -.macro deactivate_traps - mov x2, #HCR_RW - msr hcr_el2, x2 - msr hstr_el2, xzr - - mrs x2, mdcr_el2 - and x2, x2, #MDCR_EL2_HPMN_MASK - msr mdcr_el2, x2 -.endm - -.macro activate_vm - ldr x1, [x0, #VCPU_KVM] - kern_hyp_va x1 - ldr x2, [x1, #KVM_VTTBR] - msr vttbr_el2, x2 -.endm - -.macro deactivate_vm - msr vttbr_el2, xzr -.endm - -/* - * Call into the vgic backend for state saving - */ -.macro save_vgic_state -alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF - bl __save_vgic_v2_state -alternative_else - bl __save_vgic_v3_state -alternative_endif - mrs x24, hcr_el2 - mov x25, #HCR_INT_OVERRIDE - neg x25, x25 - and x24, x24, x25 - msr hcr_el2, x24 -.endm - -/* - * Call into the vgic backend for state restoring - */ -.macro restore_vgic_state - mrs x24, hcr_el2 - ldr x25, [x0, #VCPU_IRQ_LINES] - orr x24, x24, #HCR_INT_OVERRIDE - orr x24, x24, x25 - msr hcr_el2, x24 -alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF - bl __restore_vgic_v2_state -alternative_else - bl __restore_vgic_v3_state -alternative_endif -.endm - -.macro save_timer_state - // x0: vcpu pointer - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr w3, [x2, #KVM_TIMER_ENABLED] - cbz w3, 1f - - mrs x3, cntv_ctl_el0 - and x3, x3, #3 - str w3, [x0, #VCPU_TIMER_CNTV_CTL] - - isb - - mrs x3, cntv_cval_el0 - str x3, [x0, #VCPU_TIMER_CNTV_CVAL] - -1: - // Disable the virtual timer - msr cntv_ctl_el0, xzr - - // Allow physical timer/counter access for the host - mrs x2, cnthctl_el2 - orr x2, x2, #3 - msr cnthctl_el2, x2 - - // Clear cntvoff for the host - msr cntvoff_el2, xzr -.endm - -.macro restore_timer_state - // x0: vcpu pointer - // 
Disallow physical timer access for the guest - // Physical counter access is allowed - mrs x2, cnthctl_el2 - orr x2, x2, #1 - bic x2, x2, #2 - msr cnthctl_el2, x2 - - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr w3, [x2, #KVM_TIMER_ENABLED] - cbz w3, 1f - - ldr x3, [x2, #KVM_TIMER_CNTVOFF] - msr cntvoff_el2, x3 - ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL] - msr cntv_cval_el0, x2 - isb - - ldr w2, [x0, #VCPU_TIMER_CNTV_CTL] - and x2, x2, #3 - msr cntv_ctl_el0, x2 -1: -.endm - -__save_sysregs: - save_sysregs - ret - -__restore_sysregs: - restore_sysregs - ret - -/* Save debug state */ -__save_debug: - // x2: ptr to CPU context - // x3: ptr to debug reg struct - // x4/x5/x6-22/x24-26: trashed - - mrs x26, id_aa64dfr0_el1 - ubfx x24, x26, #12, #4 // Extract BRPs - ubfx x25, x26, #20, #4 // Extract WRPs - mov w26, #15 - sub w24, w26, w24 // How many BPs to skip - sub w25, w26, w25 // How many WPs to skip - - mov x5, x24 - add x4, x3, #DEBUG_BCR - save_debug dbgbcr - add x4, x3, #DEBUG_BVR - save_debug dbgbvr - - mov x5, x25 - add x4, x3, #DEBUG_WCR - save_debug dbgwcr - add x4, x3, #DEBUG_WVR - save_debug dbgwvr - - mrs x21, mdccint_el1 - str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)] - ret - -/* Restore debug state */ -__restore_debug: - // x2: ptr to CPU context - // x3: ptr to debug reg struct - // x4/x5/x6-22/x24-26: trashed - - mrs x26, id_aa64dfr0_el1 - ubfx x24, x26, #12, #4 // Extract BRPs - ubfx x25, x26, #20, #4 // Extract WRPs - mov w26, #15 - sub w24, w26, w24 // How many BPs to skip - sub w25, w26, w25 // How many WPs to skip - - mov x5, x24 - add x4, x3, #DEBUG_BCR - restore_debug dbgbcr - add x4, x3, #DEBUG_BVR - restore_debug dbgbvr - - mov x5, x25 - add x4, x3, #DEBUG_WCR - restore_debug dbgwcr - add x4, x3, #DEBUG_WVR - restore_debug dbgwvr - - ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)] - msr mdccint_el1, x21 - - ret - -__save_fpsimd: - skip_fpsimd_state x3, 1f - save_fpsimd -1: ret - -__restore_fpsimd: - skip_fpsimd_state x3, 1f - restore_fpsimd -1: 
ret - -switch_to_guest_fpsimd: - push x4, lr - - mrs x2, cptr_el2 - bic x2, x2, #CPTR_EL2_TFP - msr cptr_el2, x2 - isb - - mrs x0, tpidr_el2 - - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - bl __save_fpsimd - - add x2, x0, #VCPU_CONTEXT - bl __restore_fpsimd - - skip_32bit_state x3, 1f - ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)] - msr fpexc32_el2, x4 -1: - pop x4, lr - pop x2, x3 - pop x0, x1 - - eret - -/* - * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu); - * - * This is the world switch. The first half of the function - * deals with entering the guest, and anything from __kvm_vcpu_return - * to the end of the function deals with reentering the host. - * On the enter path, only x0 (vcpu pointer) must be preserved until - * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception - * code) must both be preserved until the epilogue. - * In both cases, x2 points to the CPU context we're saving/restoring from/to. - */ -ENTRY(__kvm_vcpu_run) - kern_hyp_va x0 - msr tpidr_el2, x0 // Save the vcpu register - - // Host context - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - - save_host_regs - bl __save_sysregs - - compute_debug_state 1f - add x3, x0, #VCPU_HOST_DEBUG_STATE - bl __save_debug -1: - activate_traps - activate_vm - - restore_vgic_state - restore_timer_state - - // Guest context - add x2, x0, #VCPU_CONTEXT - - // We must restore the 32-bit state before the sysregs, thanks - // to Cortex-A57 erratum #852523. - restore_guest_32bit_state - bl __restore_sysregs - - skip_debug_state x3, 1f - ldr x3, [x0, #VCPU_DEBUG_PTR] - kern_hyp_va x3 - bl __restore_debug -1: - restore_guest_regs - - // That's it, no more messing around. 
- eret - -__kvm_vcpu_return: - // Assume x0 is the vcpu pointer, x1 the return code - // Guest's x0-x3 are on the stack - - // Guest context - add x2, x0, #VCPU_CONTEXT - - save_guest_regs - bl __save_fpsimd - bl __save_sysregs - - skip_debug_state x3, 1f - ldr x3, [x0, #VCPU_DEBUG_PTR] - kern_hyp_va x3 - bl __save_debug -1: - save_guest_32bit_state - - save_timer_state - save_vgic_state - - deactivate_traps - deactivate_vm - - // Host context - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - - bl __restore_sysregs - bl __restore_fpsimd - /* Clear FPSIMD and Trace trapping */ - msr cptr_el2, xzr - - skip_debug_state x3, 1f - // Clear the dirty flag for the next run, as all the state has - // already been saved. Note that we nuke the whole 64bit word. - // If we ever add more flags, we'll have to be more careful... - str xzr, [x0, #VCPU_DEBUG_FLAGS] - add x3, x0, #VCPU_HOST_DEBUG_STATE - bl __restore_debug -1: - restore_host_regs - - mov x0, x1 - ret -END(__kvm_vcpu_run) - -// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); -ENTRY(__kvm_tlb_flush_vmid_ipa) - dsb ishst - - kern_hyp_va x0 - ldr x2, [x0, #KVM_VTTBR] - msr vttbr_el2, x2 - isb - - /* - * We could do so much better if we had the VA as well. - * Instead, we invalidate Stage-2 for this IPA, and the - * whole of Stage-1. Weep... - */ - lsr x1, x1, #12 - tlbi ipas2e1is, x1 - /* - * We have to ensure completion of the invalidation at Stage-2, - * since a table walk on another CPU could refill a TLB with a - * complete (S1 + S2) walk based on the old Stage-2 mapping if - * the Stage-1 invalidation happened first. - */ - dsb ish - tlbi vmalle1is - dsb ish - isb - - msr vttbr_el2, xzr - ret -ENDPROC(__kvm_tlb_flush_vmid_ipa) - -/** - * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs - * @struct kvm *kvm - pointer to kvm structure - * - * Invalidates all Stage 1 and 2 TLB entries for current VMID. 
- */ -ENTRY(__kvm_tlb_flush_vmid) - dsb ishst - - kern_hyp_va x0 - ldr x2, [x0, #KVM_VTTBR] - msr vttbr_el2, x2 - isb - - tlbi vmalls12e1is - dsb ish - isb - - msr vttbr_el2, xzr - ret -ENDPROC(__kvm_tlb_flush_vmid) - -ENTRY(__kvm_flush_vm_context) - dsb ishst - tlbi alle1is - ic ialluis - dsb ish - ret -ENDPROC(__kvm_flush_vm_context) - -__kvm_hyp_panic: - // Stash PAR_EL1 before corrupting it in __restore_sysregs - mrs x0, par_el1 - push x0, xzr - - // Guess the context by looking at VTTBR: - // If zero, then we're already a host. - // Otherwise restore a minimal host context before panicing. - mrs x0, vttbr_el2 - cbz x0, 1f - - mrs x0, tpidr_el2 - - deactivate_traps - deactivate_vm - - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - - bl __restore_sysregs - - /* - * Make sure we have a valid host stack, and don't leave junk in the - * frame pointer that will give us a misleading host stack unwinding. - */ - ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] - msr sp_el1, x22 - mov x29, xzr - -1: adr x0, __hyp_panic_str - adr x1, 2f - ldp x2, x3, [x1] - sub x0, x0, x2 - add x0, x0, x3 - mrs x1, spsr_el2 - mrs x2, elr_el2 - mrs x3, esr_el2 - mrs x4, far_el2 - mrs x5, hpfar_el2 - pop x6, xzr // active context PAR_EL1 - mrs x7, tpidr_el2 - - mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ - PSR_MODE_EL1h) - msr spsr_el2, lr - ldr lr, =panic - msr elr_el2, lr - eret - - .align 3 -2: .quad HYP_PAGE_OFFSET - .quad PAGE_OFFSET -ENDPROC(__kvm_hyp_panic) - -__hyp_panic_str: - .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" - - .align 2 /* * u64 __kvm_call_hyp(void *hypfn, ...); @@ -934,189 +33,23 @@ __hyp_panic_str: * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. + * passed in x0. 
* - * A function pointer with a value of 0 has a special meaning, and is - * used to implement __hyp_get_vectors in the same way as in + * A function pointer with a value less than 0xfff has a special meaning, + * and is used to implement __hyp_get_vectors in the same way as in * arch/arm64/kernel/hyp_stub.S. + * HVC behaves as a 'bl' call and will clobber lr. */ ENTRY(__kvm_call_hyp) +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + str lr, [sp, #-16]! hvc #0 + ldr lr, [sp], #16 ret -ENDPROC(__kvm_call_hyp) - -.macro invalid_vector label, target - .align 2 -\label: - b \target -ENDPROC(\label) -.endm - - /* None of these should ever happen */ - invalid_vector el2t_sync_invalid, __kvm_hyp_panic - invalid_vector el2t_irq_invalid, __kvm_hyp_panic - invalid_vector el2t_fiq_invalid, __kvm_hyp_panic - invalid_vector el2t_error_invalid, __kvm_hyp_panic - invalid_vector el2h_sync_invalid, __kvm_hyp_panic - invalid_vector el2h_irq_invalid, __kvm_hyp_panic - invalid_vector el2h_fiq_invalid, __kvm_hyp_panic - invalid_vector el2h_error_invalid, __kvm_hyp_panic - invalid_vector el1_sync_invalid, __kvm_hyp_panic - invalid_vector el1_irq_invalid, __kvm_hyp_panic - invalid_vector el1_fiq_invalid, __kvm_hyp_panic - invalid_vector el1_error_invalid, __kvm_hyp_panic - -el1_sync: // Guest trapped into EL2 - push x0, x1 - push x2, x3 - - mrs x1, esr_el2 - lsr x2, x1, #ESR_ELx_EC_SHIFT - - cmp x2, #ESR_ELx_EC_HVC64 - b.ne el1_trap - - mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest - cbnz x3, el1_trap // called HVC - - /* Here, we're pretty sure the host called HVC. */ - pop x2, x3 - pop x0, x1 - - /* Check for __hyp_get_vectors */ - cbnz x0, 1f - mrs x0, vbar_el2 - b 2f - -1: push lr, xzr - - /* - * Compute the function address in EL2, and shuffle the parameters. 
- */ - kern_hyp_va x0 - mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr - - pop lr, xzr -2: eret - -el1_trap: - /* - * x1: ESR - * x2: ESR_EC - */ - - /* Guest accessed VFP/SIMD registers, save host, restore Guest */ - cmp x2, #ESR_ELx_EC_FP_ASIMD - b.eq switch_to_guest_fpsimd - - cmp x2, #ESR_ELx_EC_DABT_LOW - mov x0, #ESR_ELx_EC_IABT_LOW - ccmp x2, x0, #4, ne - b.ne 1f // Not an abort we care about - - /* This is an abort. Check for permission fault */ -alternative_if_not ARM64_WORKAROUND_834220 - and x2, x1, #ESR_ELx_FSC_TYPE - cmp x2, #FSC_PERM - b.ne 1f // Not a permission fault alternative_else - nop // Use the permission fault path to - nop // check for a valid S1 translation, - nop // regardless of the ESR value. + b __vhe_hyp_call + nop + nop + nop alternative_endif - - /* - * Check for Stage-1 page table walk, which is guaranteed - * to give a valid HPFAR_EL2. - */ - tbnz x1, #7, 1f // S1PTW is set - - /* Preserve PAR_EL1 */ - mrs x3, par_el1 - push x3, xzr - - /* - * Permission fault, HPFAR_EL2 is invalid. - * Resolve the IPA the hard way using the guest VA. - * Stage-1 translation already validated the memory access rights. - * As such, we can use the EL1 translation regime, and don't have - * to distinguish between EL0 and EL1 access. - */ - mrs x2, far_el2 - at s1e1r, x2 - isb - - /* Read result */ - mrs x3, par_el1 - pop x0, xzr // Restore PAR_EL1 from the stack - msr par_el1, x0 - tbnz x3, #0, 3f // Bail out if we failed the translation - ubfx x3, x3, #12, #36 // Extract IPA - lsl x3, x3, #4 // and present it like HPFAR - b 2f - -1: mrs x3, hpfar_el2 - mrs x2, far_el2 - -2: mrs x0, tpidr_el2 - str w1, [x0, #VCPU_ESR_EL2] - str x2, [x0, #VCPU_FAR_EL2] - str x3, [x0, #VCPU_HPFAR_EL2] - - mov x1, #ARM_EXCEPTION_TRAP - b __kvm_vcpu_return - - /* - * Translation failed. Just return to the guest and - * let it fault again. Another CPU is probably playing - * behind our back. 
- */ -3: pop x2, x3 - pop x0, x1 - - eret - -el1_irq: - push x0, x1 - push x2, x3 - mrs x0, tpidr_el2 - mov x1, #ARM_EXCEPTION_IRQ - b __kvm_vcpu_return - - .ltorg - - .align 11 - -ENTRY(__kvm_hyp_vector) - ventry el2t_sync_invalid // Synchronous EL2t - ventry el2t_irq_invalid // IRQ EL2t - ventry el2t_fiq_invalid // FIQ EL2t - ventry el2t_error_invalid // Error EL2t - - ventry el2h_sync_invalid // Synchronous EL2h - ventry el2h_irq_invalid // IRQ EL2h - ventry el2h_fiq_invalid // FIQ EL2h - ventry el2h_error_invalid // Error EL2h - - ventry el1_sync // Synchronous 64-bit EL1 - ventry el1_irq // IRQ 64-bit EL1 - ventry el1_fiq_invalid // FIQ 64-bit EL1 - ventry el1_error_invalid // Error 64-bit EL1 - - ventry el1_sync // Synchronous 32-bit EL1 - ventry el1_irq // IRQ 32-bit EL1 - ventry el1_fiq_invalid // FIQ 32-bit EL1 - ventry el1_error_invalid // Error 32-bit EL1 -ENDPROC(__kvm_hyp_vector) - - -ENTRY(__kvm_get_mdcr_el2) - mrs x0, mdcr_el2 - ret -ENDPROC(__kvm_get_mdcr_el2) - - .popsection +ENDPROC(__kvm_call_hyp) diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..826032bc3945ba266142f2a5a0d8367322d71a49 --- /dev/null +++ b/arch/arm64/kvm/hyp/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for Kernel-based Virtual Machine module, HYP part +# + +obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += entry.o +obj-$(CONFIG_KVM_ARM_HOST) += switch.o +obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o +obj-$(CONFIG_KVM_ARM_HOST) += tlb.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c new file mode 100644 index 0000000000000000000000000000000000000000..2f8bca8af295b9029ba5780307d2a680495d08fb --- /dev/null +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ 
-0,0 +1,141 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +#include +#include +#include + +#include "hyp.h" + +#define read_debug(r,n) read_sysreg(r##n##_el1) +#define write_debug(v,r,n) write_sysreg(v, r##n##_el1) + +#define save_debug(ptr,reg,nr) \ + switch (nr) { \ + case 15: ptr[15] = read_debug(reg, 15); \ + case 14: ptr[14] = read_debug(reg, 14); \ + case 13: ptr[13] = read_debug(reg, 13); \ + case 12: ptr[12] = read_debug(reg, 12); \ + case 11: ptr[11] = read_debug(reg, 11); \ + case 10: ptr[10] = read_debug(reg, 10); \ + case 9: ptr[9] = read_debug(reg, 9); \ + case 8: ptr[8] = read_debug(reg, 8); \ + case 7: ptr[7] = read_debug(reg, 7); \ + case 6: ptr[6] = read_debug(reg, 6); \ + case 5: ptr[5] = read_debug(reg, 5); \ + case 4: ptr[4] = read_debug(reg, 4); \ + case 3: ptr[3] = read_debug(reg, 3); \ + case 2: ptr[2] = read_debug(reg, 2); \ + case 1: ptr[1] = read_debug(reg, 1); \ + default: ptr[0] = read_debug(reg, 0); \ + } + +#define restore_debug(ptr,reg,nr) \ + switch (nr) { \ + case 15: write_debug(ptr[15], reg, 15); \ + case 14: write_debug(ptr[14], reg, 14); \ + case 13: write_debug(ptr[13], reg, 13); \ + case 12: write_debug(ptr[12], reg, 12); \ + case 11: write_debug(ptr[11], reg, 11); \ + case 10: write_debug(ptr[10], reg, 10); \ + case 9: write_debug(ptr[9], reg, 9); \ + case 8: write_debug(ptr[8], reg, 8); \ + case 7: write_debug(ptr[7], reg, 
7); \ + case 6: write_debug(ptr[6], reg, 6); \ + case 5: write_debug(ptr[5], reg, 5); \ + case 4: write_debug(ptr[4], reg, 4); \ + case 3: write_debug(ptr[3], reg, 3); \ + case 2: write_debug(ptr[2], reg, 2); \ + case 1: write_debug(ptr[1], reg, 1); \ + default: write_debug(ptr[0], reg, 0); \ + } + +void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt) +{ + u64 aa64dfr0; + int brps, wrps; + + if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)) + return; + + aa64dfr0 = read_sysreg(id_aa64dfr0_el1); + brps = (aa64dfr0 >> 12) & 0xf; + wrps = (aa64dfr0 >> 20) & 0xf; + + save_debug(dbg->dbg_bcr, dbgbcr, brps); + save_debug(dbg->dbg_bvr, dbgbvr, brps); + save_debug(dbg->dbg_wcr, dbgwcr, wrps); + save_debug(dbg->dbg_wvr, dbgwvr, wrps); + + ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1); +} + +void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt) +{ + u64 aa64dfr0; + int brps, wrps; + + if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)) + return; + + aa64dfr0 = read_sysreg(id_aa64dfr0_el1); + + brps = (aa64dfr0 >> 12) & 0xf; + wrps = (aa64dfr0 >> 20) & 0xf; + + restore_debug(dbg->dbg_bcr, dbgbcr, brps); + restore_debug(dbg->dbg_bvr, dbgbvr, brps); + restore_debug(dbg->dbg_wcr, dbgwcr, wrps); + restore_debug(dbg->dbg_wvr, dbgwvr, wrps); + + write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1); +} + +void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu) +{ + /* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform + * a full save/restore cycle. 
*/ + if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) || + (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE)) + vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + + __debug_save_state(vcpu, &vcpu->arch.host_debug_state, + kern_hyp_va(vcpu->arch.host_cpu_context)); +} + +void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu) +{ + __debug_restore_state(vcpu, &vcpu->arch.host_debug_state, + kern_hyp_va(vcpu->arch.host_cpu_context)); + + if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY; +} + +static u32 __hyp_text __debug_read_mdcr_el2(void) +{ + return read_sysreg(mdcr_el2); +} + +__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void); diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S new file mode 100644 index 0000000000000000000000000000000000000000..fd0fbe9b7e6a4a26866120135e8db7b872392396 --- /dev/null +++ b/arch/arm64/kvm/hyp/entry.S @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) +#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) + + .text + .pushsection .hyp.text, "ax" + +.macro save_callee_saved_regs ctxt + stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] + stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] + stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] + stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] + stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] + stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] +.endm + +.macro restore_callee_saved_regs ctxt + ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] + ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] + ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] + ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] + ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] + ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] +.endm + +/* + * u64 __guest_enter(struct kvm_vcpu *vcpu, + * struct kvm_cpu_context *host_ctxt); + */ +ENTRY(__guest_enter) + // x0: vcpu + // x1: host/guest context + // x2-x18: clobbered by macros + + // Store the host regs + save_callee_saved_regs x1 + + // Preserve vcpu & host_ctxt for use at exit time + stp x0, x1, [sp, #-16]! + + add x1, x0, #VCPU_CONTEXT + + // Prepare x0-x1 for later restore by pushing them onto the stack + ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)] + stp x2, x3, [sp, #-16]! + + // x2-x18 + ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)] + ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)] + ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)] + ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)] + ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)] + ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)] + ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)] + ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)] + ldr x18, [x1, #CPU_XREG_OFFSET(18)] + + // x19-x29, lr + restore_callee_saved_regs x1 + + // Last bits of the 64bit state + ldp x0, x1, [sp], #16 + + // Do not touch any register after this! 
+ eret +ENDPROC(__guest_enter) + +ENTRY(__guest_exit) + // x0: vcpu + // x1: return code + // x2-x3: free + // x4-x29,lr: vcpu regs + // vcpu x0-x3 on the stack + + add x2, x0, #VCPU_CONTEXT + + stp x4, x5, [x2, #CPU_XREG_OFFSET(4)] + stp x6, x7, [x2, #CPU_XREG_OFFSET(6)] + stp x8, x9, [x2, #CPU_XREG_OFFSET(8)] + stp x10, x11, [x2, #CPU_XREG_OFFSET(10)] + stp x12, x13, [x2, #CPU_XREG_OFFSET(12)] + stp x14, x15, [x2, #CPU_XREG_OFFSET(14)] + stp x16, x17, [x2, #CPU_XREG_OFFSET(16)] + str x18, [x2, #CPU_XREG_OFFSET(18)] + + ldp x6, x7, [sp], #16 // x2, x3 + ldp x4, x5, [sp], #16 // x0, x1 + + stp x4, x5, [x2, #CPU_XREG_OFFSET(0)] + stp x6, x7, [x2, #CPU_XREG_OFFSET(2)] + + save_callee_saved_regs x2 + + // Restore vcpu & host_ctxt from the stack + // (preserving return code in x1) + ldp x0, x2, [sp], #16 + // Now restore the host regs + restore_callee_saved_regs x2 + + mov x0, x1 + ret +ENDPROC(__guest_exit) + +ENTRY(__fpsimd_guest_restore) + stp x4, lr, [sp, #-16]! + + mrs x2, cptr_el2 + bic x2, x2, #CPTR_EL2_TFP + msr cptr_el2, x2 + isb + + mrs x3, tpidr_el2 + + ldr x0, [x3, #VCPU_HOST_CONTEXT] + kern_hyp_va x0 + add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS) + bl __fpsimd_save_state + + add x2, x3, #VCPU_CONTEXT + add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) + bl __fpsimd_restore_state + + // Skip restoring fpexc32 for AArch64 guests + mrs x1, hcr_el2 + tbnz x1, #HCR_RW_SHIFT, 1f + ldr x4, [x3, #VCPU_FPEXC32_EL2] + msr fpexc32_el2, x4 +1: + ldp x4, lr, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 + + eret +ENDPROC(__fpsimd_guest_restore) diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kvm/hyp/fpsimd.S similarity index 52% rename from arch/arm64/kernel/psci-call.S rename to arch/arm64/kvm/hyp/fpsimd.S index cf83e61cd3b59436eb75d30b76f590d4e4b7f7b8..da3f22c7f14ac6b3f10cb05d3055238cd568cccf 100644 --- a/arch/arm64/kernel/psci-call.S +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -1,4 +1,7 @@ /* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * * This 
program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -8,21 +11,23 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * Copyright (C) 2015 ARM Limited - * - * Author: Will Deacon + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #include -/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ -ENTRY(__invoke_psci_fn_hvc) - hvc #0 +#include + + .text + .pushsection .hyp.text, "ax" + +ENTRY(__fpsimd_save_state) + fpsimd_save x0, 1 ret -ENDPROC(__invoke_psci_fn_hvc) +ENDPROC(__fpsimd_save_state) -/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ -ENTRY(__invoke_psci_fn_smc) - smc #0 +ENTRY(__fpsimd_restore_state) + fpsimd_restore x0, 1 ret -ENDPROC(__invoke_psci_fn_smc) +ENDPROC(__fpsimd_restore_state) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S new file mode 100644 index 0000000000000000000000000000000000000000..44c79fd81ad1c3a5ffc71c445d89dea50a5d5ae9 --- /dev/null +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include + + .text + .pushsection .hyp.text, "ax" + +.macro save_x0_to_x3 + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! +.endm + +.macro restore_x0_to_x3 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + +.macro do_el2_call + /* + * Shuffle the parameters before calling the function + * pointed to in x0. Assumes parameters in x[1,2,3]. + */ + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr +.endm + +ENTRY(__vhe_hyp_call) + str lr, [sp, #-16]! + do_el2_call + ldr lr, [sp], #16 + /* + * We used to rely on having an exception return to get + * an implicit isb. In the E2H case, we don't have it anymore. + * rather than changing all the leaf functions, just do it here + * before returning to the rest of the kernel. + */ + isb + ret +ENDPROC(__vhe_hyp_call) + +el1_sync: // Guest trapped into EL2 + save_x0_to_x3 + + mrs x1, esr_el2 + lsr x2, x1, #ESR_ELx_EC_SHIFT + + cmp x2, #ESR_ELx_EC_HVC64 + b.ne el1_trap + + mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest + cbnz x3, el1_trap // called HVC + + /* Here, we're pretty sure the host called HVC. */ + restore_x0_to_x3 + + cmp x0, #HVC_GET_VECTORS + b.ne 1f + mrs x0, vbar_el2 + b 2f + +1: + /* + * Perform the EL2 call + */ + kern_hyp_va x0 + do_el2_call + +2: eret + +el1_trap: + /* + * x1: ESR + * x2: ESR_EC + */ + + /* Guest accessed VFP/SIMD registers, save host, restore Guest */ + cmp x2, #ESR_ELx_EC_FP_ASIMD + b.eq __fpsimd_guest_restore + + cmp x2, #ESR_ELx_EC_DABT_LOW + mov x0, #ESR_ELx_EC_IABT_LOW + ccmp x2, x0, #4, ne + b.ne 1f // Not an abort we care about + + /* This is an abort. Check for permission fault */ +alternative_if_not ARM64_WORKAROUND_834220 + and x2, x1, #ESR_ELx_FSC_TYPE + cmp x2, #FSC_PERM + b.ne 1f // Not a permission fault +alternative_else + nop // Use the permission fault path to + nop // check for a valid S1 translation, + nop // regardless of the ESR value. 
+alternative_endif + + /* + * Check for Stage-1 page table walk, which is guaranteed + * to give a valid HPFAR_EL2. + */ + tbnz x1, #7, 1f // S1PTW is set + + /* Preserve PAR_EL1 */ + mrs x3, par_el1 + stp x3, xzr, [sp, #-16]! + + /* + * Permission fault, HPFAR_EL2 is invalid. + * Resolve the IPA the hard way using the guest VA. + * Stage-1 translation already validated the memory access rights. + * As such, we can use the EL1 translation regime, and don't have + * to distinguish between EL0 and EL1 access. + */ + mrs x2, far_el2 + at s1e1r, x2 + isb + + /* Read result */ + mrs x3, par_el1 + ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack + msr par_el1, x0 + tbnz x3, #0, 3f // Bail out if we failed the translation + ubfx x3, x3, #12, #36 // Extract IPA + lsl x3, x3, #4 // and present it like HPFAR + b 2f + +1: mrs x3, hpfar_el2 + mrs x2, far_el2 + +2: mrs x0, tpidr_el2 + str w1, [x0, #VCPU_ESR_EL2] + str x2, [x0, #VCPU_FAR_EL2] + str x3, [x0, #VCPU_HPFAR_EL2] + + mov x1, #ARM_EXCEPTION_TRAP + b __guest_exit + + /* + * Translation failed. Just return to the guest and + * let it fault again. Another CPU is probably playing + * behind our back. 
+ */ +3: restore_x0_to_x3 + + eret + +el1_irq: + save_x0_to_x3 + mrs x0, tpidr_el2 + mov x1, #ARM_EXCEPTION_IRQ + b __guest_exit + +ENTRY(__hyp_do_panic) + mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ + PSR_MODE_EL1h) + msr spsr_el2, lr + ldr lr, =panic + msr elr_el2, lr + eret +ENDPROC(__hyp_do_panic) + +.macro invalid_vector label, target = __hyp_panic + .align 2 +\label: + b \target +ENDPROC(\label) +.endm + + /* None of these should ever happen */ + invalid_vector el2t_sync_invalid + invalid_vector el2t_irq_invalid + invalid_vector el2t_fiq_invalid + invalid_vector el2t_error_invalid + invalid_vector el2h_sync_invalid + invalid_vector el2h_irq_invalid + invalid_vector el2h_fiq_invalid + invalid_vector el2h_error_invalid + invalid_vector el1_sync_invalid + invalid_vector el1_irq_invalid + invalid_vector el1_fiq_invalid + invalid_vector el1_error_invalid + + .ltorg + + .align 11 + +ENTRY(__kvm_hyp_vector) + ventry el2t_sync_invalid // Synchronous EL2t + ventry el2t_irq_invalid // IRQ EL2t + ventry el2t_fiq_invalid // FIQ EL2t + ventry el2t_error_invalid // Error EL2t + + ventry el2h_sync_invalid // Synchronous EL2h + ventry el2h_irq_invalid // IRQ EL2h + ventry el2h_fiq_invalid // FIQ EL2h + ventry el2h_error_invalid // Error EL2h + + ventry el1_sync // Synchronous 64-bit EL1 + ventry el1_irq // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync // Synchronous 32-bit EL1 + ventry el1_irq // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +ENDPROC(__kvm_hyp_vector) diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h new file mode 100644 index 0000000000000000000000000000000000000000..fb275178b6afecac4a22590cb94ba1e8612f7098 --- /dev/null +++ b/arch/arm64/kvm/hyp/hyp.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it 
and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM64_KVM_HYP_H__ +#define __ARM64_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK) +#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \ + + PAGE_OFFSET) + +/** + * hyp_alternate_select - Generates patchable code sequences that are + * used to switch between two implementations of a function, depending + * on the availability of a feature. 
+ * + * @fname: a symbol name that will be defined as a function returning a + * function pointer whose type will match @orig and @alt + * @orig: A pointer to the default function, as returned by @fname when + * @cond doesn't hold + * @alt: A pointer to the alternate function, as returned by @fname + * when @cond holds + * @cond: a CPU feature (as described in asm/cpufeature.h) + */ +#define hyp_alternate_select(fname, orig, alt, cond) \ +typeof(orig) * __hyp_text fname(void) \ +{ \ + typeof(alt) *val = orig; \ + asm volatile(ALTERNATIVE("nop \n", \ + "mov %0, %1 \n", \ + cond) \ + : "+r" (val) : "r" (alt)); \ + return val; \ +} + +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + +void __vgic_v3_save_state(struct kvm_vcpu *vcpu); +void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); + +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); +void __sysreg32_save_state(struct kvm_vcpu *vcpu); +void __sysreg32_restore_state(struct kvm_vcpu *vcpu); + +void __debug_save_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_restore_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); +void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); + +void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); +void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); +static inline bool __fpsimd_enabled(void) +{ + return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); +} + +u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); +void __noreturn __hyp_do_panic(unsigned long, ...); + +#endif /* __ARM64_KVM_HYP_H__ */ + diff --git a/arch/arm64/kvm/hyp/switch.c 
b/arch/arm64/kvm/hyp/switch.c new file mode 100644 index 0000000000000000000000000000000000000000..ca8f5a5e2f965748fca28ced482c9c195270e5ab --- /dev/null +++ b/arch/arm64/kvm/hyp/switch.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "hyp.h" + +static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) +{ + u64 val; + + /* + * We are about to set CPTR_EL2.TFP to trap all floating point + * register accesses to EL2, however, the ARM ARM clearly states that + * traps are only taken to EL2 if the operation would not otherwise + * trap to EL1. Therefore, always make sure that for 32-bit guests, + * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit. 
+ */ + val = vcpu->arch.hcr_el2; + if (!(val & HCR_RW)) { + write_sysreg(1 << 30, fpexc32_el2); + isb(); + } + write_sysreg(val, hcr_el2); + /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ + write_sysreg(1 << 15, hstr_el2); + write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); +} + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + write_sysreg(HCR_RW, hcr_el2); + write_sysreg(0, hstr_el2); + write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); + write_sysreg(0, cptr_el2); +} + +static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + write_sysreg(kvm->arch.vttbr, vttbr_el2); +} + +static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) +{ + write_sysreg(0, vttbr_el2); +} + +static hyp_alternate_select(__vgic_call_save_state, + __vgic_v2_save_state, __vgic_v3_save_state, + ARM64_HAS_SYSREG_GIC_CPUIF); + +static hyp_alternate_select(__vgic_call_restore_state, + __vgic_v2_restore_state, __vgic_v3_restore_state, + ARM64_HAS_SYSREG_GIC_CPUIF); + +static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) +{ + __vgic_call_save_state()(vcpu); + write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2); +} + +static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) +{ + u64 val; + + val = read_sysreg(hcr_el2); + val |= HCR_INT_OVERRIDE; + val |= vcpu->arch.irq_lines; + write_sysreg(val, hcr_el2); + + __vgic_call_restore_state()(vcpu); +} + +static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_cpu_context *guest_ctxt; + bool fp_enabled; + u64 exit_code; + + vcpu = kern_hyp_va(vcpu); + write_sysreg(vcpu, tpidr_el2); + + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + guest_ctxt = &vcpu->arch.ctxt; + + __sysreg_save_state(host_ctxt); + __debug_cond_save_host_state(vcpu); + + __activate_traps(vcpu); + __activate_vm(vcpu); + + 
__vgic_restore_state(vcpu); + __timer_restore_state(vcpu); + + /* + * We must restore the 32-bit state before the sysregs, thanks + * to Cortex-A57 erratum #852523. + */ + __sysreg32_restore_state(vcpu); + __sysreg_restore_state(guest_ctxt); + __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); + + /* Jump in the fire! */ + exit_code = __guest_enter(vcpu, host_ctxt); + /* And we're baaack! */ + + fp_enabled = __fpsimd_enabled(); + + __sysreg_save_state(guest_ctxt); + __sysreg32_save_state(vcpu); + __timer_save_state(vcpu); + __vgic_save_state(vcpu); + + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + + __sysreg_restore_state(host_ctxt); + + if (fp_enabled) { + __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs); + __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs); + } + + __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); + __debug_cond_restore_host_state(vcpu); + + return exit_code; +} + +__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); + +static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; + +void __hyp_text __noreturn __hyp_panic(void) +{ + unsigned long str_va = (unsigned long)__hyp_panic_string; + u64 spsr = read_sysreg(spsr_el2); + u64 elr = read_sysreg(elr_el2); + u64 par = read_sysreg(par_el1); + + if (read_sysreg(vttbr_el2)) { + struct kvm_vcpu *vcpu; + struct kvm_cpu_context *host_ctxt; + + vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + __sysreg_restore_state(host_ctxt); + } + + /* Call panic for real */ + __hyp_do_panic(hyp_kern_va(str_va), + spsr, elr, + read_sysreg(esr_el2), read_sysreg(far_el2), + read_sysreg(hpfar_el2), par, + (void *)read_sysreg(tpidr_el2)); + + unreachable(); +} diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c new file mode 100644 index 
0000000000000000000000000000000000000000..425630980229f7c26d05190abb9866d02c1af13a --- /dev/null +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +#include +#include + +#include "hyp.h" + +/* ctxt is already in the HYP VA space */ +void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) +{ + ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); + ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); + ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1); + ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); + ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1); + ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1); + ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1); + ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1); + ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1); + ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1); + ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1); + ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1); + ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1); + ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1); + ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1); + ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); + ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); + ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1); + 
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1); + ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); + ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); + + ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); + ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); + ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); + ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); + ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1); + ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); +} + +void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) +{ + write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); + write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); + write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1); + write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); + write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1); + write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1); + write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1); + write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1); + write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1); + write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1); + write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1); + write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1); + write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1); + write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1); + write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1); + write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); + write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); + write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1); + write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1); + write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); + write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); + + write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); + write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); + write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); + write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); + write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); + 
write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); +} + +void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) +{ + u64 *spsr, *sysreg; + + if (read_sysreg(hcr_el2) & HCR_RW) + return; + + spsr = vcpu->arch.ctxt.gp_regs.spsr; + sysreg = vcpu->arch.ctxt.sys_regs; + + spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt); + spsr[KVM_SPSR_UND] = read_sysreg(spsr_und); + spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq); + spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq); + + sysreg[DACR32_EL2] = read_sysreg(dacr32_el2); + sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2); + + if (__fpsimd_enabled()) + sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2); + + if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2); +} + +void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) +{ + u64 *spsr, *sysreg; + + if (read_sysreg(hcr_el2) & HCR_RW) + return; + + spsr = vcpu->arch.ctxt.gp_regs.spsr; + sysreg = vcpu->arch.ctxt.sys_regs; + + write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt); + write_sysreg(spsr[KVM_SPSR_UND], spsr_und); + write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq); + write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq); + + write_sysreg(sysreg[DACR32_EL2], dacr32_el2); + write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2); + + if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2); +} diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c new file mode 100644 index 0000000000000000000000000000000000000000..1051e5d7320f7bb1d0c3da0e2de2529e8b283838 --- /dev/null +++ b/arch/arm64/kvm/hyp/timer-sr.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + if (kvm->arch.timer.enabled) { + timer->cntv_ctl = read_sysreg(cntv_ctl_el0); + timer->cntv_cval = read_sysreg(cntv_cval_el0); + } + + /* Disable the virtual timer */ + write_sysreg(0, cntv_ctl_el0); + + /* Allow physical timer/counter access for the host */ + val = read_sysreg(cnthctl_el2); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, cnthctl_el2); + + /* Clear cntvoff for the host */ + write_sysreg(0, cntvoff_el2); +} + +void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(cnthctl_el2); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, cnthctl_el2); + + if (kvm->arch.timer.enabled) { + write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); + write_sysreg(timer->cntv_cval, cntv_cval_el0); + isb(); + write_sysreg(timer->cntv_ctl, cntv_ctl_el0); + } +} diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c new file mode 100644 index 0000000000000000000000000000000000000000..2a7e0d838698d2dff2822bad23865932ce72e3aa --- /dev/null +++ b/arch/arm64/kvm/hyp/tlb.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc 
Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "hyp.h" + +static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) +{ + dsb(ishst); + + /* Switch to requested VMID */ + kvm = kern_hyp_va(kvm); + write_sysreg(kvm->arch.vttbr, vttbr_el2); + isb(); + + /* + * We could do so much better if we had the VA as well. + * Instead, we invalidate Stage-2 for this IPA, and the + * whole of Stage-1. Weep... + */ + ipa >>= 12; + asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa)); + + /* + * We have to ensure completion of the invalidation at Stage-2, + * since a table walk on another CPU could refill a TLB with a + * complete (S1 + S2) walk based on the old Stage-2 mapping if + * the Stage-1 invalidation happened first. 
+ */ + dsb(ish); + asm volatile("tlbi vmalle1is" : : ); + dsb(ish); + isb(); + + write_sysreg(0, vttbr_el2); +} + +__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, + phys_addr_t ipa); + +static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) +{ + dsb(ishst); + + /* Switch to requested VMID */ + kvm = kern_hyp_va(kvm); + write_sysreg(kvm->arch.vttbr, vttbr_el2); + isb(); + + asm volatile("tlbi vmalls12e1is" : : ); + dsb(ish); + isb(); + + write_sysreg(0, vttbr_el2); +} + +__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm); + +static void __hyp_text __tlb_flush_vm_context(void) +{ + dsb(ishst); + asm volatile("tlbi alle1is \n" + "ic ialluis ": : ); + dsb(ish); +} + +__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void); diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c new file mode 100644 index 0000000000000000000000000000000000000000..e71761238cfce431ba859bfcfdd35e9751bd9c7d --- /dev/null +++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + u32 eisr0, eisr1, elrsr0, elrsr1; + int i, nr_lr; + + if (!base) + return; + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); + cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); + eisr0 = readl_relaxed(base + GICH_EISR0); + elrsr0 = readl_relaxed(base + GICH_ELRSR0); + if (unlikely(nr_lr > 32)) { + eisr1 = readl_relaxed(base + GICH_EISR1); + elrsr1 = readl_relaxed(base + GICH_ELRSR1); + } else { + eisr1 = elrsr1 = 0; + } +#ifdef CONFIG_CPU_BIG_ENDIAN + cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; + cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; +#else + cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; + cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; +#endif + cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); + + writel_relaxed(0, base + GICH_HCR); + + for (i = 0; i < nr_lr; i++) + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); +} + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + int i, nr_lr; + + if (!base) + return; + + writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); + writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); + writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + for (i = 0; i < nr_lr; i++) + writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); +} diff --git 
a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c new file mode 100644 index 0000000000000000000000000000000000000000..9142e082f5f3996df9428670f2e107cfc948d2e3 --- /dev/null +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -0,0 +1,228 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "hyp.h" + +#define vtr_to_max_lr_idx(v) ((v) & 0xf) +#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) + +#define read_gicreg(r) \ + ({ \ + u64 reg; \ + asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \ + reg; \ + }) + +#define write_gicreg(v,r) \ + do { \ + u64 __val = (v); \ + asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\ + } while (0) + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + u64 val; + u32 max_lr_idx, nr_pri_bits; + + /* + * Make sure stores to the GIC via the memory mapped interface + * are now visible to the system register interface. 
+ */ + dsb(st); + + cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); + cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); + cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); + cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); + + write_gicreg(0, ICH_HCR_EL2); + val = read_gicreg(ICH_VTR_EL2); + max_lr_idx = vtr_to_max_lr_idx(val); + nr_pri_bits = vtr_to_nr_pri_bits(val); + + switch (max_lr_idx) { + case 15: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2); + case 14: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2); + case 13: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2); + case 12: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2); + case 11: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2); + case 10: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2); + case 9: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2); + case 8: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2); + case 7: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2); + case 6: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2); + case 5: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2); + case 4: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2); + case 3: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2); + case 2: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2); + case 1: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2); + case 0: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); + cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); + case 6: + cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2); + default: + cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap1r[3] = 
read_gicreg(ICH_AP1R3_EL2); + cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); + case 6: + cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2); + default: + cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2); + } + + val = read_gicreg(ICC_SRE_EL2); + write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); + isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ + write_gicreg(1, ICC_SRE_EL1); +} + +void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + u64 val; + u32 max_lr_idx, nr_pri_bits; + + /* + * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a + * Group0 interrupt (as generated in GICv2 mode) to be + * delivered as a FIQ to the guest, with potentially fatal + * consequences. So we must make sure that ICC_SRE_EL1 has + * been actually programmed with the value we want before + * starting to mess with the rest of the GIC. + */ + write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); + isb(); + + write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); + write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); + + val = read_gicreg(ICH_VTR_EL2); + max_lr_idx = vtr_to_max_lr_idx(val); + nr_pri_bits = vtr_to_nr_pri_bits(val); + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); + write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); + write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); + } + + switch (max_lr_idx) { + case 15: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); + case 14: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2); + case 13: + 
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2); + case 12: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2); + case 11: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2); + case 10: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2); + case 9: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2); + case 8: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2); + case 7: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2); + case 6: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2); + case 5: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2); + case 4: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2); + case 3: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2); + case 2: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2); + case 1: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2); + case 0: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2); + } + + /* + * Ensures that the above will have reached the + * (re)distributors. This ensure the guest will read the + * correct values from the memory-mapped interface. + */ + isb(); + dsb(sy); + + /* + * Prevent the guest from touching the GIC system registers if + * SRE isn't enabled for GICv3 emulation. 
+ */ + if (!cpu_if->vgic_sre) { + write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, + ICC_SRE_EL2); + } +} + +static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) +{ + return read_gicreg(ICH_VTR_EL2); +} + +__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f34745cb3d236fe0a4731f8d02031f8ff764d69c..d6e155a212dc42fa9696879c710a395fc8be2779 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -29,7 +29,9 @@ #include #include #include +#include #include +#include /* * ARMv8 Reset Values @@ -123,3 +125,15 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset timer */ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); } + +extern char __hyp_idmap_text_start[]; + +phys_addr_t kvm_hyp_reset_entry(void) +{ + unsigned long offset; + + offset = (unsigned long)__kvm_hyp_reset + - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK); + + return TRAMPOLINE_VA + offset; +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d2650e84faf2f53f2afbdbd15e1f54d217e3fdb8..eec3598b4184077b83b5a1f24321891cb110f5bb 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -219,9 +220,9 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the * hyp.S code switches between host and guest values in future. 
*/ -static inline void reg_to_dbg(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - u64 *dbg_reg) +static void reg_to_dbg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + u64 *dbg_reg) { u64 val = p->regval; @@ -234,18 +235,18 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } -static inline void dbg_to_reg(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - u64 *dbg_reg) +static void dbg_to_reg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + u64 *dbg_reg) { p->regval = *dbg_reg; if (p->is_32bit) p->regval &= 0xffffffffUL; } -static inline bool trap_bvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_bvr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; @@ -279,15 +280,15 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_bvr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_bvr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; } -static inline bool trap_bcr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_bcr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; @@ -322,15 +323,15 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_bcr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_bcr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; } -static inline bool trap_wvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_wvr(struct kvm_vcpu *vcpu, + struct 
sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; @@ -365,15 +366,15 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_wvr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_wvr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; } -static inline bool trap_wcr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_wcr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; @@ -407,8 +408,8 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_wcr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_wcr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; } @@ -722,9 +723,9 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, * system is in. */ -static inline bool trap_xvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_xvr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S deleted file mode 100644 index 3f000712a85df9be3926a44fb1f67e9d2310c6eb..0000000000000000000000000000000000000000 --- a/arch/arm64/kvm/vgic-v2-switch.S +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - - .text - .pushsection .hyp.text, "ax" - -/* - * Save the VGIC CPU state into memory - * x0: Register pointing to VCPU struct - * Do not corrupt x1!!! - */ -ENTRY(__save_vgic_v2_state) -__save_vgic_v2_state: - /* Get VGIC VCTRL base into x2 */ - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr x2, [x2, #KVM_VGIC_VCTRL] - kern_hyp_va x2 - cbz x2, 2f // disabled - - /* Compute the address of struct vgic_cpu */ - add x3, x0, #VCPU_VGIC_CPU - - /* Save all interesting registers */ - ldr w5, [x2, #GICH_VMCR] - ldr w6, [x2, #GICH_MISR] - ldr w7, [x2, #GICH_EISR0] - ldr w8, [x2, #GICH_EISR1] - ldr w9, [x2, #GICH_ELRSR0] - ldr w10, [x2, #GICH_ELRSR1] - ldr w11, [x2, #GICH_APR] -CPU_BE( rev w5, w5 ) -CPU_BE( rev w6, w6 ) -CPU_BE( rev w7, w7 ) -CPU_BE( rev w8, w8 ) -CPU_BE( rev w9, w9 ) -CPU_BE( rev w10, w10 ) -CPU_BE( rev w11, w11 ) - - str w5, [x3, #VGIC_V2_CPU_VMCR] - str w6, [x3, #VGIC_V2_CPU_MISR] -CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] ) -CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] ) -CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] ) -CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] ) -CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] ) -CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] ) -CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] ) -CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] ) - str w11, [x3, #VGIC_V2_CPU_APR] - - /* Clear GICH_HCR */ - str wzr, [x2, #GICH_HCR] - - /* Save list registers */ - add x2, x2, #GICH_LR0 - ldr w4, [x3, #VGIC_CPU_NR_LR] - add x3, x3, #VGIC_V2_CPU_LR -1: ldr w5, [x2], #4 -CPU_BE( rev w5, w5 ) - str 
w5, [x3], #4 - sub w4, w4, #1 - cbnz w4, 1b -2: - ret -ENDPROC(__save_vgic_v2_state) - -/* - * Restore the VGIC CPU state from memory - * x0: Register pointing to VCPU struct - */ -ENTRY(__restore_vgic_v2_state) -__restore_vgic_v2_state: - /* Get VGIC VCTRL base into x2 */ - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr x2, [x2, #KVM_VGIC_VCTRL] - kern_hyp_va x2 - cbz x2, 2f // disabled - - /* Compute the address of struct vgic_cpu */ - add x3, x0, #VCPU_VGIC_CPU - - /* We only restore a minimal set of registers */ - ldr w4, [x3, #VGIC_V2_CPU_HCR] - ldr w5, [x3, #VGIC_V2_CPU_VMCR] - ldr w6, [x3, #VGIC_V2_CPU_APR] -CPU_BE( rev w4, w4 ) -CPU_BE( rev w5, w5 ) -CPU_BE( rev w6, w6 ) - - str w4, [x2, #GICH_HCR] - str w5, [x2, #GICH_VMCR] - str w6, [x2, #GICH_APR] - - /* Restore list registers */ - add x2, x2, #GICH_LR0 - ldr w4, [x3, #VGIC_CPU_NR_LR] - add x3, x3, #VGIC_V2_CPU_LR -1: ldr w5, [x3], #4 -CPU_BE( rev w5, w5 ) - str w5, [x2], #4 - sub w4, w4, #1 - cbnz w4, 1b -2: - ret -ENDPROC(__restore_vgic_v2_state) - - .popsection diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S deleted file mode 100644 index 3c20730ddff53c019998ff98672534ff0a45b1cc..0000000000000000000000000000000000000000 --- a/arch/arm64/kvm/vgic-v3-switch.S +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include - -#include -#include -#include -#include -#include -#include - - .text - .pushsection .hyp.text, "ax" - -/* - * We store LRs in reverse order to let the CPU deal with streaming - * access. Use this macro to make it look saner... - */ -#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8) - -/* - * Save the VGIC CPU state into memory - * x0: Register pointing to VCPU struct - * Do not corrupt x1!!! - */ -.macro save_vgic_v3_state - // Compute the address of struct vgic_cpu - add x3, x0, #VCPU_VGIC_CPU - - // Make sure stores to the GIC via the memory mapped interface - // are now visible to the system register interface - dsb st - - // Save all interesting registers - mrs_s x5, ICH_VMCR_EL2 - mrs_s x6, ICH_MISR_EL2 - mrs_s x7, ICH_EISR_EL2 - mrs_s x8, ICH_ELSR_EL2 - - str w5, [x3, #VGIC_V3_CPU_VMCR] - str w6, [x3, #VGIC_V3_CPU_MISR] - str w7, [x3, #VGIC_V3_CPU_EISR] - str w8, [x3, #VGIC_V3_CPU_ELRSR] - - msr_s ICH_HCR_EL2, xzr - - mrs_s x21, ICH_VTR_EL2 - mvn w22, w21 - ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - mrs_s x20, ICH_LR15_EL2 - mrs_s x19, ICH_LR14_EL2 - mrs_s x18, ICH_LR13_EL2 - mrs_s x17, ICH_LR12_EL2 - mrs_s x16, ICH_LR11_EL2 - mrs_s x15, ICH_LR10_EL2 - mrs_s x14, ICH_LR9_EL2 - mrs_s x13, ICH_LR8_EL2 - mrs_s x12, ICH_LR7_EL2 - mrs_s x11, ICH_LR6_EL2 - mrs_s x10, ICH_LR5_EL2 - mrs_s x9, ICH_LR4_EL2 - mrs_s x8, ICH_LR3_EL2 - mrs_s x7, ICH_LR2_EL2 - mrs_s x6, ICH_LR1_EL2 - mrs_s x5, ICH_LR0_EL2 - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - str x20, [x3, #LR_OFFSET(15)] - str x19, [x3, #LR_OFFSET(14)] - str x18, [x3, #LR_OFFSET(13)] - str x17, [x3, #LR_OFFSET(12)] - str x16, [x3, #LR_OFFSET(11)] - str x15, [x3, #LR_OFFSET(10)] - str x14, [x3, #LR_OFFSET(9)] - str x13, [x3, #LR_OFFSET(8)] - str x12, [x3, #LR_OFFSET(7)] - str x11, [x3, #LR_OFFSET(6)] - str x10, [x3, #LR_OFFSET(5)] - str x9, [x3, #LR_OFFSET(4)] - str x8, [x3, #LR_OFFSET(3)] - str x7, [x3, 
#LR_OFFSET(2)] - str x6, [x3, #LR_OFFSET(1)] - str x5, [x3, #LR_OFFSET(0)] - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - mrs_s x20, ICH_AP0R3_EL2 - str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] - mrs_s x19, ICH_AP0R2_EL2 - str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] -6: mrs_s x18, ICH_AP0R1_EL2 - str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] -5: mrs_s x17, ICH_AP0R0_EL2 - str w17, [x3, #VGIC_V3_CPU_AP0R] - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - mrs_s x20, ICH_AP1R3_EL2 - str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] - mrs_s x19, ICH_AP1R2_EL2 - str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] -6: mrs_s x18, ICH_AP1R1_EL2 - str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] -5: mrs_s x17, ICH_AP1R0_EL2 - str w17, [x3, #VGIC_V3_CPU_AP1R] - - // Restore SRE_EL1 access and re-enable SRE at EL1. - mrs_s x5, ICC_SRE_EL2 - orr x5, x5, #ICC_SRE_EL2_ENABLE - msr_s ICC_SRE_EL2, x5 - isb - mov x5, #1 - msr_s ICC_SRE_EL1, x5 -.endm - -/* - * Restore the VGIC CPU state from memory - * x0: Register pointing to VCPU struct - */ -.macro restore_vgic_v3_state - // Compute the address of struct vgic_cpu - add x3, x0, #VCPU_VGIC_CPU - - // Restore all interesting registers - ldr w4, [x3, #VGIC_V3_CPU_HCR] - ldr w5, [x3, #VGIC_V3_CPU_VMCR] - ldr w25, [x3, #VGIC_V3_CPU_SRE] - - msr_s ICC_SRE_EL1, x25 - - // make sure SRE is valid before writing the other registers - isb - - msr_s ICH_HCR_EL2, x4 - msr_s ICH_VMCR_EL2, x5 - - mrs_s x21, ICH_VTR_EL2 - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] - msr_s ICH_AP1R3_EL2, x20 - ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] - msr_s ICH_AP1R2_EL2, x19 -6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] - msr_s ICH_AP1R1_EL2, x18 -5: ldr w17, [x3, #VGIC_V3_CPU_AP1R] - msr_s ICH_AP1R0_EL2, x17 - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] - msr_s ICH_AP0R3_EL2, x20 - ldr w19, [x3, 
#(VGIC_V3_CPU_AP0R + 2*4)] - msr_s ICH_AP0R2_EL2, x19 -6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] - msr_s ICH_AP0R1_EL2, x18 -5: ldr w17, [x3, #VGIC_V3_CPU_AP0R] - msr_s ICH_AP0R0_EL2, x17 - - and w22, w21, #0xf - mvn w22, w21 - ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - ldr x20, [x3, #LR_OFFSET(15)] - ldr x19, [x3, #LR_OFFSET(14)] - ldr x18, [x3, #LR_OFFSET(13)] - ldr x17, [x3, #LR_OFFSET(12)] - ldr x16, [x3, #LR_OFFSET(11)] - ldr x15, [x3, #LR_OFFSET(10)] - ldr x14, [x3, #LR_OFFSET(9)] - ldr x13, [x3, #LR_OFFSET(8)] - ldr x12, [x3, #LR_OFFSET(7)] - ldr x11, [x3, #LR_OFFSET(6)] - ldr x10, [x3, #LR_OFFSET(5)] - ldr x9, [x3, #LR_OFFSET(4)] - ldr x8, [x3, #LR_OFFSET(3)] - ldr x7, [x3, #LR_OFFSET(2)] - ldr x6, [x3, #LR_OFFSET(1)] - ldr x5, [x3, #LR_OFFSET(0)] - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - msr_s ICH_LR15_EL2, x20 - msr_s ICH_LR14_EL2, x19 - msr_s ICH_LR13_EL2, x18 - msr_s ICH_LR12_EL2, x17 - msr_s ICH_LR11_EL2, x16 - msr_s ICH_LR10_EL2, x15 - msr_s ICH_LR9_EL2, x14 - msr_s ICH_LR8_EL2, x13 - msr_s ICH_LR7_EL2, x12 - msr_s ICH_LR6_EL2, x11 - msr_s ICH_LR5_EL2, x10 - msr_s ICH_LR4_EL2, x9 - msr_s ICH_LR3_EL2, x8 - msr_s ICH_LR2_EL2, x7 - msr_s ICH_LR1_EL2, x6 - msr_s ICH_LR0_EL2, x5 - - // Ensure that the above will have reached the - // (re)distributors. This ensure the guest will read - // the correct values from the memory-mapped interface. 
- isb - dsb sy - - // Prevent the guest from touching the GIC system registers - // if SRE isn't enabled for GICv3 emulation - cbnz x25, 1f - mrs_s x5, ICC_SRE_EL2 - and x5, x5, #~ICC_SRE_EL2_ENABLE - msr_s ICC_SRE_EL2, x5 -1: -.endm - -ENTRY(__save_vgic_v3_state) - save_vgic_v3_state - ret -ENDPROC(__save_vgic_v3_state) - -ENTRY(__restore_vgic_v3_state) - restore_vgic_v3_state - ret -ENDPROC(__restore_vgic_v3_state) - -ENTRY(__vgic_v3_get_ich_vtr_el2) - mrs_s x0, ICH_VTR_EL2 - ret -ENDPROC(__vgic_v3_get_ich_vtr_el2) - - .popsection diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 5d1cad3ce6d601aa474ae9c9b8ef4c76a785912e..08b5f18ba604f99461f1c879bbecef8cdb1f53c5 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -17,10 +17,10 @@ */ #include -#include #include #include #include +#include .text @@ -33,8 +33,7 @@ * Alignment fixed up by hardware. */ ENTRY(__clear_user) -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_enable_not_uao x2, x3 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -54,8 +53,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_disable_not_uao x2 ret ENDPROC(__clear_user) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 0b90497d4424c59d0a9ce2dcf5642012f452d3c8..6505ec81f1da18ac52427db04821259135c7fbd2 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -16,11 +16,11 @@ #include -#include #include #include #include #include +#include /* * Copy from user space to a kernel buffer (alignment handled by the hardware) @@ -67,12 +67,10 @@ end .req x5 ENTRY(__arch_copy_from_user) -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + 
uaccess_enable_not_uao x3, x4 add end, x0, x2 #include "copy_template.S" -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_disable_not_uao x3 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index f7292dd08c840f27d39874fe7cc08aa89bdfb66d..9b04ff3ab6101d29ef8b6be48453260f9d3ffced 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -18,11 +18,11 @@ #include -#include #include #include #include #include +#include /* * Copy from user space to user space (alignment handled by the hardware) @@ -68,12 +68,10 @@ end .req x5 ENTRY(__copy_in_user) -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_enable_not_uao x3, x4 add end, x0, x2 #include "copy_template.S" -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_disable_not_uao x3 mov x0, #0 ret ENDPROC(__copy_in_user) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 7a7efe25503452bdfe8e4108b5f4aa0ad9495da5..8077e4f34d56b7f5b2b9a1ed9c5b53f23f573547 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -16,11 +16,11 @@ #include -#include #include #include #include #include +#include /* * Copy to user space from a kernel buffer (alignment handled by the hardware) @@ -66,12 +66,10 @@ end .req x5 ENTRY(__arch_copy_to_user) -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_enable_not_uao x3, x4 add end, x0, x2 #include "copy_template.S" -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ - CONFIG_ARM64_PAN) + uaccess_disable_not_uao x3 mov x0, #0 ret ENDPROC(__arch_copy_to_user) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 
2f06997dcd59b7cbe8dd2293f78aeca8682ce8e4..6c2401aeb5ce69923fbe75d4c0803eb1da2d2d73 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -24,8 +24,6 @@ #include #include -#include "proc-macros.S" - /* * __flush_dcache_all() * @@ -127,7 +125,7 @@ ENTRY(__flush_cache_user_range) sub x3, x2, #1 bic x4, x0, x3 1: -USER(9f, dc cvau, x4 ) // clean D line to PoU +user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE add x4, x4, x2 cmp x4, x1 b.lo 1b diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 7275628ba59f663489f6f9403d46ca8a5050c6f7..25128089c386b72298d913105f67c9ddd3da3892 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -182,7 +182,12 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: - cpu_switch_mm(mm->pgd, mm); + /* + * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when + * emulating PAN. + */ + if (!system_uses_ttbr0_pan()) + cpu_switch_mm(mm->pgd, mm); } static int asids_init(void) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index a65dec4b001b34fd865940f389aeddd29866927a..c3efb77d1229955778f07ec2fe434f714c433baf 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -29,7 +29,9 @@ #include #include #include +#include +#include #include #include #include @@ -45,6 +47,28 @@ static const char *fault_name(unsigned int esr); +#ifdef CONFIG_KPROBES +static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) +{ + int ret = 0; + + /* kprobe_running() needs smp_processor_id() */ + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, esr)) + ret = 1; + preempt_enable(); + } + + return ret; +} +#else +static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) +{ + return 0; +} +#endif + /* * Dump out the page tables associated with 'addr' in mm 'mm'. 
*/ @@ -135,6 +159,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma, } #endif +static bool is_el1_instruction_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; +} + /* * The kernel tried to access some page that wasn't present. */ @@ -143,8 +172,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, { /* * Are we prepared to handle this kernel fault? + * We are almost certainly not prepared to handle instruction faults. */ - if (fixup_exception(regs)) + if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) return; /* @@ -208,8 +238,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -#define ESR_LNX_EXEC (1 << 24) - static int __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, struct task_struct *tsk) @@ -248,12 +276,24 @@ out: return fault; } -static inline int permission_fault(unsigned int esr) +static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs) { - unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT; + unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; - return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM); + if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR) + return false; + + if (system_uses_ttbr0_pan()) + return fsc_type == ESR_ELx_FSC_FAULT && + (regs->pstate & PSR_PAN_BIT); + else + return fsc_type == ESR_ELx_FSC_PERM; +} + +static bool is_el0_instruction_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; } static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, @@ -265,6 +305,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + if (notify_page_fault(regs, 
esr)) + return 0; + tsk = current; mm = tsk->mm; @@ -282,7 +325,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, if (user_mode(regs)) mm_flags |= FAULT_FLAG_USER; - if (esr & ESR_LNX_EXEC) { + if (is_el0_instruction_abort(esr)) { vm_flags = VM_EXEC; } else if (((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) || ((esr & ESR_ELx_CM) && !(mm_flags & FAULT_FLAG_USER))) { @@ -290,11 +333,14 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, mm_flags |= FAULT_FLAG_WRITE; } - if (permission_fault(esr) && (addr < USER_DS)) { + if (addr < USER_DS && is_permission_fault(esr, regs)) { /* regs->orig_addr_limit may be 0 if we entered from EL0 */ if (regs->orig_addr_limit == KERNEL_DS) die("Accessing user space memory with fs=KERNEL_DS", regs, esr); + if (is_el1_instruction_abort(esr)) + die("Attempting to execute userspace memory", regs, esr); + if (!search_exception_tables(regs->pc)) die("Accessing user space memory outside uaccess.h routines", regs, esr); } @@ -460,7 +506,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 1; } -static struct fault_info { +static const struct fault_info { int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); int sig; int code; @@ -486,10 +532,10 @@ static struct fault_info { { do_bad, SIGBUS, 0, "unknown 17" }, { do_bad, SIGBUS, 0, "unknown 18" }, { do_bad, SIGBUS, 0, "unknown 19" }, - { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, - { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, - { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, - { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous 
external abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error" }, { do_bad, SIGBUS, 0, "unknown 25" }, { do_bad, SIGBUS, 0, "unknown 26" }, @@ -639,11 +685,20 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, return 0; } +NOKPROBE_SYMBOL(do_debug_exception); #ifdef CONFIG_ARM64_PAN -void cpu_enable_pan(void *__unused) +int cpu_enable_pan(void *__unused) { + /* + * We modify PSTATE. This won't work from irq context as the PSTATE + * is discarded once we return from the exception. + */ + WARN_ON_ONCE(in_interrupt()); + config_sctlr_el1(SCTLR_EL1_SPAN, 0); + asm(SET_PSTATE_PAN(1)); + return 0; } #endif /* CONFIG_ARM64_PAN */ @@ -654,7 +709,7 @@ void cpu_enable_pan(void *__unused) * We need to enable the feature at runtime (instead of adding it to * PSR_MODE_EL1h) as the feature may not be implemented by the cpu. */ -void cpu_enable_uao(void *__unused) +int cpu_enable_uao(void *__unused) { asm(SET_PSTATE_UAO(1)); } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9b9b13ba603dcc86b51693c7b3060413ba45359a..b4ef9ea679a12341ec2b0eead55c5898d3ec8415 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -131,7 +131,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) int pfn_valid(unsigned long pfn) { - return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT); + return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT); } EXPORT_SYMBOL(pfn_valid); #endif @@ -192,8 +192,12 @@ void __init arm64_memblock_init(void) */ memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)), ULLONG_MAX); - if (memblock_end_of_DRAM() > linear_region_size) - memblock_remove(0, memblock_end_of_DRAM() - linear_region_size); + if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) { + /* ensure that memstart_addr remains sufficiently aligned */ + memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, + ARM64_MEMSTART_ALIGN); + 
memblock_remove(0, memstart_addr); + } /* * Apply the memory limit if it was set. Since the kernel may be loaded diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c3a3deb76ae2adbc145f3e14a8b664dbba0e84e3..6c444d9683231c87d617861c9f9505941f90e489 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -444,6 +444,8 @@ static void __init map_mem(pgd_t *pgd) if (start >= end) break; + if (memblock_is_nomap(reg)) + continue; __map_memblock(pgd, start, end); } diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S deleted file mode 100644 index 984edcda1850f1be420f92be7666b36c687cc476..0000000000000000000000000000000000000000 --- a/arch/arm64/mm/proc-macros.S +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Based on arch/arm/mm/proc-macros.S - * - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -/* - * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) - */ - .macro vma_vm_mm, rd, rn - ldr \rd, [\rn, #VMA_VM_MM] - .endm - -/* - * mmid - get context id from mm pointer (mm->context.id) - */ - .macro mmid, rd, rn - ldr \rd, [\rn, #MM_CONTEXT_ID] - .endm - -/* - * dcache_line_size - get the minimum D-cache line size from the CTR register. 
- */ - .macro dcache_line_size, reg, tmp - mrs \tmp, ctr_el0 // read CTR - ubfm \tmp, \tmp, #16, #19 // cache line size encoding - mov \reg, #4 // bytes per word - lsl \reg, \reg, \tmp // actual cache line size - .endm - -/* - * icache_line_size - get the minimum I-cache line size from the CTR register. - */ - .macro icache_line_size, reg, tmp - mrs \tmp, ctr_el0 // read CTR - and \tmp, \tmp, #0xf // cache line size encoding - mov \reg, #4 // bytes per word - lsl \reg, \reg, \tmp // actual cache line size - .endm - -/* - * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map - */ - .macro tcr_set_idmap_t0sz, valreg, tmpreg -#ifndef CONFIG_ARM64_VA_BITS_48 - ldr_l \tmpreg, idmap_t0sz - bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH -#endif - .endm - -/* - * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present - */ - .macro reset_pmuserenr_el0, tmpreg - mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer - sbfx \tmpreg, \tmpreg, #8, #4 - cmp \tmpreg, #1 // Skip if no PMU present - b.lt 9000f - msr pmuserenr_el0, xzr // Disable PMU access from EL0 -9000: - .endm - -/* - * Macro to perform a data cache maintenance for the interval - * [kaddr, kaddr + size) - * - * op: operation passed to dc instruction - * domain: domain used in dsb instruciton - * kaddr: starting virtual address of the region - * size: size of the region - * Corrupts: kaddr, size, tmp1, tmp2 - */ - .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 - dcache_line_size \tmp1, \tmp2 - add \size, \kaddr, \size - sub \tmp2, \tmp1, #1 - bic \kaddr, \kaddr, \tmp2 -9998: dc \op, \kaddr - add \kaddr, \kaddr, \tmp1 - cmp \kaddr, \size - b.lo 9998b - dsb \domain - .endm diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index ec968a0769198b6618f7c64a6471eb6f1a45306e..81a0de4e457d284a5990bcab4531298ceb2048b3 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -23,13 +23,11 @@ #include #include #include -#include #include +#include #include 
#include -#include "proc-macros.S" - #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K #elif defined(CONFIG_ARM64_16K_PAGES) @@ -112,62 +110,50 @@ ENTRY(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 - mrs x5, mair_el1 - mrs x6, cpacr_el1 - mrs x7, ttbr1_el1 - mrs x8, tcr_el1 - mrs x9, vbar_el1 - mrs x10, mdscr_el1 - mrs x11, oslsr_el1 - mrs x12, sctlr_el1 + mrs x5, cpacr_el1 + mrs x6, tcr_el1 + mrs x7, vbar_el1 + mrs x8, mdscr_el1 + mrs x9, oslsr_el1 + mrs x10, sctlr_el1 stp x2, x3, [x0] - stp x4, x5, [x0, #16] - stp x6, x7, [x0, #32] - stp x8, x9, [x0, #48] - stp x10, x11, [x0, #64] - str x12, [x0, #80] + stp x4, xzr, [x0, #16] + stp x5, x6, [x0, #32] + stp x7, x8, [x0, #48] + stp x9, x10, [x0, #64] ret ENDPROC(cpu_do_suspend) /** * cpu_do_resume - restore CPU register context * - * x0: Physical address of context pointer - * x1: ttbr0_el1 to be restored - * - * Returns: - * sctlr_el1 value in x0 + * x0: Address of context pointer */ ENTRY(cpu_do_resume) - /* - * Invalidate local tlb entries before turning on MMU - */ - tlbi vmalle1 ldp x2, x3, [x0] ldp x4, x5, [x0, #16] - ldp x6, x7, [x0, #32] - ldp x8, x9, [x0, #48] - ldp x10, x11, [x0, #64] - ldr x12, [x0, #80] + ldp x6, x8, [x0, #32] + ldp x9, x10, [x0, #48] + ldp x11, x12, [x0, #64] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 - msr mair_el1, x5 msr cpacr_el1, x6 - msr ttbr0_el1, x1 - msr ttbr1_el1, x7 - tcr_set_idmap_t0sz x8, x7 + + /* Don't change t0sz here, mask those bits when restoring */ + mrs x5, tcr_el1 + bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + msr tcr_el1, x8 msr vbar_el1, x9 msr mdscr_el1, x10 + msr sctlr_el1, x12 /* * Restore oslsr_el1 by writing oslar_el1 */ ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 - mov x0, x12 - dsb nsh // Make sure local tlb invalidation completed isb ret ENDPROC(cpu_do_resume) @@ -185,17 +171,8 @@ ENTRY(cpu_do_switch_mm) bfi x0, x1, #48, #16 // 
set the ASID msr ttbr0_el1, x0 // set TTBR0 isb -alternative_if_not ARM64_WORKAROUND_CAVIUM_27456 - ret - nop - nop - nop -alternative_else - ic iallu - dsb nsh - isb + post_ttbr0_update_workaround ret -alternative_endif ENDPROC(cpu_do_switch_mm) .pushsection ".idmap.text", "ax" diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 8bbe9401f4f011d3239adcd2d6f5251d5fe37ff0..6d6e4af1a4bfb2e6354a242c0638a813d3f173d0 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -49,6 +49,7 @@ #include #include +#include #include @@ -89,6 +90,24 @@ ENTRY(privcmd_call) mov x2, x3 mov x3, x4 mov x4, x5 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Privcmd calls are issued by the userspace. The kernel needs to + * enable access to TTBR0_EL1 as the hypervisor would issue stage 1 + * translations to user memory via AT instructions. Since AT + * instructions are not affected by the PAN bit (ARMv8.1), we only + * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation + * is enabled (it implies that hardware UAO and PAN disabled). + */ + uaccess_enable_not_uao x6, x7 +#endif hvc XEN_IMM + +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + /* + * Disable userspace access from kernel once the hyp call completed. 
+ */ + uaccess_disable_not_uao x6 +#endif ret ENDPROC(privcmd_call); diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 68cf638faf4867aef2d92b91f9ab48770100f132..b1ec1fa064632258ac1a4bb50b3b22416bd43c0f 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from, extern __kernel_size_t copy_to_user(void __user *to, const void *from, __kernel_size_t n); -extern __kernel_size_t copy_from_user(void *to, const void __user *from, +extern __kernel_size_t ___copy_from_user(void *to, const void __user *from, __kernel_size_t n); static inline __kernel_size_t __copy_to_user(void __user *to, const void *from, @@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to, { return __copy_user(to, (const void __force *)from, n); } +static inline __kernel_size_t copy_from_user(void *to, + const void __user *from, + __kernel_size_t n) +{ + size_t res = ___copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; +} #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c index d93ead02daeda4635e772268b328dc652f1d86df..7c6cf14f09854aacd6f31cafedc39a88d451d18a 100644 --- a/arch/avr32/kernel/avr32_ksyms.c +++ b/arch/avr32/kernel/avr32_ksyms.c @@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page); /* * Userspace access stuff. 
*/ -EXPORT_SYMBOL(copy_from_user); +EXPORT_SYMBOL(___copy_from_user); EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(strncpy_from_user); diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S index ea59c04b07de8e3a09ed486b8c48a9ffe25ad5c1..075373471da11d011871ab4681e7ded58732ddfb 100644 --- a/arch/avr32/lib/copy_user.S +++ b/arch/avr32/lib/copy_user.S @@ -23,13 +23,13 @@ */ .text .align 1 - .global copy_from_user - .type copy_from_user, @function -copy_from_user: + .global ___copy_from_user + .type ___copy_from_user, @function +___copy_from_user: branch_if_kernel r8, __copy_user ret_if_privileged r8, r11, r10, r10 rjmp __copy_user - .size copy_from_user, . - copy_from_user + .size ___copy_from_user, . - ___copy_from_user .global copy_to_user .type copy_to_user, @function diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index 4f61378c3453792af416dae65ffa1e9a8c70f4ff..456128174b1794e7b2806d4f7ed135a481f9b8ac 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c @@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev) struct resource *regs; struct pio_device *pio; - if (pdev->id > MAX_NR_PIO_DEVICES) { + if (pdev->id >= MAX_NR_PIO_DEVICES) { dev_err(&pdev->dev, "only %d PIO devices supported\n", MAX_NR_PIO_DEVICES); return; diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 90612a7f2cf32f0872af2651b608cd56eb0fff26..8cd0184ea9efbd517f97d8b0fcd90659ee66c0fe 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -177,11 +177,12 @@ static inline int bad_user_access_length(void) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) + if (likely(access_ok(VERIFY_READ, from, n))) { memcpy(to, (const void __force *)from, n); - else - return n; - return 0; + return 0; + } + memset(to, 0, n); + return 
n; } static inline unsigned long __must_check diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index c6db52ba3a06653e8b6a22a277c607d82c69de46..10c57771822d5d8f25eed0ccc64ceacfb2dca5ca 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = { #include static struct smc91x_platdata smc91x_info = { - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | + SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 2de71e8c104b1e2233c7eba47c42ad28d2ab60c9..93c22468cc141a6ab302b1c9f7c54a51704c422b 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = { #include static struct smc91x_platdata smc91x_info = { - .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, + .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | + SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index e3530d0f13ee74e9c4b2bdfbaf10e226499af2e9..56c7d5750abd66c0b75e7af2d6f0738a18236b6e 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); extern unsigned long __do_clear_user(void __user *to, unsigned long n); -static inline unsigned long -__generic_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to, from, n); - return n; -} - -static inline unsigned long 
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (access_ok(VERIFY_READ, from, n)) - return __copy_user_zeroing(to, from, n); - return n; -} - -static inline unsigned long -__generic_clear_user(void __user *to, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - return __do_clear_user(to, n); - return n; -} - static inline long __strncpy_from_user(char *dst, const char __user *src, long count) { @@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) else if (n == 24) __asm_copy_from_user_24(to, from, ret); else - ret = __generic_copy_from_user(to, from, n); + ret = __copy_user_zeroing(to, from, n); return ret; } @@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) else if (n == 24) __asm_copy_to_user_24(to, from, ret); else - ret = __generic_copy_to_user(to, from, n); + ret = __copy_user(to, from, n); return ret; } @@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n) else if (n == 24) __asm_clear_24(to, ret); else - ret = __generic_clear_user(to, n); + ret = __do_clear_user(to, n); return ret; } -#define clear_user(to, n) \ - (__builtin_constant_p(n) ? \ - __constant_clear_user(to, n) : \ - __generic_clear_user(to, n)) +static inline size_t clear_user(void __user *to, size_t n) +{ + if (unlikely(!access_ok(VERIFY_WRITE, to, n))) + return n; + if (__builtin_constant_p(n)) + return __constant_clear_user(to, n); + else + return __do_clear_user(to, n); +} -#define copy_from_user(to, from, n) \ - (__builtin_constant_p(n) ? 
\ - __constant_copy_from_user(to, from, n) : \ - __generic_copy_from_user(to, from, n)) +static inline size_t copy_from_user(void *to, const void __user *from, size_t n) +{ + if (unlikely(!access_ok(VERIFY_READ, from, n))) { + memset(to, 0, n); + return n; + } + if (__builtin_constant_p(n)) + return __constant_copy_from_user(to, from, n); + else + return __copy_user_zeroing(to, from, n); +} -#define copy_to_user(to, from, n) \ - (__builtin_constant_p(n) ? \ - __constant_copy_to_user(to, from, n) : \ - __generic_copy_to_user(to, from, n)) +static inline size_t copy_to_user(void __user *to, const void *from, size_t n) +{ + if (unlikely(!access_ok(VERIFY_WRITE, to, n))) + return n; + if (__builtin_constant_p(n)) + return __constant_copy_to_user(to, from, n); + else + return __copy_user(to, from, n); +} /* We let the __ versions of copy_from/to_user inline, because they're often * used in fast paths and have only a small space overhead. diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 3ac9a59d65d4129142d06ba9516455e0c5dc96db..87d9e34c5df866072ced6de336ea07e3755853eb 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -263,19 +263,25 @@ do { \ extern long __memset_user(void *dst, unsigned long count); extern long __memcpy_user(void *dst, const void *src, unsigned long count); -#define clear_user(dst,count) __memset_user(____force(dst), (count)) +#define __clear_user(dst,count) __memset_user(____force(dst), (count)) #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n)) #define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n)) #else -#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) +#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0) #define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0) 
#endif -#define __clear_user clear_user +static inline unsigned long __must_check +clear_user(void __user *to, unsigned long n) +{ + if (likely(__access_ok(to, n))) + n = __clear_user(to, n); + return n; +} static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h index b408fe660cf8ceab685de783220a2db991b4dfc3..3cef06875f5ca32930c06b07db027e86f362ce94 100644 --- a/arch/h8300/include/asm/thread_info.h +++ b/arch/h8300/include/asm/thread_info.h @@ -31,7 +31,6 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; - struct restart_block restart_block; }; /* @@ -44,9 +43,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 380fffd081b2d3c8c4fb962aca8bc0efbd5783bb..036ad04edd2d8aa2d72c5be62d8ce0752ccc9d6a 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0) unsigned int er0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* restore passed registers */ #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0) diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index f000a382bc7f62f28dfe980ab184904c70681914..f61cfb28e9f283800f968c2953705851da429e68 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, { long res 
= __strnlen_user(src, n); - /* return from strnlen can't be zero -- that would be rubbish. */ + if (unlikely(!res)) + return -EFAULT; if (res > n) { copy_from_user(dst, src, n); diff --git a/arch/ia64/include/asm/early_ioremap.h b/arch/ia64/include/asm/early_ioremap.h new file mode 100644 index 0000000000000000000000000000000000000000..eec9e1d1b833cb3cc7ec728522b8bc45d3d952ba --- /dev/null +++ b/arch/ia64/include/asm/early_ioremap.h @@ -0,0 +1,10 @@ +#ifndef _ASM_IA64_EARLY_IOREMAP_H +#define _ASM_IA64_EARLY_IOREMAP_H + +extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); +#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) + +extern void early_iounmap (volatile void __iomem *addr, unsigned long size); +#define early_memunmap(addr, size) early_iounmap(addr, size) + +#endif diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 8fdb9c7eeb6641c7c538116f48bd69fec5ce2930..5de673ac9cb13602dc6e8cc31d9a78b38448bbf5 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -20,6 +20,7 @@ */ #include +#include /* We don't use IO slowdowns on the ia64, but.. 
*/ #define __SLOW_DOWN_IO do { } while (0) @@ -427,10 +428,6 @@ __writeq (unsigned long val, volatile void __iomem *addr) extern void __iomem * ioremap(unsigned long offset, unsigned long size); extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap (volatile void __iomem *addr); -extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); -#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) -extern void early_iounmap (volatile void __iomem *addr, unsigned long size); -#define early_memunmap(addr, size) early_iounmap(addr, size) static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) { return ioremap(phys_addr, size); diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 3d6b840c5c99aeba08852ab70ce40d1a56a65c0c..71e8c6f45a1d2e233e0978d677d5b4f2106882e1 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -241,8 +241,7 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use static inline unsigned long __copy_to_user (void __user *to, const void *from, unsigned long count) { - if (!__builtin_constant_p(count)) - check_object_size(from, count, true); + check_object_size(from, count, true); return __copy_user(to, (__force void __user *) from, count); } @@ -250,8 +249,7 @@ __copy_to_user (void __user *to, const void *from, unsigned long count) static inline unsigned long __copy_from_user (void *to, const void __user *from, unsigned long count) { - if (!__builtin_constant_p(count)) - check_object_size(to, count, false); + check_object_size(to, count, false); return __copy_user((__force void __user *) to, from, count); } @@ -265,27 +263,21 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) long __cu_len = (n); \ \ if (__access_ok(__cu_to, __cu_len, get_fs())) { \ - if (!__builtin_constant_p(n)) \ - check_object_size(__cu_from, __cu_len, 
true); \ + check_object_size(__cu_from, __cu_len, true); \ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ } \ __cu_len; \ }) -#define copy_from_user(to, from, n) \ -({ \ - void *__cu_to = (to); \ - const void __user *__cu_from = (from); \ - long __cu_len = (n); \ - \ - __chk_user_ptr(__cu_from); \ - if (__access_ok(__cu_from, __cu_len, get_fs())) { \ - if (!__builtin_constant_p(n)) \ - check_object_size(__cu_to, __cu_len, false); \ - __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ - } \ - __cu_len; \ -}) +static inline unsigned long +copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (likely(__access_ok(from, n, get_fs()))) + n = __copy_user((__force void __user *) to, from, n); + else + memset(to, 0, n); + return n; +} #define __copy_in_user(to, from, size) __copy_user((to), (from), (size)) diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index cac7014daef3aa6949d17a72824a757dfdbee5b4..6f8982157a75849b491c8461c0c3b34d4c7c4088 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs); #define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err = 0; \ - unsigned long __gu_val; \ + unsigned long __gu_val = 0; \ might_fault(); \ __get_user_size(__gu_val, (ptr), (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h index 470e365f04ea4ee3f4503c060e8dacbd1e9dd0da..8ff0a70865f65cc9f9f71d9b9f126dd7fb820793 100644 --- a/arch/metag/include/asm/atomic.h +++ b/arch/metag/include/asm/atomic.h @@ -39,11 +39,10 @@ #define atomic_dec(v) atomic_sub(1, (v)) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) #endif -#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) - #include #endif /* 
__ASM_METAG_ATOMIC_H */ diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 8282cbce7e399a84488e675af0751341d24f0205..273e61225c277ae67ba28dfae4ef9123e3ef34a9 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to, static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) + if (likely(access_ok(VERIFY_READ, from, n))) return __copy_user_zeroing(to, from, n); + memset(to, 0, n); return n; } diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 331b0d35f89ce301ad9ba876be7322f417d7c909..826676778094f23a8ced8264b5c448184c0fa006 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -227,7 +227,7 @@ extern long __user_bad(void); #define __get_user(x, ptr) \ ({ \ - unsigned long __gu_val; \ + unsigned long __gu_val = 0; \ /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \ long __gu_err; \ switch (sizeof(*(ptr))) { \ @@ -373,10 +373,13 @@ extern long __user_bad(void); static inline long copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long res = n; might_fault(); - if (access_ok(VERIFY_READ, from, n)) - return __copy_from_user(to, from, n); - return n; + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } #define __copy_to_user(to, from, n) \ diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index f0e314ceb8baa84d36ea6664772f71df46ad003f..7f975b20b20c713e6225d14e3984013f57fe5026 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -113,42 +113,6 @@ config SPINLOCK_TEST help Add several files to the debugfs to test spinlock speed. 
-if CPU_MIPSR6 - -choice - prompt "Compact branch policy" - default MIPS_COMPACT_BRANCHES_OPTIMAL - -config MIPS_COMPACT_BRANCHES_NEVER - bool "Never (force delay slot branches)" - help - Pass the -mcompact-branches=never flag to the compiler in order to - force it to always emit branches with delay slots, and make no use - of the compact branch instructions introduced by MIPSr6. This is - useful if you suspect there may be an issue with compact branches in - either the compiler or the CPU. - -config MIPS_COMPACT_BRANCHES_OPTIMAL - bool "Optimal (use where beneficial)" - help - Pass the -mcompact-branches=optimal flag to the compiler in order for - it to make use of compact branch instructions where it deems them - beneficial, and use branches with delay slots elsewhere. This is the - default compiler behaviour, and should be used unless you have a - reason to choose otherwise. - -config MIPS_COMPACT_BRANCHES_ALWAYS - bool "Always (force compact branches)" - help - Pass the -mcompact-branches=always flag to the compiler in order to - force it to always emit compact branches, making no use of branch - instructions with delay slots. This can result in more compact code - which may be beneficial in some scenarios. 
- -endchoice - -endif # CPU_MIPSR6 - config SCACHE_DEBUGFS bool "L2 cache debugfs entries" depends on DEBUG_FS diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 3f70ba54ae21e909973ad9913eb71151fcc50492..252e347958f383354a597323078512846f592d52 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$( cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA endif -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always - # # Firmware support # diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index e689b894353c451b806b6429f3242db0486ff22a..8dedee1def8398a14cc2059568278940842906de 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -135,6 +135,7 @@ ldc1 $f28, THREAD_FPR28(\thread) ldc1 $f30, THREAD_FPR30(\thread) ctc1 \tmp, fcr31 + .set pop .endm .macro fpu_restore_16odd thread diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index dd7cee7957097009ab08f14841a4f220e756bc3d..c8c04a1f1c9f97686db1b77026ade2604ce9db92 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -400,7 +400,10 @@ struct kvm_vcpu_arch { /* Host KSEG0 address of the EI/DI offset */ void *kseg0_commpage; - u32 io_gpr; /* GPR used as IO source/target */ + /* Resume PC after MMIO completion */ + unsigned long io_pc; + /* GPR used as IO source/target */ + u32 io_gpr; struct hrtimer comparecount_timer; /* Count timer control KVM register */ @@ -422,8 +425,6 @@ struct kvm_vcpu_arch { /* Bitmask of pending exceptions to be cleared */ unsigned long pending_exceptions_clr; - unsigned long pending_load_cause; - /* Save/Restore the entryhi register when are are preempted/scheduled back in */ unsigned long 
preempt_entryhi; diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h index 2f82bfa3a77347155a1526dde873387d01dcf138..c9f5769dfc8fca9d10c1ce4fe12ff62d3c4c8c66 100644 --- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h @@ -11,11 +11,13 @@ #define CP0_EBASE $15, 1 .macro kernel_entry_setup +#ifdef CONFIG_SMP mfc0 t0, CP0_EBASE andi t0, t0, 0x3ff # CPUNum beqz t0, 1f # CPUs other than zero goto smp_bootstrap j smp_bootstrap +#endif /* CONFIG_SMP */ 1: .endm diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index f6fc6aac54963fcebcf7a55e094ec32689923ea7..b6578611dddbfc37e9a7bea36886affe93d87411 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs) static inline long regs_return_value(struct pt_regs *regs) { - if (is_syscall_success(regs)) + if (is_syscall_success(regs) || !user_mode(regs)) return regs->regs[2]; else return -regs->regs[2]; diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 095ecafe6bd3525444c27812937e2d085bf69b2a..c74c32ccc647200ec7941bac172a8ab1dfbe1236 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -14,6 +14,7 @@ #include #include #include +#include #include /* @@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len = __invoke_copy_from_user(__cu_to, \ __cu_from, \ __cu_len); \ + } else { \ + memset(__cu_to, 0, __cu_len); \ } \ } \ __cu_len; \ diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h index 34c325c674c445306667ece3d7c422a51ff686c4..70a4a2f173ffca76cb05d078190e875ed6a06d95 100644 --- a/arch/mips/include/asm/uprobes.h +++ b/arch/mips/include/asm/uprobes.h @@ -36,7 +36,6 @@ struct arch_uprobe { unsigned long resume_epc; 
u32 insn[2]; u32 ixol[2]; - union mips_instruction orig_inst[MAX_UINSN_BYTES / 4]; }; struct arch_uprobe_task { diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index 4674a74a08b5491783ffcd83f4dad8a947e61233..af27334d6809b7a7d2e9918da1ed7a28575c53c7 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -1164,7 +1164,9 @@ fpu_emul: regs->regs[31] = r31; regs->cp0_epc = epc; if (!used_math()) { /* First time FPU user. */ + preempt_disable(); err = init_fpu(); + preempt_enable(); set_used_math(); } lose_fpu(1); /* Save FPU state for the emulator. */ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 89847bee2b53538eba13f74a89b43ae8e9aec43b..44a6f25e902eed401ee227ce79ab2ffa64d76ab3 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) return -EOPNOTSUPP; /* Avoid inadvertently triggering emulation */ - if ((value & PR_FP_MODE_FR) && cpu_has_fpu && - !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && + !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) return -EOPNOTSUPP; - if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) + if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre) return -EOPNOTSUPP; /* FR = 0 not supported in MIPS R6 */ - if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) + if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6) return -EOPNOTSUPP; /* Proceed with the mode switch */ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 2b521e07b8601a2c80e888b3064009779effa78b..7fef02a9eb85c7827284a490fd862dcf653d2a83 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -174,6 +174,9 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); + cpumask_set_cpu(cpu, 
&cpu_callin_map); + synchronise_count_slave(cpu); + set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); @@ -181,10 +184,6 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); - cpumask_set_cpu(cpu, &cpu_callin_map); - - synchronise_count_slave(cpu); - /* * irq will be enabled in ->smp_finish(), enabling it too early * is dangerous. diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c index 8452d933a6453c9a1f8655d56a3e0812605a6a12..4e7b89f2e244ef00f0290892b09aba98b6827f28 100644 --- a/arch/mips/kernel/uprobes.c +++ b/arch/mips/kernel/uprobes.c @@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn) int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; - union mips_instruction insn; /* * Now find the EPC where to resume after the breakpoint has been @@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) unsigned long epc; epc = regs->cp0_epc; - __compute_return_epc_for_insn(regs, insn); + __compute_return_epc_for_insn(regs, + (union mips_instruction) aup->insn[0]); aup->resume_epc = regs->cp0_epc; } - utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; regs->cp0_epc = current->utask->xol_vaddr; @@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr( ra = regs->regs[31]; /* Replace the return address with the trampoline address */ - regs->regs[31] = ra; + regs->regs[31] = trampoline_vaddr; return ra; } @@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); } -/** - * set_orig_insn - Restore the original instruction. - * @mm: the probed process address space. - * @auprobe: arch specific probepoint information. - * @vaddr: the virtual address to insert the opcode. - * - * For mm @mm, restore the original opcode (opcode) at @vaddr. - * Return 0 (success) or a negative errno. 
- * - * This overrides the weak version in kernel/events/uprobes.c. - */ -int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, - unsigned long vaddr) -{ - return uprobe_write_opcode(mm, vaddr, - *(uprobe_opcode_t *)&auprobe->orig_inst[0].word); -} - void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 975e99759bab9ec1ae6aa80a47fa2f97765efcc6..5649a9e429e0a52861fb8d676f1a1103200b157a 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = { static void __init init_vdso_image(struct mips_vdso_image *image) { unsigned long num_pages, i; + unsigned long data_pfn; BUG_ON(!PAGE_ALIGNED(image->data)); BUG_ON(!PAGE_ALIGNED(image->size)); num_pages = image->size / PAGE_SIZE; - for (i = 0; i < num_pages; i++) { - image->mapping.pages[i] = - virt_to_page(image->data + (i * PAGE_SIZE)); - } + data_pfn = __phys_to_pfn(__pa_symbol(image->data)); + for (i = 0; i < num_pages; i++) + image->mapping.pages[i] = pfn_to_page(data_pfn + i); } static int __init init_vdso(void) diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d6476d11212e5b20a7f8332330c99445ff932ac8..4c85ab808f9935cb76434d54555ce16f7c192368 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -752,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; - if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { + if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { + kvm_clear_c0_guest_status(cop0, ST0_ERL); + vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); + } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, kvm_read_c0_guest_epc(cop0)); kvm_clear_c0_guest_status(cop0, ST0_EXL); vcpu->arch.pc = 
kvm_read_c0_guest_epc(cop0); - } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { - kvm_clear_c0_guest_status(cop0, ST0_ERL); - vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); } else { kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", vcpu->arch.pc); @@ -807,6 +807,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) return EMULATE_FAIL; } +/** + * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. + * @vcpu: VCPU with changed mappings. + * @tlb: TLB entry being removed. + * + * This is called to indicate a single change in guest MMU mappings, so that we + * can arrange TLB flushes on this and other CPUs. + */ +static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb) +{ + int cpu, i; + bool user; + + /* No need to flush for entries which are already invalid */ + if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V)) + return; + /* User address space doesn't need flushing for KSeg2/3 changes */ + user = tlb->tlb_hi < KVM_GUEST_KSEG0; + + preempt_disable(); + + /* + * Probe the shadow host TLB for the entry being overwritten, if one + * matches, invalidate it + */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + + /* Invalidate the whole ASID on other CPUs */ + cpu = smp_processor_id(); + for_each_possible_cpu(i) { + if (i == cpu) + continue; + if (user) + vcpu->arch.guest_user_asid[i] = 0; + vcpu->arch.guest_kernel_asid[i] = 0; + } + + preempt_enable(); +} + /* Write Guest TLB Entry @ Index */ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) { @@ -826,11 +867,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) } tlb = &vcpu->arch.guest_tlb[index]; - /* - * Probe the shadow host TLB for the entry being overwritten, if one - * matches, invalidate it - */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + + kvm_mips_invalidate_guest_tlb(vcpu, tlb); tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); @@ -859,11 +897,7 
@@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) tlb = &vcpu->arch.guest_tlb[index]; - /* - * Probe the shadow host TLB for the entry being overwritten, if one - * matches, invalidate it - */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + kvm_mips_invalidate_guest_tlb(vcpu, tlb); tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); @@ -982,6 +1016,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, int32_t rt, rd, copz, sel, co_bit, op; uint32_t pc = vcpu->arch.pc; unsigned long curr_pc; + int cpu, i; /* * Update PC and hold onto current PC in case there is @@ -1089,8 +1124,16 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, vcpu->arch.gprs[rt] & ASID_MASK); + preempt_disable(); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); + cpu = smp_processor_id(); + for_each_possible_cpu(i) + if (i != cpu) { + vcpu->arch.guest_user_asid[i] = 0; + vcpu->arch.guest_kernel_asid[i] = 0; + } + preempt_enable(); } kvm_write_c0_guest_entryhi(cop0, vcpu->arch.gprs[rt]); @@ -1430,6 +1473,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; + unsigned long curr_pc; int32_t op, base, rt, offset; uint32_t bytes; @@ -1438,7 +1482,18 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, offset = inst & 0xffff; op = (inst >> 26) & 0x3f; - vcpu->arch.pending_load_cause = cause; + /* + * Find the resume PC now while we have safe and easy access to the + * prior branch instruction, and save it for + * kvm_mips_complete_mmio_load() to restore later. 
+ */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + vcpu->arch.io_pc = vcpu->arch.pc; + vcpu->arch.pc = curr_pc; + vcpu->arch.io_gpr = rt; switch (op) { @@ -2418,9 +2473,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, goto done; } - er = update_pc(vcpu, vcpu->arch.pending_load_cause); - if (er == EMULATE_FAIL) - return er; + /* Restore saved resume PC */ + vcpu->arch.pc = vcpu->arch.io_pc; switch (run->mmio.len) { case 4: @@ -2442,11 +2496,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, break; } - if (vcpu->arch.pending_load_cause & CAUSEF_BD) - kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", - vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, - vcpu->mmio_needed); - done: return er; } diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 7a7ed9ca01bbcb86fe2b0cb7265bcfe888e3b011..eff71c75dc2703b4dacf3691d85d785967f3f913 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -152,7 +152,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) srcu_idx = srcu_read_lock(&kvm->srcu); pfn = kvm_mips_gfn_to_pfn(kvm, gfn); - if (kvm_mips_is_error_pfn(pfn)) { + if (is_error_noslot_pfn(pfn)) { kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); err = -EFAULT; goto out; diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index 4740c82fb97a14e95118d05590b66b3192e9f1f5..36b09b2ea9729144c19ebd69a734b28a8ad2d45d 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -39,6 +39,9 @@ #include #endif +#define ROCIT_CONFIG_GEN0 0x1f403000 +#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7) + extern void malta_be_init(void); extern int malta_be_handler(struct pt_regs *regs, int is_fixup); @@ -107,6 +110,8 @@ static void __init fd_activate(void) static int __init plat_enable_iocoherency(void) { int supported = 0; + u32 cfg; + if (mips_revision_sconid == 
MIPS_REVISION_SCON_BONITO) { if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; @@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void) } else if (mips_cm_numiocu() != 0) { /* Nothing special needs to be done to enable coherency */ pr_info("CMP IOCU detected\n"); - if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { + cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); + if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) { pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); return 0; } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 090393aa0f20d576a3bc87f64ed672998da828b4..6c7d78546eeeb9248e6ba8a007dcbc1d15a983ca 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -75,7 +75,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o) $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi) $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi) -$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi) +$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi) $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 537278746a1534cead71e487aea7f13533e60b97..4af43d9ba4950dc7ec7d04051d276f66d35ceeac 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; }; "2:\n" \ " .section .fixup,\"ax\"\n" \ "3:\n\t" \ + " mov 0,%1\n" \ " mov %3,%0\n" \ " jmp 2b\n" \ " .previous\n" \ diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c index 7826e6c364e74f0075e43081e5db367062baef30..ce8899e5e171370ebc88c08a91727ef9232febf9 100644 --- a/arch/mn10300/lib/usercopy.c +++ b/arch/mn10300/lib/usercopy.c @@ -9,7 +9,7 @@ * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later 
version. */ -#include +#include unsigned long __generic_copy_to_user(void *to, const void *from, unsigned long n) @@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) __copy_user_zeroing(to, from, n); + else + memset(to, 0, n); return n; } diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index caa51ff85a3c7d0ac7c57cf034758185aad64724..0ab82324c8174559ddd478aa0a2dd171b652b8c0 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n); static inline long copy_from_user(void *to, const void __user *from, unsigned long n) { - if (!access_ok(VERIFY_READ, from, n)) - return n; - return __copy_from_user(to, from, n); + unsigned long res = n; + if (access_ok(VERIFY_READ, from, n)) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } static inline long copy_to_user(void __user *to, const void *from, @@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n); #define __get_user_unknown(val, size, ptr, err) do { \ err = 0; \ - if (copy_from_user(&(val), ptr, size)) { \ + if (__copy_from_user(&(val), ptr, size)) { \ err = -EFAULT; \ } \ } while (0) @@ -166,7 +169,7 @@ do { \ ({ \ long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - unsigned long __gu_val; \ + unsigned long __gu_val = 0; \ __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\ (x) = (__force __typeof__(x))__gu_val; \ __gu_err; \ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index a6bd07ca3d6c08dc49af6ab4738fe85e1ddcd485..5cc6b4f1b79516a7a5882886b59884d03529df6d 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long 
size); static inline unsigned long copy_from_user(void *to, const void *from, unsigned long n) { - unsigned long over; - - if (access_ok(VERIFY_READ, from, n)) - return __copy_tofrom_user(to, from, n); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + n - TASK_SIZE; - return __copy_tofrom_user(to, from, n - over) + over; - } - return n; + unsigned long res = n; + + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_tofrom_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } static inline unsigned long copy_to_user(void *to, const void *from, unsigned long n) { - unsigned long over; - - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_tofrom_user(to, from, n); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + n - TASK_SIZE; - return __copy_tofrom_user(to, from, n - over) + over; - } + if (likely(access_ok(VERIFY_WRITE, to, n))) + n = __copy_tofrom_user(to, from, n); return n; } @@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size); static inline __must_check unsigned long clear_user(void *addr, unsigned long size) { - - if (access_ok(VERIFY_WRITE, addr, size)) - return __clear_user(addr, size); - if ((unsigned long)addr < TASK_SIZE) { - unsigned long over = (unsigned long)addr + size - TASK_SIZE; - return __clear_user(addr, size - over) + over; - } + if (likely(access_ok(VERIFY_WRITE, addr, size))) + size = __clear_user(addr, size); return size; } diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 291cee28ccb60da84576ba1d9e103a0dc6e78ba5..c2c43f71468498828f924ebc54b9aa3f189ba95e 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) /* This is the size of the initially mapped kernel memory */ -#ifdef 
CONFIG_64BIT -#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ +#if defined(CONFIG_64BIT) +#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */ #else -#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */ +#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ #endif #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 6f893d29f1b21625aadc4464d06ffba30b7697f3..30eed2d6d8a8d7d2225b28b2e33f75c2dd0fbf52 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -10,6 +10,7 @@ #include #include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -246,13 +247,14 @@ static inline unsigned long __must_check copy_from_user(void *to, unsigned long n) { int sz = __compiletime_object_size(to); - int ret = -EFAULT; + unsigned long ret = n; if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) ret = __copy_from_user(to, from, n); else copy_from_user_overflow(); - + if (unlikely(ret)) + memset(to + (n - ret), 0, ret); return ret; } diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index cda6dbbe98426c4bd77a7c0ff31f0525b06fc2ee..fd5979f28ada3bdf272621e8a41ad99208386f9b 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -351,6 +351,7 @@ void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; unsigned long size, start; + unsigned long threshold; alltime = mfctl(16); flush_data_cache(); @@ -364,17 +365,12 @@ void __init parisc_setup_cache_timing(void) printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", alltime, size, rangetime); - /* Racy, but if we see an intermediate value, it's ok too... 
*/ - parisc_cache_flush_threshold = size * alltime / rangetime; - - parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); - if (!parisc_cache_flush_threshold) - parisc_cache_flush_threshold = FLUSH_THRESHOLD; - - if (parisc_cache_flush_threshold > cache_info.dc_size) - parisc_cache_flush_threshold = cache_info.dc_size; - - printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", + threshold = L1_CACHE_ALIGN(size * alltime / rangetime); + if (threshold > cache_info.dc_size) + threshold = cache_info.dc_size; + if (threshold) + parisc_cache_flush_threshold = threshold; + printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", parisc_cache_flush_threshold/1024); /* calculate TLB flush threshold */ @@ -383,7 +379,7 @@ void __init parisc_setup_cache_timing(void) flush_tlb_all(); alltime = mfctl(16) - alltime; - size = PAGE_SIZE; + size = 0; start = (unsigned long) _text; rangetime = mfctl(16); while (start < (unsigned long) _end) { @@ -396,13 +392,10 @@ void __init parisc_setup_cache_timing(void) printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", alltime, size, rangetime); - parisc_tlb_flush_threshold = size * alltime / rangetime; - parisc_tlb_flush_threshold *= num_online_cpus(); - parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); - if (!parisc_tlb_flush_threshold) - parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; - - printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", + threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); + if (threshold) + parisc_tlb_flush_threshold = threshold; + printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); } diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index b743a80eaba0311e934eda70c76e7acbf8ae6a78..6755219192295cc366d2259d0fc8fe8ff88ab76d 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */ 
fitmanymiddle: /* Loop if LOOP >= 2 */ addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ - pitlbe 0(%sr1, %r28) + pitlbe %r0(%sr1, %r28) pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ copy %arg3, %r31 /* Re-init inner loop count */ @@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */ fdtmanymiddle: /* Loop if LOOP >= 2 */ addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ - pdtlbe 0(%sr1, %r28) + pdtlbe %r0(%sr1, %r28) pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ copy %arg3, %r31 /* Re-init inner loop count */ @@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm) /* Purge any old translations */ #ifdef CONFIG_PA20 - pdtlb,l 0(%r28) - pdtlb,l 0(%r29) + pdtlb,l %r0(%r28) + pdtlb,l %r0(%r29) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r28) - pdtlb 0(%r29) + pdtlb %r0(%r28) + pdtlb %r0(%r29) tlb_unlock %r20,%r21,%r22 #endif @@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm) /* Purge any old translation */ #ifdef CONFIG_PA20 - pdtlb,l 0(%r28) + pdtlb,l %r0(%r28) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r28) + pdtlb %r0(%r28) tlb_unlock %r20,%r21,%r22 #endif @@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm) /* Purge any old translation */ #ifdef CONFIG_PA20 - pdtlb,l 0(%r28) + pdtlb,l %r0(%r28) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r28) + pdtlb %r0(%r28) tlb_unlock %r20,%r21,%r22 #endif @@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm) sync #ifdef CONFIG_PA20 - pdtlb,l 0(%r25) + pdtlb,l %r0(%r25) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r25) + pdtlb %r0(%r25) tlb_unlock %r20,%r21,%r22 #endif @@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm) depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ #endif - /* Purge any old translation */ + /* Purge any old translation. Note that the FIC instruction + * may use either the instruction or data TLB. 
Given that we + * have a flat address space, it's not clear which TLB will be + * used. So, we purge both entries. */ #ifdef CONFIG_PA20 + pdtlb,l %r0(%r28) pitlb,l %r0(%sr4,%r28) #else tlb_lock %r20,%r21,%r22 - pitlb (%sr4,%r28) + pdtlb %r0(%r28) + pitlb %r0(%sr4,%r28) tlb_unlock %r20,%r21,%r22 #endif @@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm) sync #ifdef CONFIG_PA20 + pdtlb,l %r0(%r28) pitlb,l %r0(%sr4,%r25) #else tlb_lock %r20,%r21,%r22 - pitlb (%sr4,%r25) + pdtlb %r0(%r28) + pitlb %r0(%sr4,%r25) tlb_unlock %r20,%r21,%r22 #endif diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index b9402c9b34545e81c42b894cd9e1532bac66fccf..af0d7fae7aa7c61582b9dc3b59a48842e5df8395 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte, if (!pte_none(*pte)) printk(KERN_ERR "map_pte_uncached: page already exists\n"); - set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); purge_tlb_start(flags); + set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); pdtlb_kernel(orig_vaddr); purge_tlb_end(flags); vaddr += PAGE_SIZE; diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index f7ea626e29c9b3bce80d28062cbf802ab2313785..2e66a887788e8781bf76b13cbd3fed02e6979259 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include /* for pa7300lc_init() proto */ @@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p) #endif printk(KERN_CONT ".\n"); + /* + * Check if initial kernel page mappings are sufficient. + * panic early if not, else we may access kernel functions + * and variables which can't be reached. + */ + if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE) + panic("KERNEL_INITIAL_ORDER too small!"); pdc_console_init(); @@ -326,6 +334,10 @@ static int __init parisc_init(void) /* tell PDC we're Linux. Nevermind failure. 
*/ pdc_stable_write(0x40, &osid, sizeof(osid)); + /* start with known state */ + flush_cache_all_local(); + flush_tlb_all_local(NULL); + processor_init(); #ifdef CONFIG_SMP pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index fbafa0d0e2bf865db726d15f9c4beb4817afe86c..a86b19fccb63bada4d94cc6e4615c19b919743c7 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -106,8 +106,6 @@ linux_gateway_entry: mtsp %r0,%sr4 /* get kernel space into sr4 */ mtsp %r0,%sr5 /* get kernel space into sr5 */ mtsp %r0,%sr6 /* get kernel space into sr6 */ - mfsp %sr7,%r1 /* save user sr7 */ - mtsp %r1,%sr3 /* and store it in sr3 */ #ifdef CONFIG_64BIT /* for now we can *always* set the W bit on entry to the syscall @@ -133,6 +131,14 @@ linux_gateway_entry: depdi 0, 31, 32, %r21 1: #endif + + /* We use a rsm/ssm pair to prevent sr3 from being clobbered + * by external interrupts. + */ + mfsp %sr7,%r1 /* save user sr7 */ + rsm PSW_SM_I, %r0 /* disable interrupts */ + mtsp %r1,%sr3 /* and store it in sr3 */ + mfctl %cr30,%r1 xor %r1,%r30,%r30 /* ye olde xor trick */ xor %r1,%r30,%r1 @@ -147,6 +153,7 @@ linux_gateway_entry: */ mtsp %r0,%sr7 /* get kernel space into sr7 */ + ssm PSW_SM_I, %r0 /* enable interrupts */ STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ mfctl %cr30,%r1 /* get task ptr in %r1 */ LDREG TI_TASK(%r1),%r1 diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 308f29081d461b0a9b0a0d8eec0d54e0a1936ff3..60771df10fdea4e7ae3a45646a1615fe02823082 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -88,8 +88,9 @@ SECTIONS /* Start of data section */ _sdata = .; - RO_DATA_SECTION(8) - + /* Architecturally we need to keep __gp below 0x1000000 and thus + * in front of RO_DATA_SECTION() which stores lots of tracepoint + * and ftrace symbols. */ #ifdef CONFIG_64BIT . 
= ALIGN(16); /* Linkage tables */ @@ -104,6 +105,8 @@ SECTIONS } #endif + RO_DATA_SECTION(8) + /* unwind info */ .PARISC.unwind : { __start___unwind = .; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 070fa85520517943087f8bdc237aa9fefa29bdb7..627d129d7fcbb299b341fe6df5a3e269bcff8358 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -708,6 +708,7 @@ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 #define SPRN_MMCR2 785 +#define SPRN_UMMCR2 769 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index b39a6937005776470de5c6ee3b4b3a26d4a954cc..db71448b9bb92455cb6b93343b2604f5eb78ebdb 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -323,40 +323,21 @@ extern unsigned long __copy_tofrom_user(void __user *to, static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - unsigned long over; - - if (access_ok(VERIFY_READ, from, n)) { - if (!__builtin_constant_p(n)) - check_object_size(to, n, false); + if (likely(access_ok(VERIFY_READ, from, n))) { + check_object_size(to, n, false); return __copy_tofrom_user((__force void __user *)to, from, n); } - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + n - TASK_SIZE; - if (!__builtin_constant_p(n - over)) - check_object_size(to, n - over, false); - return __copy_tofrom_user((__force void __user *)to, from, - n - over) + over; - } + memset(to, 0, n); return n; } static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { - unsigned long over; - if (access_ok(VERIFY_WRITE, to, n)) { - if (!__builtin_constant_p(n)) - check_object_size(from, n, true); + check_object_size(from, n, true); return __copy_tofrom_user(to, 
(__force void __user *)from, n); } - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + n - TASK_SIZE; - if (!__builtin_constant_p(n)) - check_object_size(from, n - over, true); - return __copy_tofrom_user(to, (__force void __user *)from, - n - over) + over; - } return n; } @@ -398,8 +379,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to, return 0; } - if (!__builtin_constant_p(n)) - check_object_size(to, n, false); + check_object_size(to, n, false); return __copy_tofrom_user((__force void __user *)to, from, n); } @@ -427,8 +407,8 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, if (ret == 0) return 0; } - if (!__builtin_constant_p(n)) - check_object_size(from, n, true); + + check_object_size(from, n, true); return __copy_tofrom_user(to, (__force const void __user *)from, n); } @@ -454,10 +434,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) might_fault(); if (likely(access_ok(VERIFY_WRITE, addr, size))) return __clear_user(addr, size); - if ((unsigned long)addr < TASK_SIZE) { - unsigned long over = (unsigned long)addr + size - TASK_SIZE; - return __clear_user(addr, size - over) + over; - } return size; } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 247a0dc012f10795a82b059274b4e7e1b8969ae3..c07bfb52275e27e307356cf58e48e0ed0db27c72 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -909,6 +909,14 @@ static void eeh_handle_special_event(void) /* Notify all devices to be down */ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); bus = eeh_pe_bus_get(phb_pe); + if (!bus) { + pr_err("%s: Cannot find PCI bus for " + "PHB#%d-PE#%x\n", + __func__, + pe->phb->global_number, + pe->addr); + break; + } eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); pcibios_remove_pci_devices(bus); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 
a94f155db78e83d67fca2e967ba43585f18aeda8..edba294620db292a662cd04520e328802c5020c6 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -334,13 +334,13 @@ syscall_exit_work: tabort_syscall: /* Firstly we need to enable TM in the kernel */ mfmsr r10 - li r13, 1 - rldimi r10, r13, MSR_TM_LG, 63-MSR_TM_LG + li r9, 1 + rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG mtmsrd r10, 0 /* tabort, this dooms the transaction, nothing else */ - li r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) - TABORT(R13) + li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) + TABORT(R9) /* * Return directly to userspace. We have corrupted user register state, @@ -348,8 +348,8 @@ tabort_syscall: * resume after the tbegin of the aborted transaction with the * checkpointed register state. */ - li r13, MSR_RI - andc r10, r10, r13 + li r9, MSR_RI + andc r10, r10, r9 mtmsrd r10, 1 mtspr SPRN_SRR0, r11 mtspr SPRN_SRR1, r12 diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 32e26526f7e4e0fc5d22adda109bb075a044ae70..1eb698f653b4c0e060963f1d8308c359cd2f4a06 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -969,7 +969,7 @@ int __init nvram_remove_partition(const char *name, int sig, /* Make partition a free partition */ part->header.signature = NVRAM_SIG_FREE; - strncpy(part->header.name, "wwwwwwwwwwww", 12); + memset(part->header.name, 'w', 12); part->header.checksum = nvram_checksum(&part->header); rc = nvram_write_header(part); if (rc <= 0) { @@ -987,8 +987,8 @@ int __init nvram_remove_partition(const char *name, int sig, } if (prev) { prev->header.length += part->header.length; - prev->header.checksum = nvram_checksum(&part->header); - rc = nvram_write_header(part); + prev->header.checksum = nvram_checksum(&prev->header); + rc = nvram_write_header(prev); if (rc <= 0) { printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); return rc; diff --git a/arch/powerpc/kernel/prom_init.c 
b/arch/powerpc/kernel/prom_init.c index b7e86e00048fafe86f3a6122fb3e8959c7ec187f..7b89e7b305e6b15fb51cfe5cf601dec100e83b7a 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -694,7 +694,7 @@ unsigned char ibm_architecture_vec[] = { OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ /* option vector 5: PAPR/OF options */ - VECTOR_LENGTH(18), /* length */ + VECTOR_LENGTH(21), /* length */ 0, /* don't ignore, don't halt */ OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | @@ -725,8 +725,11 @@ unsigned char ibm_architecture_vec[] = { 0, 0, OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | - OV5_FEAT(OV5_PFO_HW_842), - OV5_FEAT(OV5_SUB_PROCESSORS), + OV5_FEAT(OV5_PFO_HW_842), /* Byte 17 */ + 0, /* Byte 18 */ + 0, /* Byte 19 */ + 0, /* Byte 20 */ + OV5_FEAT(OV5_SUB_PROCESSORS), /* Byte 21 */ /* option vector 6: IBM PAPR hints */ VECTOR_LENGTH(3), /* length */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 737c0d0b53ac43dcdc41f09b0a3177cf8b4e389b..b38fd081b22235500b4b43cad1e11da9ea294e91 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -376,7 +376,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, #else BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != - offsetof(struct thread_fp_state, fpr[32][0])); + offsetof(struct thread_fp_state, fpr[32])); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fp_state, 0, -1); @@ -404,7 +404,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, return 0; #else BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != - offsetof(struct thread_fp_state, fpr[32][0])); + offsetof(struct thread_fp_state, fpr[32])); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fp_state, 0, -1); diff --git a/arch/powerpc/kernel/vdso64/datapage.S 
b/arch/powerpc/kernel/vdso64/datapage.S index 2f01c4a0d8a037ca65cacd9af51022846661fcb0..7612eeb31da1483dc63dcce312a0a9ebfd113b25 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) bl V_LOCAL_FUNC(__get_datapage) mtlr r12 addi r3,r3,CFG_SYSCALL_MAP64 - cmpli cr0,r4,0 + cmpldi cr0,r4,0 crclr cr0*4+so beqlr li r0,__NR_syscalls diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index a76b4af37ef297cdac6688785ed65d55d673bc66..3820213248836474c3db105f0a4fe2d0fad3d0d7 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) bne cr0,99f li r3,0 - cmpli cr0,r4,0 + cmpldi cr0,r4,0 crclr cr0*4+so beqlr lis r5,CLOCK_REALTIME_RES@h diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 2afdb9c0937dbd3b7471b88d75e17ca714bba1ae..729f8faa95c5794d270f4edffe375bdcf934a4da 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -498,6 +498,7 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: + case SPRN_UMMCR2: #endif break; unprivileged: @@ -640,6 +641,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: + case SPRN_UMMCR2: case SPRN_TIR: #endif *spr_val = 0; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index fd5875179e5c0e6738a1e7867a75f05cbe1fa0c8..6d63cd67b09b689a9cc2322613bfcadf74bafd3d 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -2033,7 +2033,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, if (type == KVMPPC_DEBUG_NONE) continue; - if (type & !(KVMPPC_DEBUG_WATCH_READ | + if (type & ~(KVMPPC_DEBUG_WATCH_READ | 
KVMPPC_DEBUG_WATCH_WRITE | KVMPPC_DEBUG_BREAKPOINT)) return -EINVAL; diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index f09899e35991711d0a57e74519af662b59e59f15..7b22624f332ce5e45dc5337d2d97b0c31c36a764 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) addi r3,r3,8 171: 177: +179: addi r3,r3,8 370: 372: @@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 173: 174: 175: -179: 181: 184: 186: diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 6527882ce05ede3a0a45f74d3a11c4c375da6514..ddfd2740a1b5d3cd643207fd5250a3515e810987 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) switch (REGION_ID(ea)) { case USER_REGION_ID: pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); + if (mm == NULL) + return 1; psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); vsid = get_vsid(mm->context.id, ea, ssize); diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 736d18b3cefd3bd16e2d9ab7c34b56bb7f5e8a61..4c48b487698cf4208618647d8038cc1a9d72e3aa 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -113,7 +113,12 @@ BEGIN_FTR_SECTION END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T -0: +0: /* + * For userspace addresses, make sure this is region 0. + */ + cmpdi r9, 0 + bne 8f + /* when using slices, we extract the psize off the slice bitmaps * and then we need to get the sllp encoding off the mmu_psize_defs * array. 
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 2ba602591a20412c95cd470c5b6fb129872c3f94..92736851c795afac23926700014a72ac0e6c44d1 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -956,6 +956,11 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) } bus = eeh_pe_bus_get(pe); + if (!bus) { + pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", + __func__, pe->phb->global_number, pe->addr); + return -EIO; + } if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent)) ret = pnv_eeh_root_reset(hose, option); @@ -1163,7 +1168,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose) return; } - switch (data->type) { + switch (be16_to_cpu(data->type)) { case OPAL_P7IOC_DIAG_TYPE_RGC: pr_info("P7IOC diag-data for RGC\n\n"); pnv_eeh_dump_hub_diag_common(data); @@ -1395,7 +1400,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) /* Try best to clear it */ opal_pci_eeh_freeze_clear(phb->opal_id, - frozen_pe_no, + be64_to_cpu(frozen_pe_no), OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); ret = EEH_NEXT_ERR_NONE; } else if ((*pe)->state & EEH_PE_ISOLATED || diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 2ee96431f7360e1d8d63ece45520416860c62428..4c827826c05eb13b09f4e0cd9f9c73d6967ec9a9 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c @@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data) uint32_t dump_id, dump_size, dump_type; struct dump_obj *dump; char name[22]; + struct kobject *kobj; rc = dump_read_info(&dump_id, &dump_size, &dump_type); if (rc != OPAL_SUCCESS) @@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data) * that gracefully and not create two conflicting * entries. 
*/ - if (kset_find_obj(dump_kset, name)) + kobj = kset_find_obj(dump_kset, name); + if (kobj) { + /* Drop reference added by kset_find_obj() */ + kobject_put(kobj); return 0; + } dump = create_dump_obj(dump_id, dump_size, dump_type); if (!dump) diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 37f959bf392e72a8a4bc7dee92181846b1383458..f2344cbd2f464cb93a3afbcbf21dc8263069b555 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data) uint64_t elog_type; int rc; char name[2+16+1]; + struct kobject *kobj; rc = opal_get_elog_size(&id, &size, &type); if (rc != OPAL_SUCCESS) { @@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data) * that gracefully and not create two conflicting * entries. */ - if (kset_find_obj(elog_kset, name)) + kobj = kset_find_obj(elog_kset, name); + if (kobj) { + /* Drop reference added by kset_find_obj() */ + kobject_put(kobj); return IRQ_HANDLED; + } create_elog_obj(log_id, elog_size, elog_type); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index ad8c3f4a5e0bec7a839b624ab99c5238f472f18f..dd5e0f3b1b5d00c8772003787d68ba030df53cea 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -197,8 +197,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, be64_to_cpu(data->dma1ErrorLog1)); for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { - if ((data->pestA[i] >> 63) == 0 && - (data->pestB[i] >> 63) == 0) + if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && + (be64_to_cpu(data->pestB[i]) >> 63) == 0) continue; pr_info("PE[%3d] A/B: %016llx %016llx\n", diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index b7a67e3d2201e4d5988e9cb3df651651c12024c8..3ae43282460e34edea9f26d054b4622f9b4e69bd 100644 --- 
a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -406,7 +406,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, unsigned long *vpn, int count, int psize, int ssize) { - unsigned long param[8]; + unsigned long param[PLPAR_HCALL9_BUFSIZE]; int i = 0, pix = 0, rc; unsigned long flags = 0; int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); @@ -523,7 +523,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) unsigned long flags = 0; struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); - unsigned long param[9]; + unsigned long param[PLPAR_HCALL9_BUFSIZE]; unsigned long hash, index, shift, hidx, slot; real_pte_t pte; int psize, ssize; diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 045035796ca7d59f196740078b4ae4c0f0243717..b63b9a42af70e3759113f315e955ff8f7fc2d783 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -525,11 +525,11 @@ static int diag224(void *ptr) static int diag224_get_name_table(void) { /* memory must be below 2GB */ - diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA); + diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA); if (!diag224_cpu_names) return -ENOMEM; if (diag224(diag224_cpu_names)) { - kfree(diag224_cpu_names); + free_page((unsigned long) diag224_cpu_names); return -EOPNOTSUPP; } EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); @@ -538,7 +538,7 @@ static int diag224_get_name_table(void) static void diag224_delete_name_table(void) { - kfree(diag224_cpu_names); + free_page((unsigned long) diag224_cpu_names); } static int diag224_idx2name(int index, char *name) diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index a2e6ef32e05445b190b444cb249db44f638e10d2..0a2031618f7fc619326c3d3e69f20dd71802c4dd 100644 --- a/arch/s390/include/asm/tlbflush.h +++ 
b/arch/s390/include/asm/tlbflush.h @@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm) } /* - * Flush TLB entries for a specific ASCE on all CPUs. + * Flush TLB entries for a specific ASCE on all CPUs. Should never be used + * when more than one asce (e.g. gmap) ran on this mm. */ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) { diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 9dd4cc47ddc79298886fe40a5e229628b2ceff3d..5c7381c5ad7f8a93082497c2776bcb66287efe60 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -215,28 +215,28 @@ int __put_user_bad(void) __attribute__((noreturn)); __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: { \ - unsigned char __x; \ + unsigned char __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ - unsigned short __x; \ + unsigned short __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ - unsigned int __x; \ + unsigned int __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ - unsigned long long __x; \ + unsigned long long __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 471a370a527bfd9eb5e9bd03e98a9e6c7eff4b96..8345ae1f117d0cfdf4cfcadc5a01c0ec4bfd7174 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc); static void gmap_flush_tlb(struct gmap *gmap) { if (MACHINE_HAS_IDTE) - __tlb_flush_asce(gmap->mm, gmap->asce); + __tlb_flush_idte(gmap->asce); else __tlb_flush_global(); } @@ -205,7 +205,7 @@ void gmap_free(struct gmap *gmap) /* Flush tlb. 
*/ if (MACHINE_HAS_IDTE) - __tlb_flush_asce(gmap->mm, gmap->asce); + __tlb_flush_idte(gmap->asce); else __tlb_flush_global(); diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index 20a3591225ccea9aa3d93acc11a97a34bf254501..01aec8ccde834119b0cf893c6d8ff4b81a41cb20 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -163,7 +163,7 @@ do { \ __get_user_asm(val, "lw", ptr); \ break; \ case 8: \ - if ((copy_from_user((void *)&val, ptr, 8)) == 0) \ + if (__copy_from_user((void *)&val, ptr, 8) == 0) \ __gu_err = 0; \ else \ __gu_err = -EFAULT; \ @@ -188,6 +188,8 @@ do { \ \ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ __get_user_common((x), size, __gu_ptr); \ + else \ + (x) = 0; \ \ __gu_err; \ }) @@ -201,6 +203,7 @@ do { \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:li %0, %4\n" \ + "li %1, 0\n" \ "j 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ @@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); static inline unsigned long copy_from_user(void *to, const void *from, unsigned long len) { - unsigned long over; + unsigned long res = len; - if (access_ok(VERIFY_READ, from, len)) - return __copy_tofrom_user(to, from, len); + if (likely(access_ok(VERIFY_READ, from, len))) + res = __copy_tofrom_user(to, from, len); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + len - TASK_SIZE; - return __copy_tofrom_user(to, from, len - over) + over; - } - return len; + if (unlikely(res)) + memset(to + (len - res), 0, res); + + return res; } static inline unsigned long copy_to_user(void *to, const void *from, unsigned long len) { - unsigned long over; - - if (access_ok(VERIFY_WRITE, to, len)) - return __copy_tofrom_user(to, from, len); + if (likely(access_ok(VERIFY_WRITE, to, len))) + len = __copy_tofrom_user(to, from, len); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + len - TASK_SIZE; - return __copy_tofrom_user(to, 
from, len - over) + over; - } return len; } -#define __copy_from_user(to, from, len) \ - __copy_tofrom_user((to), (from), (len)) +static inline unsigned long +__copy_from_user(void *to, const void *from, unsigned long len) +{ + unsigned long left = __copy_tofrom_user(to, from, len); + if (unlikely(left)) + memset(to + (len - left), 0, left); + return left; +} #define __copy_to_user(to, from, len) \ __copy_tofrom_user((to), (from), (len)) @@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len) static inline unsigned long __copy_from_user_inatomic(void *to, const void *from, unsigned long len) { - return __copy_from_user(to, from, len); + return __copy_tofrom_user(to, from, len); } -#define __copy_in_user(to, from, len) __copy_from_user(to, from, len) +#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len) static inline unsigned long copy_in_user(void *to, const void *from, unsigned long len) { if (access_ok(VERIFY_READ, from, len) && access_ok(VERFITY_WRITE, to, len)) - return copy_from_user(to, from, len); + return __copy_tofrom_user(to, from, len); } /* diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index a49635c512665ddc6b9e70e5d944c57e26d349d6..92ade79ac4272ea22657b3aac6adfb90b1d4105f 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) __kernel_size_t __copy_size = (__kernel_size_t) n; if (__copy_size && __access_ok(__copy_from, __copy_size)) - return __copy_user(to, from, __copy_size); + __copy_size = __copy_user(to, from, __copy_size); + + if (unlikely(__copy_size)) + memset(to + (n - __copy_size), 0, __copy_size); return __copy_size; } diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index c01376c76b868727b55bd117551cc1b96b93d8f1..ca5073dd459674d1ad835406fc46baa1957d01ac 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ 
b/arch/sh/include/asm/uaccess_64.h @@ -24,6 +24,7 @@ #define __get_user_size(x,ptr,size,retval) \ do { \ retval = 0; \ + x = 0; \ switch (size) { \ case 1: \ retval = __get_user_asm_b((void *)&x, \ diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 70067ce184b16a9d91de2737bf804dbb89e94307..f7de0dbc38af2dd36c9f34df53e6e951f6729825 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -92,7 +92,8 @@ struct tsb_config { typedef struct { spinlock_t lock; unsigned long sparc64_ctx_val; - unsigned long huge_pte_count; + unsigned long hugetlb_pte_count; + unsigned long thp_pte_count; struct tsb_config tsb_block[MM_NUM_TSBS]; struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; } mm_context_t; diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 4cfb77913cd2ef388c9c9bc316fc1eb014e7d2ca..e7d6bb4cd619b362aa8fd84c71ecc3d7a18b9eb3 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -314,8 +314,7 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { if (n && __access_ok((unsigned long) to, n)) { - if (!__builtin_constant_p(n)) - check_object_size(from, n, true); + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); } else return n; @@ -323,19 +322,19 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { - if (!__builtin_constant_p(n)) - check_object_size(from, n, true); + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); } static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { if (n && __access_ok((unsigned long) from, n)) { - if (!__builtin_constant_p(n)) - 
check_object_size(to, n, false); + check_object_size(to, n, false); return __copy_user((__force void __user *) to, from, n); - } else + else { + memset(to, 0, n); return n; + } } static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 6069e9040388330d81692d9cccfb529f1fbf8986..a6847fc05a6dc7fbb33d6ebcddb7bd7e116b37cc 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -98,7 +98,6 @@ struct exception_table_entry { unsigned int insn, fixup; }; -void __ret_efault(void); void __retl_efault(void); /* Uh, these should become the main single-value transfer routines.. @@ -179,20 +178,6 @@ int __put_user_bad(void); __gu_ret; \ }) -#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \ - register unsigned long __gu_val __asm__ ("l1"); \ - switch (size) { \ - case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ - case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ - case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \ - case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \ - default: \ - if (__get_user_bad()) \ - return retval; \ - } \ - data = (__force type) __gu_val; \ -}) - #define __get_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ "/* Get user asm, inline. */\n" \ @@ -214,87 +199,39 @@ __asm__ __volatile__( \ : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ "i" (-EFAULT)) -#define __get_user_asm_ret(x, size, addr, retval) \ -if (__builtin_constant_p(retval) && retval == -EFAULT) \ - __asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ - "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b,__ret_efault\n\n\t" \ - ".previous\n\t" \ - : "=r" (x) : "r" (__m(addr))); \ -else \ - __asm__ __volatile__( \ - "/* Get user asm ret, inline. 
*/\n" \ - "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ - "3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %2, %%o0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=r" (x) : "r" (__m(addr)), "i" (retval)) - int __get_user_bad(void); unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long size); -unsigned long copy_from_user_fixup(void *to, const void __user *from, - unsigned long size); static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long ret; + check_object_size(to, size, false); - if (!__builtin_constant_p(size)) - check_object_size(to, size, false); - - ret = ___copy_from_user(to, from, size); - if (unlikely(ret)) - ret = copy_from_user_fixup(to, from, size); - - return ret; + return ___copy_from_user(to, from, size); } #define __copy_from_user copy_from_user unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long size); -unsigned long copy_to_user_fixup(void __user *to, const void *from, - unsigned long size); static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long size) { - unsigned long ret; - - if (!__builtin_constant_p(size)) - check_object_size(from, size, true); - ret = ___copy_to_user(to, from, size); - if (unlikely(ret)) - ret = copy_to_user_fixup(to, from, size); - return ret; + check_object_size(from, size, true); + + return ___copy_to_user(to, from, size); } #define __copy_to_user copy_to_user unsigned long __must_check ___copy_in_user(void __user *to, const void __user *from, unsigned long size); -unsigned long copy_in_user_fixup(void __user *to, void __user *from, - unsigned long size); static inline unsigned long __must_check copy_in_user(void __user *to, void __user *from, unsigned long size) { - unsigned long ret = 
___copy_in_user(to, from, size); - - if (unlikely(ret)) - ret = copy_in_user_fixup(to, from, size); - return ret; + return ___copy_in_user(to, from, size); } #define __copy_in_user copy_in_user diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S index d668ca149e647ac832f88b3602134768d712ef80..4087a62f96b099f3b2d9e46b1b73ff49220da718 100644 --- a/arch/sparc/kernel/dtlb_prot.S +++ b/arch/sparc/kernel/dtlb_prot.S @@ -25,13 +25,13 @@ /* PROT ** ICACHE line 2: More real fault processing */ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5 + srlx %g5, PAGE_SHIFT, %g5 + sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault nop nop - nop - nop /* PROT ** ICACHE line 3: Unused... */ nop diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 51faf92ace001c38cad6db2ecfd7e762363fe23f..7eeeb1d5a41015968b2534ef30c92f79e9ce4d28 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -922,47 +922,11 @@ prom_tba: .xword 0 tlb_type: .word 0 /* Must NOT end up in BSS */ .section ".fixup",#alloc,#execinstr - .globl __ret_efault, __retl_efault, __ret_one, __retl_one -ENTRY(__ret_efault) - ret - restore %g0, -EFAULT, %o0 -ENDPROC(__ret_efault) - ENTRY(__retl_efault) retl mov -EFAULT, %o0 ENDPROC(__retl_efault) -ENTRY(__retl_one) - retl - mov 1, %o0 -ENDPROC(__retl_one) - -ENTRY(__retl_one_fp) - VISExitHalf - retl - mov 1, %o0 -ENDPROC(__retl_one_fp) - -ENTRY(__ret_one_asi) - wr %g0, ASI_AIUS, %asi - ret - restore %g0, 1, %o0 -ENDPROC(__ret_one_asi) - -ENTRY(__retl_one_asi) - wr %g0, ASI_AIUS, %asi - retl - mov 1, %o0 -ENDPROC(__retl_one_asi) - -ENTRY(__retl_one_asi_fp) - wr %g0, ASI_AIUS, %asi - VISExitHalf - retl - mov 1, %o0 -ENDPROC(__retl_one_asi_fp) - ENTRY(__retl_o1) retl mov %o1, %o0 diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 
59bbeff550243dc10ea558a013d09825303e4793..07933b9e9ce00a34fc3677ee614a24ce2c0532a5 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -13,19 +13,30 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - u32 val; u32 *insn = (u32 *) (unsigned long) entry->code; + u32 val; if (type == JUMP_LABEL_JMP) { s32 off = (s32)entry->target - (s32)entry->code; + bool use_v9_branch = false; + + BUG_ON(off & 3); #ifdef CONFIG_SPARC64 - /* ba,pt %xcc, . + (off << 2) */ - val = 0x10680000 | ((u32) off >> 2); -#else - /* ba . + (off << 2) */ - val = 0x10800000 | ((u32) off >> 2); + if (off <= 0xfffff && off >= -0x100000) + use_v9_branch = true; #endif + if (use_v9_branch) { + /* WDISP19 - target is . + immed << 2 */ + /* ba,pt %xcc, . + off */ + val = 0x10680000 | (((u32) off >> 2) & 0x7ffff); + } else { + /* WDISP22 - target is . + immed << 2 */ + BUG_ON(off > 0x7fffff); + BUG_ON(off < -0x800000); + /* ba . + off */ + val = 0x10800000 | (((u32) off >> 2) & 0x3fffff); + } } else { val = 0x01000000; } diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index ef0d8e9e1210e6dffbc184a8ce426c708a968062..f22bec0db64549ace3b68d645b8d23db87ec1b5b 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -20,6 +20,10 @@ kvmap_itlb: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_IMMU, %g4 + /* The kernel executes in context zero, therefore we do not + * need to clear the context ID bits out of %g4 here. + */ + /* sun4v_itlb_miss branches here with the missing virtual * address already loaded into %g4 */ @@ -128,6 +132,10 @@ kvmap_dtlb: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g4 + /* The kernel executes in context zero, therefore we do not + * need to clear the context ID bits out of %g4 here. 
+ */ + /* sun4v_dtlb_miss branches here with the missing virtual * address already loaded into %g4 */ @@ -251,6 +259,10 @@ kvmap_dtlb_longpath: nop .previous + /* The kernel executes in context zero, therefore we do not + * need to clear the context ID bits out of %g5 here. + */ + be,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB, %g4 ba,pt %xcc, winfix_trampoline diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index c3c12efe0bc004053fea6b49c2f34e51e1ffc32f..9c0c8fd0b2922cacd2ad6ad81f3238a707286d69 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv_and_exit; if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) @@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c index a92d5d2c46a3a6553bf13a57f95f65c6d61c334c..51b25325a9617f1e168a578a3516584948905914 100644 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ b/arch/sparc/kernel/sparc_ksyms_64.c @@ -27,7 +27,6 @@ EXPORT_SYMBOL(__flushw_user); EXPORT_SYMBOL_GPL(real_hard_smp_processor_id); /* from head_64.S */ -EXPORT_SYMBOL(__ret_efault); EXPORT_SYMBOL(tlb_type); EXPORT_SYMBOL(sun4v_chip_type); EXPORT_SYMBOL(prom_root_node); diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index be98685c14c62301250db79791269fb411ea9e0a..d568c8207af72ffbd15aae8e5f41f77401ba5397 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S 
@@ -29,13 +29,17 @@ */ tsb_miss_dtlb: mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g4 + srlx %g4, PAGE_SHIFT, %g4 ba,pt %xcc, tsb_miss_page_table_walk - ldxa [%g4] ASI_DMMU, %g4 + sllx %g4, PAGE_SHIFT, %g4 tsb_miss_itlb: mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_IMMU, %g4 + srlx %g4, PAGE_SHIFT, %g4 ba,pt %xcc, tsb_miss_page_table_walk - ldxa [%g4] ASI_IMMU, %g4 + sllx %g4, PAGE_SHIFT, %g4 /* At this point we have: * %g1 -- PAGE_SIZE TSB entry address @@ -284,6 +288,10 @@ tsb_do_dtlb_fault: nop .previous + /* Clear context ID bits. */ + srlx %g5, PAGE_SHIFT, %g5 + sllx %g5, PAGE_SHIFT, %g5 + be,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB, %g4 ba,pt %xcc, winfix_trampoline diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S index b7d0bd6b14063bc1e62cfed3c0e32f0b43799396..69a439fa2fc1ac809a969d6651e9b9e0f9be11b9 100644 --- a/arch/sparc/lib/GENcopy_from_user.S +++ b/arch/sparc/lib/GENcopy_from_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S index 780550e1afc74fd6efe38ef16f73a07ea021a8f2..9947427ce3549799b2e0c5592b3f972e516093ed 100644 --- a/arch/sparc/lib/GENcopy_to_user.S +++ b/arch/sparc/lib/GENcopy_to_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S index 89358ee948516cf2ad84c60d1e96e021434bcd32..059ea24ad73dcd91b81ef920f3bb18a1475dceb1 100644 --- a/arch/sparc/lib/GENmemcpy.S +++ b/arch/sparc/lib/GENmemcpy.S @@ -4,21 +4,18 @@ */ #ifdef __KERNEL__ +#include #define GLOBAL_SPARE %g7 #else #define GLOBAL_SPARE %g5 #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST(x,y) x #endif #ifndef LOAD @@ -45,6 +42,29 @@ .register %g3,#scratch .text + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +ENTRY(GEN_retl_o4_1) + add %o4, %o2, %o4 + retl + add %o4, 1, %o0 +ENDPROC(GEN_retl_o4_1) +ENTRY(GEN_retl_g1_8) + add %g1, %o2, %g1 + retl + add %g1, 8, %o0 +ENDPROC(GEN_retl_g1_8) +ENTRY(GEN_retl_o2_4) + retl + add %o2, 4, %o0 +ENDPROC(GEN_retl_o2_4) +ENTRY(GEN_retl_o2_1) + retl + add %o2, 1, %o0 +ENDPROC(GEN_retl_o2_1) +#endif + .align 64 .globl FUNC_NAME @@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %g0, %o4, %o4 sub %o2, %o4, %o2 1: subcc %o4, 1, %o4 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o0)) + EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1) + EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1) add %o1, 1, %o1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x7, %g1 sub %o2, %g1, %o2 1: subcc %g1, 0x8, %g1 - EX_LD(LOAD(ldx, %o1, %g2)) - EX_ST(STORE(stx, %g2, %o0)) + EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8) + EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8) add %o1, 0x8, %o1 bne,pt %XCC, 1b add %o0, 0x8, %o0 @@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 1: subcc %o2, 4, %o2 - EX_LD(LOAD(lduw, %o1, %g1)) - EX_ST(STORE(stw, %g1, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, 
%g1),GEN_retl_o2_4) + EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o1 + %o3)) + EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1) + EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 3269b0234093bfdbd6b6dcd22388cdc65a627d0a..4f2384a4286aace90bb52d573dac6d0e4fbe4867 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o -lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o +lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o obj-$(CONFIG_SPARC64) += iomap.o diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S index d5242b8c4f9495fe4241ee39de01255364e877af..b79a6998d87c82eaeb12f5c1c23ab8fcbc8b10de 100644 --- a/arch/sparc/lib/NG2copy_from_user.S +++ b/arch/sparc/lib/NG2copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S index 4e962d993b10cdff7677f8d51e61ad877901facc..dcec55f254ab214dc9aa959c4a575ed58f5a19d8 100644 --- a/arch/sparc/lib/NG2copy_to_user.S +++ b/arch/sparc/lib/NG2copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index d5f585df2f3fc345c87f04fb1a420fa766816cb5..c629dbd121b6e4fe64494c62bcad656747f05ef2 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include #include #include #define GLOBAL_SPARE %g7 @@ -32,21 +33,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST_FP(x,y) x #endif #ifndef LOAD @@ -140,45 +137,110 @@ fsrc2 %x6, %f12; \ fsrc2 %x7, %f14; #define FREG_LOAD_1(base, x0) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)) + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1) #define FREG_LOAD_2(base, x0, x1) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base 
+ 0x08, %x1)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); #define FREG_LOAD_3(base, x0, x1, x2) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); #define FREG_LOAD_4(base, x0, x1, x2, x3) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ - EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + 
EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ - EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \ - EX_LD_FP(LOAD(ldd, base + 0x30, %x6)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1); .register %g2,#scratch .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_fp: + VISExitHalf +__restore_asi: + retl + wr %g0, ASI_AIUS, %asi +ENTRY(NG2_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(NG2_retl_o2) +ENTRY(NG2_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(NG2_retl_o2_plus_1) +ENTRY(NG2_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(NG2_retl_o2_plus_4) +ENTRY(NG2_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(NG2_retl_o2_plus_8) +ENTRY(NG2_retl_o2_plus_o4_plus_1) + add %o4, 1, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_1) +ENTRY(NG2_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_8) +ENTRY(NG2_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 
+ENDPROC(NG2_retl_o2_plus_o4_plus_16) +ENTRY(NG2_retl_o2_plus_g1_fp) + ba,pt %xcc, __restore_fp + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_fp) +ENTRY(NG2_retl_o2_plus_g1_plus_64_fp) + add %g1, 64, %g1 + ba,pt %xcc, __restore_fp + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp) +ENTRY(NG2_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_plus_1) +ENTRY(NG2_retl_o2_and_7_plus_o4) + and %o2, 7, %o2 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_and_7_plus_o4) +ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8) + and %o2, 7, %o2 + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8) +#endif + .align 64 .globl FUNC_NAME @@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %g0, %o4, %o4 ! bytes to align dst sub %o2, %o4, %o2 1: subcc %o4, 1, %o4 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o0)) + EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1) + EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1) add %o1, 1, %o1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ nop /* fall through for 0 < low bits < 8 */ 110: sub %o4, 64, %g2 - EX_LD_FP(LOAD_BLK(%g2, %f0)) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) + EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 120: sub %o4, 56, %g2 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: 
EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 130: sub %o4, 48, %g2 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_6(f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 140: sub %o4, 40, %g2 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_5(f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 150: sub %o4, 32, %g2 FREG_LOAD_4(%g2, f0, f2, f4, f6) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_4(f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, 
%o2=len */ 160: sub %o4, 24, %g2 FREG_LOAD_3(%g2, f0, f2, f4) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_3(f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 170: sub %o4, 16, %g2 FREG_LOAD_2(%g2, f0, f2) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_2(f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 180: sub %o4, 8, %g2 FREG_LOAD_1(%g2, f0) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_1(f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ nop 190: -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) subcc %g1, 64, %g1 - EX_LD_FP(LOAD_BLK(%o4, %f0)) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64) add %o4, 64, %o4 bne,pt %xcc, 1b LOAD(prefetch, %o4 + 64, #one_read) @@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn 
%o2, 0xf, %o4 and %o2, 0xf, %o2 1: subcc %o4, 0x10, %o4 - EX_LD(LOAD(ldx, %o1, %o5)) + EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16) add %o1, 0x08, %o1 - EX_LD(LOAD(ldx, %o1, %g1)) + EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16) sub %o1, 0x08, %o1 - EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) + EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16) add %o1, 0x8, %o1 - EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE)) + EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8) bgu,pt %XCC, 1b add %o1, 0x8, %o1 73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop sub %o2, 0x8, %o2 - EX_LD(LOAD(ldx, %o1, %o5)) - EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8) + EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8) add %o1, 0x8, %o1 1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop sub %o2, 0x4, %o2 - EX_LD(LOAD(lduw, %o1, %o5)) - EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4) + EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 be,pt %XCC, 85f @@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %o1, %o5)) - EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1) bgu,pt %icc, 1b add %o1, 1, %o1 @@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 8: mov 64, GLOBAL_SPARE andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1, %g2)) + EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2) sub GLOBAL_SPARE, %g1, GLOBAL_SPARE andn %o2, 0x7, %o4 sllx %g2, %g1, %g2 1: add %o1, 0x8, %o1 - EX_LD(LOAD(ldx, %o1, %g3)) + EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4) subcc %o4, 0x8, %o4 srlx %g3, GLOBAL_SPARE, %o5 or %o5, %g2, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8) add %o0, 0x8, %o0 
bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 1: subcc %o2, 4, %o2 - EX_LD(LOAD(lduw, %o1, %g1)) - EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S index 2e8ee7ad07a9ce06129cd63c4129ccade674ab77..16a286c1a52836ee92b2a7d6bac66d3341b0f905 100644 --- a/arch/sparc/lib/NG4copy_from_user.S +++ b/arch/sparc/lib/NG4copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2012 David S. Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x, y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S index be0bf4590df8971ddf29a81153f05de7c0ef30da..6b0276ffc858c4777d95dffcff95f048dd340cc2 100644 --- a/arch/sparc/lib/NG4copy_to_user.S +++ b/arch/sparc/lib/NG4copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2012 David S. 
Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 8e13ee1f4454ea2b6478d302a9a1048bfeb60aff..75bb93b1437f7f6f29ab17c96bc0b4c322f2a373 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include #include #include #define GLOBAL_SPARE %g7 @@ -46,22 +47,19 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x +#define EX_ST_FP(x,y) x #endif -#ifndef EX_RETVAL -#define EX_RETVAL(x) x -#endif #ifndef LOAD #define LOAD(type,addr,dest) type [addr], dest @@ -94,6 +92,158 @@ .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_asi_fp: + VISExitHalf +__restore_asi: + retl + wr %g0, ASI_AIUS, %asi + +ENTRY(NG4_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(NG4_retl_o2) +ENTRY(NG4_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(NG4_retl_o2_plus_1) +ENTRY(NG4_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(NG4_retl_o2_plus_4) +ENTRY(NG4_retl_o2_plus_o5) + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5) +ENTRY(NG4_retl_o2_plus_o5_plus_4) + add %o5, 4, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_4) +ENTRY(NG4_retl_o2_plus_o5_plus_8) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_8) +ENTRY(NG4_retl_o2_plus_o5_plus_16) + add %o5, 16, %o5 + ba,pt 
%xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_16) +ENTRY(NG4_retl_o2_plus_o5_plus_24) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_24) +ENTRY(NG4_retl_o2_plus_o5_plus_32) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_32) +ENTRY(NG4_retl_o2_plus_g1) + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1) +ENTRY(NG4_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1_plus_1) +ENTRY(NG4_retl_o2_plus_g1_plus_8) + add %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1_plus_8) +ENTRY(NG4_retl_o2_plus_o4) + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4) +ENTRY(NG4_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_8) +ENTRY(NG4_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_16) +ENTRY(NG4_retl_o2_plus_o4_plus_24) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_24) +ENTRY(NG4_retl_o2_plus_o4_plus_32) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_32) +ENTRY(NG4_retl_o2_plus_o4_plus_40) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_40) +ENTRY(NG4_retl_o2_plus_o4_plus_48) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_48) +ENTRY(NG4_retl_o2_plus_o4_plus_56) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_56) +ENTRY(NG4_retl_o2_plus_o4_plus_64) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_64) 
+ENTRY(NG4_retl_o2_plus_o4_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) +#endif .align 64 .globl FUNC_NAME @@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, 51f sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01)) + EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) LOAD(prefetch, %o1 + 0x080, #n_reads_strong) @@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, .Llarge_aligned sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), 
NG4_retl_o2_plus_g1) add %o1, 8, %o1 subcc %g1, 8, %g1 add %o0, 8, %o0 bne,pt %icc, 1b - EX_ST(STORE(stx, %g2, %o0 - 0x08)) + EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8) .Llarge_aligned: /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ andn %o2, 0x3f, %o4 sub %o2, %o4, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4) add %o1, 0x40, %o1 - EX_LD(LOAD(ldx, %o1 - 0x38, %g2)) + EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD(LOAD(ldx, %o1 - 0x30, %g3)) - EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE)) - EX_LD(LOAD(ldx, %o1 - 0x20, %o5)) - EX_ST(STORE_INIT(%g1, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64) + EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0)) + EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x18, %g2)) - EX_ST(STORE_INIT(%g3, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48) + EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x10, %g3)) - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE)) - EX_ST(STORE_INIT(%o5, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32) + EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0)) + EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g3, %o0)) + EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) + 
EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b LOAD(prefetch, %o1 + 0x200, #n_reads_strong) @@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %o4, %o2 alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 - EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0)) -1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2)) + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4)) - EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6)) - EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8)) - EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10)) - EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12)) - EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14)) + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0)) + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64) faligndata %f2, %f4, %f18 add %g1, 0x40, %g1 faligndata %f4, %f6, %f20 @@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE(std, %f16, %o0 + 0x00)) - EX_ST_FP(STORE(std, %f18, %o0 + 0x08)) - EX_ST_FP(STORE(std, %f20, %o0 + 0x10)) - EX_ST_FP(STORE(std, %f22, %o0 + 0x18)) - EX_ST_FP(STORE(std, %f24, %o0 + 0x20)) - EX_ST_FP(STORE(std, %f26, %o0 + 0x28)) - EX_ST_FP(STORE(std, %f28, %o0 + 0x30)) - EX_ST_FP(STORE(std, %f30, %o0 + 0x38)) + EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56) + EX_ST_FP(STORE(std, %f20, %o0 + 
0x10), NG4_retl_o2_plus_o4_plus_48) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8) add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) @@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andncc %o2, 0x20 - 1, %o5 be,pn %icc, 2f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g2)) - EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE)) - EX_LD(LOAD(ldx, %o1 + 0x18, %o4)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 - EX_ST(STORE(stx, %g1, %o0 + 0x00)) - EX_ST(STORE(stx, %g2, %o0 + 0x08)) - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10)) - EX_ST(STORE(stx, %o4, %o0 + 0x18)) + EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8) bne,pt %icc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %icc, 3f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) + +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %icc, 1b - EX_ST(STORE(stx, %g1, %o0 - 0x08)) + EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit cmp %o2, 0x04 bl,pn %icc, .Ltiny nop - EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, 
%o0 subcc %o2, 0x04, %o2 bne,pn %icc, .Ltiny - EX_ST(STORE(stw, %g1, %o0 - 0x04)) + EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4) ba,a,pt %icc, .Lexit .Lmedium_unaligned: /* First get dest 8 byte aligned. */ @@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, 2f sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01)) + EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %g1 brz,pn %g1, .Lmedium_noprefetch @@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov 64, %g2 sub %g2, %g1, %g2 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1 + 0x00, %o4)) + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2) sllx %o4, %g1, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, GLOBAL_SPARE or GLOBAL_SPARE, %o4, GLOBAL_SPARE - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00)) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b sllx %g3, %g1, %o4 @@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ba,pt %icc, .Lsmall_unaligned .Ltiny: - EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) + EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x00)) - EX_LD(LOAD(ldub, %o1 + 0x01, %g1)) + EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x01)) - EX_LD(LOAD(ldub, %o1 + 0x02, %g1)) + EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2) ba,pt %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x02)) + EX_ST(STORE(stb, %g1, %o0 + 0x02), 
NG4_retl_o2) .Lsmall: andcc %g2, 0x3, %g0 @@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: - EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %icc, 1b - EX_ST(STORE(stw, %g1, %o0 - 0x04)) + EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit nop ba,a,pt %icc, .Ltiny .Lsmall_unaligned: -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %icc, 1b - EX_ST(STORE(stb, %g1, %o0 - 0x01)) + EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) ba,a,pt %icc, .Lexit .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S index 5d1e4d1ac21edf09a664663dc005e1fcae805b6a..9cd42fcbc781152ca95e3c8051962ea7de1538a9 100644 --- a/arch/sparc/lib/NGcopy_from_user.S +++ b/arch/sparc/lib/NGcopy_from_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __ret_one_asi;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S index ff630dcb273c9649de6fc9e145fce42e72667787..5c358afd464e24f3513cb6716f2983ca9f201dff 100644 --- a/arch/sparc/lib/NGcopy_to_user.S +++ b/arch/sparc/lib/NGcopy_to_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2006, 2007 David S. 
Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __ret_one_asi;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S index 96a14caf6966282cab2d97071f7c3837b2a6727a..d88c4ed50a0023cd8e2daa06689abc0d46d79f46 100644 --- a/arch/sparc/lib/NGmemcpy.S +++ b/arch/sparc/lib/NGmemcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include #include #include #define GLOBAL_SPARE %g7 @@ -27,15 +28,11 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST(x,y) x #endif #ifndef LOAD @@ -79,6 +76,92 @@ .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_asi: + ret + wr %g0, ASI_AIUS, %asi + restore +ENTRY(NG_ret_i2_plus_i4_plus_1) + ba,pt %xcc, __restore_asi + add %i2, %i5, %i0 +ENDPROC(NG_ret_i2_plus_i4_plus_1) +ENTRY(NG_ret_i2_plus_g1) + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1) +ENTRY(NG_ret_i2_plus_g1_minus_8) + sub %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_8) +ENTRY(NG_ret_i2_plus_g1_minus_16) + sub %g1, 16, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_16) +ENTRY(NG_ret_i2_plus_g1_minus_24) + sub %g1, 24, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_24) +ENTRY(NG_ret_i2_plus_g1_minus_32) + sub %g1, 32, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_32) +ENTRY(NG_ret_i2_plus_g1_minus_40) + sub %g1, 40, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_40) +ENTRY(NG_ret_i2_plus_g1_minus_48) + sub %g1, 48, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_48) +ENTRY(NG_ret_i2_plus_g1_minus_56) + sub %g1, 56, %g1 + ba,pt %xcc, 
__restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_56) +ENTRY(NG_ret_i2_plus_i4) + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_plus_i4) +ENTRY(NG_ret_i2_plus_i4_minus_8) + sub %i4, 8, %i4 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_plus_i4_minus_8) +ENTRY(NG_ret_i2_plus_8) + ba,pt %xcc, __restore_asi + add %i2, 8, %i0 +ENDPROC(NG_ret_i2_plus_8) +ENTRY(NG_ret_i2_plus_4) + ba,pt %xcc, __restore_asi + add %i2, 4, %i0 +ENDPROC(NG_ret_i2_plus_4) +ENTRY(NG_ret_i2_plus_1) + ba,pt %xcc, __restore_asi + add %i2, 1, %i0 +ENDPROC(NG_ret_i2_plus_1) +ENTRY(NG_ret_i2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_plus_1) +ENTRY(NG_ret_i2) + ba,pt %xcc, __restore_asi + mov %i2, %i0 +ENDPROC(NG_ret_i2) +ENTRY(NG_ret_i2_and_7_plus_i4) + and %i2, 7, %i2 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_and_7_plus_i4) +#endif + .align 64 .globl FUNC_NAME @@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ sub %g0, %i4, %i4 ! 
bytes to align dst sub %i2, %i4, %i2 1: subcc %i4, 1, %i4 - EX_LD(LOAD(ldub, %i1, %g1)) - EX_ST(STORE(stb, %g1, %o0)) + EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1) + EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1) add %i1, 1, %i1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ and %i4, 0x7, GLOBAL_SPARE sll GLOBAL_SPARE, 3, GLOBAL_SPARE mov 64, %i5 - EX_LD(LOAD_TWIN(%i1, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1) sub %i5, GLOBAL_SPARE, %i5 mov 16, %o4 mov 32, %o5 @@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ srlx WORD3, PRE_SHIFT, TMP; \ or WORD2, TMP, WORD2; -8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) +8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) LOAD(prefetch, %i1 + %i3, #one_read) - EX_ST(STORE_INIT(%g2, %o0 + 0x00)) - EX_ST(STORE_INIT(%g3, %o0 + 0x08)) + EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1) + EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) - EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o2, %o0 + 0x10)) - EX_ST(STORE_INIT(%o3, %o0 + 0x18)) + EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%g2, %o0 + 0x20)) - EX_ST(STORE_INIT(%g3, %o0 + 0x28)) + EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) - EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) add %i1, 64, %i1 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o2, %o0 
+ 0x30)) - EX_ST(STORE_INIT(%o3, %o0 + 0x38)) + EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 8b @@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ ba,pt %XCC, 60f add %i1, %i4, %i1 -9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) +9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) LOAD(prefetch, %i1 + %i3, #one_read) - EX_ST(STORE_INIT(%g3, %o0 + 0x00)) - EX_ST(STORE_INIT(%o2, %o0 + 0x08)) + EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1) + EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) - EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o3, %o0 + 0x10)) - EX_ST(STORE_INIT(%g2, %o0 + 0x18)) + EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%g3, %o0 + 0x20)) - EX_ST(STORE_INIT(%o2, %o0 + 0x28)) + EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) - EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) add %i1, 64, %i1 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o3, %o0 + 0x30)) - EX_ST(STORE_INIT(%g2, %o0 + 0x38)) + EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 9b @@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ * one twin load ahead, then add 8 back into source when * we finish the loop. 
*/ - EX_LD(LOAD_TWIN(%i1, %o4, %o5)) + EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1) mov 16, %o7 mov 32, %g2 mov 48, %g3 mov 64, %o1 -1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) +1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) LOAD(prefetch, %i1 + %o1, #one_read) - EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line - EX_ST(STORE_INIT(%o2, %o0 + 0x08)) - EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) - EX_ST(STORE_INIT(%o3, %o0 + 0x10)) - EX_ST(STORE_INIT(%o4, %o0 + 0x18)) - EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) - EX_ST(STORE_INIT(%o5, %o0 + 0x20)) - EX_ST(STORE_INIT(%o2, %o0 + 0x28)) - EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5)) + EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line + EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48) add %i1, 64, %i1 - EX_ST(STORE_INIT(%o3, %o0 + 0x30)) - EX_ST(STORE_INIT(%o4, %o0 + 0x38)) + EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 1b add %o0, 64, %o0 @@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ mov 32, %g2 mov 48, %g3 mov 64, %o1 -1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5)) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) +1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) LOAD(prefetch, %i1 + %o1, #one_read) - EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! 
initializes cache line - EX_ST(STORE_INIT(%o5, %o0 + 0x08)) - EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) - EX_ST(STORE_INIT(%o2, %o0 + 0x10)) - EX_ST(STORE_INIT(%o3, %o0 + 0x18)) - EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) + EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line + EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) add %i1, 64, %i1 - EX_ST(STORE_INIT(%o4, %o0 + 0x20)) - EX_ST(STORE_INIT(%o5, %o0 + 0x28)) - EX_ST(STORE_INIT(%o2, %o0 + 0x30)) - EX_ST(STORE_INIT(%o3, %o0 + 0x38)) + EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 1b add %o0, 64, %o0 @@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ andn %i2, 0xf, %i4 and %i2, 0xf, %i2 1: subcc %i4, 0x10, %i4 - EX_LD(LOAD(ldx, %i1, %o4)) + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4) add %i1, 0x08, %i1 - EX_LD(LOAD(ldx, %i1, %g1)) + EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4) sub %i1, 0x08, %i1 - EX_ST(STORE(stx, %o4, %i1 + %i3)) + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4) add %i1, 0x8, %i1 - EX_ST(STORE(stx, %g1, %i1 + %i3)) + EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8) bgu,pt %XCC, 1b add %i1, 0x8, %i1 73: andcc %i2, 0x8, %g0 be,pt %XCC, 1f nop sub %i2, 0x8, %i2 - EX_LD(LOAD(ldx, %i1, %o4)) - EX_ST(STORE(stx, %o4, %i1 + %i3)) + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8) + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8) add %i1, 0x8, %i1 1: andcc %i2, 0x4, %g0 be,pt %XCC, 1f nop sub %i2, 0x4, %i2 - EX_LD(LOAD(lduw, %i1, %i5)) - 
EX_ST(STORE(stw, %i5, %i1 + %i3)) + EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4) + EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4) add %i1, 0x4, %i1 1: cmp %i2, 0 be,pt %XCC, 85f @@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ sub %i2, %g1, %i2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %i1, %i5)) - EX_ST(STORE(stb, %i5, %i1 + %i3)) + EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1) + EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1) bgu,pt %icc, 1b add %i1, 1, %i1 @@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ 8: mov 64, %i3 andn %i1, 0x7, %i1 - EX_LD(LOAD(ldx, %i1, %g2)) + EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2) sub %i3, %g1, %i3 andn %i2, 0x7, %i4 sllx %g2, %g1, %g2 1: add %i1, 0x8, %i1 - EX_LD(LOAD(ldx, %i1, %g3)) + EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4) subcc %i4, 0x8, %i4 srlx %g3, %i3, %i5 or %i5, %g2, %i5 - EX_ST(STORE(stx, %i5, %o0)) + EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ 1: subcc %i2, 4, %i2 - EX_LD(LOAD(lduw, %i1, %g1)) - EX_ST(STORE(stw, %g1, %i1 + %i3)) + EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4) + EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4) bgu,pt %XCC, 1b add %i1, 4, %i1 @@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ .align 32 90: subcc %i2, 1, %i2 - EX_LD(LOAD(ldub, %i1, %g1)) - EX_ST(STORE(stb, %g1, %i1 + %i3)) + EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1) + EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1) bgu,pt %XCC, 90b add %i1, 1, %i1 ret diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S index ecc5692fa2b49a3acfc6a6592c6c247835417b4c..bb6ff73229e3e5e7eac225389db53c27d989abc1 100644 --- a/arch/sparc/lib/U1copy_from_user.S +++ b/arch/sparc/lib/U1copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. 
Miller (davem@redhat.com) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S index 9eea392e44d471679ba85c22867a9767b34912a5..ed92ce73955889dba9faa8f028b7e5e6c4326ecf 100644 --- a/arch/sparc/lib/U1copy_to_user.S +++ b/arch/sparc/lib/U1copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S index 3e6209ebb7d7865fd62bb21df56dabda07fe4f5f..f30d2ab2c371818614c56387967c01174a32824c 100644 --- a/arch/sparc/lib/U1memcpy.S +++ b/arch/sparc/lib/U1memcpy.S @@ -5,6 +5,7 @@ */ #ifdef __KERNEL__ +#include #include #include #define GLOBAL_SPARE g7 @@ -23,21 +24,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST_FP(x,y) x #endif #ifndef LOAD @@ -78,53 +75,169 @@ faligndata %f7, %f8, %f60; \ faligndata %f8, %f9, %f62; -#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ - EX_LD_FP(LOAD_BLK(%src, %fdest)); \ - EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ - add %src, 0x40, %src; \ - subcc %len, 0x40, %len; \ - be,pn %xcc, jmptgt; \ - add %dest, 0x40, %dest; \ 
- -#define LOOP_CHUNK1(src, dest, len, branch_dest) \ - MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest) -#define LOOP_CHUNK2(src, dest, len, branch_dest) \ - MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest) -#define LOOP_CHUNK3(src, dest, len, branch_dest) \ - MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest) +#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \ + EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ + add %src, 0x40, %src; \ + subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \ + be,pn %xcc, jmptgt; \ + add %dest, 0x40, %dest; \ + +#define LOOP_CHUNK1(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest) +#define LOOP_CHUNK2(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest) +#define LOOP_CHUNK3(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest) #define DO_SYNC membar #Sync; #define STORE_SYNC(dest, fsrc) \ - EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ add %dest, 0x40, %dest; \ DO_SYNC #define STORE_JUMP(dest, fsrc, target) \ - EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \ add %dest, 0x40, %dest; \ ba,pt %xcc, target; \ nop; -#define FINISH_VISCHUNK(dest, f0, f1, left) \ - subcc %left, 8, %left;\ - bl,pn %xcc, 95f; \ - faligndata %f0, %f1, %f48; \ - EX_ST_FP(STORE(std, %f48, %dest)); \ +#define FINISH_VISCHUNK(dest, f0, f1) \ + subcc %g3, 8, %g3; \ + bl,pn %xcc, 95f; \ + faligndata %f0, %f1, %f48; \ + EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \ add %dest, 8, %dest; -#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ - subcc %left, 8, %left; \ - bl,pn %xcc, 95f; \ +#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ + subcc %g3, 8, %g3; \ + bl,pn %xcc, 95f; \ fsrc2 %f0, %f1; -#define UNEVEN_VISCHUNK(dest, f0, f1, left) \ - UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ +#define UNEVEN_VISCHUNK(dest, f0, f1) \ + 
UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ ba,a,pt %xcc, 93f; .register %g2,#scratch .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +ENTRY(U1_g1_1_fp) + VISExitHalf + add %g1, 1, %g1 + add %g1, %g2, %g1 + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_1_fp) +ENTRY(U1_g2_0_fp) + VISExitHalf + retl + add %g2, %o2, %o0 +ENDPROC(U1_g2_0_fp) +ENTRY(U1_g2_8_fp) + VISExitHalf + add %g2, 8, %g2 + retl + add %g2, %o2, %o0 +ENDPROC(U1_g2_8_fp) +ENTRY(U1_gs_0_fp) + VISExitHalf + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_0_fp) +ENTRY(U1_gs_80_fp) + VISExitHalf + add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_80_fp) +ENTRY(U1_gs_40_fp) + VISExitHalf + add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_40_fp) +ENTRY(U1_g3_0_fp) + VISExitHalf + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_0_fp) +ENTRY(U1_g3_8_fp) + VISExitHalf + add %g3, 8, %g3 + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_8_fp) +ENTRY(U1_o2_0_fp) + VISExitHalf + retl + mov %o2, %o0 +ENDPROC(U1_o2_0_fp) +ENTRY(U1_o2_1_fp) + VISExitHalf + retl + add %o2, 1, %o0 +ENDPROC(U1_o2_1_fp) +ENTRY(U1_gs_0) + VISExitHalf + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_0) +ENTRY(U1_gs_8) + VISExitHalf + add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, 0x8, %o0 +ENDPROC(U1_gs_8) +ENTRY(U1_gs_10) + VISExitHalf + add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, 0x10, %o0 +ENDPROC(U1_gs_10) +ENTRY(U1_o2_0) + retl + mov %o2, %o0 +ENDPROC(U1_o2_0) +ENTRY(U1_o2_8) + retl + add %o2, 8, %o0 +ENDPROC(U1_o2_8) +ENTRY(U1_o2_4) + retl + add %o2, 4, %o0 +ENDPROC(U1_o2_4) +ENTRY(U1_o2_1) + retl + add %o2, 1, %o0 +ENDPROC(U1_o2_1) +ENTRY(U1_g1_0) + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_0) +ENTRY(U1_g1_1) + add %g1, 1, %g1 + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_1) +ENTRY(U1_gs_0_o2_adj) + and %o2, 7, %o2 + retl + add %GLOBAL_SPARE, %o2, %o0 
+ENDPROC(U1_gs_0_o2_adj) +ENTRY(U1_gs_8_o2_adj) + and %o2, 7, %o2 + add %GLOBAL_SPARE, 8, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_8_o2_adj) +#endif + .align 64 .globl FUNC_NAME @@ -166,8 +279,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ and %g2, 0x38, %g2 1: subcc %g1, 0x1, %g1 - EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) - EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp) + EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp) bgu,pt %XCC, 1b add %o1, 0x1, %o1 @@ -178,20 +291,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ be,pt %icc, 3f alignaddr %o1, %g0, %o1 - EX_LD_FP(LOAD(ldd, %o1, %f4)) -1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) + EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f4, %f6, %f0 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) be,pn %icc, 3f add %o0, 0x8, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f6, %f4, %f0 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) bne,pt %icc, 1b add %o0, 0x8, %o0 @@ -214,13 +327,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %g1, %GLOBAL_SPARE, %g1 subcc %o2, %g3, %o2 - EX_LD_FP(LOAD_BLK(%o1, %f0)) + EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp) add %o1, 0x40, %o1 add %g1, %g3, %g1 - EX_LD_FP(LOAD_BLK(%o1, %f16)) + EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp) add %o1, 0x40, %o1 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE - EX_LD_FP(LOAD_BLK(%o1, %f32)) + EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp) add %o1, 0x40, %o1 /* There are 8 instances of the unrolled loop, @@ -240,11 +353,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 64 1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) - 
LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f0, %f2, %f48 1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) @@ -261,11 +374,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 56f) 1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f2, %f4, %f48 1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) @@ -282,11 +395,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 57f) 1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f4, %f6, %f48 1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) @@ -303,11 +416,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 58f) 1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f6, %f8, %f48 1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) @@ -324,11 +437,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 59f) 1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) - LOOP_CHUNK1(o1, 
o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f8, %f10, %f48 1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) @@ -345,11 +458,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 60f) 1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f10, %f12, %f48 1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) @@ -366,11 +479,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 61f) 1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f12, %f14, %f48 1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) @@ -387,11 +500,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 62f) 1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f14, %f16, %f48 1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) @@ -407,53 +520,53 @@ FUNC_NAME: /* %o0=dst, 
%o1=src, %o2=len */ FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_JUMP(o0, f48, 63f) -40: FINISH_VISCHUNK(o0, f0, f2, g3) -41: FINISH_VISCHUNK(o0, f2, f4, g3) -42: FINISH_VISCHUNK(o0, f4, f6, g3) -43: FINISH_VISCHUNK(o0, f6, f8, g3) -44: FINISH_VISCHUNK(o0, f8, f10, g3) -45: FINISH_VISCHUNK(o0, f10, f12, g3) -46: FINISH_VISCHUNK(o0, f12, f14, g3) -47: UNEVEN_VISCHUNK(o0, f14, f0, g3) -48: FINISH_VISCHUNK(o0, f16, f18, g3) -49: FINISH_VISCHUNK(o0, f18, f20, g3) -50: FINISH_VISCHUNK(o0, f20, f22, g3) -51: FINISH_VISCHUNK(o0, f22, f24, g3) -52: FINISH_VISCHUNK(o0, f24, f26, g3) -53: FINISH_VISCHUNK(o0, f26, f28, g3) -54: FINISH_VISCHUNK(o0, f28, f30, g3) -55: UNEVEN_VISCHUNK(o0, f30, f0, g3) -56: FINISH_VISCHUNK(o0, f32, f34, g3) -57: FINISH_VISCHUNK(o0, f34, f36, g3) -58: FINISH_VISCHUNK(o0, f36, f38, g3) -59: FINISH_VISCHUNK(o0, f38, f40, g3) -60: FINISH_VISCHUNK(o0, f40, f42, g3) -61: FINISH_VISCHUNK(o0, f42, f44, g3) -62: FINISH_VISCHUNK(o0, f44, f46, g3) -63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) - -93: EX_LD_FP(LOAD(ldd, %o1, %f2)) +40: FINISH_VISCHUNK(o0, f0, f2) +41: FINISH_VISCHUNK(o0, f2, f4) +42: FINISH_VISCHUNK(o0, f4, f6) +43: FINISH_VISCHUNK(o0, f6, f8) +44: FINISH_VISCHUNK(o0, f8, f10) +45: FINISH_VISCHUNK(o0, f10, f12) +46: FINISH_VISCHUNK(o0, f12, f14) +47: UNEVEN_VISCHUNK(o0, f14, f0) +48: FINISH_VISCHUNK(o0, f16, f18) +49: FINISH_VISCHUNK(o0, f18, f20) +50: FINISH_VISCHUNK(o0, f20, f22) +51: FINISH_VISCHUNK(o0, f22, f24) +52: FINISH_VISCHUNK(o0, f24, f26) +53: FINISH_VISCHUNK(o0, f26, f28) +54: FINISH_VISCHUNK(o0, f28, f30) +55: UNEVEN_VISCHUNK(o0, f30, f0) +56: FINISH_VISCHUNK(o0, f32, f34) +57: FINISH_VISCHUNK(o0, f34, f36) +58: FINISH_VISCHUNK(o0, f36, f38) +59: FINISH_VISCHUNK(o0, f38, f40) +60: FINISH_VISCHUNK(o0, f40, f42) +61: FINISH_VISCHUNK(o0, f42, f44) +62: FINISH_VISCHUNK(o0, f44, f46) +63: UNEVEN_VISCHUNK_LAST(o0, f46, f0) + +93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f0, %f2, %f8 
- EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) bl,pn %xcc, 95f add %o0, 8, %o0 - EX_LD_FP(LOAD(ldd, %o1, %f0)) + EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f2, %f0, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) bge,pt %xcc, 93b add %o0, 8, %o0 95: brz,pt %o2, 2f mov %g1, %o1 -1: EX_LD_FP(LOAD(ldub, %o1, %o3)) +1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp) add %o1, 1, %o1 subcc %o2, 1, %o2 - EX_ST_FP(STORE(stb, %o3, %o0)) + EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp) bne,pt %xcc, 1b add %o0, 1, %o0 @@ -469,27 +582,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 72: andn %o2, 0xf, %GLOBAL_SPARE and %o2, 0xf, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0) + EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0) subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10) add %o1, 0x8, %o1 - EX_ST(STORE(stx, %g1, %o1 + %o3)) + EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8) bgu,pt %XCC, 1b add %o1, 0x8, %o1 73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop - EX_LD(LOAD(ldx, %o1, %o5)) + EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0) sub %o2, 0x8, %o2 - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8) add %o1, 0x8, %o1 1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop - EX_LD(LOAD(lduw, %o1, %o5)) + EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0) sub %o2, 0x4, %o2 - EX_ST(STORE(stw, %o5, %o1 + %o3)) + EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4) add %o1, 0x4, %o1 1: cmp %o2, 0 be,pt %XCC, 85f @@ -503,9 +616,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %g0, %g1, %g1 sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1, %o5)) +1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0) subcc %g1, 1, %g1 - EX_ST(STORE(stb, %o5, %o1 + %o3)) + EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1) bgu,pt %icc, 1b add %o1, 1, %o1 @@ -521,16 +634,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, 
%o2=len */ 8: mov 64, %o3 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1, %g2)) + EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0) sub %o3, %g1, %o3 andn %o2, 0x7, %GLOBAL_SPARE sllx %g2, %g1, %g2 -1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj) subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE add %o1, 0x8, %o1 srlx %g3, %o3, %o5 or %o5, %g2, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -548,9 +661,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bne,pn %XCC, 90f sub %o0, %o1, %o3 -1: EX_LD(LOAD(lduw, %o1, %g1)) +1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0) subcc %o2, 4, %o2 - EX_ST(STORE(stw, %g1, %o1 + %o3)) + EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -558,9 +671,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov EX_RETVAL(%o4), %o0 .align 32 -90: EX_LD(LOAD(ldub, %o1, %g1)) +90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0) subcc %o2, 1, %o2 - EX_ST(STORE(stb, %g1, %o1 + %o3)) + EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S index 88ad73d86fe44b64c2313483e7490cc8ae0ee438..db73010a1af8f18d5baa7515203001045995ce31 100644 --- a/arch/sparc/lib/U3copy_from_user.S +++ b/arch/sparc/lib/U3copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. 
Miller (davem@redhat.com) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S index 845139d7553720ce5fe98d6e30bcb11215f71e2f..c4ee858e352a2be0e028ac757dd399938e89db04 100644 --- a/arch/sparc/lib/U3copy_to_user.S +++ b/arch/sparc/lib/U3copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 491ee69e49951fc2040640e77b896552872c8a0a..54f98706b03b2f53025adb99e086002a0629e9f0 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include #include #include #define GLOBAL_SPARE %g7 @@ -22,21 +23,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST_FP(x,y) x #endif #ifndef LOAD @@ -77,6 +74,87 @@ */ .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_fp: + VISExitHalf + retl + nop +ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) + add %g1, 1, %g1 + add %g2, %g1, %g2 + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) 
+ENTRY(U3_retl_o2_plus_g2_fp) + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_fp) +ENTRY(U3_retl_o2_plus_g2_plus_8_fp) + add %g2, 8, %g2 + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_plus_8_fp) +ENTRY(U3_retl_o2) + retl + mov %o2, %o0 +ENDPROC(U3_retl_o2) +ENTRY(U3_retl_o2_plus_1) + retl + add %o2, 1, %o0 +ENDPROC(U3_retl_o2_plus_1) +ENTRY(U3_retl_o2_plus_4) + retl + add %o2, 4, %o0 +ENDPROC(U3_retl_o2_plus_4) +ENTRY(U3_retl_o2_plus_8) + retl + add %o2, 8, %o0 +ENDPROC(U3_retl_o2_plus_8) +ENTRY(U3_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + retl + add %o2, %g1, %o0 +ENDPROC(U3_retl_o2_plus_g1_plus_1) +ENTRY(U3_retl_o2_fp) + ba,pt %xcc, __restore_fp + mov %o2, %o0 +ENDPROC(U3_retl_o2_fp) +ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) + sll %o3, 6, %o3 + add %o3, 0x80, %o3 + ba,pt %xcc, __restore_fp + add %o2, %o3, %o0 +ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) +ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) + sll %o3, 6, %o3 + add %o3, 0x40, %o3 + ba,pt %xcc, __restore_fp + add %o2, %o3, %o0 +ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) +ENTRY(U3_retl_o2_plus_GS_plus_0x10) + add GLOBAL_SPARE, 0x10, GLOBAL_SPARE + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_plus_GS_plus_0x10) +ENTRY(U3_retl_o2_plus_GS_plus_0x08) + add GLOBAL_SPARE, 0x08, GLOBAL_SPARE + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_plus_GS_plus_0x08) +ENTRY(U3_retl_o2_and_7_plus_GS) + and %o2, 7, %o2 + retl + add %o2, GLOBAL_SPARE, %o2 +ENDPROC(U3_retl_o2_and_7_plus_GS) +ENTRY(U3_retl_o2_and_7_plus_GS_plus_8) + add GLOBAL_SPARE, 8, GLOBAL_SPARE + and %o2, 7, %o2 + retl + add %o2, GLOBAL_SPARE, %o2 +ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) +#endif + .align 64 /* The cheetah's flexible spine, oversized liver, enlarged heart, @@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ and %g2, 0x38, %g2 1: subcc %g1, 0x1, %g1 - EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) - EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) + 
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1) + EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1) bgu,pt %XCC, 1b add %o1, 0x1, %o1 @@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ be,pt %icc, 3f alignaddr %o1, %g0, %o1 - EX_LD_FP(LOAD(ldd, %o1, %f4)) -1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) + EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f4, %f6, %f0 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8) be,pn %icc, 3f add %o0, 0x8, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f6, %f4, %f2 - EX_ST_FP(STORE(std, %f2, %o0)) + EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8) bne,pt %icc, 1b add %o0, 0x8, %o0 @@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ LOAD(prefetch, %o1 + 0x080, #one_read) LOAD(prefetch, %o1 + 0x0c0, #one_read) LOAD(prefetch, %o1 + 0x100, #one_read) - EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2) LOAD(prefetch, %o1 + 0x140, #one_read) - EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2) LOAD(prefetch, %o1 + 0x180, #one_read) - EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2) LOAD(prefetch, %o1 + 0x1c0, #one_read) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2) faligndata %f4, %f6, %f20 - EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2) faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), 
U3_retl_o2) faligndata %f8, %f10, %f24 - EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2) faligndata %f10, %f12, %f26 - EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2) subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE add %o1, 0x40, %o1 @@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 64 1: - EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f12, %f14, %f28 - EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f14, %f0, %f30 - EX_ST_FP(STORE_BLK(%f16, %o0)) - EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f0, %f2, %f16 add %o0, 0x40, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f4, %f6, %f20 - EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) subcc %o3, 0x01, %o3 faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f8, %f10, %f24 - EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80) LOAD(prefetch, %o1 + 0x1c0, #one_read) faligndata %f10, %f12, %f26 bg,pt %XCC, 1b @@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ /* Finally we copy the last full 64-byte block. 
*/ 2: - EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f12, %f14, %f28 - EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f14, %f0, %f30 - EX_ST_FP(STORE_BLK(%f16, %o0)) - EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f4, %f6, %f20 - EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f8, %f10, %f24 cmp %g1, 0 be,pt %XCC, 1f add %o0, 0x40, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40) 1: faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE_BLK(%f16, %o0)) + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40) add %o0, 0x40, %o0 add %o1, 0x40, %o1 membar #Sync @@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g2, %o2 be,a,pt %XCC, 1f - EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2) -1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2)) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f0, %f2, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) be,pn %XCC, 2f add %o0, 0x8, 
%o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f2, %f0, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) bne,pn %XCC, 1b add %o0, 0x8, %o0 @@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andcc %o2, 0x8, %g0 be,pt %icc, 1f nop - EX_LD(LOAD(ldx, %o1, %o5)) - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2) add %o1, 0x8, %o1 + sub %o2, 8, %o2 1: andcc %o2, 0x4, %g0 be,pt %icc, 1f nop - EX_LD(LOAD(lduw, %o1, %o5)) - EX_ST(STORE(stw, %o5, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2) + EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2) add %o1, 0x4, %o1 + sub %o2, 4, %o2 1: andcc %o2, 0x2, %g0 be,pt %icc, 1f nop - EX_LD(LOAD(lduh, %o1, %o5)) - EX_ST(STORE(sth, %o5, %o1 + %o3)) + EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2) + EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2) add %o1, 0x2, %o1 + sub %o2, 2, %o2 1: andcc %o2, 0x1, %g0 be,pt %icc, 85f nop - EX_LD(LOAD(ldub, %o1, %o5)) + EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) ba,pt %xcc, 85f - EX_ST(STORE(stb, %o5, %o1 + %o3)) + EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) .align 64 70: /* 16 < len <= 64 */ @@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0xf, GLOBAL_SPARE and %o2, 0xf, %o2 1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE - EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10) + EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10) add %o1, 0x8, %o1 - EX_ST(STORE(stx, %g1, %o1 + %o3)) + EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08) bgu,pt %XCC, 1b add %o1, 0x8, %o1 73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop sub %o2, 0x8, %o2 - EX_LD(LOAD(ldx, 
%o1, %o5)) - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8) add %o1, 0x8, %o1 1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop sub %o2, 0x4, %o2 - EX_LD(LOAD(lduw, %o1, %o5)) - EX_ST(STORE(stw, %o5, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4) + EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 be,pt %XCC, 85f @@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %o1, %o5)) - EX_ST(STORE(stb, %o5, %o1 + %o3)) + EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1) bgu,pt %icc, 1b add %o1, 1, %o1 @@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 8: mov 64, %o3 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1, %g2)) + EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2) sub %o3, %g1, %o3 andn %o2, 0x7, GLOBAL_SPARE sllx %g2, %g1, %g2 -1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS) subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE add %o1, 0x8, %o1 srlx %g3, %o3, %o5 or %o5, %g2, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 1: subcc %o2, 4, %o2 - EX_LD(LOAD(lduw, %o1, %g1)) - EX_ST(STORE(stw, %g1, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o1 + %o3)) + EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/copy_in_user.S 
b/arch/sparc/lib/copy_in_user.S index 302c0e60dc2ceb48a3212840ac7891f337419d14..4c89b486fa0d7f88f04615363e6f64f48c917ac0 100644 --- a/arch/sparc/lib/copy_in_user.S +++ b/arch/sparc/lib/copy_in_user.S @@ -8,18 +8,33 @@ #define XCC xcc -#define EX(x,y) \ +#define EX(x,y,z) \ 98: x,y; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, z; \ .text; \ .align 4; +#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8) +#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4) +#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1) + .register %g2,#scratch .register %g3,#scratch .text +__retl_o4_plus_8: + add %o4, %o2, %o4 + retl + add %o4, 8, %o0 +__retl_o2_plus_4: + retl + add %o2, 4, %o0 +__retl_o2_plus_1: + retl + add %o2, 1, %o0 + .align 32 /* Don't try to get too fancy here, just nice and @@ -44,8 +59,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x7, %o4 and %o2, 0x7, %o2 1: subcc %o4, 0x8, %o4 - EX(ldxa [%o1] %asi, %o5) - EX(stxa %o5, [%o0] %asi) + EX_O4(ldxa [%o1] %asi, %o5) + EX_O4(stxa %o5, [%o0] %asi) add %o1, 0x8, %o1 bgu,pt %XCC, 1b add %o0, 0x8, %o0 @@ -53,8 +68,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ be,pt %XCC, 1f nop sub %o2, 0x4, %o2 - EX(lduwa [%o1] %asi, %o5) - EX(stwa %o5, [%o0] %asi) + EX_O2_4(lduwa [%o1] %asi, %o5) + EX_O2_4(stwa %o5, [%o0] %asi) add %o1, 0x4, %o1 add %o0, 0x4, %o0 1: cmp %o2, 0 @@ -70,8 +85,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ 82: subcc %o2, 4, %o2 - EX(lduwa [%o1] %asi, %g1) - EX(stwa %g1, [%o0] %asi) + EX_O2_4(lduwa [%o1] %asi, %g1) + EX_O2_4(stwa %g1, [%o0] %asi) add %o1, 4, %o1 bgu,pt %XCC, 82b add %o0, 4, %o0 @@ -82,8 +97,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX(lduba [%o1] %asi, %g1) - EX(stba %g1, [%o0] %asi) + EX_O2_1(lduba [%o1] %asi, %g1) + EX_O2_1(stba %g1, [%o0] %asi) add %o1, 1, %o1 bgu,pt %XCC, 90b add %o0, 1, %o0 diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c deleted file mode 
100644 index ac96ae23670900ebdcf13f1cd65319e494bb48e9..0000000000000000000000000000000000000000 --- a/arch/sparc/lib/user_fixup.c +++ /dev/null @@ -1,71 +0,0 @@ -/* user_fixup.c: Fix up user copy faults. - * - * Copyright (C) 2004 David S. Miller - */ - -#include -#include -#include -#include -#include - -#include - -/* Calculating the exact fault address when using - * block loads and stores can be very complicated. - * - * Instead of trying to be clever and handling all - * of the cases, just fix things up simply here. - */ - -static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset) -{ - unsigned long fault_addr = current_thread_info()->fault_address; - unsigned long end = start + size; - - if (fault_addr < start || fault_addr >= end) { - *offset = 0; - } else { - *offset = fault_addr - start; - size = end - fault_addr; - } - return size; -} - -unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) -{ - unsigned long offset; - - size = compute_size((unsigned long) from, size, &offset); - if (likely(size)) - memset(to + offset, 0, size); - - return size; -} -EXPORT_SYMBOL(copy_from_user_fixup); - -unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) -{ - unsigned long offset; - - return compute_size((unsigned long) to, size, &offset); -} -EXPORT_SYMBOL(copy_to_user_fixup); - -unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) -{ - unsigned long fault_addr = current_thread_info()->fault_address; - unsigned long start = (unsigned long) to; - unsigned long end = start + size; - - if (fault_addr >= start && fault_addr < end) - return end - fault_addr; - - start = (unsigned long) from; - end = start + size; - if (fault_addr >= start && fault_addr < end) - return end - fault_addr; - - return size; -} -EXPORT_SYMBOL(copy_in_user_fixup); diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 
dbabe5713a158eec17eb61de90d1cdfd6e974e80..e15f33715103a14f64b6e63baaf186c1deb81465 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -479,14 +479,14 @@ good_area: up_read(&mm->mmap_sem); mm_rss = get_mm_rss(mm); -#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE)); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) + mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE)); #endif if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) tsb_grow(mm, MM_TSB_BASE, mm_rss); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - mm_rss = mm->context.huge_pte_count; + mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count; if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { if (mm->context.tsb_block[MM_TSB_HUGE].tsb) diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 364d093f46c6bf50b9dfc0c77935e25871f990bb..da1142401bf4515fe28da9928f14d7a9b55f46aa 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -180,7 +180,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, unsigned long nptes; if (!pte_present(*ptep) && pte_present(entry)) - mm->context.huge_pte_count++; + mm->context.hugetlb_pte_count++; addr &= HPAGE_MASK; @@ -212,7 +212,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, entry = *ptep; if (pte_present(entry)) - mm->context.huge_pte_count--; + mm->context.hugetlb_pte_count--; addr &= HPAGE_MASK; nptes = 1 << HUGETLB_PAGE_ORDER; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3c4b8975fa76ac555259fec038e50300c424713d..3d3414c14792ab192d84b8a19600b1d3580fd0ae 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -346,7 +346,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); #if 
defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) + if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && + is_hugetlb_pte(pte)) __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, address, pte_val(pte)); else @@ -799,8 +800,10 @@ struct mdesc_mblock { }; static struct mdesc_mblock *mblocks; static int num_mblocks; +static int find_numa_node_for_addr(unsigned long pa, + struct node_mem_mask *pnode_mask); -static unsigned long ra_to_pa(unsigned long addr) +static unsigned long __init ra_to_pa(unsigned long addr) { int i; @@ -816,8 +819,11 @@ static unsigned long ra_to_pa(unsigned long addr) return addr; } -static int find_node(unsigned long addr) +static int __init find_node(unsigned long addr) { + static bool search_mdesc = true; + static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; + static int last_index; int i; addr = ra_to_pa(addr); @@ -827,13 +833,30 @@ static int find_node(unsigned long addr) if ((addr & p->mask) == p->val) return i; } - /* The following condition has been observed on LDOM guests.*/ - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" - " rule. Some physical memory will be owned by node 0."); - return 0; + /* The following condition has been observed on LDOM guests because + * node_masks only contains the best latency mask and value. + * LDOM guest's mdesc can contain a single latency group to + * cover multiple address range. Print warning message only if the + * address cannot be found in node_masks nor mdesc. 
+ */ + if ((search_mdesc) && + ((addr & last_mem_mask.mask) != last_mem_mask.val)) { + /* find the available node in the mdesc */ + last_index = find_numa_node_for_addr(addr, &last_mem_mask); + numadbg("find_node: latency group for address 0x%lx is %d\n", + addr, last_index); + if ((last_index < 0) || (last_index >= num_node_masks)) { + /* WARN_ONCE() and use default group 0 */ + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0."); + search_mdesc = false; + last_index = 0; + } + } + + return last_index; } -static u64 memblock_nid_range(u64 start, u64 end, int *nid) +static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) { *nid = find_node(start); start += PAGE_SIZE; @@ -1157,6 +1180,41 @@ int __node_distance(int from, int to) return numa_latency[from][to]; } +static int find_numa_node_for_addr(unsigned long pa, + struct node_mem_mask *pnode_mask) +{ + struct mdesc_handle *md = mdesc_grab(); + u64 node, arc; + int i = 0; + + node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); + if (node == MDESC_NODE_NULL) + goto out; + + mdesc_for_each_node_by_name(md, node, "group") { + mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { + u64 target = mdesc_arc_target(md, arc); + struct mdesc_mlgroup *m = find_mlgroup(target); + + if (!m) + continue; + if ((pa & m->mask) == m->match) { + if (pnode_mask) { + pnode_mask->mask = m->mask; + pnode_mask->val = m->match; + } + mdesc_release(md); + return i; + } + } + i++; + } + +out: + mdesc_release(md); + return -1; +} + static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { int i; diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index f81cd973670079132681d69d0b9800bb12bfbb55..3659d37b4d818e30c614f46cf4b2aca8bf700aa0 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { if 
(pmd_val(pmd) & _PAGE_PMD_HUGE) - mm->context.huge_pte_count++; + mm->context.thp_pte_count++; else - mm->context.huge_pte_count--; + mm->context.thp_pte_count--; /* Do not try to allocate the TSB hash table if we * don't have one already. We have various locks held diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index a0604a493a361e51c055e825e65295a302aeec4c..9cdeca0fa9556ed823299ee4d81d89c515f60339 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr) return (tag == (vaddr >> 22)); } +static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end) +{ + unsigned long idx; + + for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) { + struct tsb *ent = &swapper_tsb[idx]; + unsigned long match = idx << 13; + + match |= (ent->tag << 22); + if (match >= start && match < end) + ent->tag = (1UL << TSB_TAG_INVALID_BIT); + } +} + /* TSB flushes need only occur on the processor initiating the address * space modification, not on each cpu the address space has run on. * Only the TLB flush needs that treatment. 
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) { unsigned long v; + if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) + return flush_tsb_kernel_range_scan(start, end); + for (v = start; v < end; v += PAGE_SIZE) { unsigned long hash = tsb_hash(v, PAGE_SHIFT, KERNEL_TSB_NENTRIES); @@ -470,7 +487,7 @@ retry_tsb_alloc: int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - unsigned long huge_pte_count; + unsigned long total_huge_pte_count; #endif unsigned int i; @@ -479,12 +496,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) mm->context.sparc64_ctx_val = 0UL; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - /* We reset it to zero because the fork() page copying + /* We reset them to zero because the fork() page copying * will re-increment the counters as the parent PTEs are * copied into the child address space. 
*/ - huge_pte_count = mm->context.huge_pte_count; - mm->context.huge_pte_count = 0; + total_huge_pte_count = mm->context.hugetlb_pte_count + + mm->context.thp_pte_count; + mm->context.hugetlb_pte_count = 0; + mm->context.thp_pte_count = 0; #endif /* copy_mm() copies over the parent's mm_struct before calling @@ -500,8 +519,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (unlikely(huge_pte_count)) - tsb_grow(mm, MM_TSB_HUGE, huge_pte_count); + if (unlikely(total_huge_pte_count)) + tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count); #endif if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b4f4733abc6ea8f9e6ef6abafbc75f0b16b08003..5d2fd6cd31896b87a3373a59cbfc3130808c6908 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -30,7 +30,7 @@ .text .align 32 .globl __flush_tlb_mm -__flush_tlb_mm: /* 18 insns */ +__flush_tlb_mm: /* 19 insns */ /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 @@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */ .align 32 .globl __flush_tlb_pending -__flush_tlb_pending: /* 26 insns */ +__flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */ .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* 16 insns */ +__flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f + sub %o1, %o0, %o3 + srlx %o3, 18, %o4 + brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow sethi %hi(PAGE_SIZE), %o4 - sub %o1, %o0, %o3 sub %o3, %o4, %o3 or %o0, 0x20, %o0 ! 
Nucleus 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP @@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */ retl nop nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + +__spitfire_flush_tlb_kernel_range_slow: + mov 63 * 8, %o4 +1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3 + andcc %o3, 0x40, %g0 /* _PAGE_L_4U */ + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %o3 + stxa %g0, [%o3] ASI_IMMU + stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS + membar #Sync +2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3 + andcc %o3, 0x40, %g0 + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %o3 + stxa %g0, [%o3] ASI_DMMU + stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS + membar #Sync +2: sub %o4, 8, %o4 + brgez,pt %o4, 1b + nop + retl + nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 @@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_kernel_range: /* 31 insns */ + /* %o0=start, %o1=end */ + cmp %o0, %o1 + be,pn %xcc, 2f + sub %o1, %o0, %o3 + srlx %o3, 18, %o4 + brnz,pn %o4, 3f + sethi %hi(PAGE_SIZE), %o4 + sub %o3, %o4, %o3 + or %o0, 0x20, %o0 ! 
Nucleus +1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP + stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP + membar #Sync + brnz,pt %o3, 1b + sub %o3, %o4, %o3 +2: sethi %hi(KERNBASE), %o3 + flush %o3 + retl + nop +3: mov 0x80, %o4 + stxa %g0, [%o4] ASI_DMMU_DEMAP + membar #Sync + stxa %g0, [%o4] ASI_IMMU_DEMAP + membar #Sync + retl + nop + nop + nop + nop + nop + nop + nop + nop + #ifdef DCACHE_ALIASING_POSSIBLE __cheetah_flush_dcache_page: /* 11 insns */ sethi %hi(PAGE_OFFSET), %g1 @@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error: ret restore -__hypervisor_flush_tlb_mm: /* 10 insns */ +__hypervisor_flush_tlb_mm: /* 19 insns */ mov %o0, %o2 /* ARG2: mmu context */ mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 1f mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop +1: sethi %hi(__hypervisor_tlb_tl0_error), %o5 + jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop + nop + nop + nop + nop + nop + nop -__hypervisor_flush_tlb_page: /* 11 insns */ +__hypervisor_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ mov %o0, %g2 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ @@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 retl nop +1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 + jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop + nop + nop + nop + nop + nop + nop + nop + nop -__hypervisor_flush_tlb_pending: /* 16 insns */ +__hypervisor_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 mov %o2, %g2 @@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, 
__hypervisor_tlb_tl0_error + brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g1, 1b nop retl nop +1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 + jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop + nop + nop + nop + nop + nop + nop + nop + nop -__hypervisor_flush_tlb_kernel_range: /* 16 insns */ +__hypervisor_flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f - sethi %hi(PAGE_SIZE), %g3 - mov %o0, %g1 - sub %o1, %g1, %g2 + sub %o1, %o0, %g2 + srlx %g2, 18, %g3 + brnz,pn %g3, 4f + mov %o0, %g1 + sethi %hi(PAGE_SIZE), %g3 sub %g2, %g3, %g2 1: add %g1, %g2, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 3f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g2, 1b sub %g2, %g3, %g2 2: retl nop +3: sethi %hi(__hypervisor_tlb_tl0_error), %o2 + jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop +4: mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov 0, %o2 /* ARG2: mmu context == nucleus */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + brnz,pn %o0, 3b + mov HV_FAST_MMU_DEMAP_CTX, %o1 + retl + nop #ifdef DCACHE_ALIASING_POSSIBLE /* XXX Niagara and friends have an 8K cache, so no aliasing is @@ -394,43 +511,6 @@ tlb_patch_one: retl nop - .globl cheetah_patch_cachetlbops -cheetah_patch_cachetlbops: - save %sp, -128, %sp - - sethi %hi(__flush_tlb_mm), %o0 - or %o0, %lo(__flush_tlb_mm), %o0 - sethi %hi(__cheetah_flush_tlb_mm), %o1 - or %o1, %lo(__cheetah_flush_tlb_mm), %o1 - call tlb_patch_one - mov 19, %o2 - - sethi %hi(__flush_tlb_page), %o0 - or %o0, %lo(__flush_tlb_page), %o0 - sethi %hi(__cheetah_flush_tlb_page), %o1 - or %o1, %lo(__cheetah_flush_tlb_page), %o1 - call tlb_patch_one - mov 22, %o2 - - sethi %hi(__flush_tlb_pending), %o0 - or %o0, %lo(__flush_tlb_pending), %o0 - sethi 
%hi(__cheetah_flush_tlb_pending), %o1 - or %o1, %lo(__cheetah_flush_tlb_pending), %o1 - call tlb_patch_one - mov 27, %o2 - -#ifdef DCACHE_ALIASING_POSSIBLE - sethi %hi(__flush_dcache_page), %o0 - or %o0, %lo(__flush_dcache_page), %o0 - sethi %hi(__cheetah_flush_dcache_page), %o1 - or %o1, %lo(__cheetah_flush_dcache_page), %o1 - call tlb_patch_one - mov 11, %o2 -#endif /* DCACHE_ALIASING_POSSIBLE */ - - ret - restore - #ifdef CONFIG_SMP /* These are all called by the slaves of a cross call, at * trap level 1, with interrupts fully disabled. @@ -447,7 +527,7 @@ cheetah_patch_cachetlbops: */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: /* 21 insns */ +xcall_flush_tlb_mm: /* 24 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop nop + nop + nop + nop .globl xcall_flush_tlb_page -xcall_flush_tlb_page: /* 17 insns */ +xcall_flush_tlb_page: /* 20 insns */ /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 @@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */ retry nop nop + nop + nop + nop .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: /* 25 insns */ +xcall_flush_tlb_kernel_range: /* 44 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 - add %g2, 1, %g2 + srlx %g3, 18, %g2 + brnz,pn %g2, 2f + add %g2, 1, %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! 
Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP @@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */ brnz,pt %g3, 1b sub %g3, %g2, %g3 retry - nop - nop +2: mov 63 * 8, %g1 +1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2 + andcc %g2, 0x40, %g0 /* _PAGE_L_4U */ + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %g2 + stxa %g0, [%g2] ASI_IMMU + stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS + membar #Sync +2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2 + andcc %g2, 0x40, %g0 + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %g2 + stxa %g0, [%g2] ASI_DMMU + stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS + membar #Sync +2: sub %g1, 8, %g1 + brgez,pt %g1, 1b + nop + retry nop nop nop @@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4: retry +__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ + sethi %hi(PAGE_SIZE - 1), %g2 + or %g2, %lo(PAGE_SIZE - 1), %g2 + andn %g1, %g2, %g1 + andn %g7, %g2, %g7 + sub %g7, %g1, %g3 + srlx %g3, 18, %g2 + brnz,pn %g2, 2f + add %g2, 1, %g2 + sub %g3, %g2, %g3 + or %g1, 0x20, %g1 ! Nucleus +1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP + stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP + membar #Sync + brnz,pt %g3, 1b + sub %g3, %g2, %g3 + retry +2: mov 0x80, %g2 + stxa %g0, [%g2] ASI_DMMU_DEMAP + membar #Sync + stxa %g0, [%g2] ASI_IMMU_DEMAP + membar #Sync + retry + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + #ifdef DCACHE_ALIASING_POSSIBLE .align 32 .globl xcall_flush_dcache_page_cheetah @@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error: ba,a,pt %xcc, rtrap .globl __hypervisor_xcall_flush_tlb_mm -__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ +__hypervisor_xcall_flush_tlb_mm: /* 24 insns */ /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ mov %o0, %g2 mov %o1, %g3 @@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP mov HV_FAST_MMU_DEMAP_CTX, %g6 - brnz,pn %o0, __hypervisor_tlb_xcall_error + brnz,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 
@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ mov %g7, %o5 membar #Sync retry +1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 + jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 + nop .globl __hypervisor_xcall_flush_tlb_page -__hypervisor_xcall_flush_tlb_page: /* 17 insns */ +__hypervisor_xcall_flush_tlb_page: /* 20 insns */ /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 @@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */ sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 - brnz,a,pn %o0, __hypervisor_tlb_xcall_error + brnz,a,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 membar #Sync retry +1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 + jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 + nop .globl __hypervisor_xcall_flush_tlb_kernel_range -__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ +__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */ /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 + srlx %g3, 18, %g7 add %g2, 1, %g2 sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 - mov %o2, %g7 + brnz,pn %g7, 2f + mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 - brnz,pn %o0, __hypervisor_tlb_xcall_error + brnz,pn %o0, 1f mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt %g3, 1b sub %g3, %o2, %g3 - mov %g2, %o0 +5: mov %g2, %o0 mov %g4, %o1 mov %g7, %o2 membar #Sync retry +1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 + jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 + nop +2: mov %o3, %g1 + mov %o5, %g3 + mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov 0, %o2 /* ARG2: mmu context == nucleus */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov 
HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + mov %g1, %o3 + brz,pt %o0, 5b + mov %g3, %o5 + mov HV_FAST_MMU_DEMAP_CTX, %g6 + ba,pt %xcc, 1b + clr %g5 /* These just get rescheduled to PIL vectors. */ .globl xcall_call_function @@ -809,6 +985,58 @@ xcall_kgdb_capture: #endif /* CONFIG_SMP */ + .globl cheetah_patch_cachetlbops +cheetah_patch_cachetlbops: + save %sp, -128, %sp + + sethi %hi(__flush_tlb_mm), %o0 + or %o0, %lo(__flush_tlb_mm), %o0 + sethi %hi(__cheetah_flush_tlb_mm), %o1 + or %o1, %lo(__cheetah_flush_tlb_mm), %o1 + call tlb_patch_one + mov 19, %o2 + + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + + sethi %hi(__flush_tlb_pending), %o0 + or %o0, %lo(__flush_tlb_pending), %o0 + sethi %hi(__cheetah_flush_tlb_pending), %o1 + or %o1, %lo(__cheetah_flush_tlb_pending), %o1 + call tlb_patch_one + mov 27, %o2 + + sethi %hi(__flush_tlb_kernel_range), %o0 + or %o0, %lo(__flush_tlb_kernel_range), %o0 + sethi %hi(__cheetah_flush_tlb_kernel_range), %o1 + or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 31, %o2 + +#ifdef DCACHE_ALIASING_POSSIBLE + sethi %hi(__flush_dcache_page), %o0 + or %o0, %lo(__flush_dcache_page), %o0 + sethi %hi(__cheetah_flush_dcache_page), %o1 + or %o1, %lo(__cheetah_flush_dcache_page), %o1 + call tlb_patch_one + mov 11, %o2 +#endif /* DCACHE_ALIASING_POSSIBLE */ + +#ifdef CONFIG_SMP + sethi %hi(xcall_flush_tlb_kernel_range), %o0 + or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 + sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1 + or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 44, %o2 +#endif /* CONFIG_SMP */ + + ret + restore .globl hypervisor_patch_cachetlbops hypervisor_patch_cachetlbops: @@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 
call tlb_patch_one - mov 10, %o2 + mov 19, %o2 sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__hypervisor_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_flush_tlb_page), %o1 call tlb_patch_one - mov 11, %o2 + mov 22, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 call tlb_patch_one - mov 16, %o2 + mov 27, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 16, %o2 + mov 31, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 @@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 call tlb_patch_one - mov 21, %o2 + mov 24, %o2 sethi %hi(xcall_flush_tlb_page), %o0 or %o0, %lo(xcall_flush_tlb_page), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 17, %o2 + mov 20, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 25, %o2 + mov 44, %o2 #endif /* CONFIG_SMP */ ret diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h index c505d77e4d06a8293eda3f7ce2c88a80fb32840f..e9d54a06736f63aaa6bc9e789d773d8d786204e0 100644 --- a/arch/tile/include/asm/elf.h +++ b/arch/tile/include/asm/elf.h @@ -129,6 +129,7 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *); struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define 
ARCH_DLINFO \ do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ diff --git a/arch/tile/include/uapi/asm/auxvec.h b/arch/tile/include/uapi/asm/auxvec.h index c93e92709f14326d278afe9cabc4de6f5d7b9381..f497123ed980c436c4480b9f3f5b03d13ce35ea3 100644 --- a/arch/tile/include/uapi/asm/auxvec.h +++ b/arch/tile/include/uapi/asm/auxvec.h @@ -18,4 +18,6 @@ /* The vDSO location. */ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + #endif /* _ASM_TILE_AUXVEC_H */ diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 178989e6d3e3ae1403ae9dc1f108126fedb6e2c8..ea960d6609177faa86780e6164f26e4d3ef51ac2 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) */ unsigned long long sched_clock(void) { - return clocksource_cyc2ns(get_cycles(), - sched_clock_mult, SCHED_CLOCK_SHIFT); + return mult_frac(get_cycles(), + sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); } int setup_profiling_timer(unsigned int multiplier) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 4086abca0b32345c92207fa75468cc98ebe97da4..53949c8863414c2dc8d77219b2b6c0c6d82d4fd9 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -97,6 +97,8 @@ else KBUILD_CFLAGS += $(call cc-option,-mno-80387) KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387) + KBUILD_CFLAGS += -fno-pic + # Use -mpreferred-stack-boundary=3 if supported. 
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index f9ce75d80101cc7a620bed1169a1128056b9caa5..7f6c157e5da5c16d42a600ffbdca5522de4408e5 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -22,7 +22,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 -KBUILD_CFLAGS += -fno-strict-aliasing -fPIC +KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small @@ -36,6 +36,18 @@ GCOV_PROFILE := n UBSAN_SANITIZE :=n LDFLAGS := -m elf_$(UTS_MACHINE) +ifeq ($(CONFIG_RELOCATABLE),y) +# If kernel is relocatable, build compressed kernel as PIE. +ifeq ($(CONFIG_X86_32),y) +LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) +else +# To build 64-bit compressed kernel as PIE, we disable relocation +# overflow check to avoid relocation overflow error with a new linker +# command-line option, -z noreloc-overflow. +LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ + && echo "-z noreloc-overflow -pie --no-dynamic-linker") +endif +endif LDFLAGS_vmlinux := -T hostprogs-y := mkpiggy diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 8ef964ddc18ec656b1e3b0f038adf134484dbdd2..0256064da8da38c69098cbccc75a19cc0493ca82 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -31,6 +31,34 @@ #include #include +/* + * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X + * relocation to get the symbol address in PIC. When the compressed x86 + * kernel isn't built as PIC, the linker optimizes R_386_GOT32X + * relocations to their fixed symbol addresses. 
However, when the + * compressed x86 kernel is loaded at a different address, it leads + * to the following load failure: + * + * Failed to allocate space for phdrs + * + * during the decompression stage. + * + * If the compressed x86 kernel is relocatable at run-time, it should be + * compiled with -fPIE, instead of -fPIC, if possible and should be built as + * Position Independent Executable (PIE) so that linker won't optimize + * R_386_GOT32X relocation to its fixed symbol address. Older + * linkers generate R_386_32 relocations against locally defined symbols, + * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less + * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle + * R_386_32 relocations when relocating the kernel. To generate + * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as + * hidden: + */ + .hidden _bss + .hidden _ebss + .hidden _got + .hidden _egot + __HEAD ENTRY(startup_32) #ifdef CONFIG_EFI_STUB diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index b0c0d16ef58d1099342c97aff83767dd35c73691..86558a1991393c509bc3005c021c2ab490b26b2a 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -33,6 +33,14 @@ #include #include +/* + * Locally defined symbols should be marked hidden: + */ + .hidden _bss + .hidden _ebss + .hidden _got + .hidden _egot + __HEAD .code32 ENTRY(startup_32) diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..0206eb8cfb61b982bd332c2713af6a1393eea179 --- /dev/null +++ b/arch/x86/configs/i386_ranchu_defconfig @@ -0,0 +1,423 @@ +# CONFIG_64BIT is not set +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y 
+CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_X86_BIGSMP=y +CONFIG_MCORE2=y +CONFIG_X86_GENERIC=y +CONFIG_HPET_TIMER=y +CONFIG_NR_CPUS=512 +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_REBOOTFIXUPS=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +CONFIG_IPV6_ROUTER_PREF=y 
+CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y 
+CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y 
+CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y 
+CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_AES_586=y 
+CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config index 4e2ecfa23c15978faf88416ad11ca859e1201718..4b429df40d7a2fb9ee9fce2eb5315b592d1200e9 100644 --- a/arch/x86/configs/tiny.config +++ b/arch/x86/configs/tiny.config @@ -1 +1,3 @@ CONFIG_NOHIGHMEM=y +# CONFIG_HIGHMEM4G is not set +# CONFIG_HIGHMEM64G is not set diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..dd389774bacb8234fa6cffbdde839ec13fc8bde4 --- /dev/null +++ b/arch/x86/configs/x86_64_ranchu_defconfig @@ -0,0 +1,418 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_MCORE2=y +CONFIG_MAXSMP=y +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y 
+CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y 
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not 
set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set 
+CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y 
+CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 3142218e546f873ef9f647ce2a6f44630e6c72ed..6433e28dc9c89c8d4c8f826c86212a957d23f5ab 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -32,7 +32,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); /* Initialize cr4 shadow for this CPU. */ static inline void cr4_init_shadow(void) { - this_cpu_write(cpu_tlbstate.cr4, __read_cr4()); + this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe()); } /* Set in this cpu's CR4. 
*/ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index dbe64f27280e34138dc5163b587579db35d3c768..7402eb4b509d3e601df3a254557fc134eaa1cc43 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -339,7 +339,7 @@ do { \ #define __get_user_asm_u64(x, ptr, retval, errret) \ __get_user_asm(x, ptr, retval, "q", "", "=r", errret) #define __get_user_asm_ex_u64(x, ptr) \ - __get_user_asm_ex(x, ptr, "q", "", "=r") + __get_user_asm_ex(x, ptr, "q", "", "=&r") #endif #define __get_user_size(x, ptr, size, retval, errret) \ @@ -386,13 +386,13 @@ do { \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: \ - __get_user_asm_ex(x, ptr, "b", "b", "=q"); \ + __get_user_asm_ex(x, ptr, "b", "b", "=&q"); \ break; \ case 2: \ - __get_user_asm_ex(x, ptr, "w", "w", "=r"); \ + __get_user_asm_ex(x, ptr, "w", "w", "=&r"); \ break; \ case 4: \ - __get_user_asm_ex(x, ptr, "l", "k", "=r"); \ + __get_user_asm_ex(x, ptr, "l", "k", "=&r"); \ break; \ case 8: \ __get_user_asm_ex_u64(x, ptr); \ @@ -406,7 +406,7 @@ do { \ asm volatile("1: mov"itype" %1,%"rtype"0\n" \ "2:\n" \ _ASM_EXTABLE_EX(1b, 2b) \ - : ltype(x) : "m" (__m(addr))) + : ltype(x) : "m" (__m(addr)), "0" (0)) #define __put_user_nocheck(x, ptr, size) \ ({ \ diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index df6b4eeac0bde402207f321491880edfb588ed77..0988e204f1e39424a08e8d50ff91440206bfc8a5 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -659,11 +659,28 @@ void irq_complete_move(struct irq_cfg *cfg) */ void irq_force_complete_move(struct irq_desc *desc) { - struct irq_data *irqdata = irq_desc_get_irq_data(desc); - struct apic_chip_data *data = apic_chip_data(irqdata); - struct irq_cfg *cfg = data ? &data->cfg : NULL; + struct irq_data *irqdata; + struct apic_chip_data *data; + struct irq_cfg *cfg; unsigned int cpu; + /* + * The function is called for all descriptors regardless of which + * irqdomain they belong to. 
For example if an IRQ is provided by + * an irq_chip as part of a GPIO driver, the chip data for that + * descriptor is specific to the irq_chip in question. + * + * Check first that the chip_data is what we expect + * (apic_chip_data) before touching it any further. + */ + irqdata = irq_domain_get_irq_data(x86_vector_domain, + irq_desc_get_irq(desc)); + if (!irqdata) + return; + + data = apic_chip_data(irqdata); + cfg = data ? &data->cfg : NULL; + if (!cfg) return; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index a8816b3251620c941f543e595e949aa60c43467d..e2defc7593a4f2bda65d9b05ad2b7a999bbcd895 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -352,7 +352,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP unsigned bits; int cpu = smp_processor_id(); - unsigned int socket_id, core_complex_id; bits = c->x86_coreid_bits; /* Low order bits define the core id (index of core in socket) */ @@ -370,10 +369,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) return; - socket_id = (c->apicid >> bits) - 1; - core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3; - - per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; #endif } @@ -656,6 +652,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } +#define MSR_AMD64_DE_CFG 0xC0011029 + +static void init_amd_ln(struct cpuinfo_x86 *c) +{ + /* + * Apply erratum 665 fix unconditionally so machines without a BIOS + * fix work. 
+ */ + msr_set_bit(MSR_AMD64_DE_CFG, 31); +} + static void init_amd_bd(struct cpuinfo_x86 *c) { u64 value; @@ -713,6 +720,7 @@ static void init_amd(struct cpuinfo_x86 *c) case 6: init_amd_k7(c); break; case 0xf: init_amd_k8(c); break; case 0x10: init_amd_gh(c); break; + case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c2b7522cbf357617bf8302f3c4f964834b940926..2b49b113d65d02267be149ef5039139871779209 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -737,21 +737,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ - if (!have_cpuid_p()) - return; + if (have_cpuid_p()) { + cpu_detect(c); + get_cpu_vendor(c); + get_cpu_cap(c); - cpu_detect(c); - get_cpu_vendor(c); - get_cpu_cap(c); - - if (this_cpu->c_early_init) - this_cpu->c_early_init(c); + if (this_cpu->c_early_init) + this_cpu->c_early_init(c); - c->cpu_index = 0; - filter_cpuid_features(c, false); + c->cpu_index = 0; + filter_cpuid_features(c, false); - if (this_cpu->c_bsp_init) - this_cpu->c_bsp_init(c); + if (this_cpu->c_bsp_init) + this_cpu->c_bsp_init(c); + } setup_force_cpu_cap(X86_FEATURE_ALWAYS); fpu__init_system(c); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 569c1e4f96feb897956a64e35bd8fccb07dca6c6..52a2526c3fbe457d899d0d66dad28e69feabf41f 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -753,7 +753,7 @@ u64 __init early_reserve_e820(u64 size, u64 align) /* * Find the highest page frame number we have available */ -static unsigned long __init e820_end_pfn(unsigned long limit_pfn) +static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type) { int i; unsigned long last_pfn = 0; @@ -764,11 +764,7 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn) unsigned long start_pfn; unsigned long end_pfn; - /* - * 
Persistent memory is accounted as ram for purposes of - * establishing max_pfn and mem_map. - */ - if (ei->type != E820_RAM && ei->type != E820_PRAM) + if (ei->type != type) continue; start_pfn = ei->addr >> PAGE_SHIFT; @@ -793,12 +789,12 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn) } unsigned long __init e820_end_of_ram_pfn(void) { - return e820_end_pfn(MAX_ARCH_PFN); + return e820_end_pfn(MAX_ARCH_PFN, E820_RAM); } unsigned long __init e820_end_of_low_ram_pfn(void) { - return e820_end_pfn(1UL << (32-PAGE_SHIFT)); + return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_RAM); } static void early_panic(char *msg) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 9fdf1d330727032b3c42bb928a58805ed2b0401e..a257d6077d1b3110d7813af09d6d842848039a2e 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -331,12 +331,11 @@ static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_si static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size) { - /* - * FIXME is the graphics stolen memory region - * always at TOUD? Ie. is it always the last - * one to be allocated by the BIOS? 
- */ - return read_pci_config_16(0, 0, 0, I865_TOUD) << 16; + u16 toud = 0; + + toud = read_pci_config_16(0, 0, 0, I865_TOUD); + + return (phys_addr_t)(toud << 16) + i845_tseg_size(); } static size_t __init i830_stolen_size(int num, int slot, int func) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 6bc9ae24b6d2a74930701c0c6ea60fe090a3ebbc..8f1a3f443f7db8b8d381aa25da186615998281c9 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -571,7 +571,7 @@ early_idt_handler_common: movl %eax,%ds movl %eax,%es - cmpl $(__KERNEL_CS),32(%esp) + cmpw $(__KERNEL_CS),32(%esp) jne 10f leal 28(%esp),%eax # Pointer to %eip diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index c2130aef3f9d25d6c6a47f3499b8096c5cfb8e80..f534a0e3af5358f904364a4e0680f1c231370cb4 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -55,12 +55,12 @@ asm (".pushsection .entry.text, \"ax\"\n" ".popsection"); /* identity function, which can be inlined */ -u32 _paravirt_ident_32(u32 x) +u32 notrace _paravirt_ident_32(u32 x) { return x; } -u64 _paravirt_ident_64(u64 x) +u64 notrace _paravirt_ident_64(u64 x) { return x; } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 558f50edebca8f55a8c33e67bfcd21f9c78037a9..479a409ddac8eaaaf72e48646166af5615baff70 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -188,8 +188,8 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) return sp; prev_esp = (u32 *)(context); - if (prev_esp) - return (unsigned long)prev_esp; + if (*prev_esp) + return (unsigned long)*prev_esp; return (unsigned long)regs; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b9b09fec173bf2fedb0b9ee122b3c4668eb6e471..f49e98062ea5cbda4fb52c38de06f42b310e711b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2093,16 +2093,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt) static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { 
int rc; - unsigned short sel, old_sel; - struct desc_struct old_desc, new_desc; - const struct x86_emulate_ops *ops = ctxt->ops; + unsigned short sel; + struct desc_struct new_desc; u8 cpl = ctxt->ops->cpl(ctxt); - /* Assignment of RIP may only fail in 64-bit mode */ - if (ctxt->mode == X86EMUL_MODE_PROT64) - ops->get_segment(ctxt, &old_sel, &old_desc, NULL, - VCPU_SREG_CS); - memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, @@ -2112,12 +2106,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); - if (rc != X86EMUL_CONTINUE) { - WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); - /* assigning eip failed; restore the old cs */ - ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); - return rc; - } + /* Error handling is not implemented. */ + if (rc != X86EMUL_CONTINUE) + return X86EMUL_UNHANDLEABLE; + return rc; } @@ -2177,14 +2169,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; - u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); - struct desc_struct old_desc, new_desc; - const struct x86_emulate_ops *ops = ctxt->ops; - - if (ctxt->mode == X86EMUL_MODE_PROT64) - ops->get_segment(ctxt, &old_cs, &old_desc, NULL, - VCPU_SREG_CS); + struct desc_struct new_desc; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) @@ -2201,10 +2187,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, &new_desc); - if (rc != X86EMUL_CONTINUE) { - WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); - ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); - } + /* Error handling is not implemented. */ + if (rc != X86EMUL_CONTINUE) + return X86EMUL_UNHANDLEABLE; + return rc; } @@ -5033,7 +5019,7 @@ done_prefixes: /* Decode and fetch the destination operand: register or memory. 
*/ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); - if (ctxt->rip_relative) + if (ctxt->rip_relative && likely(ctxt->memopp)) ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt->memopp->addr.mem.ea + ctxt->_eip); diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 88d0a92d3f94676d2e68cdb19b2bdd35b49a7d15..3aab53f8cad244ef402d0c03c56c63e112d459a6 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -580,7 +580,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) ioapic->irr = 0; ioapic->irr_delivered = 0; ioapic->id = 0; - memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); + memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi)); rtc_irq_eoi_tracking_reset(ioapic); } diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 84b96d319909414fd3aba1e7b7888486843f86b8..d09544e826f675fc6d7b7212559cc3e7eafec4e4 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -38,6 +38,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, bool line_status) { struct kvm_pic *pic = pic_irqchip(kvm); + + /* + * XXX: rejecting pic routes when pic isn't in use would be better, + * but the default routing table is installed while kvm->arch.vpic is + * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE. 
+ */ + if (!pic) + return -1; + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); } @@ -46,6 +55,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, bool line_status) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; + + if (!ioapic) + return -1; + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, line_status); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4589b6feeb7b54efa08001bcd827ad5e5b40674f..268df707b5ce8f5d7739e3d23c7ab48f8085941a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -408,6 +408,7 @@ struct nested_vmx { struct list_head vmcs02_pool; int vmcs02_num; u64 vmcs01_tsc_offset; + bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* @@ -8184,6 +8185,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { u32 sec_exec_control; + /* Postpone execution until vmcs01 is the current VMCS. */ + if (is_guest_mode(vcpu)) { + to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; + return; + } + /* * There is not point to enable virtualize x2apic without enable * apicv @@ -10483,6 +10490,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* Update TSC_OFFSET if TSC was changed while L2 ran */ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); + if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { + vmx->nested.change_vmcs01_virtual_x2apic_mode = false; + vmx_set_virtual_x2apic_mode(vcpu, + vcpu->arch.apic_base & X2APIC_ENABLE); + } + /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index be222666b1c2554c18f179766cc72c98c1d9b2d8..7429d481a3116ff4293c5868f9d0d4b018619811 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -199,7 +199,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn) struct kvm_shared_msrs *locals = 
container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; + unsigned long flags; + /* + * Disabling irqs at this point since the following code could be + * interrupted and executed through kvm_arch_hardware_disable() + */ + local_irq_save(flags); + if (locals->registered) { + locals->registered = false; + user_return_notifier_unregister(urn); + } + local_irq_restore(flags); for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { @@ -207,8 +218,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn) values->curr = values->host; } } - locals->registered = false; - user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) @@ -2735,7 +2744,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); - vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -3318,6 +3326,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; + int idx; r = -EINVAL; if (!lapic_in_kernel(vcpu)) @@ -3325,7 +3334,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } case KVM_X86_SETUP_MCE: { @@ -7253,10 +7264,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { + void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; + kvmclock_reset(vcpu); - free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); kvm_x86_ops->vcpu_free(vcpu); + free_cpumask_var(wbinvd_dirty_mask); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 
2c835e356349b9284b6b6997fff19208b6dd1b61..d445c5f1aeb1f8f162ae348686918b4297ffa6df 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -12,6 +12,7 @@ targets += purgatory.ro KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large KBUILD_CFLAGS += -m$(BITS) +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index cb5e266a8bf752297eaf6fc0d822c9398239fadf..1e56ff58345982b99185db7f25d598f46875c300 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr, /* NOTE: The loop is more greedy than the cleanup_highmap variant. * We include the PMD passed in on _both_ boundaries. */ - for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE)); + for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); pmd++, vaddr += PMD_SIZE) { if (pmd_none(*pmd)) continue; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5a37188b559fba8feb1da48b00d5290c3a6a09b6..8161090a19708684224402c59bfdb55dbbc07ef4 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, { struct gendisk *disk; struct blkcg_gq *blkg; + struct module *owner; unsigned int major, minor; int key_len, part, ret; char *body; @@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, if (!disk) return -ENODEV; if (part) { + owner = disk->fops->owner; put_disk(disk); + module_put(owner); return -ENODEV; } @@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, ret = PTR_ERR(blkg); rcu_read_unlock(); spin_unlock_irq(disk->queue->queue_lock); + owner = disk->fops->owner; put_disk(disk); + module_put(owner); /* * If queue was 
bypassing, we should retry. Do so after a * short msleep(). It isn't strictly necessary but queue @@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); void blkg_conf_finish(struct blkg_conf_ctx *ctx) __releases(ctx->disk->queue->queue_lock) __releases(rcu) { + struct module *owner; + spin_unlock_irq(ctx->disk->queue->queue_lock); rcu_read_unlock(); + owner = ctx->disk->fops->owner; put_disk(ctx->disk); + module_put(owner); } EXPORT_SYMBOL_GPL(blkg_conf_finish); @@ -1331,10 +1340,8 @@ int blkcg_policy_register(struct blkcg_policy *pol) struct blkcg_policy_data *cpd; cpd = pol->cpd_alloc_fn(GFP_KERNEL); - if (!cpd) { - mutex_unlock(&blkcg_pol_mutex); + if (!cpd) goto err_free_cpds; - } blkcg->cpd[pol->plid] = cpd; cpd->blkcg = blkcg; diff --git a/block/blk-map.c b/block/blk-map.c index f565e11f465aa145120973bce58ef7ecc192f349..69953bd97e6597f684803b5064662a13abb8c65b 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -90,6 +90,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, if (!iter || !iter->count) return -EINVAL; + if (!iter_is_iovec(iter)) + return -EINVAL; + iov_for_each(iov, i, *iter) { unsigned long uaddr = (unsigned long) iov.iov_base; diff --git a/block/blk-mq.c b/block/blk-mq.c index 10d1318c1128d2616900ee3e797cd1884c8bf1ce..53c0f0b5f3ad6013d446424812c0873508c70f6a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -780,7 +780,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) switch (ret) { case BLK_MQ_RQ_QUEUE_OK: queued++; - continue; + break; case BLK_MQ_RQ_QUEUE_BUSY: list_add(&rq->queuelist, &rq_list); __blk_mq_requeue_request(rq); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 1f9093e901daed7849a54633be5d80ce96b010f4..3ad307ee602903e94b500008001359bc40bfebe8 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3003,7 +3003,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) if (time_before(jiffies, rq->fifo_time)) rq = NULL; - cfq_log_cfqq(cfqq->cfqd, cfqq, 
"fifo=%p", rq); return rq; } @@ -3377,6 +3376,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) { unsigned int max_dispatch; + if (cfq_cfqq_must_dispatch(cfqq)) + return true; + /* * Drain async requests before we start sync IO */ @@ -3468,15 +3470,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + rq = cfq_check_fifo(cfqq); + if (rq) + cfq_mark_cfqq_must_dispatch(cfqq); + if (!cfq_may_dispatch(cfqd, cfqq)) return false; /* * follow expired path, else get first next available */ - rq = cfq_check_fifo(cfqq); if (!rq) rq = cfqq->next_rq; + else + cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); /* * insert request into driver dispatch list @@ -3944,7 +3951,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, * if the new request is sync, but the currently running queue is * not, let the sync request have priority. */ - if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) + if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) return true; if (new_cfqq->cfqg != cfqq->cfqg) diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 758acabf2d819727ecd7f4cf0c6aa21c1c40649f..8f3056cd03991ddf820c3d8442d35daf3db7ee84 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen, struct pkcs7_signed_info *sinfo = ctx->sinfo; if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) || - !test_bit(sinfo_has_message_digest, &sinfo->aa_set) || - (ctx->msg->data_type == OID_msIndirectData && - !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) { + !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) { pr_warn("Missing required AuthAttr\n"); return -EBADMSG; } diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 
c0748bbd4c083b47f78c662cdd7cb490590a2587..84f8d4d8b6bc8c3f54d4ae2ef90bb6e8d693e570 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); - - return tx; } else { struct page *p_src = P(blocks, disks); struct page *q_src = Q(blocks, disks); @@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, submit->cb_param = cb_param_orig; submit->flags = flags_orig; async_tx_sync_epilog(submit); - - return NULL; + tx = NULL; } + dmaengine_unmap_put(unmap); + + return tx; } EXPORT_SYMBOL_GPL(async_syndrome_val); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 8cc1622b2ee008ef7f5ff31a3ea7d8777f3f5f1b..dca7bc87dad9d326e39d44d1b62baad5006460da 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, return blkcipher_walk_done(desc, walk, -EINVAL); } + bsize = min(walk->walk_blocksize, n); + walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | BLKCIPHER_WALK_DIFF); if (!scatterwalk_aligned(&walk->in, walk->alignmask) || @@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, } } - bsize = min(walk->walk_blocksize, n); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c81861b1350b60a1c2def7695dd16d7b0db39ade..e7aa904cb20bae034cf9520c5f67fe7da0358cda 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -594,9 +594,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out) static int cryptd_hash_import(struct ahash_request *req, const void *in) { - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct shash_desc *desc = 
cryptd_shash_desc(req); + + desc->tfm = ctx->child; + desc->flags = req->base.flags; - return crypto_shash_import(&rctx->desc, in); + return crypto_shash_import(desc, in); } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, diff --git a/crypto/echainiv.c b/crypto/echainiv.c index b96a84560b67790845321250e1f1a799557bb4b8..343a74e96e2a2fea2285c59e8e340b9cc87e3c72 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -1,8 +1,8 @@ /* * echainiv: Encrypted Chain IV Generator * - * This generator generates an IV based on a sequence number by xoring it - * with a salt and then encrypting it with the same key as used to encrypt + * This generator generates an IV based on a sequence number by multiplying + * it with a salt and then encrypting it with the same key as used to encrypt * the plain text. This algorithm requires that the block size be equal * to the IV size. It is mainly useful for CBC. * @@ -23,81 +23,17 @@ #include #include #include -#include #include -#include -#include +#include #include -#define MAX_IV_SIZE 16 - -static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); - -/* We don't care if we get preempted and read/write IVs from the next CPU. 
*/ -static void echainiv_read_iv(u8 *dst, unsigned size) -{ - u32 *a = (u32 *)dst; - u32 __percpu *b = echainiv_iv; - - for (; size >= 4; size -= 4) { - *a++ = this_cpu_read(*b); - b++; - } -} - -static void echainiv_write_iv(const u8 *src, unsigned size) -{ - const u32 *a = (const u32 *)src; - u32 __percpu *b = echainiv_iv; - - for (; size >= 4; size -= 4) { - this_cpu_write(*b, *a); - a++; - b++; - } -} - -static void echainiv_encrypt_complete2(struct aead_request *req, int err) -{ - struct aead_request *subreq = aead_request_ctx(req); - struct crypto_aead *geniv; - unsigned int ivsize; - - if (err == -EINPROGRESS) - return; - - if (err) - goto out; - - geniv = crypto_aead_reqtfm(req); - ivsize = crypto_aead_ivsize(geniv); - - echainiv_write_iv(subreq->iv, ivsize); - - if (req->iv != subreq->iv) - memcpy(req->iv, subreq->iv, ivsize); - -out: - if (req->iv != subreq->iv) - kzfree(subreq->iv); -} - -static void echainiv_encrypt_complete(struct crypto_async_request *base, - int err) -{ - struct aead_request *req = base->data; - - echainiv_encrypt_complete2(req, err); - aead_request_complete(req, err); -} - static int echainiv_encrypt(struct aead_request *req) { struct crypto_aead *geniv = crypto_aead_reqtfm(req); struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); struct aead_request *subreq = aead_request_ctx(req); - crypto_completion_t compl; - void *data; + __be64 nseqno; + u64 seqno; u8 *info; unsigned int ivsize = crypto_aead_ivsize(geniv); int err; @@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_request *req) aead_request_set_tfm(subreq, ctx->child); - compl = echainiv_encrypt_complete; - data = req; info = req->iv; if (req->src != req->dst) { @@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_request *req) return err; } - if (unlikely(!IS_ALIGNED((unsigned long)info, - crypto_aead_alignmask(geniv) + 1))) { - info = kmalloc(ivsize, req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: - GFP_ATOMIC); - if (!info) - return -ENOMEM; - - memcpy(info, req->iv, ivsize); - } - - aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, info); aead_request_set_ad(subreq, req->assoclen); - crypto_xor(info, ctx->salt, ivsize); + memcpy(&nseqno, info + ivsize - 8, 8); + seqno = be64_to_cpu(nseqno); + memset(info, 0, ivsize); + scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); - echainiv_read_iv(info, ivsize); - err = crypto_aead_encrypt(subreq); - echainiv_encrypt_complete2(req, err); - return err; + do { + u64 a; + + memcpy(&a, ctx->salt + ivsize - 8, 8); + + a |= 1; + a *= seqno; + + memcpy(info + ivsize - 8, &a, 8); + } while ((ivsize -= 8)); + + return crypto_aead_encrypt(subreq); } static int echainiv_decrypt(struct aead_request *req) @@ -192,8 +127,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, alg = crypto_spawn_aead_alg(spawn); err = -EINVAL; - if (inst->alg.ivsize & (sizeof(u32) - 1) || - inst->alg.ivsize > MAX_IV_SIZE) + if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize) goto free_inst; inst->alg.encrypt = echainiv_encrypt; @@ -202,7 +136,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, inst->alg.init = aead_init_geniv; inst->alg.exit = aead_exit_geniv; - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); inst->alg.base.cra_ctxsize += inst->alg.ivsize; diff --git a/crypto/gcm.c b/crypto/gcm.c index d9ea5f9c057418c2e054e94b80eed4c17402f671..1238b3c5a321984c0414dc283bf9891764762e6a 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, struct crypto_ablkcipher *ctr = ctx->ctr; struct { be128 hash; - u8 iv[8]; + u8 iv[16]; struct crypto_gcm_setkey_result result; diff 
--git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index bac70995e0640a49fbc56797c4f7b605791ff98b..12ad3e3a84e3d7d570e75fad4c521718a3bf9790 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -14,24 +14,13 @@ #include #include +#include #include #include #include #include #include -#define GHASH_BLOCK_SIZE 16 -#define GHASH_DIGEST_SIZE 16 - -struct ghash_ctx { - struct gf128mul_4k *gf128; -}; - -struct ghash_desc_ctx { - u8 buffer[GHASH_BLOCK_SIZE]; - u32 bytes; -}; - static int ghash_init(struct shash_desc *desc) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 5f97468df8ff04e6bf0856a7b64324c16992cfb8..b2e50d8007fe6fa20a9a1ea5809292175a4a3dbf 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -504,11 +504,20 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) * Evaluate the \_Sx namespace object containing the register values * for this state */ - info->relative_pathname = - ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]); + info->relative_pathname = ACPI_CAST_PTR(char, + acpi_gbl_sleep_state_names + [sleep_state]); + status = acpi_ns_evaluate(info); if (ACPI_FAILURE(status)) { - goto cleanup; + if (status == AE_NOT_FOUND) { + + /* The _Sx states are optional, ignore NOT_FOUND */ + + goto final_cleanup; + } + + goto warning_cleanup; } /* Must have a return object */ @@ -517,7 +526,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]", info->relative_pathname)); status = AE_AML_NO_RETURN_VALUE; - goto cleanup; + goto warning_cleanup; } /* Return object must be of type Package */ @@ -526,7 +535,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) ACPI_ERROR((AE_INFO, "Sleep State return object is not a Package")); status = AE_AML_OPERAND_TYPE; - goto cleanup1; + goto 
return_value_cleanup; } /* @@ -570,16 +579,17 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) break; } -cleanup1: +return_value_cleanup: acpi_ut_remove_reference(info->return_object); -cleanup: +warning_cleanup: if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While evaluating Sleep State [%s]", info->relative_pathname)); } +final_cleanup: ACPI_FREE(info); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 3dd9c462d22afd650b989399ac9c973afc983257..8f8da9f92090d63c34501dad13ce9c517176af25 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -657,7 +657,7 @@ static int ghes_proc(struct ghes *ghes) ghes_do_proc(ghes, ghes->estatus); out: ghes_clear_estatus(ghes); - return 0; + return rc; } static void ghes_add_timer(struct ghes *ghes) diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 5230e8449d306cbd5d2511469ccfb3b2f3e16a42..c097f477c74c19a75c9300d6e99306c4479f35a5 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1806,6 +1806,9 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) dev_dbg(dev, "%s: event: %d\n", __func__, event); + if (event != NFIT_NOTIFY_UPDATE) + return; + device_lock(dev); if (!dev->driver) { /* dev->driver may be null if we're being removed */ diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 3d549a3836590bb9dd55f6dd14dd8619698597ab..13d6ec1ff055d602738a92a576dd737ac777af22 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -45,6 +45,10 @@ enum { ND_BLK_DCR_LATCH = 2, }; +enum nfit_root_notifiers { + NFIT_NOTIFY_UPDATE = 0x80, +}; + struct nfit_spa { struct acpi_nfit_system_address *spa; struct list_head list; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index c8c11de7d44ca901f5e31247d786c3f62004da25..0f6591a72cf826dd99a9307d1eeef40d45f5cf53 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1113,7 +1113,7 @@ static int 
binder_dec_node(struct binder_node *node, int strong, int internal) static struct binder_ref *binder_get_ref(struct binder_proc *proc, - uint32_t desc, bool need_strong_ref) + u32 desc, bool need_strong_ref) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; @@ -2117,7 +2117,7 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_offset; } if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { - binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", + binder_user_error("%d:%d got transaction with unaligned buffers size, %llu\n", proc->pid, thread->pid, (u64)extra_buffers_size); return_error = BR_FAILED_REPLY; diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 016f6f5f1e5e452e5fb026d621c7af59ba434447..7bc0d4a24c2277801c11f1244877b3d3bbcb0710 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -344,7 +344,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags, return; } - unmap_kernel_range((unsigned long)cpu_addr, size); + unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size)); vunmap(cpu_addr); } #endif diff --git a/drivers/base/platform.c b/drivers/base/platform.c index bd70f8db469ba5e2fc48fc1bec41e5c1aaef90d6..9920916a6220d470f1739f3d17b574a3c4729a92 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -96,7 +96,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) int ret; ret = of_irq_get(dev->dev.of_node, num); - if (ret >= 0 || ret == -EPROBE_DEFER) + if (ret > 0 || ret == -EPROBE_DEFER) return ret; } @@ -174,7 +174,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name) int ret; ret = of_irq_get_byname(dev->dev.of_node, name); - if (ret >= 0 || ret == -EPROBE_DEFER) + if (ret > 0 || ret == -EPROBE_DEFER) return ret; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 
7eea95d490e614d5cefaba9e870ebb8115a32f92..6c5bc3fadfcfc5dda58bb4e48935be753b4233ab 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1025,6 +1025,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a TRACE_DEVICE(dev); TRACE_SUSPEND(0); + dpm_wait_for_children(dev, async); + if (async_error) goto Complete; @@ -1036,8 +1038,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (dev->power.syscore || dev->power.direct_complete) goto Complete; - dpm_wait_for_children(dev, async); - if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -1172,6 +1172,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as __pm_runtime_disable(dev, false); + dpm_wait_for_children(dev, async); + if (async_error) goto Complete; @@ -1183,8 +1185,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as if (dev->power.syscore || dev->power.direct_complete) goto Complete; - dpm_wait_for_children(dev, async); - if (dev->pm_domain) { info = "late power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 74d97f4bac3488ac767ef97b20dfb3e2277e50cf..1d58854c4a9fab8e425cdd2868840feb976c0bb8 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1802,7 +1802,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, * do we need to block DRBD_SIG if sock == &meta.socket ?? * otherwise wake_asender() might interrupt some send_*Ack ! 
*/ - rv = kernel_sendmsg(sock, &msg, &iov, 1, size); + rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); if (rv == -EAGAIN) { if (we_should_drop_the_connection(connection, sock)) break; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 3c59417b485a66c9c9458dd0d981c47615c34907..eada042b6eabf1fc52d695341b84a8ac887518e4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1381,7 +1381,8 @@ static ssize_t hot_remove_store(struct class *class, zram = idr_find(&zram_index_idr, dev_id); if (zram) { ret = zram_remove(zram); - idr_remove(&zram_index_idr, dev_id); + if (!ret) + idr_remove(&zram_index_idr, dev_id); } else { ret = -ENODEV; } diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 7082c7268845639399d9ceab44471937011e1705..0f54cb7ddcbb7b7732192826b0d0589dc6e4480a 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -187,6 +187,7 @@ struct arm_ccn { struct arm_ccn_component *xp; struct arm_ccn_dt dt; + int mn_id; }; @@ -326,6 +327,7 @@ struct arm_ccn_pmu_event { static ssize_t arm_ccn_pmu_event_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); struct arm_ccn_pmu_event *event = container_of(attr, struct arm_ccn_pmu_event, attr); ssize_t res; @@ -352,6 +354,9 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev, res += snprintf(buf + res, PAGE_SIZE - res, ",cmp_l=?,cmp_h=?,mask=?"); break; + case CCN_TYPE_MN: + res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); + break; default: res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); break; @@ -381,9 +386,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj, } static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { - CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), - CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), - CCN_EVENT_MN(dvmop, 
"dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), @@ -757,6 +762,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) /* Validate node/xp vs topology */ switch (type) { + case CCN_TYPE_MN: + if (node_xp != ccn->mn_id) { + dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); + return -EINVAL; + } + break; case CCN_TYPE_XP: if (node_xp >= ccn->num_xps) { dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); @@ -884,6 +895,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) struct arm_ccn_component *xp; u32 val, dt_cfg; + /* Nothing to do for cycle counter */ + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) + return; + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; else @@ -986,7 +1001,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) /* Comparison values */ writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); - writel((cmp_l >> 32) & 0xefffffff, + writel((cmp_l >> 32) & 0x7fffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); writel((cmp_h >> 32) & 0x0fffffff, @@ -994,7 +1009,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) /* Mask */ writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); - writel((mask_l >> 32) & 0xefffffff, + writel((mask_l >> 32) & 0x7fffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); writel((mask_h >> 32) & 0x0fffffff, @@ -1368,6 +1383,8 @@ 
static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, switch (type) { case CCN_TYPE_MN: + ccn->mn_id = id; + return 0; case CCN_TYPE_DT: return 0; case CCN_TYPE_XP: diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 6f497aa1b27654112bc048f6c207288394015386..cf25020576fa2bf08b83c00bad078ce303ad47f0 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -84,14 +84,14 @@ static size_t rng_buffer_size(void) static void add_early_randomness(struct hwrng *rng) { - unsigned char bytes[16]; int bytes_read; + size_t size = min_t(size_t, 16, rng_buffer_size()); mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); + bytes_read = rng_get_data(rng, rng_buffer, size, 1); mutex_unlock(&reading_mutex); if (bytes_read > 0) - add_device_randomness(bytes, bytes_read); + add_device_randomness(rng_buffer, bytes_read); } static inline void cleanup_rng(struct kref *kref) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 8a1432e8bb800114241492e6db5cfdcc841ae8bd..f5c26a5f687582c1cbcd3ea981aae7a3e6738ac7 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); + pm_runtime_put_noidle(&pdev->dev); + goto err_ioremap; + } ret = (dev->of_node) ? 
of_get_omap_rng_device_details(priv, pdev) : get_omap_rng_device_details(priv); @@ -435,8 +440,15 @@ static int __maybe_unused omap_rng_suspend(struct device *dev) static int __maybe_unused omap_rng_resume(struct device *dev) { struct omap_rng_dev *priv = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to runtime_get device: %d\n", ret); + pm_runtime_put_noidle(dev); + return ret; + } - pm_runtime_get_sync(dev); priv->pdata->init(priv); return 0; diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index de0337ebd6585b7a68aeabdd297bd611474b1bee..4f3137d9a35e1f0c4016040293ab3db583fbdbd2 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -139,7 +139,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, /* atomic tpm command send and result receive */ out_size = tpm_transmit(priv->chip, priv->data_buffer, - sizeof(priv->data_buffer)); + sizeof(priv->data_buffer), 0); if (out_size < 0) { mutex_unlock(&priv->buffer_mutex); return out_size; diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index c50637db3a8a9336f002d44046135bdb49fa7088..17abe52e6365b2363af3b4677615a726d412c051 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -328,8 +328,8 @@ EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); /* * Internal kernel interface to transmit TPM commands */ -ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, - size_t bufsiz) +ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, + unsigned int flags) { ssize_t rc; u32 count, ordinal; @@ -348,7 +348,8 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, return -E2BIG; } - mutex_lock(&chip->tpm_mutex); + if (!(flags & TPM_TRANSMIT_UNLOCKED)) + mutex_lock(&chip->tpm_mutex); rc = chip->ops->send(chip, (u8 *) buf, count); if (rc < 0) { @@ -391,20 +392,21 @@ out_recv: dev_err(chip->pdev, "tpm_transmit: tpm_recv: 
error %zd\n", rc); out: - mutex_unlock(&chip->tpm_mutex); + if (!(flags & TPM_TRANSMIT_UNLOCKED)) + mutex_unlock(&chip->tpm_mutex); return rc; } #define TPM_DIGEST_SIZE 20 #define TPM_RET_CODE_IDX 6 -ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, - int len, const char *desc) +ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, + int len, unsigned int flags, const char *desc) { - struct tpm_output_header *header; + const struct tpm_output_header *header; int err; - len = tpm_transmit(chip, (u8 *) cmd, len); + len = tpm_transmit(chip, (const u8 *)cmd, len, flags); if (len < 0) return len; else if (len < TPM_HEADER_SIZE) @@ -452,7 +454,8 @@ ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap, tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = subcap_id; } - rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc); + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0, + desc); if (!rc) *cap = tpm_cmd.params.getcap_out.cap; return rc; @@ -468,7 +471,7 @@ void tpm_gen_interrupt(struct tpm_chip *chip) tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; - rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0, "attempting to determine the timeouts"); } EXPORT_SYMBOL_GPL(tpm_gen_interrupt); @@ -489,7 +492,7 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type) start_cmd.header.in = tpm_startup_header; start_cmd.params.startup_in.startup_type = startup_type; - return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE, + return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE, 0, "attempting to start the TPM"); } @@ -505,7 +508,8 @@ int tpm_get_timeouts(struct tpm_chip *chip) tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); 
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; - rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL); + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0, + NULL); if (rc == TPM_ERR_INVALID_POSTINIT) { /* The TPM is not started, we are the first to talk to it. @@ -519,7 +523,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, - NULL); + 0, NULL); } if (rc) { dev_err(chip->pdev, @@ -580,7 +584,7 @@ duration: tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION; - rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0, "attempting to determine the durations"); if (rc) return rc; @@ -636,7 +640,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip) struct tpm_cmd_t cmd; cmd.header.in = continue_selftest_header; - rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, 0, "continue selftest"); return rc; } @@ -656,7 +660,7 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) cmd.header.in = pcrread_header; cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); - rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, 0, "attempting to read a pcr value"); if (rc == 0) @@ -754,7 +758,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) cmd.header.in = pcrextend_header; cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0, "attempting extend a PCR value"); 
tpm_chip_put(chip); @@ -793,7 +797,7 @@ int tpm_do_selftest(struct tpm_chip *chip) /* Attempt to read a PCR value */ cmd.header.in = pcrread_header; cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0); - rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE); + rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE, 0); /* Some buggy TPMs will not respond to tpm_tis_ready() for * around 300ms while the self test is ongoing, keep trying * until the self test duration expires. */ @@ -834,7 +838,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen) if (chip == NULL) return -ENODEV; - rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd"); + rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd"); tpm_chip_put(chip); return rc; @@ -936,14 +940,15 @@ int tpm_pm_suspend(struct device *dev) cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr); memcpy(cmd.params.pcrextend_in.hash, dummy_hash, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0, "extending dummy pcr before suspend"); } /* now do the actual savestate */ for (try = 0; try < TPM_RETRY; try++) { cmd.header.in = savestate_header; - rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL); + rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, 0, + NULL); /* * If the TPM indicates that it is too busy to respond to @@ -1027,8 +1032,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes); err = tpm_transmit_cmd(chip, &tpm_cmd, - TPM_GETRANDOM_RESULT_SIZE + num_bytes, - "attempting get random"); + TPM_GETRANDOM_RESULT_SIZE + num_bytes, + 0, "attempting get random"); if (err) break; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index ee66fd4673f3596084f75c9033ea24b223ae11d2..f880856aa75e5885dffef99676cdf9a0277eddac 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ 
-39,7 +39,7 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, struct tpm_chip *chip = dev_get_drvdata(dev); tpm_cmd.header.in = tpm_readpubek_header; - err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, + err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0, "attempting to read the PUBEK"); if (err) goto out; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index a4257a32964f40c189f7a1dc54bd252efa48532a..2216861f89f108b089db387072c33bd7a689fa3e 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -498,11 +498,15 @@ extern struct class *tpm_class; extern dev_t tpm_devt; extern const struct file_operations tpm_fops; +enum tpm_transmit_flags { + TPM_TRANSMIT_UNLOCKED = BIT(0), +}; + +ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, + unsigned int flags); +ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len, + unsigned int flags, const char *desc); ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *); -ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, - size_t bufsiz); -ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len, - const char *desc); extern int tpm_get_timeouts(struct tpm_chip *); extern void tpm_gen_interrupt(struct tpm_chip *); extern int tpm_do_selftest(struct tpm_chip *); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c12130485fc18e966c9e41736e57bd7fd0586082..cb7e4f6b70ba7afc2608bd118c25204b7643663e 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -264,7 +264,7 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) sizeof(cmd.params.pcrread_in.pcr_select)); cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7); - rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "attempting to read a pcr value"); if (rc == 0) { buf = cmd.params.pcrread_out.digest; @@ 
-312,7 +312,7 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash) cmd.params.pcrextend_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1); memcpy(cmd.params.pcrextend_in.digest, hash, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "attempting extend a PCR value"); return rc; @@ -358,7 +358,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max) cmd.header.in = tpm2_getrandom_header; cmd.params.getrandom_in.size = cpu_to_be16(num_bytes); - err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "attempting get random"); if (err) break; @@ -416,12 +416,12 @@ static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle, } /** - * tpm2_seal_trusted() - seal a trusted key - * @chip_num: A specific chip number for the request or TPM_ANY_NUM - * @options: authentication values and other options + * tpm2_seal_trusted() - seal the payload of a trusted key + * @chip_num: TPM chip to use * @payload: the key data in clear and encrypted form + * @options: authentication values and other options * - * Returns < 0 on error and 0 on success. + * Return: < 0 on error and 0 on success. 
*/ int tpm2_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, @@ -472,7 +472,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip, goto out; } - rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "sealing data"); + rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 0, "sealing data"); if (rc) goto out; @@ -494,10 +494,18 @@ out: return rc; } -static int tpm2_load(struct tpm_chip *chip, - struct trusted_key_payload *payload, - struct trusted_key_options *options, - u32 *blob_handle) +/** + * tpm2_load_cmd() - execute a TPM2_Load command + * @chip_num: TPM chip to use + * @payload: the key data in clear and encrypted form + * @options: authentication values and other options + * + * Return: same as with tpm_transmit_cmd + */ +static int tpm2_load_cmd(struct tpm_chip *chip, + struct trusted_key_payload *payload, + struct trusted_key_options *options, + u32 *blob_handle, unsigned int flags) { struct tpm_buf buf; unsigned int private_len; @@ -532,7 +540,7 @@ static int tpm2_load(struct tpm_chip *chip, goto out; } - rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "loading blob"); + rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "loading blob"); if (!rc) *blob_handle = be32_to_cpup( (__be32 *) &buf.data[TPM_HEADER_SIZE]); @@ -546,7 +554,16 @@ out: return rc; } -static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) +/** + * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command + * @chip_num: TPM chip to use + * @payload: the key data in clear and encrypted form + * @options: authentication values and other options + * + * Return: same as with tpm_transmit_cmd + */ +static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, + unsigned int flags) { struct tpm_buf buf; int rc; @@ -560,7 +577,8 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) tpm_buf_append_u32(&buf, handle); - rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context"); + rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 
flags, + "flushing context"); if (rc) dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle, rc); @@ -568,10 +586,18 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) tpm_buf_destroy(&buf); } -static int tpm2_unseal(struct tpm_chip *chip, - struct trusted_key_payload *payload, - struct trusted_key_options *options, - u32 blob_handle) +/** + * tpm2_unseal_cmd() - execute a TPM2_Unload command + * @chip_num: TPM chip to use + * @payload: the key data in clear and encrypted form + * @options: authentication values and other options + * + * Return: same as with tpm_transmit_cmd + */ +static int tpm2_unseal_cmd(struct tpm_chip *chip, + struct trusted_key_payload *payload, + struct trusted_key_options *options, + u32 blob_handle, unsigned int flags) { struct tpm_buf buf; u16 data_len; @@ -589,7 +615,7 @@ static int tpm2_unseal(struct tpm_chip *chip, options->blobauth /* hmac */, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "unsealing"); + rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "unsealing"); if (rc > 0) rc = -EPERM; @@ -608,12 +634,12 @@ static int tpm2_unseal(struct tpm_chip *chip, } /** - * tpm_unseal_trusted() - unseal a trusted key - * @chip_num: A specific chip number for the request or TPM_ANY_NUM - * @options: authentication values and other options + * tpm_unseal_trusted() - unseal the payload of a trusted key + * @chip_num: TPM chip to use * @payload: the key data in clear and encrypted form + * @options: authentication values and other options * - * Returns < 0 on error and 0 on success. + * Return: < 0 on error and 0 on success. 
*/ int tpm2_unseal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, @@ -622,14 +648,17 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, u32 blob_handle; int rc; - rc = tpm2_load(chip, payload, options, &blob_handle); + mutex_lock(&chip->tpm_mutex); + rc = tpm2_load_cmd(chip, payload, options, &blob_handle, + TPM_TRANSMIT_UNLOCKED); if (rc) - return rc; - - rc = tpm2_unseal(chip, payload, options, blob_handle); - - tpm2_flush_context(chip, blob_handle); + goto out; + rc = tpm2_unseal_cmd(chip, payload, options, blob_handle, + TPM_TRANSMIT_UNLOCKED); + tpm2_flush_context_cmd(chip, blob_handle, TPM_TRANSMIT_UNLOCKED); +out: + mutex_unlock(&chip->tpm_mutex); return rc; } @@ -655,9 +684,9 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id); cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1); - rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc); + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, desc); if (!rc) - *value = cmd.params.get_tpm_pt_out.value; + *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value); return rc; } @@ -689,7 +718,7 @@ int tpm2_startup(struct tpm_chip *chip, u16 startup_type) cmd.header.in = tpm2_startup_header; cmd.params.startup_in.startup_type = cpu_to_be16(startup_type); - return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), + return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "attempting to start the TPM"); } EXPORT_SYMBOL_GPL(tpm2_startup); @@ -718,7 +747,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) cmd.header.in = tpm2_shutdown_header; cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type); - rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), "stopping the TPM"); + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "stopping the TPM"); /* In places where shutdown command is sent there's no much we can do * except print the error code on a system failure. 
@@ -784,7 +813,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) cmd.header.in = tpm2_selftest_header; cmd.params.selftest_in.full_test = full; - rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, + rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, "continue selftest"); /* At least some prototype chips seem to give RC_TESTING error @@ -836,7 +865,7 @@ int tpm2_do_selftest(struct tpm_chip *chip) cmd.params.pcrread_in.pcr_select[1] = 0x00; cmd.params.pcrread_in.pcr_select[2] = 0x00; - rc = tpm_transmit_cmd(chip, (u8 *) &cmd, sizeof(cmd), NULL); + rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, NULL); if (rc < 0) break; @@ -885,7 +914,7 @@ int tpm2_probe(struct tpm_chip *chip) cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100); cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1); - rc = tpm_transmit(chip, (const char *) &cmd, sizeof(cmd)); + rc = tpm_transmit(chip, (const u8 *)&cmd, sizeof(cmd), 0); if (rc < 0) return rc; else if (rc < TPM_HEADER_SIZE) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 61e64293b7654b36d5db6b320d1bc78100379eb3..2b21398c3adcfc4bd5aaa81f7fcc6a980b75834e 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -149,6 +149,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) struct crb_priv *priv = chip->vendor.priv; int rc = 0; + /* Zero the cancel register so that the next command will not get + * canceled. 
+ */ + iowrite32(0, &priv->cca->cancel); + if (len > le32_to_cpu(ioread32(&priv->cca->cmd_size))) { dev_err(&chip->dev, "invalid command count value %x %zx\n", @@ -182,8 +187,6 @@ static void crb_cancel(struct tpm_chip *chip) if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip)) dev_err(&chip->dev, "ACPI Start failed\n"); - - iowrite32(0, &priv->cca->cancel); } static bool crb_req_canceled(struct tpm_chip *chip, u8 status) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index d2406fe2553396a807a169ddd93b656a4876a537..090183f812beb18dee9b88bbeff7da41801dcf40 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port) spin_lock_irq(&port->inbuf_lock); /* Remove unused data this port might have received. */ discard_port_data(port); + spin_unlock_irq(&port->inbuf_lock); /* Remove buffers we queued up for the Host to send us data in. */ - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf, true); - spin_unlock_irq(&port->inbuf_lock); + do { + spin_lock_irq(&port->inbuf_lock); + buf = virtqueue_detach_unused_buf(port->in_vq); + spin_unlock_irq(&port->inbuf_lock); + if (buf) + free_buf(buf, true); + } while (buf); spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); /* Free pending buffers from the out-queue. 
*/ - while ((buf = virtqueue_detach_unused_buf(port->out_vq))) - free_buf(buf, true); - spin_unlock_irq(&port->outvq_lock); + do { + spin_lock_irq(&port->outvq_lock); + buf = virtqueue_detach_unused_buf(port->out_vq); + spin_unlock_irq(&port->outvq_lock); + if (buf) + free_buf(buf, true); + } while (buf); } /* diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 0c83ffc22dd239aeea20f157883e72978968167a..f45d923ed3cde9f253add27ad563ca6070fd1c3d 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -354,7 +354,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, /* if read only, just return current value */ if (divider->flags & CLK_DIVIDER_READ_ONLY) { - bestdiv = readl(divider->reg) >> divider->shift; + bestdiv = clk_readl(divider->reg) >> divider->shift; bestdiv &= div_mask(divider->width); bestdiv = _get_div(divider->table, bestdiv, divider->flags, divider->width); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 7bc1c4527ae48d0baad2ebb4e141385e695ff8f4..a5070f9cb0d4aa2797d614150a320046dbf1b42d 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg, struct mux_hwclock *hwc, const struct clk_ops *ops, unsigned long min_rate, + unsigned long max_rate, unsigned long pct80_rate, const char *fmt, int idx) { @@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg, continue; if (rate < min_rate) continue; + if (rate > max_rate) + continue; parent_names[j] = div->name; hwc->parent_to_clksel[j] = i; @@ -759,14 +762,18 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) struct mux_hwclock *hwc; const struct clockgen_pll_div *div; unsigned long plat_rate, min_rate; - u64 pct80_rate; + u64 max_rate, pct80_rate; u32 clksel; hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); if (!hwc) return NULL; - hwc->reg = cg->regs + 0x20 * idx; + if (cg->info.flags & 
CG_VER3) + hwc->reg = cg->regs + 0x70000 + 0x20 * idx; + else + hwc->reg = cg->regs + 0x20 * idx; + hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; /* @@ -783,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) return NULL; } - pct80_rate = clk_get_rate(div->clk); - pct80_rate *= 8; + max_rate = clk_get_rate(div->clk); + pct80_rate = max_rate * 8; do_div(pct80_rate, 10); plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); @@ -794,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) else min_rate = plat_rate / 2; - return create_mux_common(cg, hwc, &cmux_ops, min_rate, + return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate, pct80_rate, "cg-cmux%d", idx); } @@ -809,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) hwc->reg = cg->regs + 0x20 * idx + 0x10; hwc->info = cg->info.hwaccel[idx]; - return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, + return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0, "cg-hwaccel%d", idx); } diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index 10224b01b97c5c6206372f36f9f81fa7ed861b56..b134a8b15e2c8ca95e0ae8aa1a95f2c958cc955d 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate, /* Set new divider */ data = xgene_clk_read(pclk->param.divider_reg + pclk->param.reg_divider_offset); - data &= ~((1 << pclk->param.reg_divider_width) - 1) - << pclk->param.reg_divider_shift; + data &= ~(((1 << pclk->param.reg_divider_width) - 1) + << pclk->param.reg_divider_shift); data |= divider; xgene_clk_write(data, pclk->param.divider_reg + pclk->param.reg_divider_offset); diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c index b0978d3b83e2dcdeb898dcf7233965dc9382b5c6..d302ed3b8225984498876724114cd415b514eb1d 100644 --- a/drivers/clk/imx/clk-imx35.c +++ 
b/drivers/clk/imx/clk-imx35.c @@ -115,7 +115,7 @@ static void __init _mx35_clocks_init(void) } clk[ckih] = imx_clk_fixed("ckih", 24000000); - clk[ckil] = imx_clk_fixed("ckih", 32768); + clk[ckil] = imx_clk_fixed("ckil", 32768); clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL); clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL); diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index c1935081d34aee3403d0ee5d6904b117bd5dfa12..aab64205d8663b8233a43491d4ce1cb722191ba1 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -550,6 +550,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) if (IS_ENABLED(CONFIG_PCI_IMX6)) clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]); + /* + * Initialize the GPU clock muxes, so that the maximum specified clock + * rates for the respective SoC are not exceeded. + */ + if (clk_on_imx6dl()) { + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], + clk[IMX6QDL_CLK_PLL2_PFD1_594M]); + clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], + clk[IMX6QDL_CLK_PLL2_PFD1_594M]); + } else if (clk_on_imx6q()) { + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], + clk[IMX6QDL_CLK_MMDC_CH0_AXI]); + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL], + clk[IMX6QDL_CLK_PLL2_PFD1_594M]); + clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], + clk[IMX6QDL_CLK_PLL3_USB_OTG]); + } + imx_register_uart_clocks(uart_clks); } CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init); diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 251533d87c6538f6ff2cb46bee025a2589a1df2f..f261b1d292c7494cae6d762799c19526c98d9a74 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np) } pxa_unit->apmu_base = of_iomap(np, 1); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apmu_base) { pr_err("failed to map apmu 
registers\n"); return; } diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c index 64eaf4141c69842a766f49a79a1b2698b6974e8a..427f4bb08665790ce5b08cb54a946e9ff7008f1c 100644 --- a/drivers/clk/mmp/clk-of-pxa168.c +++ b/drivers/clk/mmp/clk-of-pxa168.c @@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np) } pxa_unit->apmu_base = of_iomap(np, 1); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apmu_base) { pr_err("failed to map apmu registers\n"); return; } diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c index 13d6173326a435ad46af652f27bfc20e32302e04..cdf5ba566d3b90b9d15ff8e361971a281c61c4a0 100644 --- a/drivers/clk/mmp/clk-of-pxa910.c +++ b/drivers/clk/mmp/clk-of-pxa910.c @@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np) } pxa_unit->apmu_base = of_iomap(np, 1); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apmu_base) { pr_err("failed to map apmu registers\n"); return; } @@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np) } pxa_unit->apbcp_base = of_iomap(np, 3); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apbcp_base) { pr_err("failed to map apbcp registers\n"); return; } diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 9e5c0b6f7a0ee81215782a4991f4bc89678ec02c..c9ba7f97eebe8aa003790090c9ad9e4928d00a65 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -513,7 +513,7 @@ static int clk_rcg2_enable(struct clk_hw *hw) * is always on while APPS is online. Therefore, the RCG can safely be * switched. */ - rate = clk_get_rate(hw->clk); + rate = rcg->current_freq; f = qcom_find_freq(rcg->freq_tbl, rate); if (!f) return -EINVAL; @@ -627,6 +627,9 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate) rcg->new_index, false); } + /* Update current frequency with the frequency requested. 
*/ + rcg->current_freq = rate; + return ret; } diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 6f3719d73390fb64d9b7cc0ad83636f1efcbcf85..e84877a2caccf87759451070e1585c5af306fef5 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = { .set_next_event = sun4i_clkevt_next_event, }; +static void sun4i_timer_clear_interrupt(void) +{ + writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG); +} static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = (struct clock_event_device *)dev_id; - writel(0x1, timer_base + TIMER_IRQ_ST_REG); + sun4i_timer_clear_interrupt(); evt->event_handler(evt); return IRQ_HANDLED; @@ -193,6 +197,9 @@ static void __init sun4i_timer_init(struct device_node *node) /* Make sure timer is stopped before playing with interrupts */ sun4i_clkevt_time_stop(0); + /* clear timer0 interrupt */ + sun4i_timer_clear_interrupt(); + sun4i_clockevent.cpumask = cpu_possible_mask; sun4i_clockevent.irq = irq; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6e80e42982748fcd9695b694dd936141ba2ce353..7ff8b15a34225cbf74130d8dc10fa0c9a1e769d6 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -285,14 +285,14 @@ static void intel_pstate_hwp_set(void) int min, hw_min, max, hw_max, cpu, range, adj_range; u64 value, cap; - rdmsrl(MSR_HWP_CAPABILITIES, cap); - hw_min = HWP_LOWEST_PERF(cap); - hw_max = HWP_HIGHEST_PERF(cap); - range = hw_max - hw_min; - get_online_cpus(); for_each_online_cpu(cpu) { + rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); + hw_min = HWP_LOWEST_PERF(cap); + hw_max = HWP_HIGHEST_PERF(cap); + range = hw_max - hw_min; + rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); adj_range = limits->min_perf_pct * range / 100; min = hw_min + adj_range; diff --git a/drivers/cpuidle/cpuidle-arm.c 
b/drivers/cpuidle/cpuidle-arm.c index e342565e8715e95af6c76a80ca393d8fcfada66a..1855b9ee807f28fa0fb75212af944542f6d9f1ac 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -135,6 +135,7 @@ static int __init arm_idle_init(void) dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { pr_err("Failed to allocate cpuidle device\n"); + ret = -ENOMEM; goto out_fail; } dev->cpu = cpu; diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 823b7d98828473644e22b478c33c12c2941fa959..d8e571981c3586933d0b9d5d3174f92ed30780f4 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * Copyright (C) 2006-2007 Adam Belay * Copyright (C) 2009 Intel Corporation * @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -161,13 +162,13 @@ s32 msm_cpuidle_get_deep_idle_latency(void) void lpm_suspend_wake_time(uint64_t wakeup_time) { if (wakeup_time <= 0) { - suspend_wake_time = msm_pm_sleep_time_override; + suspend_wake_time = msm_pm_sleep_time_override * MSEC_PER_SEC; return; } if (msm_pm_sleep_time_override && (msm_pm_sleep_time_override < wakeup_time)) - suspend_wake_time = msm_pm_sleep_time_override; + suspend_wake_time = msm_pm_sleep_time_override * MSEC_PER_SEC; else suspend_wake_time = wakeup_time; } @@ -793,7 +794,7 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster, if (!suspend_wake_time) return ~0ULL; else - return USEC_PER_SEC * suspend_wake_time; + return USEC_PER_MSEC * suspend_wake_time; } cpumask_and(&online_cpus_in_cluster, @@ -1412,7 +1413,6 @@ unlock_and_return: } #if !defined(CONFIG_CPU_V7) -asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) { /* diff --git a/drivers/crypto/caam/caamalg.c 
b/drivers/crypto/caam/caamalg.c index b3044219772cd7ac57e0bf2559eb7dea8caeca08..2cde3796cb827c8370d95d848c2d4c659256b472 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; + /* + * Check support for AES modes not available + * on LP devices. + */ + if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) + if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_XTS) + continue; + t_alg = caam_alg_alloc(alg); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 2183a2e77641e0682ca113951430765bcbcca4fc..9cb3a0b715e231cdcd150fda81bac4701989ec3d 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -26,16 +26,13 @@ #include #include #include +#include #include #include #include #define IN_INTERRUPT in_interrupt() -#define GHASH_BLOCK_SIZE (16) -#define GHASH_DIGEST_SIZE (16) -#define GHASH_KEY_LEN (16) - void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], @@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx { static int p8_ghash_init_tfm(struct crypto_tfm *tfm) { - const char *alg; + const char *alg = "ghash-generic"; struct crypto_shash *fallback; struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR @@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) crypto_shash_set_flags(fallback, crypto_shash_get_flags((struct crypto_shash *) tfm)); - ctx->fallback = fallback; - shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) - + 
crypto_shash_descsize(fallback); + /* Check if the descsize defined in the algorithm is still enough. */ + if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) + + crypto_shash_descsize(fallback)) { + printk(KERN_ERR + "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", + alg, + shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), + crypto_shash_descsize(fallback)); + return -EINVAL; + } + ctx->fallback = fallback; return 0; } @@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, { struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); - if (keylen != GHASH_KEY_LEN) + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; preempt_disable(); @@ -215,7 +215,8 @@ struct shash_alg p8_ghash_alg = { .update = p8_ghash_update, .final = p8_ghash_final, .setkey = p8_ghash_setkey, - .descsize = sizeof(struct p8_ghash_desc_ctx), + .descsize = sizeof(struct p8_ghash_desc_ctx) + + sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index e44a1bfb02504caaf51e23b8bf57af88bc3a8d1e..66c073fc8afc6e18809b9e429f783781a199f46c 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -864,8 +864,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, * access. Hopefully we can access DDR through both ports (at least on * SAMA5D4x), so we can use the same interface for source and dest, * that solves the fact we don't know the direction. + * ERRATA: Even if useless for memory transfers, the PERID has to not + * match the one of another channel. If not, it could lead to spurious + * flag status. 
*/ - u32 chan_cc = AT_XDMAC_CC_DIF(0) + u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) + | AT_XDMAC_CC_DIF(0) | AT_XDMAC_CC_SIF(0) | AT_XDMAC_CC_MBSIZE_SIXTEEN | AT_XDMAC_CC_TYPE_MEM_TRAN; @@ -1042,8 +1046,12 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, * access DDR through both ports (at least on SAMA5D4x), so we can use * the same interface for source and dest, that solves the fact we * don't know the direction. + * ERRATA: Even if useless for memory transfers, the PERID has to not + * match the one of another channel. If not, it could lead to spurious + * flag status. */ - u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM + u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) + | AT_XDMAC_CC_DAM_INCREMENTED_AM | AT_XDMAC_CC_SAM_INCREMENTED_AM | AT_XDMAC_CC_DIF(0) | AT_XDMAC_CC_SIF(0) @@ -1144,8 +1152,12 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, * access. Hopefully we can access DDR through both ports (at least on * SAMA5D4x), so we can use the same interface for source and dest, * that solves the fact we don't know the direction. + * ERRATA: Even if useless for memory transfers, the PERID has to not + * match the one of another channel. If not, it could lead to spurious + * flag status. 
*/ - u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM + u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) + | AT_XDMAC_CC_DAM_UBS_AM | AT_XDMAC_CC_SAM_INCREMENTED_AM | AT_XDMAC_CC_DIF(0) | AT_XDMAC_CC_SIF(0) @@ -1183,8 +1195,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, + "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, desc->lld.mbr_cfg); return desc; @@ -2055,7 +2067,7 @@ err_dma_unregister: err_clk_disable: clk_disable_unprepare(atxdmac->clk); err_free_irq: - free_irq(atxdmac->irq, atxdmac->dma.dev); + free_irq(atxdmac->irq, atxdmac); return ret; } @@ -2071,7 +2083,7 @@ static int at_xdmac_remove(struct platform_device *pdev) synchronize_irq(atxdmac->irq); - free_irq(atxdmac->irq, atxdmac->dma.dev); + free_irq(atxdmac->irq, atxdmac); for (i = 0; i < atxdmac->dma.chancnt; i++) { struct at_xdmac_chan *atchan = &atxdmac->chan[i]; diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 2bf37e68ad0f1529e472a141031d56586ac21e21..dd184b50e5b40a508c1bdacee874b6464d2b1055 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc) raw_spin_unlock(&bank_lock); while ((line = ffs(status))) { struct ipu_irq_map *map; - unsigned int irq = NO_IRQ; + unsigned int irq; line--; status &= ~(1UL << line); raw_spin_lock(&bank_lock); map = src2map(32 * i + line); - if (map) - irq = map->irq; - raw_spin_unlock(&bank_lock); - if (!map) { + raw_spin_unlock(&bank_lock); pr_err("IPU: Interrupt on unmapped source %u bank %d\n", line, i); continue; } + irq = map->irq; + raw_spin_unlock(&bank_lock); generic_handle_irq(irq); } } diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 
f4ea80d602f73bd6bc5adac2a32bbbe2b58de716..b9d2f76a0cf7a036a2794436db34887bf75590fe 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -73,13 +73,13 @@ struct rfc2734_header { #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) -#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) +#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1) #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) -#define fwnet_set_hdr_lf(lf) ((lf) << 30) +#define fwnet_set_hdr_lf(lf) ((lf) << 30) #define fwnet_set_hdr_ether_type(et) (et) -#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) +#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16) #define fwnet_set_hdr_fg_off(fgo) (fgo) #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) @@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, int retval; u16 ether_type; + if (len <= RFC2374_UNFRAG_HDR_SIZE) + return 0; + hdr.w0 = be32_to_cpu(buf[0]); lf = fwnet_get_hdr_lf(&hdr); if (lf == RFC2374_HDR_UNFRAG) { @@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, return fwnet_finish_incoming_packet(net, skb, source_node_id, is_broadcast, ether_type); } + /* A datagram fragment has been received, now the fun begins. */ + + if (len <= RFC2374_FRAG_HDR_SIZE) + return 0; + hdr.w1 = ntohl(buf[1]); buf += 2; len -= RFC2374_FRAG_HDR_SIZE; @@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, fg_off = fwnet_get_hdr_fg_off(&hdr); } datagram_label = fwnet_get_hdr_dgl(&hdr); - dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? 
+ 1 */ + dg_size = fwnet_get_hdr_dg_size(&hdr); + + if (fg_off + len > dg_size) + return 0; spin_lock_irqsave(&dev->lock, flags); @@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, fw_send_response(card, r, rcode); } +static int gasp_source_id(__be32 *p) +{ + return be32_to_cpu(p[0]) >> 16; +} + +static u32 gasp_specifier_id(__be32 *p) +{ + return (be32_to_cpu(p[0]) & 0xffff) << 8 | + (be32_to_cpu(p[1]) & 0xff000000) >> 24; +} + +static u32 gasp_version(__be32 *p) +{ + return be32_to_cpu(p[1]) & 0xffffff; +} + static void fwnet_receive_broadcast(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { @@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, __be32 *buf_ptr; int retval; u32 length; - u16 source_node_id; - u32 specifier_id; - u32 ver; unsigned long offset; unsigned long flags; @@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, spin_unlock_irqrestore(&dev->lock, flags); - specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 - | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; - ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; - source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; - - if (specifier_id == IANA_SPECIFIER_ID && - (ver == RFC2734_SW_VERSION + if (length > IEEE1394_GASP_HDR_SIZE && + gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID && + (gasp_version(buf_ptr) == RFC2734_SW_VERSION #if IS_ENABLED(CONFIG_IPV6) - || ver == RFC3146_SW_VERSION + || gasp_version(buf_ptr) == RFC3146_SW_VERSION #endif - )) { - buf_ptr += 2; - length -= IEEE1394_GASP_HDR_SIZE; - fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, + )) + fwnet_incoming_packet(dev, buf_ptr + 2, + length - IEEE1394_GASP_HDR_SIZE, + gasp_source_id(buf_ptr), context->card->generation, true); - } packet.payload_length = dev->rcv_buffer_size; packet.interrupt = 1; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 
9411b1ab2c76873b294529be10caa2be292234c0..68489ef7422f38e1826758bc9f5647b1f41f15ed 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -173,6 +173,9 @@ config QCOM_SCM_64 def_bool y depends on QCOM_SCM && ARM64 +config HAVE_ARM_SMCCC + bool + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index ec379a4164cc07474fd5f2c193cbe6437df0e6fd..f292917b00e7142425fd6676ee48bbd3050b8946 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -18,3 +18,6 @@ obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o obj-$(CONFIG_EFI_STUB) += libstub/ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o + +arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o +obj-$(CONFIG_ARM64) += $(arm-obj-y) diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c new file mode 100644 index 0000000000000000000000000000000000000000..9e15d571b53c2bd759e1b0e6c0d0bda0795740a9 --- /dev/null +++ b/drivers/firmware/efi/arm-init.c @@ -0,0 +1,209 @@ +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013 - 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include + +struct efi_memory_map memmap; + +u64 efi_system_table; + +static int __init is_normal_ram(efi_memory_desc_t *md) +{ + if (md->attribute & EFI_MEMORY_WB) + return 1; + return 0; +} + +/* + * Translate a EFI virtual address into a physical address: this is necessary, + * as some data members of the EFI system table are virtually remapped after + * SetVirtualAddressMap() has been called. + */ +static phys_addr_t efi_to_phys(unsigned long addr) +{ + efi_memory_desc_t *md; + + for_each_efi_memory_desc(&memmap, md) { + if (!(md->attribute & EFI_MEMORY_RUNTIME)) + continue; + if (md->virt_addr == 0) + /* no virtual mapping has been installed by the stub */ + break; + if (md->virt_addr <= addr && + (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) + return md->phys_addr + addr - md->virt_addr; + } + return addr; +} + +static int __init uefi_init(void) +{ + efi_char16_t *c16; + void *config_tables; + size_t table_size; + char vendor[100] = "unknown"; + int i, retval; + + efi.systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (efi.systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + + /* + * Verify the EFI Table + */ + if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + retval = -EINVAL; + goto out; + } + if ((efi.systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff); + + /* Show what we know for posterity */ + c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), + sizeof(vendor) * sizeof(efi_char16_t)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; + 
early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } + + pr_info("EFI v%u.%.02u by %s\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff, vendor); + + table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; + config_tables = early_memremap(efi_to_phys(efi.systab->tables), + table_size); + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } + retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, + sizeof(efi_config_table_t), NULL); + + early_memunmap(config_tables, table_size); +out: + early_memunmap(efi.systab, sizeof(efi_system_table_t)); + return retval; +} + +/* + * Return true for RAM regions we want to permanently reserve. + */ +static __init int is_reserve_region(efi_memory_desc_t *md) +{ + switch (md->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + case EFI_PERSISTENT_MEMORY: + return 0; + default: + break; + } + return is_normal_ram(md); +} + +static __init void reserve_regions(void) +{ + efi_memory_desc_t *md; + u64 paddr, npages, size; + + if (efi_enabled(EFI_DBG)) + pr_info("Processing EFI memory map:\n"); + + for_each_efi_memory_desc(&memmap, md) { + paddr = md->phys_addr; + npages = md->num_pages; + + if (efi_enabled(EFI_DBG)) { + char buf[64]; + + pr_info(" 0x%012llx-0x%012llx %s", + paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, + efi_md_typeattr_format(buf, sizeof(buf), md)); + } + + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_normal_ram(md)) + early_init_dt_add_memory_arch(paddr, size); + + if (is_reserve_region(md)) { + memblock_mark_nomap(paddr, size); + if (efi_enabled(EFI_DBG)) + pr_cont("*"); + } + + if (efi_enabled(EFI_DBG)) + pr_cont("\n"); + } + + set_bit(EFI_MEMMAP, &efi.flags); +} + +void __init efi_init(void) +{ + struct efi_fdt_params params; + + /* Grab UEFI 
information placed in FDT by stub */ + if (!efi_get_fdt_params(¶ms)) + return; + + efi_system_table = params.system_table; + + memmap.phys_map = params.mmap; + memmap.map = early_memremap(params.mmap, params.mmap_size); + if (memmap.map == NULL) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. + */ + panic("Unable to map EFI memory map.\n"); + } + memmap.map_end = memmap.map + params.mmap_size; + memmap.desc_size = params.desc_size; + memmap.desc_version = params.desc_ver; + + if (uefi_init() < 0) + return; + + reserve_regions(); + early_memunmap(memmap.map, params.mmap_size); + memblock_mark_nomap(params.mmap & PAGE_MASK, + PAGE_ALIGN(params.mmap_size + + (params.mmap & ~PAGE_MASK))); +} diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c new file mode 100644 index 0000000000000000000000000000000000000000..6ae21e41a429402d132d8d5be215d5adde64d08c --- /dev/null +++ b/drivers/firmware/efi/arm-runtime.c @@ -0,0 +1,135 @@ +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +extern u64 efi_system_table; + +static struct mm_struct efi_mm = { + .mm_rb = RB_ROOT, + .mm_users = ATOMIC_INIT(2), + .mm_count = ATOMIC_INIT(1), + .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), + .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), + .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), +}; + +static bool __init efi_virtmap_init(void) +{ + efi_memory_desc_t *md; + + efi_mm.pgd = pgd_alloc(&efi_mm); + init_new_context(NULL, &efi_mm); + + for_each_efi_memory_desc(&memmap, md) { + phys_addr_t phys = md->phys_addr; + int ret; + + if (!(md->attribute & EFI_MEMORY_RUNTIME)) + continue; + if (md->virt_addr == 0) + return false; + + ret = efi_create_mapping(&efi_mm, md); + if (!ret) { + pr_info(" EFI remap %pa => %p\n", + &phys, (void *)(unsigned long)md->virt_addr); + } else { + pr_warn(" EFI remap %pa: failed to create mapping (%d)\n", + &phys, ret); + return false; + } + } + return true; +} + +/* + * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., + * non-early mapping of the UEFI system table and virtual mappings for all + * EFI_MEMORY_RUNTIME regions. 
+ */ +static int __init arm_enable_runtime_services(void) +{ + u64 mapsize; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return 0; + } + + pr_info("Remapping and enabling EFI services.\n"); + + mapsize = memmap.map_end - memmap.map; + memmap.map = (__force void *)ioremap_cache(memmap.phys_map, + mapsize); + if (!memmap.map) { + pr_err("Failed to remap EFI memory map\n"); + return -ENOMEM; + } + memmap.map_end = memmap.map + mapsize; + efi.memmap = &memmap; + + efi.systab = (__force void *)ioremap_cache(efi_system_table, + sizeof(efi_system_table_t)); + if (!efi.systab) { + pr_err("Failed to remap EFI System Table\n"); + return -ENOMEM; + } + set_bit(EFI_SYSTEM_TABLES, &efi.flags); + + if (!efi_virtmap_init()) { + pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); + return -ENOMEM; + } + + /* Set up runtime services function pointers */ + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + efi.runtime_version = efi.systab->hdr.revision; + + return 0; +} +early_initcall(arm_enable_runtime_services); + +void efi_virtmap_load(void) +{ + preempt_disable(); + efi_set_pgd(&efi_mm); +} + +void efi_virtmap_unload(void) +{ + efi_set_pgd(current->active_mm); + preempt_enable(); +} diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 3b52677f459ae24b2dd2dcd75b0c4fe95beeb64c..c51f3b2fe3c0868fe643ebe2b1d18554b61e1ca8 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -25,6 +25,8 @@ #include #include +#include + struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d24f35d74b27079afeae5c08d601c3bcd899dee6..af0060d6a22ab6d6002076a9001d6e7934702e58 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c 
@@ -13,6 +13,8 @@ #define pr_fmt(fmt) "psci: " fmt +#include +#include #include #include #include @@ -20,10 +22,12 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -58,8 +62,6 @@ struct psci_operations psci_ops; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); -asmlinkage psci_fn __invoke_psci_fn_hvc; -asmlinkage psci_fn __invoke_psci_fn_smc; static psci_fn *invoke_psci_fn; enum psci_function { @@ -225,6 +227,119 @@ static int __init psci_features(u32 psci_func_id) psci_func_id, 0, 0); } +#ifdef CONFIG_CPU_IDLE +static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); + +static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +{ + int i, ret, count = 0; + u32 *psci_states; + struct device_node *state_node; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + /* Count idle states */ + while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + count))) { + count++; + of_node_put(state_node); + } + + if (!count) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 state; + + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + + ret = of_property_read_u32(state_node, + "arm,psci-suspend-param", + &state); + if (ret) { + pr_warn(" * %s missing arm,psci-suspend-param property\n", + state_node->full_name); + of_node_put(state_node); + goto free_mem; + } + + of_node_put(state_node); + pr_debug("psci-power-state %#x index %d\n", state, i); + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + ret = -EINVAL; + goto free_mem; + } + psci_states[i] = state; + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = 
psci_states; + return 0; + +free_mem: + kfree(psci_states); + return ret; +} + +int psci_cpu_init_idle(unsigned int cpu) +{ + struct device_node *cpu_node; + int ret; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (!cpu_node) + return -ENODEV; + + ret = psci_dt_cpu_init_idle(cpu_node, cpu); + + of_node_put(cpu_node); + + return ret; +} + +static int psci_suspend_finisher(unsigned long state_id) +{ + return psci_ops.cpu_suspend(state_id, virt_to_phys(cpu_resume)); +} + +int psci_cpu_suspend_enter(unsigned long state_id) +{ + int ret; + /* + * idle state_id 0 corresponds to wfi, should never be called + * from the cpu_suspend operations + */ + if (WARN_ON_ONCE(!state_id)) + return -EINVAL; + + if (!psci_power_state_loses_context(state_id)) + ret = psci_ops.cpu_suspend(state_id, 0); + else + ret = cpu_suspend(state_id, psci_suspend_finisher); + + return ret; +} + +/* ARM specific CPU idle operations */ +#ifdef CONFIG_ARM +static struct cpuidle_ops psci_cpuidle_ops __initdata = { + .suspend = psci_cpu_suspend_enter, + .init = psci_dt_cpu_init_idle, +}; + +CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); +#endif +#endif + static int psci_system_suspend(unsigned long unused) { return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 48ef368347ab2cd63cdb1d4f922572c608af9e89..9e02cb6afb0bb15872581684d0c806dff4596c51 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -329,7 +329,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_data(irq, h->host_data); - irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); + irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq); return 0; } diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 990fa9023e2228a82d1138cb8f25bfeb75dab23d..3b6bce0518abfcf8a127dca1428bb69ebde52869 100644 --- 
a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c @@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d, { irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, handle_edge_irq); - irq_set_noprobe(irq); + irq_set_probe(irq); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ff5566c69f7d2a7840d188ad8d69e48f49442fa2..e8e962f7b5cbcdca13a782a09f9612e7f0de4d0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -532,6 +532,7 @@ struct amdgpu_bo { u64 metadata_flags; void *metadata; u32 metadata_size; + unsigned prime_shared_count; /* list of all virtual address to which this bo * is associated to */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f82a2dd83874dea20c7e7b2a6ddf8aee74e0fe1d..3c7a7235988ddcd3cc752c7869d9d78381775573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -117,7 +117,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; } entry->tv.bo = &entry->robj->tbo; - entry->tv.shared = true; + entry->tv.shared = !entry->robj->prime_shared_count; if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index fe36caf1b7d7b084762d19fb7fa6516a42dd3fa3..14f57d9915e3fc0aa8d5a8e77b734073f05ecb9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, printk("\n"); } + u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the 
displays are off, vblank time is max */ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / - amdgpu_crtc->hw_mode.clock; - vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - + vblank_in_pixels = + amdgpu_crtc->hw_mode.crtc_htotal * + (amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + - (amdgpu_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + (amdgpu_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 4488e82f87b01475a43a789280c6f809452530df..a5c824078472637aad30ce3c13f3e879a539c7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -227,7 +227,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 
1 : 0; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 59f735a933a939480e4e000190fe5fc042b1b395..e6a7d30c37472d778527cdf0a9795b5d610c7e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -77,20 +77,36 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, list_add_tail(&bo->list, &adev->gem.objects); mutex_unlock(&adev->gem.mutex); + bo->prime_shared_count = 1; return &bo->gem_base; } int amdgpu_gem_prime_pin(struct drm_gem_object *obj) { struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int ret = 0; + long ret = 0; ret = amdgpu_bo_reserve(bo, false); if (unlikely(ret != 0)) return ret; + /* + * Wait for all shared fences to complete before we switch to future + * use of exclusive fence on this prime shared bo. 
+ */ + ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + if (unlikely(ret < 0)) { + DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); + amdgpu_bo_unreserve(bo); + return ret; + } + /* pin buffer into GTT */ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); + if (likely(ret == 0)) + bo->prime_shared_count++; + amdgpu_bo_unreserve(bo); return ret; } @@ -105,6 +121,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) return; amdgpu_bo_unpin(bo); + if (bo->prime_shared_count) + bo->prime_shared_count--; amdgpu_bo_unreserve(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 21aacc1f45c1fd7afded7f6088c8a365e9005224..7f85c2c1d68156a4d91bd4b18332df6f1886fb7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector unsigned max_lane_num = drm_dp_max_lane_count(dpcd); unsigned lane_num, i, max_pix_clock; - for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { - for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { - max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_NUTMEG) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + max_pix_clock = (lane_num * 270000 * 8) / bpp; if (max_pix_clock >= pix_clock) { *dp_lanes = lane_num; - *dp_rate = link_rates[i]; + *dp_rate = 270000; return 0; } } + } else { + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (max_pix_clock >= pix_clock) { + *dp_lanes = lane_num; + *dp_rate = link_rates[i]; + return 0; + } + } + } } return -EINVAL; diff --git 
a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4dcc8fba579203297ece7849ab8a861ecbe33f10..5b261adb4b6936cdcb8d7e154151d2ae1ffd3dbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -419,16 +419,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - continue; - } - switch (amdgpu_connector->hpd.hpd) { case AMDGPU_HPD_1: idx = 0; @@ -452,6 +442,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) continue; } + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. 
+ */ + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); + continue; + } + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 8f1e51128b33882d5680731b9afb08fcf1b1023c..c161eeda417bdf9aeb7559b1860b1349b985c48d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -409,16 +409,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - continue; - } - switch (amdgpu_connector->hpd.hpd) { case AMDGPU_HPD_1: idx = 0; @@ -442,6 +432,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) continue; } + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. 
+ */ + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); + continue; + } + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); @@ -3030,6 +3033,7 @@ static int dce_v11_0_sw_fini(void *handle) dce_v11_0_afmt_fini(adev); + drm_mode_config_cleanup(adev->ddev); adev->mode_info.mode_config_initialized = false; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 42d954dc436d03206c48aafe7655966870dfc931..9b4dcf76ce6c4b14ef727230fb13fd373be340c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -392,15 +392,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - continue; - } switch (amdgpu_connector->hpd.hpd) { case AMDGPU_HPD_1: WREG32(mmDC_HPD1_CONTROL, tmp); @@ -423,6 +414,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) default: break; } + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. 
+ */ + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; + break; + case AMDGPU_HPD_2: + dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; + break; + case AMDGPU_HPD_3: + dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; + break; + case AMDGPU_HPD_4: + dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; + break; + case AMDGPU_HPD_5: + dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; + break; + case AMDGPU_HPD_6: + dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; + break; + default: + continue; + } + + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + continue; + } + dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 59d1269626b15f17843ee3b4351c62de07173b3a..e231176cb66b073031fa53ffa799617defd32e1d 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -316,19 +316,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, u32 *coeff_tab = heo_upscaling_ycoef; u32 max_memsize; - if (state->crtc_w < state->src_w) + if (state->crtc_h < state->src_h) coeff_tab = heo_downscaling_ycoef; for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++) atmel_hlcdc_layer_update_cfg(&plane->layer, 33 + i, 0xffffffff, coeff_tab[i]); - factor = ((8 * 256 * state->src_w) - (256 * 4)) / - state->crtc_w; + factor = ((8 * 256 * state->src_h) - (256 * 4)) / + state->crtc_h; factor++; - max_memsize = ((factor * state->crtc_w) + (256 * 4)) / + max_memsize = ((factor * state->crtc_h) + (256 * 4)) / 2048; - if (max_memsize > state->src_w) + if (max_memsize > state->src_h) factor--; factor_reg |= (factor << 16) | 0x80000000; } diff --git 
a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 2485fb652716852f601a342596648af78d1a273b..7cb2815e815e156aa9db7c89f164af42e96aef35 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -909,6 +909,7 @@ static void drm_dp_destroy_port(struct kref *kref) /* no need to clean up vcpi * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); + port->pdt = DP_PEER_DEVICE_NONE; } kfree(port); } @@ -1154,7 +1155,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_put_port(port); goto out; } - if (port->port_num >= DP_MST_LOGICAL_PORT_0) { + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); drm_mode_connector_set_tile_property(port->connector); } @@ -2872,6 +2875,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) mgr->cbs->destroy_connector(mgr, port->connector); drm_dp_port_teardown_pdt(port, port->pdt); + port->pdt = DP_PEER_DEVICE_NONE; if (!port->input && port->vcpi.vcpi > 0) { drm_dp_mst_reset_vcpi_slots(mgr, port); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 57676f8d7ecfe70c54fd79a3d714f196ff1406a5..a6289752be16d20423e3439af521db5507854bd0 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, return 0; } +#if defined(CONFIG_X86) || defined(CONFIG_IA64) typedef struct drm_mode_fb_cmd232 { u32 fb_id; u32 width; @@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, return 0; } +#endif static drm_ioctl_compat_t *drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, @@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = { 
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw, #endif [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, +#if defined(CONFIG_X86) || defined(CONFIG_IA64) [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2, +#endif }; /** diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 9f935f55d74c32561300f9a59517c19fb6293e0d..968b31f39884865320a74913f8a66c37224d274e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -339,14 +339,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { * using the PRIME helpers. */ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags) + struct drm_gem_object *obj, + int flags) { - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &drm_gem_prime_dmabuf_ops; - exp_info.size = obj->size; - exp_info.flags = flags; - exp_info.priv = obj; + struct dma_buf_export_info exp_info = { + .exp_name = KBUILD_MODNAME, /* white lie for debug */ + .owner = dev->driver->fops->owner, + .ops = &drm_gem_prime_dmabuf_ops, + .size = obj->size, + .flags = flags, + .priv = obj, + }; if (dev->driver->gem_prime_res_obj) exp_info.resv = dev->driver->gem_prime_res_obj(obj); diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index 7f55ba6771c6b94e5f45bee6bdec078c27c74f5b..011211e4167d41dce17d56e8baee0b168b37e7b6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) return 0; err: - list_for_each_entry_reverse(subdrv, &subdrv->list, list) { + list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) { if (subdrv->close) subdrv->close(dev, subdrv->dev, file); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 
17cea400ae32f94b015cd3a493824806cee37a18..d3de377dc857e30f2c0c86afae3613033bb4e143 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon. */ -static int __deprecated +static int i2c_dp_aux_add_bus(struct i2c_adapter *adapter) { int error; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d400d6773bbbe449771a1b835d3310480a62004d..fb9f647bb5cd6fde9454ae9de9395bdc94b06b10 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2150,21 +2150,19 @@ struct drm_i915_gem_object { /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; - union { - /** for phy allocated objects */ - struct drm_dma_handle *phys_handle; - - struct i915_gem_userptr { - uintptr_t ptr; - unsigned read_only :1; - unsigned workers :4; + struct i915_gem_userptr { + uintptr_t ptr; + unsigned read_only :1; + unsigned workers :4; #define I915_GEM_USERPTR_MAX_WORKERS 15 - struct i915_mm_struct *mm; - struct i915_mmu_object *mmu_object; - struct work_struct *work; - } userptr; - }; + struct i915_mm_struct *mm; + struct i915_mmu_object *mmu_object; + struct work_struct *work; + } userptr; + + /** for phys allocated objects */ + struct drm_dma_handle *phys_handle; }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 87e919a06b277fea6cfade79445a165bac925056..5d2323a40c25e25b318ab9539b0a48699c636ff7 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -108,17 +108,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) pci_read_config_dword(dev->pdev, 0x5c, &base); 
base &= ~((1<<20) - 1); } else if (IS_I865G(dev)) { + u32 tseg_size = 0; u16 toud = 0; + u8 tmp; + + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), + I845_ESMRAMC, &tmp); + + if (tmp & TSEG_ENABLE) { + switch (tmp & I845_TSEG_SIZE_MASK) { + case I845_TSEG_SIZE_512K: + tseg_size = KB(512); + break; + case I845_TSEG_SIZE_1M: + tseg_size = MB(1); + break; + } + } - /* - * FIXME is the graphics stolen memory region - * always at TOUD? Ie. is it always the last - * one to be allocated by the BIOS? - */ pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0), I865_TOUD, &toud); - base = toud << 16; + base = (toud << 16) + tseg_size; } else if (IS_I85X(dev)) { u32 tseg_size = 0; u32 tom; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a3254c3bcc7c0b28bf2b6f707b8377e62dde1343..909d1d71d1302180eda82d706abfb013d51e3b94 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2950,13 +2950,13 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, } } -unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane) +u32 intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj, + unsigned int plane) { const struct i915_ggtt_view *view = &i915_ggtt_view_normal; struct i915_vma *vma; - unsigned char *offset; + u64 offset; if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) view = &i915_ggtt_view_rotated; @@ -2966,14 +2966,16 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, view->type)) return -1; - offset = (unsigned char *)vma->node.start; + offset = vma->node.start; if (plane == 1) { offset += vma->ggtt_view.rotation_info.uv_start_page * PAGE_SIZE; } - return (unsigned long)offset; + WARN_ON(upper_32_bits(offset)); + + return lower_32_bits(offset); } static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) @@ 
-3099,7 +3101,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, u32 tile_height, plane_offset, plane_size; unsigned int rotation; int x_offset, y_offset; - unsigned long surf_addr; + u32 surf_addr; struct intel_crtc_state *crtc_state = intel_crtc->config; struct intel_plane_state *plane_state; int src_x = 0, src_y = 0, src_w = 0, src_h = 0; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ebbd23407a80cb4a1228661a6cd22fd362539af2..0f8367da0663f23560dea3e0ff416ddf16f39029 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4648,7 +4648,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, * * Return %true if @port is connected, %false otherwise. */ -bool intel_digital_port_connected(struct drm_i915_private *dev_priv, +static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *port) { if (HAS_PCH_IBX(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 67f72a7ee7cbad151d91379bcd2622ab81df7620..722aa159cd28a3154e6bccb6e69b8c478b5bc21b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1177,9 +1177,9 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane); +u32 intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj, + unsigned int plane); u32 skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_tiling(uint64_t fb_modifier); @@ -1231,8 +1231,6 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp); void intel_edp_drrs_invalidate(struct drm_device *dev, unsigned frontbuffer_bits); void 
intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); -bool intel_digital_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *port); void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); /* intel_dp_mst.c */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index dff69fef47e0108c4a45b71ab67a85c3fb7c1aab..1ea8532f5ab2dae243623d12b9caae99b1a10175 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1331,19 +1331,18 @@ intel_hdmi_unset_edid(struct drm_connector *connector) } static bool -intel_hdmi_set_edid(struct drm_connector *connector, bool force) +intel_hdmi_set_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct edid *edid = NULL; + struct edid *edid; bool connected = false; intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - if (force) - edid = drm_get_edid(connector, - intel_gmbus_get_adapter(dev_priv, - intel_hdmi->ddc_bus)); + edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); @@ -1371,37 +1370,16 @@ static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { enum drm_connector_status status; - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = to_i915(connector->dev); - bool live_status = false; - unsigned int try; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - for (try = 0; !live_status && try < 9; try++) { - if (try) - msleep(10); - live_status = intel_digital_port_connected(dev_priv, - hdmi_to_dig_port(intel_hdmi)); - } - - if (!live_status) { - DRM_DEBUG_KMS("HDMI live status down\n"); - /* - * Live status register is not reliable on all 
intel platforms. - * So consider live_status only for certain platforms, for - * others, read EDID to determine presence of sink. - */ - if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) - live_status = true; - } - intel_hdmi_unset_edid(connector); - if (intel_hdmi_set_edid(connector, live_status)) { + if (intel_hdmi_set_edid(connector)) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; @@ -1427,7 +1405,7 @@ intel_hdmi_force(struct drm_connector *connector) if (connector->status != connector_status_connected) return; - intel_hdmi_set_edid(connector, true); + intel_hdmi_set_edid(connector); hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; } @@ -2019,6 +1997,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; } +static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[port]; + u8 ddc_pin; + + if (info->alternate_ddc_pin) { + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", + info->alternate_ddc_pin, port_name(port)); + return info->alternate_ddc_pin; + } + + switch (port) { + case PORT_B: + if (IS_BROXTON(dev_priv)) + ddc_pin = GMBUS_PIN_1_BXT; + else + ddc_pin = GMBUS_PIN_DPB; + break; + case PORT_C: + if (IS_BROXTON(dev_priv)) + ddc_pin = GMBUS_PIN_2_BXT; + else + ddc_pin = GMBUS_PIN_DPC; + break; + case PORT_D: + if (IS_CHERRYVIEW(dev_priv)) + ddc_pin = GMBUS_PIN_DPD_CHV; + else + ddc_pin = GMBUS_PIN_DPD; + break; + default: + MISSING_CASE(port); + ddc_pin = GMBUS_PIN_DPB; + break; + } + + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", + ddc_pin, port_name(port)); + + return ddc_pin; +} + void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector) { @@ -2028,7 +2050,6 @@ void 
intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; - uint8_t alternate_ddc_pin; DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", port_name(port)); @@ -2041,12 +2062,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, connector->doublescan_allowed = 0; connector->stereo_allowed = 1; + intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); + switch (port) { case PORT_B: - if (IS_BROXTON(dev_priv)) - intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; - else - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; /* * On BXT A0/A1, sw needs to activate DDIA HPD logic and * interrupts to check the external panel connection. @@ -2057,46 +2076,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_encoder->hpd_pin = HPD_PORT_B; break; case PORT_C: - if (IS_BROXTON(dev_priv)) - intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT; - else - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; intel_encoder->hpd_pin = HPD_PORT_C; break; case PORT_D: - if (WARN_ON(IS_BROXTON(dev_priv))) - intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED; - else if (IS_CHERRYVIEW(dev_priv)) - intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV; - else - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; intel_encoder->hpd_pin = HPD_PORT_D; break; case PORT_E: - /* On SKL PORT E doesn't have seperate GMBUS pin - * We rely on VBT to set a proper alternate GMBUS pin. */ - alternate_ddc_pin = - dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin; - switch (alternate_ddc_pin) { - case DDC_PIN_B: - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; - break; - case DDC_PIN_C: - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; - break; - case DDC_PIN_D: - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; - break; - default: - MISSING_CASE(alternate_ddc_pin); - } intel_encoder->hpd_pin = HPD_PORT_E; break; - case PORT_A: - intel_encoder->hpd_pin = HPD_PORT_A; - /* Internal port only for eDP. 
*/ default: - BUG(); + MISSING_CASE(port); + return; } if (IS_VALLEYVIEW(dev)) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1e851e037c292881d6d726bcbec5429f53c7e962..3f802163f7d47b0c59d300c204b48d1aa6233e08 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2096,33 +2096,35 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & GEN9_MEM_LATENCY_LEVEL_MASK; + /* + * If a level n (n > 1) has a 0us latency, all levels m (m >= n) + * need to be disabled. We make sure to sanitize the values out + * of the punit to satisfy this requirement. + */ + for (level = 1; level <= max_level; level++) { + if (wm[level] == 0) { + for (i = level + 1; i <= max_level; i++) + wm[i] = 0; + break; + } + } + /* * WaWmMemoryReadLatency:skl * * punit doesn't take into account the read latency so we need - * to add 2us to the various latency levels we retrieve from - * the punit. - * - W0 is a bit special in that it's the only level that - * can't be disabled if we want to have display working, so - * we always add 2us there. - * - For levels >=1, punit returns 0us latency when they are - * disabled, so we respect that and don't add 2us then - * - * Additionally, if a level n (n > 1) has a 0us latency, all - * levels m (m >= n) need to be disabled. We make sure to - * sanitize the values out of the punit to satisfy this - * requirement. + * to add 2us to the various latency levels we retrieve from the + * punit when level 0 response data us 0us. 
*/ - wm[0] += 2; - for (level = 1; level <= max_level; level++) - if (wm[level] != 0) + if (wm[0] == 0) { + wm[0] += 2; + for (level = 1; level <= max_level; level++) { + if (wm[level] == 0) + break; wm[level] += 2; - else { - for (i = level + 1; i <= max_level; i++) - wm[i] = 0; - - break; } + } + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { uint64_t sskpd = I915_READ64(MCH_SSKPD); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 56dc132e8e20a092c2f61ce8f1f11d62580294e1..2cc6aa072f4c796d9d6099a831a3c54dd6373487 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -195,7 +195,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &to_intel_plane_state(drm_plane->state)->ckey; - unsigned long surf_addr; + u32 surf_addr; u32 tile_height, plane_offset, plane_size; unsigned int rotation; int x_offset, y_offset; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c index bfcc6408a77203a88de81b7d1938f558de1039ab..b7f4b826febedfc1f2fb4b7dbe6309c5f01007c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c @@ -36,7 +36,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie) { struct nv04_fifo_chan *chan = nv04_fifo_chan(base); struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; + + mutex_lock(&chan->fifo->base.engine.subdev.mutex); nvkm_ramht_remove(imem->ramht, cookie); + mutex_unlock(&chan->fifo->base.engine.subdev.mutex); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c index 4bef72a9d106817fdf2e2091d971e60711c0d855..3fda594700e023f71d3cc5b885ef80232e4873d9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c @@ -59,9 +59,11 @@ static void nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom) { struct nvkm_device *device = pm->engine.subdev.device; - if (pm->sequence != pm->sequence) { + struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base); + + if (nv40pm->sequence != pm->sequence) { nvkm_wr32(device, 0x400084, 0x00000020); - pm->sequence = pm->sequence; + nv40pm->sequence = pm->sequence; } } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 56e1d633875e2772b6a3f767e3af44e013f3519d..6e6c76080d6a1bc78108388e496c1b4373f223ba 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, * correctly globaly, since that would require * tracking all of our palettes. */ ret = qxl_bo_kmap(palette_bo, (void **)&pal); + if (ret) + return ret; pal->num_ents = 2; pal->unique = unique++; if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 79bab6fd76bb92f81e051707f4049898e2a42fee..6755d4768f594796824019f900970f55021f9eee 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -275,6 +275,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + /* Make sure vblank interrupt is still enabled if needed */ + radeon_irq_set(rdev); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 44ee72e04df9e953bafe64cdfdadf2f01c1f9bce..b5760851195cfbcb0cfca10a88a154b38436c0b6 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -315,15 +315,27 @@ int 
radeon_dp_get_dp_link_config(struct drm_connector *connector, unsigned max_lane_num = drm_dp_max_lane_count(dpcd); unsigned lane_num, i, max_pix_clock; - for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { - for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { - max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_NUTMEG) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + max_pix_clock = (lane_num * 270000 * 8) / bpp; if (max_pix_clock >= pix_clock) { *dp_lanes = lane_num; - *dp_rate = link_rates[i]; + *dp_rate = 270000; return 0; } } + } else { + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (max_pix_clock >= pix_clock) { + *dp_lanes = lane_num; + *dp_rate = link_rates[i]; + return 0; + } + } + } } return -EINVAL; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 158872eb78e41ae94276b1abdad3fdce76ef463f..a3a321208fd88aaad3b5e7db27765e0130661de8 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev) void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl) { - u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; - - WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); + WREG32(SRBM_GFX_CNTL, RINGID(ring)); WREG32(CP_INT_CNTL, cp_int_cntl); } diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index fa2154493cf1537dee269149b1924468a6035325..470af4aa4a6aa252671f0f46e90fcb816745d68e 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) struct drm_device *dev = rdev->ddev; 
struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / - radeon_crtc->hw_mode.clock; - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - - radeon_crtc->hw_mode.crtc_vdisplay + - (radeon_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + vblank_in_pixels = + radeon_crtc->hw_mode.crtc_htotal * + (radeon_crtc->hw_mode.crtc_vblank_end - + radeon_crtc->hw_mode.crtc_vdisplay + + (radeon_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock; break; } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e2dd5d19c32ca2ab0bd77375dfad45ae0311e41d..4aa2cbe4c85fae3212aae4012bf957e16d2d6f83 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -660,8 +660,9 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; - /* for pass through, always force asic_init */ - if (radeon_device_is_virtual()) + /* for pass through, always force asic_init for CI */ + if (rdev->family >= CHIP_BONAIRE && + radeon_device_is_virtual()) return false; /* required for EFI mode on macbook2,1 which uses an r5xx asic */ diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index db64e0062689b076842b9710c33e4660c96e9985..3b0c229d7dcd23ffb7184ad79e2ebaa8f001cca9 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg 
tmp &= AUX_HPD_SEL(0x7); tmp |= AUX_HPD_SEL(chan->rec.hpd); - tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1); + tmp |= AUX_EN | AUX_LS_READ_EN; WREG32(AUX_CONTROL + aux_offset[instance], tmp); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 678b4386540d52ea22a5f2cb3b9e583f87232cfe..89f22bdde2985a76f09f2a4dbcce5a4050c2400a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); } drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + /* Make sure vblank interrupt is still enabled if needed */ + radeon_irq_set(rdev); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index caa73de584a53dd8ca9f26cdb6ca97d4f17c6b41..10191b935937c55ea125ace1a55d344bbe4bf146 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, int i; struct si_dpm_quirk *p = si_dpm_quirk_list; + /* limit all SI kickers */ + if (rdev->family == CHIP_PITCAIRN) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 0x6811) || + (rdev->pdev->device == 0x6816) || + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + max_mclk = 120000; + } else if (rdev->family == CHIP_VERDE) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6820) || + (rdev->pdev->device == 0x6821) || + (rdev->pdev->device == 0x6822) || + (rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682A) || + (rdev->pdev->device == 0x682B)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (rdev->family == 
CHIP_OLAND) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (rdev->family == CHIP_HAINAN) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6664) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)) { + max_sclk = 75000; + max_mclk = 80000; + } + } /* Apply dpm quirks */ while (p && p->chip_device != 0) { if (rdev->pdev->vendor == p->chip_vendor && @@ -3011,10 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, } ++p; } - /* limit mclk on all R7 370 parts for stability */ - if (rdev->pdev->device == 0x6811 && - rdev->pdev->revision == 0x81) - max_mclk = 120000; if (rps->vce_active) { rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; @@ -4106,7 +4145,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev, &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) { si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table); - table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h index 3c779838d9ab16ad13a0588b1f69ac323509d933..966e3a556011d641be34f822c6d7283d0cace9f3 100644 --- a/drivers/gpu/drm/radeon/sislands_smc.h +++ b/drivers/gpu/drm/radeon/sislands_smc.h @@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; #define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 #define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 #define SISLANDS_SMC_VOLTAGEMASK_MAX 4 struct SISLANDS_SMC_VOLTAGEMASKTABLE diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 4948c15298369cf4dc907b9aa07b2496441656dc..ecf15cf0c3fd49e0dbb83a16411901dd0e586281 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3830,14 +3830,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, int ret; *header = NULL; - if (!dev_priv->cman || kernel_commands) - return kernel_commands; - if (command_size > SVGA_CB_MAX_SIZE) { DRM_ERROR("Command buffer is too large.\n"); return ERR_PTR(-EINVAL); } + if (!dev_priv->cman || kernel_commands) + return kernel_commands; + /* If possible, add a little space for fencing. */ cmdbuf_size = command_size + 512; cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index b58391adf3abe4ca0f217a70ad93c153742248a5..87300096fbf1d19501d2bc77bcb63ad5ae22ee9d 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1167,7 +1167,7 @@ static const struct kgsl_hwcg_reg a512_hwcg_regs[] = { {A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, {A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, {A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, - {A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555}, + {A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, {A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, {A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, {A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 909ab0176ef2313a382309c7319c9920a6995cf1..e370306241658bf25976069768bc807334cdc0d0 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -168,6 +168,7 @@ #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 #define USB_DEVICE_ID_ATEN_CS682 0x2213 +#define USB_DEVICE_ID_ATEN_CS692 0x8021 #define 
USB_VENDOR_ID_ATMEL 0x03eb #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index dc8e6adf95a4357ffaf9121a2da8ddc9d2700223..6ca6ab00fa93ffdb57f288bd130ce13f2f89d040 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -61,6 +61,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 7994ec2e4151b085bbcb596d8ddbd4d8765b939e..41f5896224bd4723db7f87179c7466df423238a7 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context) u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; struct icmsg_negotiate *negop = NULL; - vmbus_recvpacket(channel, hbeat_txf_buf, - PAGE_SIZE, &recvlen, &requestid); + while (1) { + + vmbus_recvpacket(channel, hbeat_txf_buf, + PAGE_SIZE, &recvlen, &requestid); + + if (!recvlen) + break; - if (recvlen > 0) { icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[ sizeof(struct vmbuspipe_hdr)]; diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index 827c037031287637c602f4f3d6ca9c1790686f16..a7f886961830cf9c5ca7ecfaf9d8839ce1accbaf 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c @@ -30,6 +30,7 @@ #define ADT7411_REG_CFG1 0x18 #define ADT7411_CFG1_START_MONITOR (1 << 0) +#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3) #define ADT7411_REG_CFG2 0x19 #define ADT7411_CFG2_DISABLE_AVG (1 << 5) @@ -296,8 +297,10 @@ 
static int adt7411_probe(struct i2c_client *client, mutex_init(&data->device_lock); mutex_init(&data->update_lock); + /* According to the datasheet, we must only write 1 to bit 3 */ ret = adt7411_modify_bit(client, ADT7411_REG_CFG1, - ADT7411_CFG1_START_MONITOR, 1); + ADT7411_CFG1_RESERVED_BIT3 + | ADT7411_CFG1_START_MONITOR, 1); if (ret < 0) return ret; diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 78fbee46362828fceb0e098f0088ba8b8828e1f6..65dbde7781815e1ef4dcecf3ccc93d2ff7e7a024 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -59,7 +59,6 @@ config I2C_CHARDEV config I2C_MUX tristate "I2C bus multiplexing support" - depends on HAS_IOMEM help Say Y here if you want the I2C core to support the ability to handle multiplexed I2C bus topologies, by presenting each diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 76e699f9ed9732cb910fd6092837d1ddf13c37c5..eef3aa6007f1051faa4c7dd5a6abf63880ef4a3e 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev, /* Set the number of I2C channel instance */ adap_info->ch_num = id->driver_data; - ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, - KBUILD_MODNAME, adap_info); - if (ret) { - pch_pci_err(pdev, "request_irq FAILED\n"); - goto err_request_irq; - } - for (i = 0; i < adap_info->ch_num; i++) { pch_adap = &adap_info->pch_data[i].pch_adapter; adap_info->pch_i2c_suspended = false; @@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev *pdev, adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i; pch_adap->dev.parent = &pdev->dev; + } + + ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, + KBUILD_MODNAME, adap_info); + if (ret) { + pch_pci_err(pdev, "request_irq FAILED\n"); + goto err_request_irq; + } + + for (i = 0; i < adap_info->ch_num; i++) { + pch_adap = &adap_info->pch_data[i].pch_adapter; pch_i2c_init(&adap_info->pch_data[i]); diff 
--git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index fdcbdab808e9fcbea9eb301ab7a2fb261abb3226..33b11563cde79b04f6068b426f6aac83883d3333 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device) #ifdef CONFIG_PM_SLEEP static int qup_i2c_suspend(struct device *device) { - qup_i2c_pm_suspend_runtime(device); + if (!pm_runtime_suspended(device)) + return qup_i2c_pm_suspend_runtime(device); return 0; } diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 4233f5695352fdc951c54c54dc6fbb9de926d26e..3c38029e3fe976cb4ce854dd3d811a849d94aa66 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -105,7 +105,7 @@ struct slimpro_i2c_dev { struct mbox_chan *mbox_chan; struct mbox_client mbox_client; struct completion rd_complete; - u8 dma_buffer[I2C_SMBUS_BLOCK_MAX]; + u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */ u32 *resp_msg; }; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index ba8eb087f224655d1853c1192371759cf4d5e9f0..d625167357cc828c6be92ba89676404b0d15db75 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1876,6 +1876,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) /* add the driver to the list of i2c drivers in the driver core */ driver->driver.owner = owner; driver->driver.bus = &i2c_bus_type; + INIT_LIST_HEAD(&driver->clients); /* When registration returns, the driver core * will have called probe() for all matching-but-unbound devices. 
@@ -1886,7 +1887,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); - INIT_LIST_HEAD(&driver->clients); /* Walk the adapters that are already present */ i2c_for_each_dev(driver, __process_new_driver); diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index f06b0e24673b8732efdc2640246dae6c73809a13..af2a63cb405619f88123d4e3527667208c8e6314 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL config I2C_MUX_REG tristate "Register-based I2C multiplexer" + depends on HAS_IOMEM help If you say yes to this option, support will be included for a register based I2C multiplexer. This driver provides access to diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 291c61a41c9a8cae12198c207a2f12afd5d5532c..fa24d519661596de415e01490b36a36eff63fc28 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -68,6 +68,9 @@ #define BMC150_ACCEL_REG_PMU_BW 0x10 #define BMC150_ACCEL_DEF_BW 125 +#define BMC150_ACCEL_REG_RESET 0x14 +#define BMC150_ACCEL_RESET_VAL 0xB6 + #define BMC150_ACCEL_REG_INT_MAP_0 0x19 #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2) @@ -1487,6 +1490,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data) int ret, i; unsigned int val; + /* + * Reset chip to get it in a known good state. A delay of 1.8ms after + * reset is required according to the data sheets of supported chips. 
+ */ + regmap_write(data->regmap, BMC150_ACCEL_REG_RESET, + BMC150_ACCEL_RESET_VAL); + usleep_range(1800, 2500); + ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val); if (ret < 0) { dev_err(data->dev, diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 3a9f106787d28b2f85248402b68628fd02604460..9d72d4bcf5e9b316034219bb1b880fc55a72c076 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, if (ret < 0) goto error_ret; *val = ret; + ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); if (ret < 0) goto error_ret; + *val = 0; *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; ret = IIO_VAL_INT_PLUS_MICRO; break; diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 90135f496aaf1aebab981fbde4355d700689824b..51cb6d8cffdaef470c5c66d167467c0763dfeecf 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -333,6 +333,7 @@ config QCOM_TADC config ROCKCHIP_SARADC tristate "Rockchip SARADC driver" depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST) + depends on RESET_CONTROLLER help Say yes here to build support for the SARADC found in SoCs from Rockchip. 
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 01d71588d752ae0aa73da6abf4019f35d041939a..ba82de25a79727dfd8832424a9283b6410645ec6 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -533,6 +533,7 @@ static struct attribute_group ad799x_event_attrs_group = { static const struct iio_info ad7991_info = { .read_raw = &ad799x_read_raw, .driver_module = THIS_MODULE, + .update_scan_mode = ad799x_update_scan_mode, }; static const struct iio_info ad7993_4_7_8_noirq_info = { diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 7b40925dd4ff297e56fa0a3541980e9964d14092..93986f0590efa64f7e3b58fc468e96de4ac48cdb 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private) st->ts_bufferedmeasure = false; input_report_key(st->ts_input, BTN_TOUCH, 0); input_sync(st->ts_input); - } else if (status & AT91_ADC_EOC(3)) { - /* Conversion finished */ + } else if (status & AT91_ADC_EOC(3) && st->ts_input) { + /* Conversion finished and we've a touchscreen */ if (st->ts_bufferedmeasure) { /* * Last measurement is always discarded, since it can diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 9c311c1e1ac7f89cef190a3445507025fdb8f131..dffff64b598938f336995cbf2a1cac08eef558f8 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -53,6 +55,7 @@ struct rockchip_saradc { struct clk *clk; struct completion completion; struct regulator *vref; + struct reset_control *reset; const struct rockchip_saradc_data *data; u16 last_val; }; @@ -171,6 +174,16 @@ static const struct of_device_id rockchip_saradc_match[] = { }; MODULE_DEVICE_TABLE(of, rockchip_saradc_match); +/** + * Reset SARADC Controller. 
+ */ +static void rockchip_saradc_reset_controller(struct reset_control *reset) +{ + reset_control_assert(reset); + usleep_range(10, 20); + reset_control_deassert(reset); +} + static int rockchip_saradc_probe(struct platform_device *pdev) { struct rockchip_saradc *info = NULL; @@ -199,6 +212,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev) if (IS_ERR(info->regs)) return PTR_ERR(info->regs); + /* + * The reset should be an optional property, as it should work + * with old devicetrees as well + */ + info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb"); + if (IS_ERR(info->reset)) { + ret = PTR_ERR(info->reset); + if (ret != -ENOENT) + return ret; + + dev_dbg(&pdev->dev, "no reset control found\n"); + info->reset = NULL; + } + init_completion(&info->completion); irq = platform_get_irq(pdev, 0); @@ -233,6 +260,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev) return PTR_ERR(info->vref); } + if (info->reset) + rockchip_saradc_reset_controller(info->reset); + /* * Use a default value for the converter clock. * This may become user-configurable in the future. 
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index c1e05532d437f263a9aa3d7f7c96147b13bfe682..0470fc843d4efebba6ca4a6f269691ea34b9914f 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -32,6 +32,7 @@ struct tiadc_device { struct ti_tscadc_dev *mfd_tscadc; + struct mutex fifo1_lock; /* to protect fifo access */ int channels; u8 channel_line[8]; u8 channel_step[8]; @@ -360,6 +361,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct tiadc_device *adc_dev = iio_priv(indio_dev); + int ret = IIO_VAL_INT; int i, map_val; unsigned int fifo1count, read, stepid; bool found = false; @@ -373,13 +375,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, if (!step_en) return -EINVAL; + mutex_lock(&adc_dev->fifo1_lock); fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); while (fifo1count--) tiadc_readl(adc_dev, REG_FIFO1); am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en); - timeout = jiffies + usecs_to_jiffies + timeout = jiffies + msecs_to_jiffies (IDLE_TIMEOUT * adc_dev->channels); /* Wait for Fifo threshold interrupt */ while (1) { @@ -389,7 +392,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, if (time_after(jiffies, timeout)) { am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); - return -EAGAIN; + ret = -EAGAIN; + goto err_unlock; } } map_val = adc_dev->channel_step[chan->scan_index]; @@ -415,8 +419,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); if (found == false) - return -EBUSY; - return IIO_VAL_INT; + ret = -EBUSY; + +err_unlock: + mutex_unlock(&adc_dev->fifo1_lock); + return ret; } static const struct iio_info tiadc_info = { @@ -485,6 +492,7 @@ static int tiadc_probe(struct platform_device *pdev) tiadc_step_config(indio_dev); tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD); + mutex_init(&adc_dev->fifo1_lock); err = tiadc_channel_init(indio_dev, adc_dev->channels); if (err < 0) diff 
--git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index e81f434760f4c778c604c7d2e102bfa79adef9b2..b5beea53d6f6551aba68e570be25deca87d8288c 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -30,34 +30,34 @@ static struct { u32 usage_id; int unit; /* 0 for default others from HID sensor spec */ int scale_val0; /* scale, whole number */ - int scale_val1; /* scale, fraction in micros */ + int scale_val1; /* scale, fraction in nanos */ } unit_conversion[] = { - {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, + {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000}, {HID_USAGE_SENSOR_ACCEL_3D, HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, {HID_USAGE_SENSOR_ACCEL_3D, - HID_USAGE_SENSOR_UNITS_G, 9, 806650}, + HID_USAGE_SENSOR_UNITS_G, 9, 806650000}, - {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, + {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293}, {HID_USAGE_SENSOR_GYRO_3D, HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, {HID_USAGE_SENSOR_GYRO_3D, - HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, + HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293}, - {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, + {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000}, {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, - {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, + {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293}, {HID_USAGE_SENSOR_INCLINOMETER_3D, - HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, + HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, {HID_USAGE_SENSOR_INCLINOMETER_3D, HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, {HID_USAGE_SENSOR_ALS, 0, 1, 0}, {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, - {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0}, - {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0}, + {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, + {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000}, }; static 
int pow_10(unsigned power) @@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value); /* * This fuction applies the unit exponent to the scale. * For example: - * 9.806650 ->exp:2-> val0[980]val1[665000] - * 9.000806 ->exp:2-> val0[900]val1[80600] - * 0.174535 ->exp:2-> val0[17]val1[453500] - * 1.001745 ->exp:0-> val0[1]val1[1745] - * 1.001745 ->exp:2-> val0[100]val1[174500] - * 1.001745 ->exp:4-> val0[10017]val1[450000] - * 9.806650 ->exp:-2-> val0[0]val1[98066] + * 9.806650000 ->exp:2-> val0[980]val1[665000000] + * 9.000806000 ->exp:2-> val0[900]val1[80600000] + * 0.174535293 ->exp:2-> val0[17]val1[453529300] + * 1.001745329 ->exp:0-> val0[1]val1[1745329] + * 1.001745329 ->exp:2-> val0[100]val1[174532900] + * 1.001745329 ->exp:4-> val0[10017]val1[453290000] + * 9.806650000 ->exp:-2-> val0[0]val1[98066500] */ -static void adjust_exponent_micro(int *val0, int *val1, int scale0, +static void adjust_exponent_nano(int *val0, int *val1, int scale0, int scale1, int exp) { int i; @@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0, if (exp > 0) { *val0 = scale0 * pow_10(exp); res = 0; - if (exp > 6) { + if (exp > 9) { *val1 = 0; return; } for (i = 0; i < exp; ++i) { - x = scale1 / pow_10(5 - i); + x = scale1 / pow_10(8 - i); res += (pow_10(exp - 1 - i) * x); - scale1 = scale1 % pow_10(5 - i); + scale1 = scale1 % pow_10(8 - i); } *val0 += res; *val1 = scale1 * pow_10(exp); } else if (exp < 0) { exp = abs(exp); - if (exp > 6) { + if (exp > 9) { *val0 = *val1 = 0; return; } *val0 = scale0 / pow_10(exp); rem = scale0 % pow_10(exp); res = 0; - for (i = 0; i < (6 - exp); ++i) { - x = scale1 / pow_10(5 - i); - res += (pow_10(5 - exp - i) * x); - scale1 = scale1 % pow_10(5 - i); + for (i = 0; i < (9 - exp); ++i) { + x = scale1 / pow_10(8 - i); + res += (pow_10(8 - exp - i) * x); + scale1 = scale1 % pow_10(8 - i); } - *val1 = rem * pow_10(6 - exp) + res; + *val1 = rem * pow_10(9 - exp) + res; } else { *val0 = scale0; *val1 = scale1; 
@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id, unit_conversion[i].unit == attr_info->units) { exp = hid_sensor_convert_exponent( attr_info->unit_expo); - adjust_exponent_micro(val0, val1, + adjust_exponent_nano(val0, val1, unit_conversion[i].scale_val0, unit_conversion[i].scale_val1, exp); break; } } - return IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_NANO; } EXPORT_SYMBOL(hid_sensor_format_scale); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 7afd226a3321097815fb8a120bb3f0ca1461eddf..32bb036069eb032313fa95895c83b3396cd752f3 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -110,7 +110,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, DEFINE_WAIT_FUNC(wait, woken_wake_function); size_t datum_size; size_t to_wait; - int ret; + int ret = 0; if (!indio_dev->info) return -ENODEV; @@ -153,7 +153,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, ret = rb->access->read_first_n(rb, n, buf); if (ret == 0 && (filp->f_flags & O_NONBLOCK)) ret = -EAGAIN; - } while (ret == 0); + } while (ret == 0); remove_wait_queue(&rb->pollq, &wait); return ret; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 159ede61f79318d4ee8e581a8a61df2028680aba..131b434af994ddf5a09c3e828b33baba58ef66cd 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -433,23 +433,21 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) scale_db = true; case IIO_VAL_INT_PLUS_MICRO: if (vals[1] < 0) - return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]), - -vals[1], - scale_db ? " dB" : ""); + return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]), + -vals[1], scale_db ? " dB" : ""); else return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1], scale_db ? 
" dB" : ""); case IIO_VAL_INT_PLUS_NANO: if (vals[1] < 0) - return sprintf(buf, "-%ld.%09u\n", abs(vals[0]), - -vals[1]); + return sprintf(buf, "-%d.%09u\n", abs(vals[0]), + -vals[1]); else return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); case IIO_VAL_FRACTIONAL: tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); - vals[1] = do_div(tmp, 1000000000LL); - vals[0] = tmp; - return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); + vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]); + return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1])); case IIO_VAL_FRACTIONAL_LOG2: tmp = (s64)vals[0] * 1000000000LL >> vals[1]; vals[1] = do_div(tmp, 1000000000LL); diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c index b98b9d94d184cd058b7ddff7c40e7b9d87f965ac..a97e802ca523138227e9930a585357fd2f873e94 100644 --- a/drivers/iio/orientation/hid-sensor-rotation.c +++ b/drivers/iio/orientation/hid-sensor-rotation.c @@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = { .id_table = hid_dev_rot_ids, .driver = { .name = KBUILD_MODNAME, + .pm = &hid_sensor_pm_ops, }, .probe = hid_dev_rot_probe, .remove = hid_dev_rot_remove, diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index e2f926cdcad2acdbdd8aec50f0f479f52d2a6584..a0aedda7dfd769202c3782e2b6c1a854b3abba13 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c @@ -392,7 +392,7 @@ static int as3935_probe(struct spi_device *spi) return ret; } - ret = iio_triggered_buffer_setup(indio_dev, NULL, + ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time, &as3935_trigger_handler, NULL); if (ret) { diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 4d8e7f18a9af1fb3b3c25def33c99b8e54805b85..941cd9b83941bb25d1847b09875a531fe81319d8 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -80,6 +80,8 @@ static struct ib_cm { __be32 
random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; + /* Sync on cm change port state */ + spinlock_t state_lock; } cm; /* Counter indexes ordered by attribute ID */ @@ -161,6 +163,8 @@ struct cm_port { struct ib_mad_agent *mad_agent; struct kobject port_obj; u8 port_num; + struct list_head cm_priv_prim_list; + struct list_head cm_priv_altr_list; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; }; @@ -241,6 +245,12 @@ struct cm_id_private { u8 service_timeout; u8 target_ack_delay; + struct list_head prim_list; + struct list_head altr_list; + /* Indicates that the send port mad is registered and av is set */ + int prim_send_port_not_ready; + int altr_send_port_not_ready; + struct list_head work_list; atomic_t work_count; }; @@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; + struct cm_av *av; + unsigned long flags, flags2; + int ret = 0; + /* don't let the port to be released till the agent is down */ + spin_lock_irqsave(&cm.state_lock, flags2); + spin_lock_irqsave(&cm.lock, flags); + if (!cm_id_priv->prim_send_port_not_ready) + av = &cm_id_priv->av; + else if (!cm_id_priv->altr_send_port_not_ready && + (cm_id_priv->alt_av.port)) + av = &cm_id_priv->alt_av; + else { + pr_info("%s: not valid CM id\n", __func__); + ret = -ENODEV; + spin_unlock_irqrestore(&cm.lock, flags); + goto out; + } + spin_unlock_irqrestore(&cm.lock, flags); + /* Make sure the port haven't released the mad yet */ mad_agent = cm_id_priv->av.port->mad_agent; - ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); - if (IS_ERR(ah)) - return PTR_ERR(ah); + if (!mad_agent) { + pr_info("%s: not a valid MAD agent\n", __func__); + ret = -ENODEV; + goto out; + } + ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr); + if (IS_ERR(ah)) { + ret = PTR_ERR(ah); + goto out; + } m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, - 
cm_id_priv->av.pkey_index, + av->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(m)) { ib_destroy_ah(ah); - return PTR_ERR(m); + ret = PTR_ERR(m); + goto out; } /* Timeout set by caller if response is expected. */ @@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, atomic_inc(&cm_id_priv->refcount); m->context[0] = cm_id_priv; *msg = m; - return 0; + +out: + spin_unlock_irqrestore(&cm.state_lock, flags2); + return ret; } static int cm_alloc_response_msg(struct cm_port *port, @@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, grh, &av->ah_attr); } -static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) +static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, + struct cm_id_private *cm_id_priv) { struct cm_device *cm_dev; struct cm_port *port = NULL; @@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) &av->ah_attr); av->timeout = path->packet_life_time + 1; - return 0; + spin_lock_irqsave(&cm.lock, flags); + if (&cm_id_priv->av == av) + list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); + else if (&cm_id_priv->alt_av == av) + list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); + else + ret = -EINVAL; + + spin_unlock_irqrestore(&cm.lock, flags); + + return ret; } static int cm_alloc_id(struct cm_id_private *cm_id_priv) @@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); + INIT_LIST_HEAD(&cm_id_priv->prim_list); + INIT_LIST_HEAD(&cm_id_priv->altr_list); atomic_set(&cm_id_priv->work_count, -1); atomic_set(&cm_id_priv->refcount, 1); return &cm_id_priv->id; @@ -892,6 +945,15 @@ retest: break; } + spin_lock_irq(&cm.lock); + if (!list_empty(&cm_id_priv->altr_list) && + 
(!cm_id_priv->altr_send_port_not_ready)) + list_del(&cm_id_priv->altr_list); + if (!list_empty(&cm_id_priv->prim_list) && + (!cm_id_priv->prim_send_port_not_ready)) + list_del(&cm_id_priv->prim_list); + spin_unlock_irq(&cm.lock); + cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); wait_for_completion(&cm_id_priv->comp); @@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, goto out; } - ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); + ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, + cm_id_priv); if (ret) goto error1; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, - &cm_id_priv->alt_av); + &cm_id_priv->alt_av, cm_id_priv); if (ret) goto error1; } @@ -1639,7 +1702,8 @@ static int cm_req_handler(struct cm_work *work) cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN); - ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); + ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, + cm_id_priv); if (ret) { ib_get_cached_gid(work->port->cm_dev->ib_device, work->port->port_num, 0, &work->path[0].sgid, @@ -1650,7 +1714,8 @@ static int cm_req_handler(struct cm_work *work) goto rejected; } if (req_msg->alt_local_lid) { - ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); + ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, + cm_id_priv); if (ret) { ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, &work->path[0].sgid, @@ -2705,7 +2770,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, goto out; } - ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); + ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, + cm_id_priv); if (ret) goto out; cm_id_priv->alt_av.timeout = @@ -2817,7 +2883,8 @@ static int cm_lap_handler(struct cm_work *work) cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); - 
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); + cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, + cm_id_priv); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); @@ -3009,7 +3076,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); - ret = cm_init_av_by_path(param->path, &cm_id_priv->av); + ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); if (ret) goto out; @@ -3446,7 +3513,9 @@ out: static int cm_migrate(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; + struct cm_av tmp_av; unsigned long flags; + int tmp_send_port_not_ready; int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); @@ -3455,7 +3524,14 @@ static int cm_migrate(struct ib_cm_id *cm_id) (cm_id->lap_state == IB_CM_LAP_UNINIT || cm_id->lap_state == IB_CM_LAP_IDLE)) { cm_id->lap_state = IB_CM_LAP_IDLE; + /* Swap address vector */ + tmp_av = cm_id_priv->av; cm_id_priv->av = cm_id_priv->alt_av; + cm_id_priv->alt_av = tmp_av; + /* Swap port send ready state */ + tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; + cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; + cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; } else ret = -EINVAL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); @@ -3875,6 +3951,9 @@ static void cm_add_one(struct ib_device *ib_device) port->cm_dev = cm_dev; port->port_num = i; + INIT_LIST_HEAD(&port->cm_priv_prim_list); + INIT_LIST_HEAD(&port->cm_priv_altr_list); + ret = cm_create_port_fs(port); if (ret) goto error1; @@ -3932,6 +4011,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) { struct cm_device *cm_dev = client_data; struct cm_port *port; + struct cm_id_private *cm_id_priv; + struct ib_mad_agent *cur_mad_agent; struct ib_port_modify port_modify = { .clr_port_cap_mask = 
IB_PORT_CM_SUP }; @@ -3955,15 +4036,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); + /* Mark all the cm_id's as not valid */ + spin_lock_irq(&cm.lock); + list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) + cm_id_priv->altr_send_port_not_ready = 1; + list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) + cm_id_priv->prim_send_port_not_ready = 1; + spin_unlock_irq(&cm.lock); /* * We flush the queue here after the going_down set, this * verify that no new works will be queued in the recv handler, * after that we can call the unregister_mad_agent */ flush_workqueue(cm.wq); - ib_unregister_mad_agent(port->mad_agent); + spin_lock_irq(&cm.state_lock); + cur_mad_agent = port->mad_agent; + port->mad_agent = NULL; + spin_unlock_irq(&cm.state_lock); + ib_unregister_mad_agent(cur_mad_agent); cm_remove_port_fs(port); } + device_unregister(cm_dev->device); kfree(cm_dev); } @@ -3976,6 +4069,7 @@ static int __init ib_cm_init(void) INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); + spin_lock_init(&cm.state_lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index bb6685fb08c61483546505f99037b88af6202ad0..6aa648cb538139409e658d487102e3510f8489d5 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -106,7 +106,6 @@ struct mcast_group { atomic_t refcount; enum mcast_group_state state; struct ib_sa_query *query; - int query_id; u16 pkey_index; u8 leave_state; int retries; @@ -339,11 +338,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member) member->multicast.comp_mask, 3000, GFP_KERNEL, join_handler, group, &group->query); - if (ret >= 0) { - group->query_id = 
ret; - ret = 0; - } - return ret; + return (ret > 0) ? 0 : ret; } static int send_leave(struct mcast_group *group, u8 leave_state) @@ -363,11 +358,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state) IB_SA_MCMEMBER_REC_JOIN_STATE, 3000, GFP_KERNEL, leave_handler, group, &group->query); - if (ret >= 0) { - group->query_id = ret; - ret = 0; - } - return ret; + return (ret > 0) ? 0 : ret; } static void join_group(struct mcast_group *group, struct mcast_member *member, diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 38acb3cfc545f91658f0d465b69914de99f281b2..04f3c0db912654c1cff4aac76d7103bf90c1beb7 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, cur_base = addr & PAGE_MASK; - if (npages == 0) { + if (npages == 0 || npages > UINT_MAX) { ret = -EINVAL; goto out; } diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 94bbd8c155fcca1daf5f2e9ee0fc65552821ff3f..a2d19d136099d22f655d50eb5ed4b7dadf7e1c14 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -116,6 +116,7 @@ struct ib_uverbs_event_file { struct ib_uverbs_file { struct kref ref; struct mutex mutex; + struct mutex cleanup_mutex; /* protect cleanup */ struct ib_uverbs_device *device; struct ib_ucontext *ucontext; struct ib_event_handler event_handler; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 24f3ca2c4ad761bf639eb2933c30387c25377c21..7becef27cbbe7d1cffcc4d130d4e13a4fcd49eb5 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -244,12 +244,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, container_of(uobj, struct ib_uqp_object, uevent.uobject); idr_remove_uobj(&ib_uverbs_qp_idr, uobj); - if (qp != qp->real_qp) { - ib_close_qp(qp); - } else { + if (qp 
== qp->real_qp) ib_uverbs_detach_umcast(qp, uqp); - ib_destroy_qp(qp); - } + ib_destroy_qp(qp); ib_uverbs_release_uevent(file, &uqp->uevent); kfree(uqp); } @@ -922,6 +919,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) file->async_file = NULL; kref_init(&file->ref); mutex_init(&file->mutex); + mutex_init(&file->cleanup_mutex); filp->private_data = file; kobject_get(&dev->kobj); @@ -947,18 +945,20 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_device *dev = file->device; - struct ib_ucontext *ucontext = NULL; + + mutex_lock(&file->cleanup_mutex); + if (file->ucontext) { + ib_uverbs_cleanup_ucontext(file, file->ucontext); + file->ucontext = NULL; + } + mutex_unlock(&file->cleanup_mutex); mutex_lock(&file->device->lists_mutex); - ucontext = file->ucontext; - file->ucontext = NULL; if (!file->is_closed) { list_del(&file->list); file->is_closed = 1; } mutex_unlock(&file->device->lists_mutex); - if (ucontext) - ib_uverbs_cleanup_ucontext(file, ucontext); if (file->async_file) kref_put(&file->async_file->ref, ib_uverbs_release_event_file); @@ -1172,22 +1172,30 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, mutex_lock(&uverbs_dev->lists_mutex); while (!list_empty(&uverbs_dev->uverbs_file_list)) { struct ib_ucontext *ucontext; - file = list_first_entry(&uverbs_dev->uverbs_file_list, struct ib_uverbs_file, list); file->is_closed = 1; - ucontext = file->ucontext; list_del(&file->list); - file->ucontext = NULL; kref_get(&file->ref); mutex_unlock(&uverbs_dev->lists_mutex); - /* We must release the mutex before going ahead and calling - * disassociate_ucontext. disassociate_ucontext might end up - * indirectly calling uverbs_close, for example due to freeing - * the resources (e.g mmput). 
- */ + ib_uverbs_event_handler(&file->event_handler, &event); + + mutex_lock(&file->cleanup_mutex); + ucontext = file->ucontext; + file->ucontext = NULL; + mutex_unlock(&file->cleanup_mutex); + + /* At this point ib_uverbs_close cannot be running + * ib_uverbs_cleanup_ucontext + */ if (ucontext) { + /* We must release the mutex before going ahead and + * calling disassociate_ucontext. disassociate_ucontext + * might end up indirectly calling uverbs_close, + * for example due to freeing the resources + * (e.g mmput). + */ ib_dev->disassociate_ucontext(ucontext); ib_uverbs_cleanup_ucontext(file, ucontext); } diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 06da56bda201283e474147b84bbdc05901abfe01..c007c766c61e4ad3f20c1099b2f553cdb1f9cfde 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr if (vlan_tag < 0x1000) vlan_tag |= (ah_attr->sl & 7) << 13; ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); - ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); + ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); + if (ret < 0) + return ERR_PTR(ret); + ah->av.eth.gid_index = ret; ah->av.eth.vlan = cpu_to_be16(vlan_tag); if (ah_attr->static_rate) { ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index b88fc8f5ab180c717e827f582187b817f92a0e33..57e1a08925d99700c91c5d758a3158636d7ca73b 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, if (context) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { err = -EFAULT; - goto err_dbmap; + goto err_cq_free; } return &cq->ibcq; 
+err_cq_free: + mlx4_cq_free(dev->dev, &cq->mcq); + err_dbmap: if (context) mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 05179f47bbdea9fc3f133407754612027f70efbf..d862b9b7910e489dc5a4a4f4b00d9b44eec4f764 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1080,6 +1080,27 @@ void handle_port_mgmt_change_event(struct work_struct *work) /* Generate GUID changed event */ if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { + if (mlx4_is_master(dev->dev)) { + union ib_gid gid; + int err = 0; + + if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix) + err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1); + else + gid.global.subnet_prefix = + eqe->event.port_mgmt_change.params.port_info.gid_prefix; + if (err) { + pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n", + port, err); + } else { + pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. 
new=0x%llx\n", + port, + (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), + be64_to_cpu(gid.global.subnet_prefix)); + atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, + be64_to_cpu(gid.global.subnet_prefix)); + } + } mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); /*if master, notify all slaves*/ if (mlx4_is_master(dev->dev)) @@ -2154,6 +2175,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) if (err) goto demux_err; dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; + atomic64_set(&dev->sriov.demux[i].subnet_prefix, + be64_to_cpu(gid.global.subnet_prefix)); err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, &dev->sriov.sqps[i]); if (err) diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index 99451d887266d2bffee04e2a71347f69b4883305..36ec8aa048aacafdd17f89e1cb66a360d344ad0b 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group) if (!group->members[i]) leave_state |= (1 << i); - return leave_state & (group->rec.scope_join_state & 7); + return leave_state & (group->rec.scope_join_state & 0xf); } static int join_group(struct mcast_group *group, int slave, u8 join_mask) @@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) } else mcg_warn_group(group, "DRIVER BUG\n"); } else if (group->state == MCAST_LEAVE_SENT) { - if (group->rec.scope_join_state & 7) - group->rec.scope_join_state &= 0xf8; + if (group->rec.scope_join_state & 0xf) + group->rec.scope_join_state &= 0xf0; group->state = MCAST_IDLE; mutex_unlock(&group->lock); if (release_group(group, 1)) @@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask, static int handle_join_req(struct mcast_group *group, u8 join_mask, struct mcast_req *req) { - u8 group_join_state = group->rec.scope_join_state & 7; + u8 group_join_state = group->rec.scope_join_state & 0xf; int 
ref = 0; u16 status; struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; @@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work) u8 cur_join_state; resp_join_state = ((struct ib_sa_mcmember_data *) - group->response_sa_mad.data)->scope_join_state & 7; - cur_join_state = group->rec.scope_join_state & 7; + group->response_sa_mad.data)->scope_join_state & 0xf; + cur_join_state = group->rec.scope_join_state & 0xf; if (method == IB_MGMT_METHOD_GET_RESP) { /* successfull join */ @@ -710,7 +710,7 @@ process_requests: req = list_first_entry(&group->pending_list, struct mcast_req, group_list); sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; - req_join_state = sa_data->scope_join_state & 0x7; + req_join_state = sa_data->scope_join_state & 0xf; /* For a leave request, we will immediately answer the VF, and * update our internal counters. The actual leave will be sent diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 1caa11edac03347a246467436daa72dd18d5c505..78f29e91653a39e9871a783ac71037a4c7989dcf 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -441,7 +441,7 @@ struct mlx4_ib_demux_ctx { struct workqueue_struct *wq; struct workqueue_struct *ud_wq; spinlock_t ud_lock; - __be64 subnet_prefix; + atomic64_t subnet_prefix; __be64 guid_cache[128]; struct mlx4_ib_dev *dev; /* the following lock protects both mcg_table and mcg_mgid0_list */ diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ea1e2ddaddf5b9a000c14721b7471823a3e53ef5..f350f2d61c15b8efe9303a681d1f90d70b44ccf4 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2331,24 +2331,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, sqp->ud_header.grh.flow_label = ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; - if 
(is_eth) + if (is_eth) { memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); - else { - if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { - /* When multi-function is enabled, the ib_core gid - * indexes don't necessarily match the hw ones, so - * we must use our own cache */ - sqp->ud_header.grh.source_gid.global.subnet_prefix = - to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. - subnet_prefix; - sqp->ud_header.grh.source_gid.global.interface_id = - to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. - guid_cache[ah->av.ib.gid_index]; - } else - ib_get_cached_gid(ib_dev, - be32_to_cpu(ah->av.ib.port_pd) >> 24, - ah->av.ib.gid_index, - &sqp->ud_header.grh.source_gid, NULL); + } else { + if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { + /* When multi-function is enabled, the ib_core gid + * indexes don't necessarily match the hw ones, so + * we must use our own cache + */ + sqp->ud_header.grh.source_gid.global.subnet_prefix = + cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov. + demux[sqp->qp.port - 1]. + subnet_prefix))); + sqp->ud_header.grh.source_gid.global.interface_id = + to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. + guid_cache[ah->av.ib.gid_index]; + } else { + ib_get_cached_gid(ib_dev, + be32_to_cpu(ah->av.ib.port_pd) >> 24, + ah->av.ib.gid_index, + &sqp->ud_header.grh.source_gid, NULL); + } } memcpy(sqp->ud_header.grh.destination_gid.raw, ah->av.ib.dgid, 16); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 8184267c79012e027ef12614aab72d373c2b51b3..02c8deab1fff4c08d34963d75cea8ef5b774def6 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -787,8 +787,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, if (err) goto err_create; } else { - /* for now choose 64 bytes till we have a proper interface */ - cqe_size = 64; + cqe_size = cache_line_size() == 128 ? 
128 : 64; err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index, &inlen); if (err) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index bfc940ff9c8a531368bae44fde0ed68f000ae787..2a1fdcaa3044f681a2558d77526f8178993d3e3d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -947,13 +947,13 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, { struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; struct ib_event ibev; - + bool fatal = false; u8 port = 0; switch (event) { case MLX5_DEV_EVENT_SYS_ERROR: - ibdev->ib_active = false; ibev.event = IB_EVENT_DEVICE_FATAL; + fatal = true; break; case MLX5_DEV_EVENT_PORT_UP: @@ -998,6 +998,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, if (ibdev->ib_active) ib_dispatch_event(&ibev); + + if (fatal) + ibdev->ib_active = false; } static void get_ext_port_caps(struct mlx5_ib_dev *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 3ede103097547d4355646d322b5570ebb421d2ec..69a151ae8261432ba95ad4135db57df8a336251e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -472,6 +472,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn); void ipoib_reap_ah(struct work_struct *work); +struct ipoib_path *__path_find(struct net_device *dev, void *gid); void ipoib_mark_paths_invalid(struct net_device *dev); void ipoib_flush_paths(struct net_device *dev); struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 3ae9726efb9837512a62214705bf5e8e9561a02c..8ca75af0e6d18d052b9d5e4be7cd644608da1f1b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1299,6 +1299,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) } } +#define 
QPN_AND_OPTIONS_OFFSET 4 + static void ipoib_cm_tx_start(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, @@ -1307,6 +1309,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) struct ipoib_neigh *neigh; struct ipoib_cm_tx *p; unsigned long flags; + struct ipoib_path *path; int ret; struct ib_sa_path_rec pathrec; @@ -1319,7 +1322,19 @@ static void ipoib_cm_tx_start(struct work_struct *work) p = list_entry(priv->cm.start_list.next, typeof(*p), list); list_del_init(&p->list); neigh = p->neigh; + qpn = IPOIB_QPN(neigh->daddr); + /* + * As long as the search is with these 2 locks, + * path existence indicates its validity. + */ + path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET); + if (!path) { + pr_info("%s ignore not valid path %pI6\n", + __func__, + neigh->daddr + QPN_AND_OPTIONS_OFFSET); + goto free_neigh; + } memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); spin_unlock_irqrestore(&priv->lock, flags); @@ -1331,6 +1346,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) spin_lock_irqsave(&priv->lock, flags); if (ret) { +free_neigh: neigh = p->neigh; if (neigh) { neigh->cm = NULL; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index fa9c42ff1fb00963a47a71868ff96abb6e15189b..85de078fb0cecbe63a457227ffbbe8c19bee042a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1028,8 +1028,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, } if (level == IPOIB_FLUSH_LIGHT) { + int oper_up; ipoib_mark_paths_invalid(dev); + /* Set IPoIB operation as down to prevent races between: + * the flush flow which leaves MCG and on the fly joins + * which can happen during that time. mcast restart task + * should deal with join requests we missed. 
+ */ + oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); ipoib_mcast_dev_flush(dev); + if (oper_up) + set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); ipoib_flush_ah(dev); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 942dffca6a9d16d194ab11d73d5b9200f8f27198..5f7681b975d0defccb280a47ed1477c055707fde 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -481,7 +481,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) return -EINVAL; } -static struct ipoib_path *__path_find(struct net_device *dev, void *gid) +struct ipoib_path *__path_find(struct net_device *dev, void *gid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->path_tree.rb_node; diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c index 907e4e278fce1dc8f85777b90354ecd5cecd6e36..c877e56a9bd5760f0ef9151460a6fd5f7d99a6b2 100644 --- a/drivers/input/keyboard/goldfish_events.c +++ b/drivers/input/keyboard/goldfish_events.c @@ -17,11 +17,15 @@ #include #include #include +#include #include #include #include #include #include +#include + +#define GOLDFISH_MAX_FINGERS 5 enum { REG_READ = 0x00, @@ -51,7 +55,21 @@ static irqreturn_t events_interrupt(int irq, void *dev_id) value = __raw_readl(edev->addr + REG_READ); input_event(edev->input, type, code, value); - input_sync(edev->input); + // Send an extra (EV_SYN, SYN_REPORT, 0x0) event + // if a key was pressed. Some keyboard device + // drivers may only send the EV_KEY event and + // not EV_SYN. + // Note that sending an extra SYN_REPORT is not + // necessary nor correct protocol with other + // devices such as touchscreens, which will send + // their own SYN_REPORT's when sufficient event + // information has been collected (e.g., for + // touchscreens, when pressure and X/Y coordinates + // have been received). 
Hence, we will only send + // this extra SYN_REPORT if type == EV_KEY. + if (type == EV_KEY) { + input_sync(edev->input); + } return IRQ_HANDLED; } @@ -153,6 +171,15 @@ static int events_probe(struct platform_device *pdev) input_dev->name = edev->name; input_dev->id.bustype = BUS_HOST; + // Set the Goldfish Device to be multi-touch. + // In the Ranchu kernel, there is multi-touch-specific + // code for handling ABS_MT_SLOT events. + // See drivers/input/input.c:input_handle_abs_event. + // If we do not issue input_mt_init_slots, + // the kernel will filter out needed ABS_MT_SLOT + // events when we touch the screen in more than one place, + // preventing multi-touch with more than one finger from working. + input_mt_init_slots(input_dev, GOLDFISH_MAX_FINGERS, 0); events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX); events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX); @@ -178,10 +205,26 @@ static int events_probe(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_events_of_match[] = { + { .compatible = "google,goldfish-events-keypad", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_events_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id goldfish_events_acpi_match[] = { + { "GFSH0002", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_events_acpi_match); +#endif + static struct platform_driver events_driver = { .probe = events_probe, .driver = { .name = "goldfish_events", + .of_match_table = goldfish_events_of_match, + .acpi_match_table = ACPI_PTR(goldfish_events_acpi_match), }, }; diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c index 7fbf7247e65f5879faa006d19340d8711dc34883..7e5222aec7c1e5e5f47b229422e84842a1df43cf 100644 --- a/drivers/input/keyreset.c +++ b/drivers/input/keyreset.c @@ -32,8 +32,7 @@ struct keyreset_state { static void do_restart(struct work_struct *unused) { - sys_sync(); - kernel_restart(NULL); + orderly_reboot(); } static void do_reset_fn(void *priv) diff 
--git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index be5b399da5d3ec7b982fa49945bcc4d3be576b39..43482ae1e0492ef38f160611b96828f47581a022 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1163,6 +1163,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), }, }, + { + /* Fujitsu H760 also has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), + }, + }, #endif { } }; @@ -1507,10 +1514,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { }, }, { - /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ + /* Fujitsu H760 does not work with crc_enabled == 0 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), }, }, { @@ -1520,6 +1527,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), }, }, + { + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), + }, + }, + { + /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"), + }, + }, { /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ .matches = { diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h index a5eed2ade53de3a4e1d06776288ec17baba52530..34da81c006b6d80d3df98feffc7651d8eaa3f2c8 100644 --- a/drivers/input/serio/i8042-io.h +++ b/drivers/input/serio/i8042-io.h @@ -81,7 +81,7 @@ static inline int i8042_platform_init(void) return -EBUSY; #endif - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-ip22io.h 
b/drivers/input/serio/i8042-ip22io.h index ee1ad27d6ed06ef70370f3096352b37acb1c1368..08a1c10a1448d12db092a1c3f7525e4f5f1b76a2 100644 --- a/drivers/input/serio/i8042-ip22io.h +++ b/drivers/input/serio/i8042-ip22io.h @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) return -EBUSY; #endif - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h index f708c75d16f1d919f4e054c94437fb6e4e587866..1aabea43329edf56f2067c73eafca4d140d3ac4b 100644 --- a/drivers/input/serio/i8042-ppcio.h +++ b/drivers/input/serio/i8042-ppcio.h @@ -44,7 +44,7 @@ static inline void i8042_write_command(int val) static inline int i8042_platform_init(void) { - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index afcd1c1a05b272b1d615b481f2de9c0a812a2889..6231d63860ee324031d36e6e1c42dd57407dc27f 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -130,7 +130,7 @@ static int __init i8042_platform_init(void) } } - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h index 73f5cc124a3606a5cb73406e207d8a27fd4c339e..455747552f858a8819ec18d03317980352e514ea 100644 --- a/drivers/input/serio/i8042-unicore32io.h +++ b/drivers/input/serio/i8042-unicore32io.h @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042")) return -EBUSY; - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 68f5f4a0f1e72f10b35e45243218e6cfbf3d586e..073246c7d1634eef411c1bf07d7f72ab47cad369 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -510,6 +510,90 
@@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { } }; +/* + * On some Asus laptops, just running self tests cause problems. + */ +static const struct dmi_system_id i8042_dmi_noselftest_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "R409L"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"), + }, + }, + { } +}; static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { { /* MSI Wind U-100 */ @@ -793,6 +877,13 @@ static const struct 
dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P34"), }, }, + { + /* Schenker XMG C504 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "XMG"), + DMI_MATCH(DMI_PRODUCT_NAME, "C504"), + }, + }, { } }; @@ -1072,12 +1163,18 @@ static int __init i8042_platform_init(void) return retval; #if defined(__ia64__) - i8042_reset = true; + i8042_reset = I8042_RESET_ALWAYS; #endif #ifdef CONFIG_X86 - if (dmi_check_system(i8042_dmi_reset_table)) - i8042_reset = true; + /* Honor module parameter when value is not default */ + if (i8042_reset == I8042_RESET_DEFAULT) { + if (dmi_check_system(i8042_dmi_reset_table)) + i8042_reset = I8042_RESET_ALWAYS; + + if (dmi_check_system(i8042_dmi_noselftest_table)) + i8042_reset = I8042_RESET_NEVER; + } if (dmi_check_system(i8042_dmi_noloop_table)) i8042_noloop = true; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 405252a884dd41e233da4399ab109f213becb85a..89abfdb539ac750ff50eca67f77b2fe6898ff6f0 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -48,9 +48,39 @@ static bool i8042_unlock; module_param_named(unlock, i8042_unlock, bool, 0); MODULE_PARM_DESC(unlock, "Ignore keyboard lock."); -static bool i8042_reset; -module_param_named(reset, i8042_reset, bool, 0); -MODULE_PARM_DESC(reset, "Reset controller during init and cleanup."); +enum i8042_controller_reset_mode { + I8042_RESET_NEVER, + I8042_RESET_ALWAYS, + I8042_RESET_ON_S2RAM, +#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM +}; +static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT; +static int i8042_set_reset(const char *val, const struct kernel_param *kp) +{ + enum i8042_controller_reset_mode *arg = kp->arg; + int error; + bool reset; + + if (val) { + error = kstrtobool(val, &reset); + if (error) + return error; + } else { + reset = true; + } + + *arg = reset ? 
I8042_RESET_ALWAYS : I8042_RESET_NEVER; + return 0; +} + +static const struct kernel_param_ops param_ops_reset_param = { + .flags = KERNEL_PARAM_OPS_FL_NOARG, + .set = i8042_set_reset, +}; +#define param_check_reset_param(name, p) \ + __param_check(name, p, enum i8042_controller_reset_mode) +module_param_named(reset, i8042_reset, reset_param, 0); +MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both"); static bool i8042_direct; module_param_named(direct, i8042_direct, bool, 0); @@ -1019,7 +1049,7 @@ static int i8042_controller_init(void) * Reset the controller and reset CRT to the original value set by BIOS. */ -static void i8042_controller_reset(bool force_reset) +static void i8042_controller_reset(bool s2r_wants_reset) { i8042_flush(); @@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset) * Reset the controller if requested. */ - if (i8042_reset || force_reset) + if (i8042_reset == I8042_RESET_ALWAYS || + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { i8042_controller_selftest(); + } /* * Restore the original control register setting. @@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void) * before suspending. */ -static int i8042_controller_resume(bool force_reset) +static int i8042_controller_resume(bool s2r_wants_reset) { int error; @@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset) if (error) return error; - if (i8042_reset || force_reset) { + if (i8042_reset == I8042_RESET_ALWAYS || + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { error = i8042_controller_selftest(); if (error) return error; @@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev) static int i8042_pm_resume(struct device *dev) { - bool force_reset; + bool want_reset; int i; for (i = 0; i < I8042_NUM_PORTS; i++) { @@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev) * off control to the platform firmware, otherwise we can simply restore * the mode. 
*/ - force_reset = pm_resume_via_firmware(); + want_reset = pm_resume_via_firmware(); - return i8042_controller_resume(force_reset); + return i8042_controller_resume(want_reset); } static int i8042_pm_thaw(struct device *dev) @@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev) i8042_platform_device = dev; - if (i8042_reset) { + if (i8042_reset == I8042_RESET_ALWAYS) { error = i8042_controller_selftest(); if (error) return error; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 0397985a260128c6054513a22133adafd460991c..5975d76ce755dcb395a082a42bb6c11e8becbdd5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1833,6 +1833,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) kfree(dom->aperture[i]); } + if (dom->domain.id) + domain_id_free(dom->domain.id); + kfree(dom); } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ce1eb562be36c96e29d1068263fbea33eb5f4d15..03a6917233492b6c1b5cfd76b70d29ee33038c41 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1835,6 +1835,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .oas = oas, .tlb = &arm_smmu_gather_ops, .iommu_dev = smmu->dev, + .iova_base = domain->geometry.aperture_start, + .iova_end = domain->geometry.aperture_end, }; } diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index 6d90221486c9950644754a2e0afed43307c44d38..2acb9242bcf86f2a34d3c3eff7cd39d7fb50c7d8 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -151,7 +151,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, iommu_tlbiall(mapping->domain); mapping->have_stale_tlbs = false; - av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync); + av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base, + mapping->base + mapping->size - 1, + skip_sync); } return (bit << FAST_PAGE_SHIFT) + mapping->base; @@ 
-328,7 +330,7 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, if (unlikely(iova == DMA_ERROR_CODE)) goto fail; - pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova); if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot))) goto fail_free_iova; @@ -351,7 +353,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova, { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; unsigned long flags; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->base, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset, FAST_PAGE_SIZE); int nptes = len >> FAST_PAGE_SHIFT; @@ -373,7 +376,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->base, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); @@ -385,7 +389,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->base, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); @@ -513,7 +518,8 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, while (sg_miter_next(&miter)) { int nptes = miter.length >> FAST_PAGE_SHIFT; - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, 
iova_iter); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, + iova_iter); if (unlikely(av8l_fast_map_public( ptep, page_to_phys(miter.page), miter.length, prot))) { @@ -541,7 +547,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, out_unmap: /* need to take the lock again for page tables and iova */ spin_lock_irqsave(&mapping->lock, flags); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr); av8l_fast_unmap_public(ptep, size); fast_dmac_clean_range(mapping, ptep, ptep + count); out_free_iova: @@ -573,7 +579,7 @@ static void fast_smmu_free(struct device *dev, size_t size, pages = area->pages; dma_common_free_remap(vaddr, size, VM_USERMAP, false); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle); spin_lock_irqsave(&mapping->lock, flags); av8l_fast_unmap_public(ptep, size); fast_dmac_clean_range(mapping, ptep, ptep + count); @@ -745,6 +751,9 @@ int fast_smmu_attach_device(struct device *dev, mapping->fast->domain = domain; mapping->fast->dev = dev; + domain->geometry.aperture_start = mapping->base; + domain->geometry.aperture_end = mapping->base + size - 1; + if (iommu_attach_device(domain, dev)) return -EINVAL; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 3821c4786662a4841f9e83c33d6bd7f70cf6d53d..e913a930ac8093dc856fba2fb71b8664e37452b0 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -326,7 +326,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, struct pci_dev *pdev = to_pci_dev(data); struct dmar_pci_notify_info *info; - /* Only care about add/remove events for physical functions */ + /* Only care about add/remove events for physical functions. + * For VFs we actually do the lookup based on the corresponding + * PF in device_to_iommu() anyway. 
*/ if (pdev->is_virtfn) return NOTIFY_DONE; if (action != BUS_NOTIFY_ADD_DEVICE && @@ -1858,10 +1860,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) /* * All PCI devices managed by this unit should have been destroyed. */ - if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) + if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) { for_each_active_dev_scope(dmaru->devices, dmaru->devices_cnt, i, dev) return -EBUSY; + } ret = dmar_ir_hotplug(dmaru, false); if (ret == 0) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 24d81308a1a6ac9b73cd6365f5db6d2324afc97b..59e9abd3345eb783fde00e8d2853943afba74116 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -885,7 +885,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf return NULL; if (dev_is_pci(dev)) { + struct pci_dev *pf_pdev; + pdev = to_pci_dev(dev); + /* VFs aren't listed in scope tables; we need to look up + * the PF instead to find the IOMMU. */ + pf_pdev = pci_physfn(pdev); + dev = &pf_pdev->dev; segment = pci_domain_nr(pdev->bus); } else if (has_acpi_companion(dev)) dev = &ACPI_COMPANION(dev)->dev; @@ -898,6 +904,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, tmp) { if (tmp == dev) { + /* For a VF use its original BDF# not that of the PF + * which we used for the IOMMU lookup. Strictly speaking + * we could do this for all PCI devices; we only need to + * get the BDF# from the scope table for ACPI matches. 
*/ + if (pdev->is_virtfn) + goto got_pdev; + *bus = drhd->devices[i].bus; *devfn = drhd->devices[i].devfn; goto out; @@ -1672,6 +1685,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) if (!iommu->domains || !iommu->domain_ids) return; +again: spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry_safe(info, tmp, &device_domain_list, global) { struct dmar_domain *domain; @@ -1684,10 +1698,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) domain = info->domain; - dmar_remove_one_dev_info(domain, info->dev); + __dmar_remove_one_dev_info(info); - if (!domain_type_is_vm_or_si(domain)) + if (!domain_type_is_vm_or_si(domain)) { + /* + * The domain_exit() function can't be called under + * device_domain_lock, as it takes this lock itself. + * So release the lock here and re-run the loop + * afterwards. + */ + spin_unlock_irqrestore(&device_domain_lock, flags); domain_exit(domain); + goto again; + } } spin_unlock_irqrestore(&device_domain_lock, flags); @@ -4182,10 +4205,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) if (!atsru) return 0; - if (!atsru->include_all && atsru->devices && atsru->devices_cnt) + if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, i, dev) return -EBUSY; + } return 0; } diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index d9939fa9b58887567d1fe8a7a94c7e5f1452cdf0..f929879ecae60ce8c266e9f21719e2b49783c7ab 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) struct page *pages; int order; - order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; - if (order < 0) - order = 0; - + /* Start at 2 because it's defined as 2^(1+PSS) */ + iommu->pasid_max = 2 << ecap_pss(iommu->ecap); + + /* Eventually I'm promised we will get a multi-level PASID table + * and it won't have to be physically contiguous. 
Until then, + * limit the size because 8MiB contiguous allocations can be hard + * to come by. The limit of 0x20000, which is 1MiB for each of + * the PASID and PASID-state tables, is somewhat arbitrary. */ + if (iommu->pasid_max > 0x20000) + iommu->pasid_max = 0x20000; + + order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!pages) { pr_warn("IOMMU: %s: Failed to allocate PASID table\n", @@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); if (ecap_dis(iommu->ecap)) { + /* Just making it explicit... */ + BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (pages) iommu->pasid_state_table = page_address(pages); @@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) int intel_svm_free_pasid_tables(struct intel_iommu *iommu) { - int order; - - order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; - if (order < 0) - order = 0; + int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); if (iommu->pasid_table) { free_pages((unsigned long)iommu->pasid_table, order); @@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ } svm->iommu = iommu; - if (pasid_max > 2 << ecap_pss(iommu->ecap)) - pasid_max = 2 << ecap_pss(iommu->ecap); + if (pasid_max > iommu->pasid_max) + pasid_max = iommu->pasid_max; /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ ret = idr_alloc(&iommu->pasid_idr, svm, diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c index 5d32a382e291a7e0c707ecf757d8edfcb557ca22..3582e206db68ea7a60cc1aaf7060c80e5abb2cd3 100644 --- a/drivers/iommu/io-pgtable-fast.c +++ b/drivers/iommu/io-pgtable-fast.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,9 @@ struct 
av8l_fast_io_pgtable { av8l_fast_iopte *puds[4]; av8l_fast_iopte *pmds; struct page **pages; /* page table memory */ + int nr_pages; + dma_addr_t base; + dma_addr_t end; }; /* Page table bits */ @@ -168,12 +172,14 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } } -void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync) +void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base, + u64 end, bool skip_sync) { int i; av8l_fast_iopte *pmdp = pmds; - for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) { + for (i = base >> AV8L_FAST_PAGE_SHIFT; + i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) { if (!(*pmdp & AV8L_FAST_PTE_VALID)) { *pmdp = 0; if (!skip_sync) @@ -224,7 +230,7 @@ static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; av8l_fast_map_public(ptep, paddr, size, prot); @@ -255,7 +261,7 @@ static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, size_t size) { struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; __av8l_fast_unmap(ptep, size, false); @@ -333,7 +339,7 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) } /* - * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and + * We need max 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and * 2048 pages for pmds (each pud page contains 512 table entries, each * pointing to a pmd). 
*/ @@ -342,12 +348,38 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) #define NUM_PMD_PAGES 2048 #define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES) +/* undefine arch specific definitions which depends on page table format */ +#undef pud_index +#undef pud_mask +#undef pud_next +#undef pmd_index +#undef pmd_mask +#undef pmd_next + +#define pud_index(addr) (((addr) >> 30) & 0x3) +#define pud_mask(addr) ((addr) & ~((1UL << 30) - 1)) +#define pud_next(addr, end) \ +({ unsigned long __boundary = pud_mask(addr + (1UL << 30));\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + +#define pmd_index(addr) (((addr) >> 21) & 0x1ff) +#define pmd_mask(addr) ((addr) & ~((1UL << 21) - 1)) +#define pmd_next(addr, end) \ +({ unsigned long __boundary = pmd_mask(addr + (1UL << 21));\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + static int av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, struct io_pgtable_cfg *cfg, void *cookie) { int i, j, pg = 0; struct page **pages, *page; + dma_addr_t base = cfg->iova_base; + dma_addr_t end = cfg->iova_end; + dma_addr_t pud, pmd; + int pmd_pg_index; pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN | __GFP_NORETRY); @@ -365,10 +397,11 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, data->pgd = page_address(page); /* - * We need 2048 entries at level 2 to map 4GB of VA space. A page - * can hold 512 entries, so we need 4 pages. + * We need max 2048 entries at level 2 to map 4GB of VA space. A page + * can hold 512 entries, so we need max 4 pages. 
*/ - for (i = 0; i < 4; ++i) { + for (i = pud_index(base), pud = base; pud < end; + ++i, pud = pud_next(pud, end)) { av8l_fast_iopte pte, *ptep; page = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -383,18 +416,26 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, dmac_clean_range(data->pgd, data->pgd + 4); /* - * We have 4 puds, each of which can point to 512 pmds, so we'll - * have 2048 pmds, each of which can hold 512 ptes, for a grand + * We have max 4 puds, each of which can point to 512 pmds, so we'll + * have max 2048 pmds, each of which can hold 512 ptes, for a grand * total of 2048*512=1048576 PTEs. */ - for (i = 0; i < 4; ++i) { - for (j = 0; j < 512; ++j) { + pmd_pg_index = pg; + for (i = pud_index(base), pud = base; pud < end; + ++i, pud = pud_next(pud, end)) { + for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end); + ++j, pmd = pmd_next(pmd, end)) { av8l_fast_iopte pte, *pudp; + void *addr; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto err_free_pages; pages[pg++] = page; + + addr = page_address(page); + dmac_clean_range(addr, addr + SZ_4K); + pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE; pudp = data->puds[i] + j; *pudp = pte; @@ -402,21 +443,21 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, dmac_clean_range(data->puds[i], data->puds[i] + 512); } - if (WARN_ON(pg != NUM_PGTBL_PAGES)) - goto err_free_pages; - /* * We map the pmds into a virtually contiguous space so that we * don't have to traverse the first two levels of the page tables * to find the appropriate pud. Instead, it will be a simple * offset from the virtual base of the pmds. 
*/ - data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES, + data->pmds = vmap(&pages[pmd_pg_index], pg - pmd_pg_index, VM_IOREMAP, PAGE_KERNEL); if (!data->pmds) goto err_free_pages; data->pages = pages; + data->nr_pages = pg; + data->base = base; + data->end = end; return 0; err_free_pages: @@ -516,7 +557,7 @@ static void av8l_fast_free_pgtable(struct io_pgtable *iop) struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop); vunmap(data->pmds); - for (i = 0; i < NUM_PGTBL_PAGES; ++i) + for (i = 0; i < data->nr_pages; ++i) __free_page(data->pages[i]); kvfree(data->pages); kfree(data); @@ -588,6 +629,7 @@ static int __init av8l_fast_positive_testing(void) struct av8l_fast_io_pgtable *data; av8l_fast_iopte *pmds; u64 max = SZ_1G * 4ULL - 1; + u64 base = 0; cfg = (struct io_pgtable_cfg) { .quirks = 0, @@ -595,6 +637,8 @@ static int __init av8l_fast_positive_testing(void) .ias = 32, .oas = 32, .pgsize_bitmap = SZ_4K, + .iova_base = base, + .iova_end = max, }; cfg_cookie = &cfg; @@ -607,81 +651,81 @@ static int __init av8l_fast_positive_testing(void) pmds = data->pmds; /* map the entire 4GB VA space with 4K map calls */ - for (iova = 0; iova < max; iova += SZ_4K) { + for (iova = base; iova < max; iova += SZ_4K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all */ - for (iova = 0; iova < max; iova += SZ_4K) { + for (iova = base; iova < max; iova += SZ_4K) { if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, false); + av8l_fast_clear_stale_ptes(pmds, base, max, false); /* map the entire 4GB VA space with 8K map calls */ - for (iova = 0; iova < max; iova += SZ_8K) { + for (iova = base; iova < max; iova += SZ_8K) { if (WARN_ON(ops->map(ops, iova, iova, 
SZ_8K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all with 8K unmap calls */ - for (iova = 0; iova < max; iova += SZ_8K) { + for (iova = base; iova < max; iova += SZ_8K) { if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, false); + av8l_fast_clear_stale_ptes(pmds, base, max, false); /* map the entire 4GB VA space with 16K map calls */ - for (iova = 0; iova < max; iova += SZ_16K) { + for (iova = base; iova < max; iova += SZ_16K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all */ - for (iova = 0; iova < max; iova += SZ_16K) { + for (iova = base; iova < max; iova += SZ_16K) { if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, false); + av8l_fast_clear_stale_ptes(pmds, base, max, false); /* map the entire 4GB VA space with 64K map calls */ - for (iova = 0; iova < max; iova += SZ_64K) { + for (iova = base; iova < max; iova += SZ_64K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all at once */ - if (WARN_ON(ops->unmap(ops, 0, max) != max)) + if (WARN_ON(ops->unmap(ops, base, max - base) != (max - base))) failed++; free_io_pgtable_ops(ops); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index f4533040806f8521e7ea3e29ffa6e216e47aa784..e6939c2212d4a8bcc0232d7f64fb0e653b822cf8 100644 
--- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -69,6 +69,8 @@ struct io_pgtable_cfg { unsigned int oas; const struct iommu_gather_ops *tlb; struct device *iommu_dev; + dma_addr_t iova_base; + dma_addr_t iova_end; /* Low-level data specific to the table format */ union { diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 8a0c7f28819841a83e1afb0e3eccf71d4aa287a7..981c3959da59431a776fa45faab5fae6929c93e3 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d, { struct irq_domain_chip_generic *dgc = d->gc; struct irq_chip_generic *gc; + unsigned long flags; unsigned smr; int idx; int ret; @@ -194,12 +195,12 @@ static int aic_irq_domain_xlate(struct irq_domain *d, gc = dgc->gc[idx]; - irq_gc_lock(gc); + irq_gc_lock_irqsave(gc, flags); smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); ret = aic_common_set_priority(intspec[2], &smr); if (!ret) irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); - irq_gc_unlock(gc); + irq_gc_unlock_irqrestore(gc, flags); return ret; } diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 62bb840c613f2a660966333f858426af5ab3d93e..7dee71bde3504cced7b64285bf517994deb76425 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, unsigned int *out_type) { struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); + unsigned long flags; unsigned smr; int ret; @@ -269,13 +270,13 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, if (ret) return ret; - irq_gc_lock(bgc); + irq_gc_lock_irqsave(bgc, flags); irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); ret = aic_common_set_priority(intspec[2], &smr); if (!ret) irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR); - irq_gc_unlock(bgc); + 
irq_gc_unlock_irqrestore(bgc, flags); return ret; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index cfdc235c1d28303311185f49fec1048929109e08..c40a9f3b0724559fbdaef054c7f720d5d1895b4b 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -154,7 +154,7 @@ static void gic_enable_redist(bool enable) return; /* No PM support in this redistributor */ } - while (count--) { + while (--count) { val = readl_relaxed(rbase + GICR_WAKER); if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) break; @@ -702,7 +702,7 @@ static struct notifier_block gic_cpu_notifier = { static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, unsigned long cluster_id) { - int cpu = *base_cpu; + int next_cpu, cpu = *base_cpu; unsigned long mpidr = cpu_logical_map(cpu); u16 tlist = 0; @@ -716,9 +716,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, tlist |= 1 << (mpidr & 0xf); - cpu = cpumask_next(cpu, mask); - if (cpu >= nr_cpu_ids) + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) goto out; + cpu = next_cpu; mpidr = cpu_logical_map(cpu); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 966227a3df1a18fa123fd273b2da4247bfe998e7..620268b63b2a2117b8692d436d236c116a2faf5f 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -596,18 +596,9 @@ config LEDS_QPNP LEDs in both PWM and light pattern generator (LPG) modes. For older PMICs, it also supports WLEDs and flash LEDs. -config LEDS_QPNP_FLASH - tristate "Support for QPNP Flash LEDs" - depends on LEDS_CLASS && SPMI - help - This driver supports the flash LED functionality of Qualcomm - Technologies, Inc. QPNP PMICs. This driver supports PMICs up through - PM8994. It can configure the flash LED target current for several - independent channels. 
- config LEDS_QPNP_FLASH_V2 tristate "Support for QPNP V2 Flash LEDs" - depends on LEDS_CLASS && MFD_SPMI_PMIC && !LEDS_QPNP_FLASH + depends on LEDS_CLASS && MFD_SPMI_PMIC help This driver supports the flash V2 LED functionality of Qualcomm Technologies, Inc. QPNP PMICs. This driver supports PMICs starting diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 8d8ba9175810b8e0ab76504a78e0d168cfbb6eea..aa5ba0cf4de698172bc2fb71e42bb3c110684a5d 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -61,7 +61,6 @@ obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o -obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c deleted file mode 100644 index cd76941b87ca97409ebe4b5f69bdb88cfbbf431f..0000000000000000000000000000000000000000 --- a/drivers/leds/leds-qpnp-flash.c +++ /dev/null @@ -1,2683 +0,0 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "leds.h" - -#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05) -#define FLASH_SAFETY_TIMER(base) (base + 0x40) -#define FLASH_MAX_CURRENT(base) (base + 0x41) -#define FLASH_LED0_CURRENT(base) (base + 0x42) -#define FLASH_LED1_CURRENT(base) (base + 0x43) -#define FLASH_CLAMP_CURRENT(base) (base + 0x44) -#define FLASH_MODULE_ENABLE_CTRL(base) (base + 0x46) -#define FLASH_LED_STROBE_CTRL(base) (base + 0x47) -#define FLASH_LED_TMR_CTRL(base) (base + 0x48) -#define FLASH_HEADROOM(base) (base + 0x4A) -#define FLASH_STARTUP_DELAY(base) (base + 0x4B) -#define FLASH_MASK_ENABLE(base) (base + 0x4C) -#define FLASH_VREG_OK_FORCE(base) (base + 0x4F) -#define FLASH_FAULT_DETECT(base) (base + 0x51) -#define FLASH_THERMAL_DRATE(base) (base + 0x52) -#define FLASH_CURRENT_RAMP(base) (base + 0x54) -#define FLASH_VPH_PWR_DROOP(base) (base + 0x5A) -#define FLASH_HDRM_SNS_ENABLE_CTRL0(base) (base + 0x5C) -#define FLASH_HDRM_SNS_ENABLE_CTRL1(base) (base + 0x5D) -#define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0) -#define FLASH_PERPH_RESET_CTRL(base) (base + 0xDA) -#define FLASH_TORCH(base) (base + 0xE4) - -#define FLASH_STATUS_REG_MASK 0xFF -#define FLASH_LED_FAULT_STATUS(base) (base + 0x08) -#define INT_LATCHED_STS(base) (base + 0x18) -#define IN_POLARITY_HIGH(base) (base + 0x12) -#define INT_SET_TYPE(base) (base + 0x11) -#define INT_EN_SET(base) (base + 0x15) -#define INT_LATCHED_CLR(base) (base + 0x14) - -#define FLASH_HEADROOM_MASK 0x03 -#define FLASH_STARTUP_DLY_MASK 0x03 -#define FLASH_VREG_OK_FORCE_MASK 0xC0 -#define FLASH_FAULT_DETECT_MASK 0x80 -#define FLASH_THERMAL_DERATE_MASK 0xBF -#define FLASH_SECURE_MASK 0xFF -#define FLASH_TORCH_MASK 0x03 -#define FLASH_CURRENT_MASK 0x7F -#define FLASH_TMR_MASK 0x03 -#define FLASH_TMR_SAFETY 0x00 -#define 
FLASH_SAFETY_TIMER_MASK 0x7F -#define FLASH_MODULE_ENABLE_MASK 0xE0 -#define FLASH_STROBE_MASK 0xC0 -#define FLASH_CURRENT_RAMP_MASK 0xBF -#define FLASH_VPH_PWR_DROOP_MASK 0xF3 -#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81 -#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0 -#define FLASH_FOLLOW_OTST2_RB_MASK 0x08 - -#define FLASH_LED_TRIGGER_DEFAULT "none" -#define FLASH_LED_HEADROOM_DEFAULT_MV 500 -#define FLASH_LED_STARTUP_DELAY_DEFAULT_US 128 -#define FLASH_LED_CLAMP_CURRENT_DEFAULT_MA 200 -#define FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C 80 -#define FLASH_LED_RAMP_UP_STEP_DEFAULT_US 3 -#define FLASH_LED_RAMP_DN_STEP_DEFAULT_US 3 -#define FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV 3200 -#define FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US 10 -#define FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT 2 -#define FLASH_RAMP_UP_DELAY_US_MIN 1000 -#define FLASH_RAMP_UP_DELAY_US_MAX 1001 -#define FLASH_RAMP_DN_DELAY_US_MIN 2160 -#define FLASH_RAMP_DN_DELAY_US_MAX 2161 -#define FLASH_BOOST_REGULATOR_PROBE_DELAY_MS 2000 -#define FLASH_TORCH_MAX_LEVEL 0x0F -#define FLASH_MAX_LEVEL 0x4F -#define FLASH_LED_FLASH_HW_VREG_OK 0x40 -#define FLASH_LED_FLASH_SW_VREG_OK 0x80 -#define FLASH_LED_STROBE_TYPE_HW 0x04 -#define FLASH_DURATION_DIVIDER 10 -#define FLASH_LED_HEADROOM_DIVIDER 100 -#define FLASH_LED_HEADROOM_OFFSET 2 -#define FLASH_LED_MAX_CURRENT_MA 1000 -#define FLASH_LED_THERMAL_THRESHOLD_MIN 95 -#define FLASH_LED_THERMAL_DEVIDER 10 -#define FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV 2500 -#define FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER 100 -#define FLASH_LED_HDRM_SNS_ENABLE 0x81 -#define FLASH_LED_HDRM_SNS_DISABLE 0x01 -#define FLASH_LED_UA_PER_MA 1000 -#define FLASH_LED_MASK_MODULE_MASK2_ENABLE 0x20 -#define FLASH_LED_MASK3_ENABLE_SHIFT 7 -#define FLASH_LED_MODULE_CTRL_DEFAULT 0x60 -#define FLASH_LED_CURRENT_READING_DELAY_MIN 5000 -#define FLASH_LED_CURRENT_READING_DELAY_MAX 5001 -#define FLASH_LED_OPEN_FAULT_DETECTED 0xC - -#define FLASH_UNLOCK_SECURE 0xA5 -#define 
FLASH_LED_TORCH_ENABLE 0x00 -#define FLASH_LED_TORCH_DISABLE 0x03 -#define FLASH_MODULE_ENABLE 0x80 -#define FLASH_LED0_TRIGGER 0x80 -#define FLASH_LED1_TRIGGER 0x40 -#define FLASH_LED0_ENABLEMENT 0x40 -#define FLASH_LED1_ENABLEMENT 0x20 -#define FLASH_LED_DISABLE 0x00 -#define FLASH_LED_MIN_CURRENT_MA 13 -#define FLASH_SUBTYPE_DUAL 0x01 -#define FLASH_SUBTYPE_SINGLE 0x02 - -/* - * ID represents physical LEDs for individual control purpose. - */ -enum flash_led_id { - FLASH_LED_0 = 0, - FLASH_LED_1, - FLASH_LED_SWITCH, -}; - -enum flash_led_type { - FLASH = 0, - TORCH, - SWITCH, -}; - -enum thermal_derate_rate { - RATE_1_PERCENT = 0, - RATE_1P25_PERCENT, - RATE_2_PERCENT, - RATE_2P5_PERCENT, - RATE_5_PERCENT, -}; - -enum current_ramp_steps { - RAMP_STEP_0P2_US = 0, - RAMP_STEP_0P4_US, - RAMP_STEP_0P8_US, - RAMP_STEP_1P6_US, - RAMP_STEP_3P3_US, - RAMP_STEP_6P7_US, - RAMP_STEP_13P5_US, - RAMP_STEP_27US, -}; - -struct flash_regulator_data { - struct regulator *regs; - const char *reg_name; - u32 max_volt_uv; -}; - -/* - * Configurations for each individual LED - */ -struct flash_node_data { - struct platform_device *pdev; - struct regmap *regmap; - struct led_classdev cdev; - struct work_struct work; - struct flash_regulator_data *reg_data; - u16 max_current; - u16 prgm_current; - u16 prgm_current2; - u16 duration; - u8 id; - u8 type; - u8 trigger; - u8 enable; - u8 num_regulators; - bool flash_on; -}; - -/* - * Flash LED configuration read from device tree - */ -struct flash_led_platform_data { - unsigned int temp_threshold_num; - unsigned int temp_derate_curr_num; - unsigned int *die_temp_derate_curr_ma; - unsigned int *die_temp_threshold_degc; - u16 ramp_up_step; - u16 ramp_dn_step; - u16 vph_pwr_droop_threshold; - u16 headroom; - u16 clamp_current; - u8 thermal_derate_threshold; - u8 vph_pwr_droop_debounce_time; - u8 startup_dly; - u8 thermal_derate_rate; - bool pmic_charger_support; - bool self_check_en; - bool thermal_derate_en; - bool current_ramp_en; - bool 
vph_pwr_droop_en; - bool hdrm_sns_ch0_en; - bool hdrm_sns_ch1_en; - bool power_detect_en; - bool mask3_en; - bool follow_rb_disable; - bool die_current_derate_en; -}; - -struct qpnp_flash_led_buffer { - struct mutex debugfs_lock; /* Prevent thread concurrency */ - size_t rpos; - size_t wpos; - size_t len; - char data[0]; -}; - -/* - * Flash LED data structure containing flash LED attributes - */ -struct qpnp_flash_led { - struct pmic_revid_data *revid_data; - struct platform_device *pdev; - struct regmap *regmap; - struct flash_led_platform_data *pdata; - struct pinctrl *pinctrl; - struct pinctrl_state *gpio_state_active; - struct pinctrl_state *gpio_state_suspend; - struct flash_node_data *flash_node; - struct power_supply *battery_psy; - struct workqueue_struct *ordered_workq; - struct qpnp_vadc_chip *vadc_dev; - struct mutex flash_led_lock; - struct qpnp_flash_led_buffer *log; - struct dentry *dbgfs_root; - int num_leds; - u32 buffer_cnt; - u16 base; - u16 current_addr; - u16 current2_addr; - u8 peripheral_type; - u8 fault_reg; - bool gpio_enabled; - bool charging_enabled; - bool strobe_debug; - bool dbg_feature_en; - bool open_fault; -}; - -static u8 qpnp_flash_led_ctrl_dbg_regs[] = { - 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, - 0x4A, 0x4B, 0x4C, 0x4F, 0x51, 0x52, 0x54, 0x55, 0x5A, 0x5C, 0x5D, -}; - -static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led, - struct file *file) -{ - struct qpnp_flash_led_buffer *log; - size_t logbufsize = SZ_4K; - - log = kzalloc(logbufsize, GFP_KERNEL); - if (!log) - return -ENOMEM; - - log->rpos = 0; - log->wpos = 0; - log->len = logbufsize - sizeof(*log); - mutex_init(&log->debugfs_lock); - led->log = log; - - led->buffer_cnt = 1; - file->private_data = led; - - return 0; -} - -static int flash_led_dfs_open(struct inode *inode, struct file *file) -{ - struct qpnp_flash_led *led = inode->i_private; - - return flash_led_dbgfs_file_open(led, file); -} - -static int flash_led_dfs_close(struct inode *inode, 
struct file *file) -{ - struct qpnp_flash_led *led = file->private_data; - - if (led && led->log) { - file->private_data = NULL; - mutex_destroy(&led->log->debugfs_lock); - kfree(led->log); - } - - return 0; -} - -#define MIN_BUFFER_WRITE_LEN 20 -static int print_to_log(struct qpnp_flash_led_buffer *log, - const char *fmt, ...) -{ - va_list args; - int cnt; - char *log_buf; - size_t size = log->len - log->wpos; - - if (size < MIN_BUFFER_WRITE_LEN) - return 0; /* not enough buffer left */ - - log_buf = &log->data[log->wpos]; - va_start(args, fmt); - cnt = vscnprintf(log_buf, size, fmt, args); - va_end(args); - - log->wpos += cnt; - return cnt; -} - -static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf, - size_t count, loff_t *ppos) { - struct qpnp_flash_led *led = fp->private_data; - struct qpnp_flash_led_buffer *log = led->log; - uint val; - int rc = 0; - size_t len; - size_t ret; - - mutex_lock(&log->debugfs_lock); - if ((log->rpos >= log->wpos && led->buffer_cnt == 0) || - ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN)) - goto unlock_mutex; - - rc = regmap_read(led->regmap, INT_LATCHED_STS(led->base), &val); - if (rc) { - dev_err(&led->pdev->dev, - "Unable to read from address %x, rc(%d)\n", - INT_LATCHED_STS(led->base), rc); - goto unlock_mutex; - } - led->buffer_cnt--; - - rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base)); - if (rc == 0) - goto unlock_mutex; - - rc = print_to_log(log, "0x%02X ", val); - if (rc == 0) - goto unlock_mutex; - - if (log->wpos > 0 && log->data[log->wpos - 1] == ' ') - log->data[log->wpos - 1] = '\n'; - - len = min(count, log->wpos - log->rpos); - - ret = copy_to_user(buf, &log->data[log->rpos], len); - if (ret) { - pr_err("error copy register value to user\n"); - rc = -EFAULT; - goto unlock_mutex; - } - - len -= ret; - *ppos += len; - log->rpos += len; - - rc = len; - -unlock_mutex: - mutex_unlock(&log->debugfs_lock); - return rc; -} - -static ssize_t flash_led_dfs_fault_reg_read(struct file 
*fp, char __user *buf, - size_t count, loff_t *ppos) { - struct qpnp_flash_led *led = fp->private_data; - struct qpnp_flash_led_buffer *log = led->log; - int rc = 0; - size_t len; - size_t ret; - - mutex_lock(&log->debugfs_lock); - if ((log->rpos >= log->wpos && led->buffer_cnt == 0) || - ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN)) - goto unlock_mutex; - - led->buffer_cnt--; - - rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base)); - if (rc == 0) - goto unlock_mutex; - - rc = print_to_log(log, "0x%02X ", led->fault_reg); - if (rc == 0) - goto unlock_mutex; - - if (log->wpos > 0 && log->data[log->wpos - 1] == ' ') - log->data[log->wpos - 1] = '\n'; - - len = min(count, log->wpos - log->rpos); - - ret = copy_to_user(buf, &log->data[log->rpos], len); - if (ret) { - pr_err("error copy register value to user\n"); - rc = -EFAULT; - goto unlock_mutex; - } - - len -= ret; - *ppos += len; - log->rpos += len; - - rc = len; - -unlock_mutex: - mutex_unlock(&log->debugfs_lock); - return rc; -} - -static ssize_t flash_led_dfs_fault_reg_enable(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) { - - u8 *val; - int pos = 0; - int cnt = 0; - int data; - size_t ret = 0; - - struct qpnp_flash_led *led = file->private_data; - char *kbuf; - - mutex_lock(&led->log->debugfs_lock); - kbuf = kmalloc(count + 1, GFP_KERNEL); - if (!kbuf) { - ret = -ENOMEM; - goto unlock_mutex; - } - - ret = copy_from_user(kbuf, buf, count); - if (!ret) { - pr_err("failed to copy data from user\n"); - ret = -EFAULT; - goto free_buf; - } - - count -= ret; - *ppos += count; - kbuf[count] = '\0'; - val = kbuf; - while (sscanf(kbuf + pos, "%i", &data) == 1) { - pos++; - val[cnt++] = data & 0xff; - } - - if (!cnt) - goto free_buf; - - ret = count; - if (*val == 1) - led->strobe_debug = true; - else - led->strobe_debug = false; - -free_buf: - kfree(kbuf); -unlock_mutex: - mutex_unlock(&led->log->debugfs_lock); - return ret; -} - -static ssize_t 
flash_led_dfs_dbg_enable(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) { - - u8 *val; - int pos = 0; - int cnt = 0; - int data; - size_t ret = 0; - struct qpnp_flash_led *led = file->private_data; - char *kbuf; - - mutex_lock(&led->log->debugfs_lock); - kbuf = kmalloc(count + 1, GFP_KERNEL); - if (!kbuf) { - ret = -ENOMEM; - goto unlock_mutex; - } - - ret = copy_from_user(kbuf, buf, count); - if (ret == count) { - pr_err("failed to copy data from user\n"); - ret = -EFAULT; - goto free_buf; - } - count -= ret; - *ppos += count; - kbuf[count] = '\0'; - val = kbuf; - while (sscanf(kbuf + pos, "%i", &data) == 1) { - pos++; - val[cnt++] = data & 0xff; - } - - if (!cnt) - goto free_buf; - - ret = count; - if (*val == 1) - led->dbg_feature_en = true; - else - led->dbg_feature_en = false; - -free_buf: - kfree(kbuf); -unlock_mutex: - mutex_unlock(&led->log->debugfs_lock); - return ret; -} - -static const struct file_operations flash_led_dfs_latched_reg_fops = { - .open = flash_led_dfs_open, - .release = flash_led_dfs_close, - .read = flash_led_dfs_latched_reg_read, -}; - -static const struct file_operations flash_led_dfs_strobe_reg_fops = { - .open = flash_led_dfs_open, - .release = flash_led_dfs_close, - .read = flash_led_dfs_fault_reg_read, - .write = flash_led_dfs_fault_reg_enable, -}; - -static const struct file_operations flash_led_dfs_dbg_feature_fops = { - .open = flash_led_dfs_open, - .release = flash_led_dfs_close, - .write = flash_led_dfs_dbg_enable, -}; - -static int -qpnp_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask, u8 val) -{ - int rc; - - rc = regmap_update_bits(led->regmap, addr, mask, val); - if (rc) - dev_err(&led->pdev->dev, - "Unable to update_bits to addr=%x, rc(%d)\n", addr, rc); - - dev_dbg(&led->pdev->dev, "Write 0x%02X to addr 0x%02X\n", val, addr); - - return rc; -} - -static int qpnp_flash_led_get_allowed_die_temp_curr(struct qpnp_flash_led *led, - int64_t die_temp_degc) -{ - int die_temp_curr_ma; - - 
if (die_temp_degc >= led->pdata->die_temp_threshold_degc[0]) - die_temp_curr_ma = 0; - else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[1]) - die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[0]; - else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[2]) - die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[1]; - else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[3]) - die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[2]; - else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[4]) - die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[3]; - else - die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[4]; - - return die_temp_curr_ma; -} - -static int64_t qpnp_flash_led_get_die_temp(struct qpnp_flash_led *led) -{ - struct qpnp_vadc_result die_temp_result; - int rc; - - rc = qpnp_vadc_read(led->vadc_dev, SPARE2, &die_temp_result); - if (rc) { - pr_err("failed to read the die temp\n"); - return -EINVAL; - } - - return die_temp_result.physical; -} - -static int qpnp_get_pmic_revid(struct qpnp_flash_led *led) -{ - struct device_node *revid_dev_node; - - revid_dev_node = of_parse_phandle(led->pdev->dev.of_node, - "qcom,pmic-revid", 0); - if (!revid_dev_node) { - dev_err(&led->pdev->dev, - "qcom,pmic-revid property missing\n"); - return -EINVAL; - } - - led->revid_data = get_revid_data(revid_dev_node); - if (IS_ERR(led->revid_data)) { - pr_err("Couldn't get revid data rc = %ld\n", - PTR_ERR(led->revid_data)); - return PTR_ERR(led->revid_data); - } - - return 0; -} - -static int -qpnp_flash_led_get_max_avail_current(struct flash_node_data *flash_node, - struct qpnp_flash_led *led) -{ - union power_supply_propval prop; - int64_t chg_temp_milidegc, die_temp_degc; - int max_curr_avail_ma = 2000; - int allowed_die_temp_curr_ma = 2000; - int rc; - - if (led->pdata->power_detect_en) { - if (!led->battery_psy) { - dev_err(&led->pdev->dev, - "Failed to query power supply\n"); - return -EINVAL; - } - - /* - * When charging is 
enabled, enforce this new enablement - * sequence to reduce fuel gauge reading resolution. - */ - if (led->charging_enabled) { - rc = qpnp_led_masked_write(led, - FLASH_MODULE_ENABLE_CTRL(led->base), - FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Module enable reg write failed\n"); - return -EINVAL; - } - - usleep_range(FLASH_LED_CURRENT_READING_DELAY_MIN, - FLASH_LED_CURRENT_READING_DELAY_MAX); - } - - power_supply_get_property(led->battery_psy, - POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, &prop); - if (!prop.intval) { - dev_err(&led->pdev->dev, - "battery too low for flash\n"); - return -EINVAL; - } - - max_curr_avail_ma = (prop.intval / FLASH_LED_UA_PER_MA); - } - - /* - * When thermal mitigation is available, this logic will execute to - * derate current based upon the PMIC die temperature. - */ - if (led->pdata->die_current_derate_en) { - chg_temp_milidegc = qpnp_flash_led_get_die_temp(led); - if (chg_temp_milidegc < 0) - return -EINVAL; - - die_temp_degc = div_s64(chg_temp_milidegc, 1000); - allowed_die_temp_curr_ma = - qpnp_flash_led_get_allowed_die_temp_curr(led, - die_temp_degc); - if (allowed_die_temp_curr_ma < 0) - return -EINVAL; - } - - max_curr_avail_ma = (max_curr_avail_ma >= allowed_die_temp_curr_ma) - ? 
allowed_die_temp_curr_ma : max_curr_avail_ma; - - return max_curr_avail_ma; -} - -static ssize_t qpnp_flash_led_die_temp_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct qpnp_flash_led *led; - struct flash_node_data *flash_node; - unsigned long val; - struct led_classdev *led_cdev = dev_get_drvdata(dev); - ssize_t ret; - - ret = kstrtoul(buf, 10, &val); - if (ret) - return ret; - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - led = dev_get_drvdata(&flash_node->pdev->dev); - - /*'0' for disable die_temp feature; non-zero to enable feature*/ - if (val == 0) - led->pdata->die_current_derate_en = false; - else - led->pdata->die_current_derate_en = true; - - return count; -} - -static ssize_t qpnp_led_strobe_type_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct flash_node_data *flash_node; - unsigned long state; - struct led_classdev *led_cdev = dev_get_drvdata(dev); - ssize_t ret = -EINVAL; - - ret = kstrtoul(buf, 10, &state); - if (ret) - return ret; - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - - /* '0' for sw strobe; '1' for hw strobe */ - if (state == 1) - flash_node->trigger |= FLASH_LED_STROBE_TYPE_HW; - else - flash_node->trigger &= ~FLASH_LED_STROBE_TYPE_HW; - - return count; -} - -static ssize_t qpnp_flash_led_dump_regs_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qpnp_flash_led *led; - struct flash_node_data *flash_node; - struct led_classdev *led_cdev = dev_get_drvdata(dev); - int rc, i, count = 0; - u16 addr; - uint val; - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - led = dev_get_drvdata(&flash_node->pdev->dev); - for (i = 0; i < ARRAY_SIZE(qpnp_flash_led_ctrl_dbg_regs); i++) { - addr = led->base + qpnp_flash_led_ctrl_dbg_regs[i]; - rc = regmap_read(led->regmap, addr, &val); - if (rc) { - dev_err(&led->pdev->dev, - "Unable to read from 
addr=%x, rc(%d)\n", - addr, rc); - return -EINVAL; - } - - count += snprintf(buf + count, PAGE_SIZE - count, - "REG_0x%x = 0x%02x\n", addr, val); - - if (count >= PAGE_SIZE) - return PAGE_SIZE - 1; - } - - return count; -} - -static ssize_t qpnp_flash_led_current_derate_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct qpnp_flash_led *led; - struct flash_node_data *flash_node; - unsigned long val; - struct led_classdev *led_cdev = dev_get_drvdata(dev); - ssize_t ret; - - ret = kstrtoul(buf, 10, &val); - if (ret) - return ret; - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - led = dev_get_drvdata(&flash_node->pdev->dev); - - /*'0' for disable derate feature; non-zero to enable derate feature */ - if (val == 0) - led->pdata->power_detect_en = false; - else - led->pdata->power_detect_en = true; - - return count; -} - -static ssize_t qpnp_flash_led_max_current_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qpnp_flash_led *led; - struct flash_node_data *flash_node; - struct led_classdev *led_cdev = dev_get_drvdata(dev); - int max_curr_avail_ma = 0; - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - led = dev_get_drvdata(&flash_node->pdev->dev); - - if (led->flash_node[0].flash_on) - max_curr_avail_ma += led->flash_node[0].max_current; - if (led->flash_node[1].flash_on) - max_curr_avail_ma += led->flash_node[1].max_current; - - if (led->pdata->power_detect_en || - led->pdata->die_current_derate_en) { - max_curr_avail_ma = - qpnp_flash_led_get_max_avail_current(flash_node, led); - - if (max_curr_avail_ma < 0) - return -EINVAL; - } - - return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma); -} - -static struct device_attribute qpnp_flash_led_attrs[] = { - __ATTR(strobe, 0664, NULL, qpnp_led_strobe_type_store), - __ATTR(reg_dump, 0664, qpnp_flash_led_dump_regs_show, NULL), - __ATTR(enable_current_derate, 0664, NULL, - 
qpnp_flash_led_current_derate_store), - __ATTR(max_allowed_current, 0664, qpnp_flash_led_max_current_show, - NULL), - __ATTR(enable_die_temp_current_derate, 0664, NULL, - qpnp_flash_led_die_temp_store), -}; - -static int qpnp_flash_led_get_thermal_derate_rate(const char *rate) -{ - /* - * return 5% derate as default value if user specifies - * a value un-supported - */ - if (strcmp(rate, "1_PERCENT") == 0) - return RATE_1_PERCENT; - else if (strcmp(rate, "1P25_PERCENT") == 0) - return RATE_1P25_PERCENT; - else if (strcmp(rate, "2_PERCENT") == 0) - return RATE_2_PERCENT; - else if (strcmp(rate, "2P5_PERCENT") == 0) - return RATE_2P5_PERCENT; - else if (strcmp(rate, "5_PERCENT") == 0) - return RATE_5_PERCENT; - else - return RATE_5_PERCENT; -} - -static int qpnp_flash_led_get_ramp_step(const char *step) -{ - /* - * return 27 us as default value if user specifies - * a value un-supported - */ - if (strcmp(step, "0P2_US") == 0) - return RAMP_STEP_0P2_US; - else if (strcmp(step, "0P4_US") == 0) - return RAMP_STEP_0P4_US; - else if (strcmp(step, "0P8_US") == 0) - return RAMP_STEP_0P8_US; - else if (strcmp(step, "1P6_US") == 0) - return RAMP_STEP_1P6_US; - else if (strcmp(step, "3P3_US") == 0) - return RAMP_STEP_3P3_US; - else if (strcmp(step, "6P7_US") == 0) - return RAMP_STEP_6P7_US; - else if (strcmp(step, "13P5_US") == 0) - return RAMP_STEP_13P5_US; - else - return RAMP_STEP_27US; -} - -static u8 qpnp_flash_led_get_droop_debounce_time(u8 val) -{ - /* - * return 10 us as default value if user specifies - * a value un-supported - */ - switch (val) { - case 0: - return 0; - case 10: - return 1; - case 32: - return 2; - case 64: - return 3; - default: - return 1; - } -} - -static u8 qpnp_flash_led_get_startup_dly(u8 val) -{ - /* - * return 128 us as default value if user specifies - * a value un-supported - */ - switch (val) { - case 10: - return 0; - case 32: - return 1; - case 64: - return 2; - case 128: - return 3; - default: - return 3; - } -} - -static int 
-qpnp_flash_led_get_peripheral_type(struct qpnp_flash_led *led) -{ - int rc; - uint val; - - rc = regmap_read(led->regmap, - FLASH_LED_PERIPHERAL_SUBTYPE(led->base), &val); - if (rc) { - dev_err(&led->pdev->dev, - "Unable to read peripheral subtype\n"); - return -EINVAL; - } - - return val; -} - -static int qpnp_flash_led_module_disable(struct qpnp_flash_led *led, - struct flash_node_data *flash_node) -{ - union power_supply_propval psy_prop; - int rc; - uint val, tmp; - - rc = regmap_read(led->regmap, FLASH_LED_STROBE_CTRL(led->base), &val); - if (rc) { - dev_err(&led->pdev->dev, "Unable to read strobe reg\n"); - return -EINVAL; - } - - tmp = (~flash_node->trigger) & val; - if (!tmp) { - if (flash_node->type == TORCH) { - rc = qpnp_led_masked_write(led, - FLASH_LED_UNLOCK_SECURE(led->base), - FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); - if (rc) { - dev_err(&led->pdev->dev, - "Secure reg write failed\n"); - return -EINVAL; - } - - rc = qpnp_led_masked_write(led, - FLASH_TORCH(led->base), - FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Torch reg write failed\n"); - return -EINVAL; - } - } - - if (led->battery_psy && - led->revid_data->pmic_subtype == PMI8996_SUBTYPE && - !led->revid_data->rev3) { - psy_prop.intval = false; - rc = power_supply_set_property(led->battery_psy, - POWER_SUPPLY_PROP_FLASH_TRIGGER, - &psy_prop); - if (rc) { - dev_err(&led->pdev->dev, - "Failed to enble charger i/p current limit\n"); - return -EINVAL; - } - } - - rc = qpnp_led_masked_write(led, - FLASH_MODULE_ENABLE_CTRL(led->base), - FLASH_MODULE_ENABLE_MASK, - FLASH_LED_MODULE_CTRL_DEFAULT); - if (rc) { - dev_err(&led->pdev->dev, "Module disable failed\n"); - return -EINVAL; - } - - if (led->pinctrl) { - rc = pinctrl_select_state(led->pinctrl, - led->gpio_state_suspend); - if (rc) { - dev_err(&led->pdev->dev, - "failed to disable GPIO\n"); - return -EINVAL; - } - led->gpio_enabled = false; - } - - if (led->battery_psy) { - psy_prop.intval = false; - rc 
= power_supply_set_property(led->battery_psy, - POWER_SUPPLY_PROP_FLASH_ACTIVE, - &psy_prop); - if (rc) { - dev_err(&led->pdev->dev, - "Failed to setup OTG pulse skip enable\n"); - return -EINVAL; - } - } - } - - if (flash_node->trigger & FLASH_LED0_TRIGGER) { - rc = qpnp_led_masked_write(led, - led->current_addr, - FLASH_CURRENT_MASK, 0x00); - if (rc) { - dev_err(&led->pdev->dev, - "current register write failed\n"); - return -EINVAL; - } - } - - if (flash_node->trigger & FLASH_LED1_TRIGGER) { - rc = qpnp_led_masked_write(led, - led->current2_addr, - FLASH_CURRENT_MASK, 0x00); - if (rc) { - dev_err(&led->pdev->dev, - "current register write failed\n"); - return -EINVAL; - } - } - - if (flash_node->id == FLASH_LED_SWITCH) - flash_node->trigger &= FLASH_LED_STROBE_TYPE_HW; - - return 0; -} - -static enum -led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev) -{ - return led_cdev->brightness; -} - -static int flash_regulator_parse_dt(struct qpnp_flash_led *led, - struct flash_node_data *flash_node) { - - int i = 0, rc; - struct device_node *node = flash_node->cdev.dev->of_node; - struct device_node *temp = NULL; - const char *temp_string; - u32 val; - - flash_node->reg_data = devm_kzalloc(&led->pdev->dev, - sizeof(struct flash_regulator_data *) * - flash_node->num_regulators, - GFP_KERNEL); - if (!flash_node->reg_data) { - dev_err(&led->pdev->dev, - "Unable to allocate memory\n"); - return -ENOMEM; - } - - for_each_child_of_node(node, temp) { - rc = of_property_read_string(temp, "regulator-name", - &temp_string); - if (!rc) - flash_node->reg_data[i].reg_name = temp_string; - else { - dev_err(&led->pdev->dev, - "Unable to read regulator name\n"); - return rc; - } - - rc = of_property_read_u32(temp, "max-voltage", &val); - if (!rc) { - flash_node->reg_data[i].max_volt_uv = val; - } else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, - "Unable to read max voltage\n"); - return rc; - } - - i++; - } - - return 0; -} - -static int 
flash_regulator_setup(struct qpnp_flash_led *led, - struct flash_node_data *flash_node, bool on) -{ - int i, rc = 0; - - if (on == false) { - i = flash_node->num_regulators; - goto error_regulator_setup; - } - - for (i = 0; i < flash_node->num_regulators; i++) { - flash_node->reg_data[i].regs = - regulator_get(flash_node->cdev.dev, - flash_node->reg_data[i].reg_name); - if (IS_ERR(flash_node->reg_data[i].regs)) { - rc = PTR_ERR(flash_node->reg_data[i].regs); - dev_err(&led->pdev->dev, - "Failed to get regulator\n"); - goto error_regulator_setup; - } - - if (regulator_count_voltages(flash_node->reg_data[i].regs) - > 0) { - rc = regulator_set_voltage(flash_node->reg_data[i].regs, - flash_node->reg_data[i].max_volt_uv, - flash_node->reg_data[i].max_volt_uv); - if (rc) { - dev_err(&led->pdev->dev, - "regulator set voltage failed\n"); - regulator_put(flash_node->reg_data[i].regs); - goto error_regulator_setup; - } - } - } - - return rc; - -error_regulator_setup: - while (i--) { - if (regulator_count_voltages(flash_node->reg_data[i].regs) - > 0) { - regulator_set_voltage(flash_node->reg_data[i].regs, - 0, flash_node->reg_data[i].max_volt_uv); - } - - regulator_put(flash_node->reg_data[i].regs); - } - - return rc; -} - -static int flash_regulator_enable(struct qpnp_flash_led *led, - struct flash_node_data *flash_node, bool on) -{ - int i, rc = 0; - - if (on == false) { - i = flash_node->num_regulators; - goto error_regulator_enable; - } - - for (i = 0; i < flash_node->num_regulators; i++) { - rc = regulator_enable(flash_node->reg_data[i].regs); - if (rc) { - dev_err(&led->pdev->dev, - "regulator enable failed\n"); - goto error_regulator_enable; - } - } - - return rc; - -error_regulator_enable: - while (i--) - regulator_disable(flash_node->reg_data[i].regs); - - return rc; -} - -int qpnp_flash_led_prepare(struct led_trigger *trig, int options, - int *max_current) -{ - struct led_classdev *led_cdev = trigger_to_lcdev(trig); - struct flash_node_data *flash_node; - struct 
qpnp_flash_led *led; - int rc; - - if (!led_cdev) { - pr_err("Invalid led_trigger provided\n"); - return -EINVAL; - } - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - led = dev_get_drvdata(&flash_node->pdev->dev); - - if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) { - dev_err(&led->pdev->dev, "Invalid options %d\n", options); - return -EINVAL; - } - - if (options & ENABLE_REGULATOR) { - rc = flash_regulator_enable(led, flash_node, true); - if (rc < 0) { - dev_err(&led->pdev->dev, - "enable regulator failed, rc=%d\n", rc); - return rc; - } - } - - if (options & DISABLE_REGULATOR) { - rc = flash_regulator_enable(led, flash_node, false); - if (rc < 0) { - dev_err(&led->pdev->dev, - "disable regulator failed, rc=%d\n", rc); - return rc; - } - } - - if (options & QUERY_MAX_CURRENT) { - rc = qpnp_flash_led_get_max_avail_current(flash_node, led); - if (rc < 0) { - dev_err(&led->pdev->dev, - "query max current failed, rc=%d\n", rc); - return rc; - } - *max_current = rc; - } - - return 0; -} - -static void qpnp_flash_led_work(struct work_struct *work) -{ - struct flash_node_data *flash_node = container_of(work, - struct flash_node_data, work); - struct qpnp_flash_led *led = dev_get_drvdata(&flash_node->pdev->dev); - union power_supply_propval psy_prop; - int rc, brightness = flash_node->cdev.brightness; - int max_curr_avail_ma = 0; - int total_curr_ma = 0; - int i; - u8 val; - uint temp; - - mutex_lock(&led->flash_led_lock); - - if (!brightness) - goto turn_off; - - if (led->open_fault) { - dev_err(&led->pdev->dev, "Open fault detected\n"); - mutex_unlock(&led->flash_led_lock); - return; - } - - if (!flash_node->flash_on && flash_node->num_regulators > 0) { - rc = flash_regulator_enable(led, flash_node, true); - if (rc) { - mutex_unlock(&led->flash_led_lock); - return; - } - } - - if (!led->gpio_enabled && led->pinctrl) { - rc = pinctrl_select_state(led->pinctrl, - led->gpio_state_active); - if (rc) { - dev_err(&led->pdev->dev, "failed to enable 
GPIO\n"); - goto error_enable_gpio; - } - led->gpio_enabled = true; - } - - if (led->dbg_feature_en) { - rc = qpnp_led_masked_write(led, - INT_SET_TYPE(led->base), - FLASH_STATUS_REG_MASK, 0x1F); - if (rc) { - dev_err(&led->pdev->dev, - "INT_SET_TYPE write failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - IN_POLARITY_HIGH(led->base), - FLASH_STATUS_REG_MASK, 0x1F); - if (rc) { - dev_err(&led->pdev->dev, - "IN_POLARITY_HIGH write failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - INT_EN_SET(led->base), - FLASH_STATUS_REG_MASK, 0x1F); - if (rc) { - dev_err(&led->pdev->dev, "INT_EN_SET write failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - INT_LATCHED_CLR(led->base), - FLASH_STATUS_REG_MASK, 0x1F); - if (rc) { - dev_err(&led->pdev->dev, - "INT_LATCHED_CLR write failed\n"); - goto exit_flash_led_work; - } - } - - if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH && - flash_node->id != FLASH_LED_SWITCH) { - led->flash_node[led->num_leds - 1].trigger |= - (0x80 >> flash_node->id); - if (flash_node->id == FLASH_LED_0) - led->flash_node[led->num_leds - 1].prgm_current = - flash_node->prgm_current; - else if (flash_node->id == FLASH_LED_1) - led->flash_node[led->num_leds - 1].prgm_current2 = - flash_node->prgm_current; - } - - if (flash_node->type == TORCH) { - rc = qpnp_led_masked_write(led, - FLASH_LED_UNLOCK_SECURE(led->base), - FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); - if (rc) { - dev_err(&led->pdev->dev, "Secure reg write failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - FLASH_TORCH(led->base), - FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, "Torch reg write failed\n"); - goto exit_flash_led_work; - } - - if (flash_node->id == FLASH_LED_SWITCH) { - val = (u8)(flash_node->prgm_current * - FLASH_TORCH_MAX_LEVEL - / flash_node->max_current); - rc = qpnp_led_masked_write(led, - led->current_addr, - 
FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "Torch reg write failed\n"); - goto exit_flash_led_work; - } - - val = (u8)(flash_node->prgm_current2 * - FLASH_TORCH_MAX_LEVEL - / flash_node->max_current); - rc = qpnp_led_masked_write(led, - led->current2_addr, - FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "Torch reg write failed\n"); - goto exit_flash_led_work; - } - } else { - val = (u8)(flash_node->prgm_current * - FLASH_TORCH_MAX_LEVEL / - flash_node->max_current); - if (flash_node->id == FLASH_LED_0) { - rc = qpnp_led_masked_write(led, - led->current_addr, - FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "current reg write failed\n"); - goto exit_flash_led_work; - } - } else { - rc = qpnp_led_masked_write(led, - led->current2_addr, - FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "current reg write failed\n"); - goto exit_flash_led_work; - } - } - } - - rc = qpnp_led_masked_write(led, - FLASH_MAX_CURRENT(led->base), - FLASH_CURRENT_MASK, FLASH_TORCH_MAX_LEVEL); - if (rc) { - dev_err(&led->pdev->dev, - "Max current reg write failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - FLASH_MODULE_ENABLE_CTRL(led->base), - FLASH_MODULE_ENABLE_MASK, FLASH_MODULE_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Module enable reg write failed\n"); - goto exit_flash_led_work; - } - - if (led->pdata->hdrm_sns_ch0_en || - led->pdata->hdrm_sns_ch1_en) { - if (flash_node->id == FLASH_LED_SWITCH) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL0(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - flash_node->trigger & - FLASH_LED0_TRIGGER ? 
- FLASH_LED_HDRM_SNS_ENABLE : - FLASH_LED_HDRM_SNS_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense enable failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL1(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - flash_node->trigger & - FLASH_LED1_TRIGGER ? - FLASH_LED_HDRM_SNS_ENABLE : - FLASH_LED_HDRM_SNS_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense enable failed\n"); - goto exit_flash_led_work; - } - } else if (flash_node->id == FLASH_LED_0) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL0(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - FLASH_LED_HDRM_SNS_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense disable failed\n"); - goto exit_flash_led_work; - } - } else if (flash_node->id == FLASH_LED_1) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL1(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - FLASH_LED_HDRM_SNS_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense disable failed\n"); - goto exit_flash_led_work; - } - } - } - - rc = qpnp_led_masked_write(led, - FLASH_LED_STROBE_CTRL(led->base), - (flash_node->id == FLASH_LED_SWITCH ? 
FLASH_STROBE_MASK - | FLASH_LED_STROBE_TYPE_HW - : flash_node->trigger | - FLASH_LED_STROBE_TYPE_HW), - flash_node->trigger); - if (rc) { - dev_err(&led->pdev->dev, "Strobe reg write failed\n"); - goto exit_flash_led_work; - } - } else if (flash_node->type == FLASH) { - if (flash_node->trigger & FLASH_LED0_TRIGGER) - max_curr_avail_ma += flash_node->max_current; - if (flash_node->trigger & FLASH_LED1_TRIGGER) - max_curr_avail_ma += flash_node->max_current; - - psy_prop.intval = true; - rc = power_supply_set_property(led->battery_psy, - POWER_SUPPLY_PROP_FLASH_ACTIVE, - &psy_prop); - if (rc) { - dev_err(&led->pdev->dev, - "Failed to setup OTG pulse skip enable\n"); - goto exit_flash_led_work; - } - - if (led->pdata->power_detect_en || - led->pdata->die_current_derate_en) { - if (led->battery_psy) { - power_supply_get_property(led->battery_psy, - POWER_SUPPLY_PROP_STATUS, - &psy_prop); - if (psy_prop.intval < 0) { - dev_err(&led->pdev->dev, - "Invalid battery status\n"); - goto exit_flash_led_work; - } - - if (psy_prop.intval == - POWER_SUPPLY_STATUS_CHARGING) - led->charging_enabled = true; - else if (psy_prop.intval == - POWER_SUPPLY_STATUS_DISCHARGING - || psy_prop.intval == - POWER_SUPPLY_STATUS_NOT_CHARGING) - led->charging_enabled = false; - } - max_curr_avail_ma = - qpnp_flash_led_get_max_avail_current - (flash_node, led); - if (max_curr_avail_ma < 0) { - dev_err(&led->pdev->dev, - "Failed to get max avail curr\n"); - goto exit_flash_led_work; - } - } - - if (flash_node->id == FLASH_LED_SWITCH) { - if (flash_node->trigger & FLASH_LED0_TRIGGER) - total_curr_ma += flash_node->prgm_current; - if (flash_node->trigger & FLASH_LED1_TRIGGER) - total_curr_ma += flash_node->prgm_current2; - - if (max_curr_avail_ma < total_curr_ma) { - flash_node->prgm_current = - (flash_node->prgm_current * - max_curr_avail_ma) / total_curr_ma; - flash_node->prgm_current2 = - (flash_node->prgm_current2 * - max_curr_avail_ma) / total_curr_ma; - } - - val = (u8)(flash_node->prgm_current 
* - FLASH_MAX_LEVEL / flash_node->max_current); - rc = qpnp_led_masked_write(led, - led->current_addr, FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "Current register write failed\n"); - goto exit_flash_led_work; - } - - val = (u8)(flash_node->prgm_current2 * - FLASH_MAX_LEVEL / flash_node->max_current); - rc = qpnp_led_masked_write(led, - led->current2_addr, FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "Current register write failed\n"); - goto exit_flash_led_work; - } - } else { - if (max_curr_avail_ma < flash_node->prgm_current) { - dev_err(&led->pdev->dev, - "battery only supprots %d mA\n", - max_curr_avail_ma); - flash_node->prgm_current = - (u16)max_curr_avail_ma; - } - - val = (u8)(flash_node->prgm_current * - FLASH_MAX_LEVEL - / flash_node->max_current); - if (flash_node->id == FLASH_LED_0) { - rc = qpnp_led_masked_write( - led, - led->current_addr, - FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "current reg write failed\n"); - goto exit_flash_led_work; - } - } else if (flash_node->id == FLASH_LED_1) { - rc = qpnp_led_masked_write( - led, - led->current2_addr, - FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "current reg write failed\n"); - goto exit_flash_led_work; - } - } - } - - val = (u8)((flash_node->duration - FLASH_DURATION_DIVIDER) - / FLASH_DURATION_DIVIDER); - rc = qpnp_led_masked_write(led, - FLASH_SAFETY_TIMER(led->base), - FLASH_SAFETY_TIMER_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "Safety timer reg write failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - FLASH_MAX_CURRENT(led->base), - FLASH_CURRENT_MASK, FLASH_MAX_LEVEL); - if (rc) { - dev_err(&led->pdev->dev, - "Max current reg write failed\n"); - goto exit_flash_led_work; - } - - if (!led->charging_enabled) { - rc = qpnp_led_masked_write(led, - FLASH_MODULE_ENABLE_CTRL(led->base), - FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - 
"Module enable reg write failed\n"); - goto exit_flash_led_work; - } - - usleep_range(FLASH_RAMP_UP_DELAY_US_MIN, - FLASH_RAMP_UP_DELAY_US_MAX); - } - - if (led->revid_data->pmic_subtype == PMI8996_SUBTYPE && - !led->revid_data->rev3) { - rc = power_supply_set_property(led->battery_psy, - POWER_SUPPLY_PROP_FLASH_TRIGGER, - &psy_prop); - if (rc) { - dev_err(&led->pdev->dev, - "Failed to disable charger i/p curr limit\n"); - goto exit_flash_led_work; - } - } - - if (led->pdata->hdrm_sns_ch0_en || - led->pdata->hdrm_sns_ch1_en) { - if (flash_node->id == FLASH_LED_SWITCH) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL0(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - (flash_node->trigger & - FLASH_LED0_TRIGGER ? - FLASH_LED_HDRM_SNS_ENABLE : - FLASH_LED_HDRM_SNS_DISABLE)); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense enable failed\n"); - goto exit_flash_led_work; - } - - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL1(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - (flash_node->trigger & - FLASH_LED1_TRIGGER ? - FLASH_LED_HDRM_SNS_ENABLE : - FLASH_LED_HDRM_SNS_DISABLE)); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense enable failed\n"); - goto exit_flash_led_work; - } - } else if (flash_node->id == FLASH_LED_0) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL0(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - FLASH_LED_HDRM_SNS_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense disable failed\n"); - goto exit_flash_led_work; - } - } else if (flash_node->id == FLASH_LED_1) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL1(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - FLASH_LED_HDRM_SNS_ENABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense disable failed\n"); - goto exit_flash_led_work; - } - } - } - - rc = qpnp_led_masked_write(led, - FLASH_LED_STROBE_CTRL(led->base), - (flash_node->id == FLASH_LED_SWITCH ? 
FLASH_STROBE_MASK - | FLASH_LED_STROBE_TYPE_HW - : flash_node->trigger | - FLASH_LED_STROBE_TYPE_HW), - flash_node->trigger); - if (rc) { - dev_err(&led->pdev->dev, "Strobe reg write failed\n"); - goto exit_flash_led_work; - } - - if (led->strobe_debug && led->dbg_feature_en) { - udelay(2000); - rc = regmap_read(led->regmap, - FLASH_LED_FAULT_STATUS(led->base), - &temp); - if (rc) { - dev_err(&led->pdev->dev, - "Unable to read from addr= %x, rc(%d)\n", - FLASH_LED_FAULT_STATUS(led->base), rc); - goto exit_flash_led_work; - } - led->fault_reg = temp; - } - } else { - pr_err("Both Torch and Flash cannot be select at same time\n"); - for (i = 0; i < led->num_leds; i++) - led->flash_node[i].flash_on = false; - goto turn_off; - } - - flash_node->flash_on = true; - mutex_unlock(&led->flash_led_lock); - - return; - -turn_off: - if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH && - flash_node->id != FLASH_LED_SWITCH) - led->flash_node[led->num_leds - 1].trigger &= - ~(0x80 >> flash_node->id); - if (flash_node->type == TORCH) { - /* - * Checking LED fault status detects hardware open fault. - * If fault occurs, all subsequent LED enablement requests - * will be rejected to protect hardware. - */ - rc = regmap_read(led->regmap, - FLASH_LED_FAULT_STATUS(led->base), &temp); - if (rc) { - dev_err(&led->pdev->dev, - "Failed to read out fault status register\n"); - goto exit_flash_led_work; - } - - led->open_fault |= (val & FLASH_LED_OPEN_FAULT_DETECTED); - } - - rc = qpnp_led_masked_write(led, - FLASH_LED_STROBE_CTRL(led->base), - (flash_node->id == FLASH_LED_SWITCH ? 
FLASH_STROBE_MASK - | FLASH_LED_STROBE_TYPE_HW - : flash_node->trigger - | FLASH_LED_STROBE_TYPE_HW), - FLASH_LED_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, "Strobe disable failed\n"); - goto exit_flash_led_work; - } - - usleep_range(FLASH_RAMP_DN_DELAY_US_MIN, FLASH_RAMP_DN_DELAY_US_MAX); -exit_flash_hdrm_sns: - if (led->pdata->hdrm_sns_ch0_en) { - if (flash_node->id == FLASH_LED_0 || - flash_node->id == FLASH_LED_SWITCH) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL0(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - FLASH_LED_HDRM_SNS_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense disable failed\n"); - goto exit_flash_hdrm_sns; - } - } - } - - if (led->pdata->hdrm_sns_ch1_en) { - if (flash_node->id == FLASH_LED_1 || - flash_node->id == FLASH_LED_SWITCH) { - rc = qpnp_led_masked_write(led, - FLASH_HDRM_SNS_ENABLE_CTRL1(led->base), - FLASH_LED_HDRM_SNS_ENABLE_MASK, - FLASH_LED_HDRM_SNS_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, - "Headroom sense disable failed\n"); - goto exit_flash_hdrm_sns; - } - } - } -exit_flash_led_work: - rc = qpnp_flash_led_module_disable(led, flash_node); - if (rc) { - dev_err(&led->pdev->dev, "Module disable failed\n"); - goto exit_flash_led_work; - } -error_enable_gpio: - if (flash_node->flash_on && flash_node->num_regulators > 0) - flash_regulator_enable(led, flash_node, false); - - flash_node->flash_on = false; - mutex_unlock(&led->flash_led_lock); -} - -static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct flash_node_data *flash_node; - struct qpnp_flash_led *led; - - flash_node = container_of(led_cdev, struct flash_node_data, cdev); - led = dev_get_drvdata(&flash_node->pdev->dev); - - if (value < LED_OFF) { - pr_err("Invalid brightness value\n"); - return; - } - - if (value > flash_node->cdev.max_brightness) - value = flash_node->cdev.max_brightness; - - flash_node->cdev.brightness = value; - if (led->flash_node[led->num_leds - 
1].id == - FLASH_LED_SWITCH) { - if (flash_node->type == TORCH) - led->flash_node[led->num_leds - 1].type = TORCH; - else if (flash_node->type == FLASH) - led->flash_node[led->num_leds - 1].type = FLASH; - - led->flash_node[led->num_leds - 1].max_current - = flash_node->max_current; - - if (flash_node->id == FLASH_LED_0 || - flash_node->id == FLASH_LED_1) { - if (value < FLASH_LED_MIN_CURRENT_MA && value != 0) - value = FLASH_LED_MIN_CURRENT_MA; - - flash_node->prgm_current = value; - flash_node->flash_on = value ? true : false; - } else if (flash_node->id == FLASH_LED_SWITCH) { - if (!value) { - flash_node->prgm_current = 0; - flash_node->prgm_current2 = 0; - } - } - } else { - if (value < FLASH_LED_MIN_CURRENT_MA && value != 0) - value = FLASH_LED_MIN_CURRENT_MA; - flash_node->prgm_current = value; - } - - queue_work(led->ordered_workq, &flash_node->work); -} - -static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) -{ - int rc; - u8 val, temp_val; - uint val_int; - - rc = qpnp_led_masked_write(led, - FLASH_MODULE_ENABLE_CTRL(led->base), - FLASH_MODULE_ENABLE_MASK, - FLASH_LED_MODULE_CTRL_DEFAULT); - if (rc) { - dev_err(&led->pdev->dev, "Module disable failed\n"); - return rc; - } - - rc = qpnp_led_masked_write(led, - FLASH_LED_STROBE_CTRL(led->base), - FLASH_STROBE_MASK, FLASH_LED_DISABLE); - if (rc) { - dev_err(&led->pdev->dev, "Strobe disable failed\n"); - return rc; - } - - rc = qpnp_led_masked_write(led, - FLASH_LED_TMR_CTRL(led->base), - FLASH_TMR_MASK, FLASH_TMR_SAFETY); - if (rc) { - dev_err(&led->pdev->dev, - "LED timer ctrl reg write failed(%d)\n", rc); - return rc; - } - - val = (u8)(led->pdata->headroom / FLASH_LED_HEADROOM_DIVIDER - - FLASH_LED_HEADROOM_OFFSET); - rc = qpnp_led_masked_write(led, - FLASH_HEADROOM(led->base), - FLASH_HEADROOM_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Headroom reg write failed\n"); - return rc; - } - - val = qpnp_flash_led_get_startup_dly(led->pdata->startup_dly); - - rc = 
qpnp_led_masked_write(led, - FLASH_STARTUP_DELAY(led->base), - FLASH_STARTUP_DLY_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Startup delay reg write failed\n"); - return rc; - } - - val = (u8)(led->pdata->clamp_current * FLASH_MAX_LEVEL / - FLASH_LED_MAX_CURRENT_MA); - rc = qpnp_led_masked_write(led, - FLASH_CLAMP_CURRENT(led->base), - FLASH_CURRENT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Clamp current reg write failed\n"); - return rc; - } - - if (led->pdata->pmic_charger_support) - val = FLASH_LED_FLASH_HW_VREG_OK; - else - val = FLASH_LED_FLASH_SW_VREG_OK; - rc = qpnp_led_masked_write(led, - FLASH_VREG_OK_FORCE(led->base), - FLASH_VREG_OK_FORCE_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "VREG OK force reg write failed\n"); - return rc; - } - - if (led->pdata->self_check_en) - val = FLASH_MODULE_ENABLE; - else - val = FLASH_LED_DISABLE; - rc = qpnp_led_masked_write(led, - FLASH_FAULT_DETECT(led->base), - FLASH_FAULT_DETECT_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Fault detect reg write failed\n"); - return rc; - } - - val = 0x0; - val |= led->pdata->mask3_en << FLASH_LED_MASK3_ENABLE_SHIFT; - val |= FLASH_LED_MASK_MODULE_MASK2_ENABLE; - rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base), - FLASH_MASK_MODULE_CONTRL_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Mask module enable failed\n"); - return rc; - } - - rc = regmap_read(led->regmap, FLASH_PERPH_RESET_CTRL(led->base), - &val_int); - if (rc) { - dev_err(&led->pdev->dev, - "Unable to read from address %x, rc(%d)\n", - FLASH_PERPH_RESET_CTRL(led->base), rc); - return -EINVAL; - } - val = (u8)val_int; - - if (led->pdata->follow_rb_disable) { - rc = qpnp_led_masked_write(led, - FLASH_LED_UNLOCK_SECURE(led->base), - FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); - if (rc) { - dev_err(&led->pdev->dev, "Secure reg write failed\n"); - return -EINVAL; - } - - val |= FLASH_FOLLOW_OTST2_RB_MASK; - rc = qpnp_led_masked_write(led, - FLASH_PERPH_RESET_CTRL(led->base), - 
FLASH_FOLLOW_OTST2_RB_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "failed to reset OTST2_RB bit\n"); - return rc; - } - } else { - rc = qpnp_led_masked_write(led, - FLASH_LED_UNLOCK_SECURE(led->base), - FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); - if (rc) { - dev_err(&led->pdev->dev, "Secure reg write failed\n"); - return -EINVAL; - } - - val &= ~FLASH_FOLLOW_OTST2_RB_MASK; - rc = qpnp_led_masked_write(led, - FLASH_PERPH_RESET_CTRL(led->base), - FLASH_FOLLOW_OTST2_RB_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, - "failed to reset OTST2_RB bit\n"); - return rc; - } - } - - if (!led->pdata->thermal_derate_en) - val = 0x0; - else { - val = led->pdata->thermal_derate_en << 7; - val |= led->pdata->thermal_derate_rate << 3; - val |= (led->pdata->thermal_derate_threshold - - FLASH_LED_THERMAL_THRESHOLD_MIN) / - FLASH_LED_THERMAL_DEVIDER; - } - rc = qpnp_led_masked_write(led, - FLASH_THERMAL_DRATE(led->base), - FLASH_THERMAL_DERATE_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Thermal derate reg write failed\n"); - return rc; - } - - if (!led->pdata->current_ramp_en) - val = 0x0; - else { - val = led->pdata->current_ramp_en << 7; - val |= led->pdata->ramp_up_step << 3; - val |= led->pdata->ramp_dn_step; - } - rc = qpnp_led_masked_write(led, - FLASH_CURRENT_RAMP(led->base), - FLASH_CURRENT_RAMP_MASK, val); - if (rc) { - dev_err(&led->pdev->dev, "Current ramp reg write failed\n"); - return rc; - } - - if (!led->pdata->vph_pwr_droop_en) - val = 0x0; - else { - val = led->pdata->vph_pwr_droop_en << 7; - val |= ((led->pdata->vph_pwr_droop_threshold - - FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV) / - FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER) << 4; - temp_val = - qpnp_flash_led_get_droop_debounce_time( - led->pdata->vph_pwr_droop_debounce_time); - if (temp_val == 0xFF) { - dev_err(&led->pdev->dev, "Invalid debounce time\n"); - return temp_val; - } - - val |= temp_val; - } - rc = qpnp_led_masked_write(led, - FLASH_VPH_PWR_DROOP(led->base), - FLASH_VPH_PWR_DROOP_MASK, 
val); - if (rc) { - dev_err(&led->pdev->dev, "VPH PWR droop reg write failed\n"); - return rc; - } - - led->battery_psy = power_supply_get_by_name("battery"); - if (!led->battery_psy) { - dev_err(&led->pdev->dev, - "Failed to get battery power supply\n"); - return -EINVAL; - } - - return 0; -} - -static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, - struct flash_node_data *flash_node) -{ - const char *temp_string; - struct device_node *node = flash_node->cdev.dev->of_node; - struct device_node *temp = NULL; - int rc = 0, num_regs = 0; - u32 val; - - rc = of_property_read_string(node, "label", &temp_string); - if (!rc) { - if (strcmp(temp_string, "flash") == 0) - flash_node->type = FLASH; - else if (strcmp(temp_string, "torch") == 0) - flash_node->type = TORCH; - else if (strcmp(temp_string, "switch") == 0) - flash_node->type = SWITCH; - else { - dev_err(&led->pdev->dev, "Wrong flash LED type\n"); - return -EINVAL; - } - } else if (rc < 0) { - dev_err(&led->pdev->dev, "Unable to read flash type\n"); - return rc; - } - - rc = of_property_read_u32(node, "qcom,current", &val); - if (!rc) { - if (val < FLASH_LED_MIN_CURRENT_MA) - val = FLASH_LED_MIN_CURRENT_MA; - flash_node->prgm_current = val; - } else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, "Unable to read current\n"); - return rc; - } - - rc = of_property_read_u32(node, "qcom,id", &val); - if (!rc) - flash_node->id = (u8)val; - else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, "Unable to read led ID\n"); - return rc; - } - - if (flash_node->type == SWITCH || flash_node->type == FLASH) { - rc = of_property_read_u32(node, "qcom,duration", &val); - if (!rc) - flash_node->duration = (u16)val; - else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, "Unable to read duration\n"); - return rc; - } - } - - switch (led->peripheral_type) { - case FLASH_SUBTYPE_SINGLE: - flash_node->trigger = FLASH_LED0_TRIGGER; - break; - case FLASH_SUBTYPE_DUAL: - if (flash_node->id == FLASH_LED_0) - 
flash_node->trigger = FLASH_LED0_TRIGGER; - else if (flash_node->id == FLASH_LED_1) - flash_node->trigger = FLASH_LED1_TRIGGER; - break; - default: - dev_err(&led->pdev->dev, "Invalid peripheral type\n"); - } - - while ((temp = of_get_next_child(node, temp))) { - if (of_find_property(temp, "regulator-name", NULL)) - num_regs++; - } - - if (num_regs) - flash_node->num_regulators = num_regs; - - return rc; -} - -static int qpnp_flash_led_parse_common_dt( - struct qpnp_flash_led *led, - struct device_node *node) -{ - int rc; - u32 val, temp_val; - const char *temp; - - led->pdata->headroom = FLASH_LED_HEADROOM_DEFAULT_MV; - rc = of_property_read_u32(node, "qcom,headroom", &val); - if (!rc) - led->pdata->headroom = (u16)val; - else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, "Unable to read headroom\n"); - return rc; - } - - led->pdata->startup_dly = FLASH_LED_STARTUP_DELAY_DEFAULT_US; - rc = of_property_read_u32(node, "qcom,startup-dly", &val); - if (!rc) - led->pdata->startup_dly = (u8)val; - else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, "Unable to read startup delay\n"); - return rc; - } - - led->pdata->clamp_current = FLASH_LED_CLAMP_CURRENT_DEFAULT_MA; - rc = of_property_read_u32(node, "qcom,clamp-current", &val); - if (!rc) { - if (val < FLASH_LED_MIN_CURRENT_MA) - val = FLASH_LED_MIN_CURRENT_MA; - led->pdata->clamp_current = (u16)val; - } else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, "Unable to read clamp current\n"); - return rc; - } - - led->pdata->pmic_charger_support = - of_property_read_bool(node, - "qcom,pmic-charger-support"); - - led->pdata->self_check_en = - of_property_read_bool(node, "qcom,self-check-enabled"); - - led->pdata->thermal_derate_en = - of_property_read_bool(node, - "qcom,thermal-derate-enabled"); - - if (led->pdata->thermal_derate_en) { - led->pdata->thermal_derate_rate = - FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT; - rc = of_property_read_string(node, "qcom,thermal-derate-rate", - &temp); - if (!rc) { - temp_val = - 
qpnp_flash_led_get_thermal_derate_rate(temp); - if (temp_val < 0) { - dev_err(&led->pdev->dev, - "Invalid thermal derate rate\n"); - return -EINVAL; - } - - led->pdata->thermal_derate_rate = (u8)temp_val; - } else { - dev_err(&led->pdev->dev, - "Unable to read thermal derate rate\n"); - return -EINVAL; - } - - led->pdata->thermal_derate_threshold = - FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C; - rc = of_property_read_u32(node, "qcom,thermal-derate-threshold", - &val); - if (!rc) - led->pdata->thermal_derate_threshold = (u8)val; - else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, - "Unable to read thermal derate threshold\n"); - return rc; - } - } - - led->pdata->current_ramp_en = - of_property_read_bool(node, - "qcom,current-ramp-enabled"); - if (led->pdata->current_ramp_en) { - led->pdata->ramp_up_step = FLASH_LED_RAMP_UP_STEP_DEFAULT_US; - rc = of_property_read_string(node, "qcom,ramp_up_step", &temp); - if (!rc) { - temp_val = qpnp_flash_led_get_ramp_step(temp); - if (temp_val < 0) { - dev_err(&led->pdev->dev, - "Invalid ramp up step values\n"); - return -EINVAL; - } - led->pdata->ramp_up_step = (u8)temp_val; - } else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, - "Unable to read ramp up steps\n"); - return rc; - } - - led->pdata->ramp_dn_step = FLASH_LED_RAMP_DN_STEP_DEFAULT_US; - rc = of_property_read_string(node, "qcom,ramp_dn_step", &temp); - if (!rc) { - temp_val = qpnp_flash_led_get_ramp_step(temp); - if (temp_val < 0) { - dev_err(&led->pdev->dev, - "Invalid ramp down step values\n"); - return rc; - } - led->pdata->ramp_dn_step = (u8)temp_val; - } else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, - "Unable to read ramp down steps\n"); - return rc; - } - } - - led->pdata->vph_pwr_droop_en = of_property_read_bool(node, - "qcom,vph-pwr-droop-enabled"); - if (led->pdata->vph_pwr_droop_en) { - led->pdata->vph_pwr_droop_threshold = - FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV; - rc = of_property_read_u32(node, - "qcom,vph-pwr-droop-threshold", 
&val); - if (!rc) { - led->pdata->vph_pwr_droop_threshold = (u16)val; - } else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, - "Unable to read VPH PWR droop threshold\n"); - return rc; - } - - led->pdata->vph_pwr_droop_debounce_time = - FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US; - rc = of_property_read_u32(node, - "qcom,vph-pwr-droop-debounce-time", &val); - if (!rc) - led->pdata->vph_pwr_droop_debounce_time = (u8)val; - else if (rc != -EINVAL) { - dev_err(&led->pdev->dev, - "Unable to read VPH PWR droop debounce time\n"); - return rc; - } - } - - led->pdata->hdrm_sns_ch0_en = of_property_read_bool(node, - "qcom,headroom-sense-ch0-enabled"); - - led->pdata->hdrm_sns_ch1_en = of_property_read_bool(node, - "qcom,headroom-sense-ch1-enabled"); - - led->pdata->power_detect_en = of_property_read_bool(node, - "qcom,power-detect-enabled"); - - led->pdata->mask3_en = of_property_read_bool(node, - "qcom,otst2-module-enabled"); - - led->pdata->follow_rb_disable = of_property_read_bool(node, - "qcom,follow-otst2-rb-disabled"); - - led->pdata->die_current_derate_en = of_property_read_bool(node, - "qcom,die-current-derate-enabled"); - - if (led->pdata->die_current_derate_en) { - led->vadc_dev = qpnp_get_vadc(&led->pdev->dev, "die-temp"); - if (IS_ERR(led->vadc_dev)) { - pr_err("VADC channel property Missing\n"); - return -EINVAL; - } - - if (of_find_property(node, "qcom,die-temp-threshold", - &led->pdata->temp_threshold_num)) { - if (led->pdata->temp_threshold_num > 0) { - led->pdata->die_temp_threshold_degc = - devm_kzalloc(&led->pdev->dev, - led->pdata->temp_threshold_num, - GFP_KERNEL); - - if (led->pdata->die_temp_threshold_degc - == NULL) { - dev_err(&led->pdev->dev, - "failed to allocate die temp array\n"); - return -ENOMEM; - } - led->pdata->temp_threshold_num /= - sizeof(unsigned int); - - rc = of_property_read_u32_array(node, - "qcom,die-temp-threshold", - led->pdata->die_temp_threshold_degc, - led->pdata->temp_threshold_num); - if (rc) { - 
dev_err(&led->pdev->dev, - "couldn't read temp threshold rc=%d\n", - rc); - return rc; - } - } - } - - if (of_find_property(node, "qcom,die-temp-derate-current", - &led->pdata->temp_derate_curr_num)) { - if (led->pdata->temp_derate_curr_num > 0) { - led->pdata->die_temp_derate_curr_ma = - devm_kzalloc(&led->pdev->dev, - led->pdata->temp_derate_curr_num, - GFP_KERNEL); - if (led->pdata->die_temp_derate_curr_ma - == NULL) { - dev_err(&led->pdev->dev, - "failed to allocate die derate current array\n"); - return -ENOMEM; - } - led->pdata->temp_derate_curr_num /= - sizeof(unsigned int); - - rc = of_property_read_u32_array(node, - "qcom,die-temp-derate-current", - led->pdata->die_temp_derate_curr_ma, - led->pdata->temp_derate_curr_num); - if (rc) { - dev_err(&led->pdev->dev, - "couldn't read temp limits rc =%d\n", - rc); - return rc; - } - } - } - if (led->pdata->temp_threshold_num != - led->pdata->temp_derate_curr_num) { - pr_err("Both array size are not same\n"); - return -EINVAL; - } - } - - led->pinctrl = devm_pinctrl_get(&led->pdev->dev); - if (IS_ERR_OR_NULL(led->pinctrl)) { - dev_err(&led->pdev->dev, "Unable to acquire pinctrl\n"); - led->pinctrl = NULL; - return 0; - } - - led->gpio_state_active = pinctrl_lookup_state(led->pinctrl, - "flash_led_enable"); - if (IS_ERR_OR_NULL(led->gpio_state_active)) { - dev_err(&led->pdev->dev, "Cannot lookup LED active state\n"); - devm_pinctrl_put(led->pinctrl); - led->pinctrl = NULL; - return PTR_ERR(led->gpio_state_active); - } - - led->gpio_state_suspend = pinctrl_lookup_state(led->pinctrl, - "flash_led_disable"); - if (IS_ERR_OR_NULL(led->gpio_state_suspend)) { - dev_err(&led->pdev->dev, "Cannot lookup LED disable state\n"); - devm_pinctrl_put(led->pinctrl); - led->pinctrl = NULL; - return PTR_ERR(led->gpio_state_suspend); - } - - return 0; -} - -static int qpnp_flash_led_probe(struct platform_device *pdev) -{ - struct qpnp_flash_led *led; - unsigned int base; - struct device_node *node, *temp; - struct dentry *root, *file; 
- int rc, i = 0, j, num_leds = 0; - u32 val; - - root = NULL; - node = pdev->dev.of_node; - if (node == NULL) { - dev_info(&pdev->dev, "No flash device defined\n"); - return -ENODEV; - } - - rc = of_property_read_u32(pdev->dev.of_node, "reg", &base); - if (rc < 0) { - dev_err(&pdev->dev, - "Couldn't find reg in node = %s rc = %d\n", - pdev->dev.of_node->full_name, rc); - return rc; - } - - led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL); - if (!led) - return -ENOMEM; - - led->regmap = dev_get_regmap(pdev->dev.parent, NULL); - if (!led->regmap) { - dev_err(&pdev->dev, "Couldn't get parent's regmap\n"); - return -EINVAL; - } - - led->base = base; - led->pdev = pdev; - led->current_addr = FLASH_LED0_CURRENT(led->base); - led->current2_addr = FLASH_LED1_CURRENT(led->base); - - led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL); - if (!led->pdata) - return -ENOMEM; - - led->peripheral_type = (u8)qpnp_flash_led_get_peripheral_type(led); - if (led->peripheral_type < 0) { - dev_err(&pdev->dev, "Failed to get peripheral type\n"); - return rc; - } - - rc = qpnp_flash_led_parse_common_dt(led, node); - if (rc) { - dev_err(&pdev->dev, - "Failed to get common config for flash LEDs\n"); - return rc; - } - - rc = qpnp_flash_led_init_settings(led); - if (rc) { - dev_err(&pdev->dev, "Failed to initialize flash LED\n"); - return rc; - } - - rc = qpnp_get_pmic_revid(led); - if (rc) - return rc; - - temp = NULL; - while ((temp = of_get_next_child(node, temp))) - num_leds++; - - if (!num_leds) - return -ECHILD; - - led->flash_node = devm_kzalloc(&pdev->dev, - (sizeof(struct flash_node_data) * num_leds), - GFP_KERNEL); - if (!led->flash_node) { - dev_err(&pdev->dev, "Unable to allocate memory\n"); - return -ENOMEM; - } - - mutex_init(&led->flash_led_lock); - - led->ordered_workq = alloc_ordered_workqueue("flash_led_workqueue", 0); - if (!led->ordered_workq) { - dev_err(&pdev->dev, "Failed to allocate ordered workqueue\n"); - return -ENOMEM; - } - - 
for_each_child_of_node(node, temp) { - led->flash_node[i].cdev.brightness_set = - qpnp_flash_led_brightness_set; - led->flash_node[i].cdev.brightness_get = - qpnp_flash_led_brightness_get; - led->flash_node[i].pdev = pdev; - - INIT_WORK(&led->flash_node[i].work, qpnp_flash_led_work); - rc = of_property_read_string(temp, "qcom,led-name", - &led->flash_node[i].cdev.name); - if (rc < 0) { - dev_err(&led->pdev->dev, - "Unable to read flash name\n"); - return rc; - } - - rc = of_property_read_string(temp, "qcom,default-led-trigger", - &led->flash_node[i].cdev.default_trigger); - if (rc < 0) { - dev_err(&led->pdev->dev, - "Unable to read trigger name\n"); - return rc; - } - - rc = of_property_read_u32(temp, "qcom,max-current", &val); - if (!rc) { - if (val < FLASH_LED_MIN_CURRENT_MA) - val = FLASH_LED_MIN_CURRENT_MA; - led->flash_node[i].max_current = (u16)val; - led->flash_node[i].cdev.max_brightness = val; - } else { - dev_err(&led->pdev->dev, - "Unable to read max current\n"); - return rc; - } - rc = led_classdev_register(&pdev->dev, - &led->flash_node[i].cdev); - if (rc) { - dev_err(&pdev->dev, "Unable to register led\n"); - goto error_led_register; - } - - led->flash_node[i].cdev.dev->of_node = temp; - - rc = qpnp_flash_led_parse_each_led_dt(led, &led->flash_node[i]); - if (rc) { - dev_err(&pdev->dev, - "Failed to parse config for each LED\n"); - goto error_led_register; - } - - if (led->flash_node[i].num_regulators) { - rc = flash_regulator_parse_dt(led, &led->flash_node[i]); - if (rc) { - dev_err(&pdev->dev, - "Unable to parse regulator data\n"); - goto error_led_register; - } - - rc = flash_regulator_setup(led, &led->flash_node[i], - true); - if (rc) { - dev_err(&pdev->dev, - "Unable to set up regulator\n"); - goto error_led_register; - } - } - - for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) { - rc = - sysfs_create_file(&led->flash_node[i].cdev.dev->kobj, - &qpnp_flash_led_attrs[j].attr); - if (rc) - goto error_led_register; - } - - i++; - } - - 
led->num_leds = i; - - root = debugfs_create_dir("flashLED", NULL); - if (IS_ERR_OR_NULL(root)) { - pr_err("Error creating top level directory err%ld", - (long)root); - if (PTR_ERR(root) == -ENODEV) - pr_err("debugfs is not enabled in kernel"); - goto error_led_debugfs; - } - - led->dbgfs_root = root; - file = debugfs_create_file("enable_debug", 0600, root, led, - &flash_led_dfs_dbg_feature_fops); - if (!file) { - pr_err("error creating 'enable_debug' entry\n"); - goto error_led_debugfs; - } - - file = debugfs_create_file("latched", 0600, root, led, - &flash_led_dfs_latched_reg_fops); - if (!file) { - pr_err("error creating 'latched' entry\n"); - goto error_led_debugfs; - } - - file = debugfs_create_file("strobe", 0600, root, led, - &flash_led_dfs_strobe_reg_fops); - if (!file) { - pr_err("error creating 'strobe' entry\n"); - goto error_led_debugfs; - } - - dev_set_drvdata(&pdev->dev, led); - - return 0; - -error_led_debugfs: - i = led->num_leds - 1; - j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1; -error_led_register: - for (; i >= 0; i--) { - for (; j >= 0; j--) - sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj, - &qpnp_flash_led_attrs[j].attr); - j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1; - led_classdev_unregister(&led->flash_node[i].cdev); - } - debugfs_remove_recursive(root); - mutex_destroy(&led->flash_led_lock); - destroy_workqueue(led->ordered_workq); - - return rc; -} - -static int qpnp_flash_led_remove(struct platform_device *pdev) -{ - struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev); - int i, j; - - for (i = led->num_leds - 1; i >= 0; i--) { - if (led->flash_node[i].reg_data) { - if (led->flash_node[i].flash_on) - flash_regulator_enable(led, - &led->flash_node[i], false); - flash_regulator_setup(led, &led->flash_node[i], - false); - } - for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) - sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj, - &qpnp_flash_led_attrs[j].attr); - led_classdev_unregister(&led->flash_node[i].cdev); - } - 
debugfs_remove_recursive(led->dbgfs_root); - mutex_destroy(&led->flash_led_lock); - destroy_workqueue(led->ordered_workq); - - return 0; -} - -static const struct of_device_id spmi_match_table[] = { - { .compatible = "qcom,qpnp-flash-led",}, - { }, -}; - -static struct platform_driver qpnp_flash_led_driver = { - .driver = { - .name = "qcom,qpnp-flash-led", - .of_match_table = spmi_match_table, - }, - .probe = qpnp_flash_led_probe, - .remove = qpnp_flash_led_remove, -}; - -static int __init qpnp_flash_led_init(void) -{ - return platform_driver_register(&qpnp_flash_led_driver); -} -late_initcall(qpnp_flash_led_init); - -static void __exit qpnp_flash_led_exit(void) -{ - platform_driver_unregister(&qpnp_flash_led_driver); -} -module_exit(qpnp_flash_led_exit); - -MODULE_DESCRIPTION("QPNP Flash LED driver"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("leds:leds-qpnp-flash"); diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index a9859489acf6c46ce9f88f26bb6db616ad5bf2bf..596347f345db83c34be61aa8fea96e21142071ac 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -287,8 +287,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk) } page = mempool_alloc(rrpc->page_pool, GFP_NOIO); - if (!page) + if (!page) { + bio_put(bio); return -ENOMEM; + } while ((slot = find_first_zero_bit(rblk->invalid_pages, nr_pgs_per_blk)) < nr_pgs_per_blk) { diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index e85bcae50f65f68c2035efcbb328006d1bf45bb7..e540b7942ebac2b387dd609ed0c7de1f22515040 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -112,8 +112,7 @@ struct iv_tcw_private { * and encrypts / decrypts at the same time. */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, - DM_CRYPT_EXIT_THREAD}; + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; /* * The fields in here must be read only after initialization. 
@@ -1204,18 +1203,20 @@ continue_locked: if (!RB_EMPTY_ROOT(&cc->write_tree)) goto pop_from_list; - if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { - spin_unlock_irq(&cc->write_thread_wait.lock); - break; - } - - __set_current_state(TASK_INTERRUPTIBLE); + set_current_state(TASK_INTERRUPTIBLE); __add_wait_queue(&cc->write_thread_wait, &wait); spin_unlock_irq(&cc->write_thread_wait.lock); + if (unlikely(kthread_should_stop())) { + set_task_state(current, TASK_RUNNING); + remove_wait_queue(&cc->write_thread_wait, &wait); + break; + } + schedule(); + set_task_state(current, TASK_RUNNING); spin_lock_irq(&cc->write_thread_wait.lock); __remove_wait_queue(&cc->write_thread_wait, &wait); goto continue_locked; @@ -1530,13 +1531,8 @@ static void crypt_dtr(struct dm_target *ti) if (!cc) return; - if (cc->write_thread) { - spin_lock_irq(&cc->write_thread_wait.lock); - set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); - wake_up_locked(&cc->write_thread_wait); - spin_unlock_irq(&cc->write_thread_wait.lock); + if (cc->write_thread) kthread_stop(cc->write_thread); - } if (cc->io_queue) destroy_workqueue(cc->io_queue); @@ -1928,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } + /* + * Check if bio is too large, split as needed. 
+ */ + if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && + bio_data_dir(bio) == WRITE) + dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); + io = dm_per_bio_data(bio, cc->per_bio_data_size); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); io->ctx.req = (struct ablkcipher_request *)(io + 1); diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index cd0a93df4cb77edcbf198e158cfb240fd81259d2..8e9e928dafba398bb6e1ac19445508594c1c2807 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) pb->bio_submitted = true; /* - * Map reads as normal only if corrupt_bio_byte set. + * Error reads if neither corrupt_bio_byte or drop_writes are set. + * Otherwise, flakey_end_io() will decide if the reads should be modified. */ if (bio_data_dir(bio) == READ) { - /* If flags were specified, only corrupt those that match. */ - if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && - all_corrupt_bio_flags_match(bio, fc)) - goto map_bio; - else + if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags)) return -EIO; + goto map_bio; } /* @@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) struct flakey_c *fc = ti->private; struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); - /* - * Corrupt successful READs while in down state. - */ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { - if (fc->corrupt_bio_byte) + if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && + all_corrupt_bio_flags_match(bio, fc)) { + /* + * Corrupt successful matching READs while in down state. + */ corrupt_bio_data(bio, fc); - else + + } else if (!test_bit(DROP_WRITES, &fc->flags)) { + /* + * Error read during the down_interval if drop_writes + * wasn't configured. 
+ */ return -EIO; + } } return error; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 624589d51c2cb67828567ad34b02d283e5965956..c8b513ee117c9cd9b7e5e0e18efbc2611a9cba7c 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -258,12 +258,12 @@ static int log_one_block(struct log_writes_c *lc, goto out; sector++; - bio = bio_alloc(GFP_KERNEL, block->vec_cnt); + atomic_inc(&lc->io_blocks); + bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); if (!bio) { DMERR("Couldn't alloc log bio"); goto error; } - atomic_inc(&lc->io_blocks); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; bio->bi_bdev = lc->logdev->bdev; @@ -280,7 +280,7 @@ static int log_one_block(struct log_writes_c *lc, if (ret != block->vecs[i].bv_len) { atomic_inc(&lc->io_blocks); submit_bio(WRITE, bio); - bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); + bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES)); if (!bio) { DMERR("Couldn't alloc log bio"); goto error; @@ -456,9 +456,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - ret = -EINVAL; lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); - if (!lc->log_kthread) { + if (IS_ERR(lc->log_kthread)) { + ret = PTR_ERR(lc->log_kthread); ti->error = "Couldn't alloc kthread"; dm_put_device(ti, lc->dev); dm_put_device(ti, lc->logdev); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index cfa29f574c2a9e1454788a5757471835b254d857..5b2ef966012b585874ca0bc796d1ff47309c8313 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1220,10 +1220,10 @@ static void activate_path(struct work_struct *work) { struct pgpath *pgpath = container_of(work, struct pgpath, activate_path.work); + struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); - if (pgpath->is_active) - scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), - pg_init_done, pgpath); + if (pgpath->is_active && 
!blk_queue_dying(q)) + scsi_dh_activate(q, pg_init_done, pgpath); else pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index f2a363a89629902350649cb818f6159ecda1afd9..115bd3846c3f060e431e7410a6e2b0a1fd162c47 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1288,6 +1288,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) dm_bio_restore(bd, bio); bio_record->details.bi_bdev = NULL; + bio->bi_error = 0; queue_bio(ms, bio, rw); return DM_ENDIO_INCOMPLETE; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d8615788b17d4f5b73689107eb823f4a00334a20..dd1cde5458b862c990dba3f8de1f335394535fef 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2267,8 +2267,6 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->bs) bioset_free(md->bs); - cleanup_srcu_struct(&md->io_barrier); - if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; @@ -2280,6 +2278,8 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->queue) blk_cleanup_queue(md->queue); + cleanup_srcu_struct(&md->io_barrier); + if (md->bdev) { bdput(md->bdev); md->bdev = NULL; @@ -2876,6 +2876,7 @@ EXPORT_SYMBOL_GPL(dm_device_name); static void __dm_destroy(struct mapped_device *md, bool wait) { + struct request_queue *q = dm_get_md_queue(md); struct dm_table *map; int srcu_idx; @@ -2886,6 +2887,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); + spin_lock_irq(q->queue_lock); + queue_flag_set(QUEUE_FLAG_DYING, q); + spin_unlock_irq(q->queue_lock); + if (dm_request_based(md) && md->kworker_task) flush_kthread_worker(&md->kworker); @@ -3252,10 +3257,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map) int dm_resume(struct mapped_device *md) { - int r = -EINVAL; + int r; struct dm_table *map = NULL; retry: + r = -EINVAL; mutex_lock_nested(&md->suspend_lock, 
SINGLE_DEPTH_NESTING); if (!dm_suspended_md(md)) @@ -3279,8 +3285,6 @@ retry: goto out; clear_bit(DMF_SUSPENDED, &md->flags); - - r = 0; out: mutex_unlock(&md->suspend_lock); diff --git a/drivers/md/md.c b/drivers/md/md.c index c57fdf847b4767bb86c8b42548fe221785fd9b2e..c1c7d4fb4b775dea78ac3d796ed332d7b6b5f68a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7572,16 +7572,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations); int md_setup_cluster(struct mddev *mddev, int nodes) { - int err; - - err = request_module("md-cluster"); - if (err) { - pr_err("md-cluster module not found.\n"); - return -ENOENT; - } - + if (!md_cluster_ops) + request_module("md-cluster"); spin_lock(&pers_lock); + /* ensure module won't be unloaded */ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { + pr_err("can't find md-cluster module or get it's reference.\n"); spin_unlock(&pers_lock); return -ENOENT; } diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index cfc005ee11d8704e754008cdda3a775ac91613c2..7fc72de2434cfb3a50fd65160439f978198b0ecb 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c @@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = { }; static struct regdata mb86a20s_init2[] = { - { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 }, + { 0x50, 0xd1 }, { 0x51, 0x22 }, + { 0x39, 0x01 }, + { 0x71, 0x00 }, { 0x3b, 0x21 }, - { 0x3c, 0x38 }, + { 0x3c, 0x3a }, { 0x01, 0x0d }, - { 0x04, 0x08 }, { 0x05, 0x03 }, + { 0x04, 0x08 }, { 0x05, 0x05 }, { 0x04, 0x0e }, { 0x05, 0x00 }, - { 0x04, 0x0f }, { 0x05, 0x37 }, - { 0x04, 0x0b }, { 0x05, 0x78 }, + { 0x04, 0x0f }, { 0x05, 0x14 }, + { 0x04, 0x0b }, { 0x05, 0x8c }, { 0x04, 0x00 }, { 0x05, 0x00 }, - { 0x04, 0x01 }, { 0x05, 0x1e }, - { 0x04, 0x02 }, { 0x05, 0x07 }, - { 0x04, 0x03 }, { 0x05, 0xd0 }, + { 0x04, 0x01 }, { 0x05, 0x07 }, + { 0x04, 0x02 }, { 0x05, 0x0f }, + { 0x04, 0x03 }, { 0x05, 0xa0 }, { 0x04, 0x09 }, { 0x05, 
0x00 }, { 0x04, 0x0a }, { 0x05, 0xff }, - { 0x04, 0x27 }, { 0x05, 0x00 }, + { 0x04, 0x27 }, { 0x05, 0x64 }, { 0x04, 0x28 }, { 0x05, 0x00 }, - { 0x04, 0x1e }, { 0x05, 0x00 }, - { 0x04, 0x29 }, { 0x05, 0x64 }, - { 0x04, 0x32 }, { 0x05, 0x02 }, + { 0x04, 0x1e }, { 0x05, 0xff }, + { 0x04, 0x29 }, { 0x05, 0x0a }, + { 0x04, 0x32 }, { 0x05, 0x0a }, { 0x04, 0x14 }, { 0x05, 0x02 }, { 0x04, 0x04 }, { 0x05, 0x00 }, { 0x04, 0x05 }, { 0x05, 0x22 }, @@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = { { 0x04, 0x07 }, { 0x05, 0xd8 }, { 0x04, 0x12 }, { 0x05, 0x00 }, { 0x04, 0x13 }, { 0x05, 0xff }, - { 0x04, 0x15 }, { 0x05, 0x4e }, - { 0x04, 0x16 }, { 0x05, 0x20 }, /* * On this demod, when the bit count reaches the count below, @@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = { { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */ { 0x45, 0x04 }, /* CN symbol 4 */ { 0x48, 0x04 }, /* CN manual mode */ - + { 0x50, 0xd5 }, { 0x51, 0x01 }, { 0x50, 0xd6 }, { 0x51, 0x1f }, { 0x50, 0xd2 }, { 0x51, 0x03 }, - { 0x50, 0xd7 }, { 0x51, 0xbf }, - { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff }, - { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c }, - - { 0x04, 0x40 }, { 0x05, 0x00 }, - { 0x28, 0x00 }, { 0x2b, 0x08 }, - { 0x28, 0x05 }, { 0x2b, 0x00 }, + { 0x50, 0xd7 }, { 0x51, 0x3f }, { 0x1c, 0x01 }, - { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f }, - { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 }, - { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 }, - { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 }, - { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 }, - { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, - { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 }, - { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 }, - { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b }, - { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 }, - { 
0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d }, - { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 }, - { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b }, - { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, - { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 }, - { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 }, - { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 }, - { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, - { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, - { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef }, - { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 }, - { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 }, - { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d }, - { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 }, - { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba }, + { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 }, + { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d }, + { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, + { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 }, + { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 }, + { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 }, + { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, + { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 }, + { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e }, + { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e }, + { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 }, + { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, + { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 }, + { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 }, + { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe }, + { 0x28, 0x15 }, { 
0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 }, + { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee }, + { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 }, + { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f }, + { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 }, + { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 }, + { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a }, + { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc }, + { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba }, + { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 }, { 0x50, 0x1e }, { 0x51, 0x5d }, { 0x50, 0x22 }, { 0x51, 0x00 }, { 0x50, 0x23 }, { 0x51, 0xc8 }, @@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = { { 0x50, 0x26 }, { 0x51, 0x00 }, { 0x50, 0x27 }, { 0x51, 0xc3 }, { 0x50, 0x39 }, { 0x51, 0x02 }, - { 0xec, 0x0f }, - { 0xeb, 0x1f }, - { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, + { 0x50, 0xd5 }, { 0x51, 0x01 }, { 0xd0, 0x00 }, }; @@ -317,7 +309,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status) if (val >= 7) *status |= FE_HAS_SYNC; - if (val >= 8) /* Maybe 9? */ + /* + * Actually, on state S8, it starts receiving TS, but the TS + * output is only on normal state after the transition to S9. 
+ */ + if (val >= 9) *status |= FE_HAS_LOCK; dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n", @@ -2067,6 +2063,11 @@ static void mb86a20s_release(struct dvb_frontend *fe) kfree(state); } +static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe) +{ + return DVBFE_ALGO_HW; +} + static struct dvb_frontend_ops mb86a20s_ops; struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config, @@ -2140,6 +2141,7 @@ static struct dvb_frontend_ops mb86a20s_ops = { .read_status = mb86a20s_read_status_and_stats, .read_signal_strength = mb86a20s_read_signal_strength_from_cache, .tune = mb86a20s_tune, + .get_frontend_algo = mb86a20s_get_frontend_algo, }; MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware"); diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index f0480d687f174b259affc527713a394187e069ee..ba780c45f6450847e87be7a6ee21c219988e6c04 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe, sdinfo = &cfg->sub_devs[i]; client = v4l2_get_subdevdata(sdinfo->sd); if (client->addr == curr_client->addr && - client->adapter->nr == client->adapter->nr) { + client->adapter->nr == curr_client->adapter->nr) { if (vpfe->current_input >= 1) return -1; *app_input_index = j + vpfe->current_input; diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c index 5aa8a59128a8a1676ef381d3073685e4ff4df581..1628c098622f3fb56a4599a380060714fcd43d9e 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c @@ -133,7 +133,7 @@ static void msm_ispif_get_pack_mask_from_cfg( pack_mask[0] |= temp; CDBG("%s:num %d cid %d mode %d pack_mask %x %x\n", __func__, entry->num_cids, entry->cids[i], - pack_cfg[i].pack_mode, + 
pack_cfg[entry->cids[i]].pack_mode, pack_mask[0], pack_mask[1]); } @@ -165,6 +165,16 @@ static int msm_ispif_config2(struct ispif_device *ispif, return rc; } + for (i = 0; i < params->num; i++) { + int j; + + if (params->entries[i].num_cids > MAX_CID_CH_PARAM_ENTRY) + return -EINVAL; + for (j = 0; j < params->entries[i].num_cids; j++) + if (params->entries[i].cids[j] >= CID_MAX) + return -EINVAL; + } + for (i = 0; i < params->num; i++) { intftype = params->entries[i].intftype; vfe_intf = params->entries[i].vfe_intf; diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index 368df242a7076ada7d8dece2d5814ccdb22efa87..a63279715de6a6862044d5e7e1fedb7792026615 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -222,6 +222,14 @@ static int msm_v4l2_enum_framesizes(struct file *file, void *fh, return msm_vidc_enum_framesizes((void *)vidc_inst, fsize); } +static int msm_v4l2_queryctrl(struct file *file, void *fh, + struct v4l2_queryctrl *ctrl) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_query_ctrl((void *)vidc_inst, ctrl); +} + static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { .vidioc_querycap = msm_v4l2_querycap, .vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt, @@ -238,6 +246,7 @@ static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { .vidioc_streamoff = msm_v4l2_streamoff, .vidioc_s_ctrl = msm_v4l2_s_ctrl, .vidioc_g_ctrl = msm_v4l2_g_ctrl, + .vidioc_queryctrl = msm_v4l2_queryctrl, .vidioc_s_ext_ctrls = msm_v4l2_s_ext_ctrl, .vidioc_subscribe_event = msm_v4l2_subscribe_event, 
.vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event, diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index ec3246ed9d67fb39fe61647634c0c7a527dcf4ef..644203b6599944c49e60030d5f925cb0822162a7 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -110,6 +110,34 @@ int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f) } EXPORT_SYMBOL(msm_vidc_enum_fmt); +int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl) +{ + struct msm_vidc_inst *inst = instance; + int rc = 0; + + if (!inst || !ctrl) + return -EINVAL; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE: + ctrl->maximum = inst->capability.hier_p_hybrid.max; + ctrl->minimum = inst->capability.hier_p_hybrid.min; + break; + case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS: + ctrl->maximum = inst->capability.hier_b.max; + ctrl->minimum = inst->capability.hier_b.min; + break; + case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS: + ctrl->maximum = inst->capability.hier_p.max; + ctrl->minimum = inst->capability.hier_p.min; + break; + default: + rc = -EINVAL; + } + return rc; +} +EXPORT_SYMBOL(msm_vidc_query_ctrl); + int msm_vidc_s_fmt(void *instance, struct v4l2_format *f) { struct msm_vidc_inst *inst = instance; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 2aebc62d09d59b55639f041c0ba85f9dab0a25e3..8b459e4da618b5e87aca022d30affb8d3b20da62 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -959,6 +959,48 @@ static void print_cap(const char *type, type, cap->min, cap->max, cap->step_size); } + +static void msm_vidc_comm_update_ctrl_limits(struct msm_vidc_inst *inst) +{ + struct v4l2_ctrl *ctrl = NULL; + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, + V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE); + if (ctrl) { + 
v4l2_ctrl_modify_range(ctrl, inst->capability.hier_p_hybrid.min, + inst->capability.hier_p_hybrid.max, ctrl->step, + inst->capability.hier_p_hybrid.min); + dprintk(VIDC_DBG, + "%s: Updated Range = %lld --> %lld Def value = %lld\n", + ctrl->name, ctrl->minimum, ctrl->maximum, + ctrl->default_value); + } + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, + V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS); + if (ctrl) { + v4l2_ctrl_modify_range(ctrl, inst->capability.hier_b.min, + inst->capability.hier_b.max, ctrl->step, + inst->capability.hier_b.min); + dprintk(VIDC_DBG, + "%s: Updated Range = %lld --> %lld Def value = %lld\n", + ctrl->name, ctrl->minimum, ctrl->maximum, + ctrl->default_value); + } + + ctrl = v4l2_ctrl_find(&inst->ctrl_handler, + V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS); + if (ctrl) { + v4l2_ctrl_modify_range(ctrl, inst->capability.hier_p.min, + inst->capability.hier_p.max, ctrl->step, + inst->capability.hier_p.min); + dprintk(VIDC_DBG, + "%s: Updated Range = %lld --> %lld Def value = %lld\n", + ctrl->name, ctrl->minimum, ctrl->maximum, + ctrl->default_value); + } +} + static void handle_session_init_done(enum hal_command_response cmd, void *data) { struct msm_vidc_cb_cmd_done *response = data; @@ -1052,8 +1094,16 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data) print_cap("ltr_count", &inst->capability.ltr_count); print_cap("mbs_per_sec_low_power", &inst->capability.mbs_per_sec_power_save); + print_cap("hybrid-hp", &inst->capability.hier_p_hybrid); signal_session_msg_receipt(cmd, inst); + + /* + * Update controls after informing session_init_done to avoid + * timeouts. 
+ */ + + msm_vidc_comm_update_ctrl_limits(inst); put_inst(inst); } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c index 65f70d901f2a611b987d15858469d98a26960630..3e269576c1262a0c28ac3d8e614a639714999877 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c @@ -225,7 +225,7 @@ void msm_dcvs_init_load(struct msm_vidc_inst *inst) core = inst->core; dcvs = &inst->dcvs; res = &core->resources; - dcvs->load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS); + dcvs->load = msm_comm_get_inst_load(inst, LOAD_CALC_IGNORE_TURBO_LOAD); num_rows = res->dcvs_tbl_size; table = res->dcvs_tbl; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index 022c776a6096d9224e045e4fcdaab578a975d1ac..a65e22c66e304e614d0067091acd8bba1667c316 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -22,6 +22,7 @@ #include "msm_vidc_res_parse.h" #include "venus_boot.h" #include "soc/qcom/secure_buffer.h" +#include "soc/qcom/cx_ipeak.h" enum clock_properties { CLOCK_PROP_HAS_SCALING = 1 << 0, @@ -171,6 +172,8 @@ void msm_vidc_free_platform_resources( msm_vidc_free_qdss_addr_table(res); msm_vidc_free_bus_vectors(res); msm_vidc_free_buffer_usage_table(res); + cx_ipeak_unregister(res->cx_ipeak_context); + res->cx_ipeak_context = NULL; } static int msm_vidc_load_reg_table(struct msm_vidc_platform_resources *res) @@ -1133,8 +1136,36 @@ int read_platform_resources_from_dt( of_property_read_u32(pdev->dev.of_node, "qcom,max-secure-instances", &res->max_secure_inst_count); + + res->cx_ipeak_context = cx_ipeak_register(pdev->dev.of_node, + "qcom,cx-ipeak-data"); + + if (IS_ERR(res->cx_ipeak_context)) { + rc = PTR_ERR(res->cx_ipeak_context); + if (rc == -EPROBE_DEFER) + dprintk(VIDC_INFO, + "cx-ipeak register failed. 
Deferring probe!"); + else + dprintk(VIDC_ERR, + "cx-ipeak register failed. rc: %d", rc); + + res->cx_ipeak_context = NULL; + goto err_register_cx_ipeak; + } else if (res->cx_ipeak_context) { + dprintk(VIDC_INFO, "cx-ipeak register successful"); + } else { + dprintk(VIDC_INFO, "cx-ipeak register not implemented"); + } + + of_property_read_u32(pdev->dev.of_node, + "qcom,clock-freq-threshold", + &res->clk_freq_threshold); + dprintk(VIDC_DBG, "cx ipeak threshold frequency = %u\n", + res->clk_freq_threshold); + return rc; +err_register_cx_ipeak: err_setup_legacy_cb: err_load_max_hw_load: msm_vidc_free_allowed_clocks_table(res); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h index 03b31d7fd9d13cbe4e1ce12b33b3bf22cb171240..3a329d98991858e1841ba2acdf28cf4ec9011915 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h @@ -17,6 +17,7 @@ #include #include #include +#include "soc/qcom/cx_ipeak.h" #define MAX_BUFFER_TYPES 32 struct platform_version_table { @@ -191,6 +192,8 @@ struct msm_vidc_platform_resources { uint32_t pm_qos_latency_us; uint32_t max_inst_count; uint32_t max_secure_inst_count; + uint32_t clk_freq_threshold; + struct cx_ipeak_client *cx_ipeak_context; }; static inline bool is_iommu_present(struct msm_vidc_platform_resources *res) diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index 777fb52b2201c3bd145d15d5267694f9d3d25631..4739fc999c825ea6dc69173344bfa199faf95aa9 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -1301,8 +1302,12 @@ static int venus_hfi_suspend(void *dev) } dprintk(VIDC_DBG, "Suspending Venus\n"); - rc = flush_delayed_work(&venus_hfi_pm_work); + flush_delayed_work(&venus_hfi_pm_work); + 
mutex_lock(&device->lock); + if (device->power_enabled) + rc = -EBUSY; + mutex_unlock(&device->lock); return rc; } @@ -1385,6 +1390,39 @@ static int __halt_axi(struct venus_hfi_device *device) return rc; } +static int __set_clk_rate(struct venus_hfi_device *device, + struct clock_info *cl, u64 rate) { + int rc = 0, rc1 = 0; + u64 toggle_freq = device->res->clk_freq_threshold; + struct cx_ipeak_client *ipeak = device->res->cx_ipeak_context; + struct clk *clk = cl->clk; + + if (device->clk_freq < toggle_freq && rate >= toggle_freq) { + rc1 = cx_ipeak_update(ipeak, true); + dprintk(VIDC_PROF, "Voting up: %d\n", rc); + } + + rc = clk_set_rate(clk, rate); + if (rc) + dprintk(VIDC_ERR, + "%s: Failed to set clock rate %llu %s: %d\n", + __func__, rate, cl->name, rc); + + if (device->clk_freq >= toggle_freq && rate < toggle_freq) { + rc1 = cx_ipeak_update(ipeak, false); + dprintk(VIDC_PROF, "Voting down: %d\n", rc); + } + + if (rc1) + dprintk(VIDC_ERR, + "cx_ipeak_update failed! ipeak %pK\n", ipeak); + + if (!rc) + device->clk_freq = rate; + + return rc; +} + static int __scale_clocks_cycles_per_mb(struct venus_hfi_device *device, struct vidc_clk_scale_data *data, unsigned long instant_bitrate) { @@ -1458,14 +1496,10 @@ get_clock_freq: if (!cl->has_scaling) continue; - device->clk_freq = rate; - rc = clk_set_rate(cl->clk, rate); - if (rc) { - dprintk(VIDC_ERR, - "%s: Failed to set clock rate %llu %s: %d\n", - __func__, rate, cl->name, rc); + rc = __set_clk_rate(device, cl, rate); + if (rc) return rc; - } + if (!strcmp(cl->name, "core_clk")) device->scaled_rate = rate; @@ -1506,14 +1540,11 @@ static int __scale_clocks_load(struct venus_hfi_device *device, int load, load, data, instant_bitrate); } - device->clk_freq = rate; - rc = clk_set_rate(cl->clk, rate); - if (rc) { - dprintk(VIDC_ERR, - "Failed to set clock rate %lu %s: %d\n", - rate, cl->name, rc); + + rc = __set_clk_rate(device, cl, rate); + if (rc) return rc; - } + if (!strcmp(cl->name, "core_clk")) 
device->scaled_rate = rate; @@ -3794,7 +3825,8 @@ static inline int __prepare_enable_clks(struct venus_hfi_device *device) * it to the lowest frequency possible */ if (cl->has_scaling) - clk_set_rate(cl->clk, clk_round_rate(cl->clk, 0)); + __set_clk_rate(device, cl, + clk_round_rate(cl->clk, 0)); if (cl->has_mem_retention) { rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_PERIPH); diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c index 491913778bcc5e5a209d0c53196f3bfd13c29154..2f52d66b4daec65cfb64a07244182d314ecb008e 100644 --- a/drivers/media/usb/cx231xx/cx231xx-avcore.c +++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c @@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, dev->board.agc_analog_digital_select_gpio, analog_or_digital); - return status; + if (status < 0) + return status; + + return 0; } int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 4a117a58c39a0de5521759b564209811245b3567..8389c162bc89824de930ca0cc42a36f01d094803 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = { .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, - .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */ + .agc_analog_digital_select_gpio = 0x1c, .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c index a2fd49b6be837cef33fe694bd6f48520e21312e7..19b0293312a0bbd02c1a78bdfd5ed8dba9682a28 100644 --- a/drivers/media/usb/cx231xx/cx231xx-core.c +++ b/drivers/media/usb/cx231xx/cx231xx-core.c @@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) break; case 
CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: + case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_HAUPPAUGE_EXETER: @@ -738,7 +739,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: - errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); + errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; default: break; diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 0d248ce02a9b2eb02b9da6fecbb6175319110574..ab58f0b9da5cd75c0736bab7c0a1f9d8b6a95781 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -677,7 +677,7 @@ static void dib0700_rc_urb_completion(struct urb *purb) struct dvb_usb_device *d = purb->context; struct dib0700_rc_response *poll_reply; enum rc_type protocol; - u32 uninitialized_var(keycode); + u32 keycode; u8 toggle; deb_info("%s()\n", __func__); @@ -719,7 +719,8 @@ static void dib0700_rc_urb_completion(struct urb *purb) poll_reply->nec.data == 0x00 && poll_reply->nec.not_data == 0xff) { poll_reply->data_state = 2; - break; + rc_repeat(d->rc_dev); + goto resubmit; } if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index a19b5c8b56ff79301d6f24d186ce54639efe6304..1a9e1e556706c46fae02f66be22b7690b0647e8f 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -507,9 +507,8 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap, if (dev->disconnected) return -ENODEV; - rc = rt_mutex_trylock(&dev->i2c_bus_lock); - if (rc < 0) - return rc; + if (!rt_mutex_trylock(&dev->i2c_bus_lock)) + return -EAGAIN; /* Switch I2C bus if needed */ if (bus != 
dev->cur_i2c_bus && diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index f23df4a9d8c56e460840082bfd5bb9ebcafce511..52b88e9e656b5f87e3573a7a0afb7e49da8cfb95 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -1624,7 +1624,7 @@ static int sd_start(struct gspca_dev *gspca_dev) static void sd_stopN(struct gspca_dev *gspca_dev) { - struct sd *sd = (struct sd *) gspca_dev; + struct sd *sd __maybe_unused = (struct sd *) gspca_dev; command_pause(gspca_dev); diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c index 39c96bb4c985fce504372d306f96e8fe71ea7775..0712b1bc90b4dd5f3f87796865837de66f30c595 100644 --- a/drivers/media/usb/gspca/konica.c +++ b/drivers/media/usb/gspca/konica.c @@ -243,7 +243,7 @@ static int sd_start(struct gspca_dev *gspca_dev) static void sd_stopN(struct gspca_dev *gspca_dev) { - struct sd *sd = (struct sd *) gspca_dev; + struct sd *sd __maybe_unused = (struct sd *) gspca_dev; konica_stream_off(gspca_dev); #if IS_ENABLED(CONFIG_INPUT) diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c index e2cc4e5a0ccb77dce361f3cdc20517e7283394d1..bb52fc1fe598c66982a914cbb351fcb46b24d743 100644 --- a/drivers/media/usb/gspca/t613.c +++ b/drivers/media/usb/gspca/t613.c @@ -837,7 +837,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { - struct sd *sd = (struct sd *) gspca_dev; + struct sd *sd __maybe_unused = (struct sd *) gspca_dev; int pkt_type; if (data[0] == 0x5a) { diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 1105db2355d200c454ee13b4baea9588bc2aa05e..83bfb1659abe4717a050dac021d831754ef33557 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) int rc; if (!host->req) { + pm_runtime_get_sync(ms_dev(host)); do { rc = 
memstick_next_req(msh, &host->req); dev_dbg(ms_dev(host), "next req %d\n", rc); @@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) host->req->error); } } while (!rc); + pm_runtime_put(ms_dev(host)); } } @@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", __func__, param, value); + pm_runtime_get_sync(ms_dev(host)); mutex_lock(&ucr->dev_mutex); err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); @@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, } out: mutex_unlock(&ucr->dev_mutex); + pm_runtime_put(ms_dev(host)); /* power-on delay */ if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) @@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host) int err; for (;;) { + pm_runtime_get_sync(ms_dev(host)); mutex_lock(&ucr->dev_mutex); /* Check pending MS card changes */ @@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host) } poll_again: + pm_runtime_put(ms_dev(host)); if (host->eject) break; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2346635ccff3929994b646526323e7441f2eae81..3087618b1b26fe616e588ca8b7cc7b543756c90a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1474,6 +1474,7 @@ config MFD_WM8350 config MFD_WM8350_I2C bool "Wolfson Microelectronics WM8350 with I2C" select MFD_WM8350 + select REGMAP_I2C depends on I2C=y help The WM8350 is an integrated audio and power management diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c index 06c205868573e8d7d7b98e1c142156a2cc572454..c216c3a55793d9044f8c509aa1f097ca46332828 100644 --- a/drivers/mfd/atmel-hlcdc.c +++ b/drivers/mfd/atmel-hlcdc.c @@ -50,8 +50,9 @@ static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg, if (reg <= ATMEL_HLCDC_DIS) { u32 status; - readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status, - !(status & ATMEL_HLCDC_SIP), 1, 100); + 
readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR, + status, !(status & ATMEL_HLCDC_SIP), + 1, 100); } writel(val, hregmap->regs + reg); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 88e80ec772f6eebb0045cbea0a1ddff84eeb44fc..fe89e5e337d5461b51b2c9ba4fbb971bf858a52a 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -494,9 +494,6 @@ int intel_lpss_suspend(struct device *dev) for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) lpss->priv_ctx[i] = readl(lpss->priv + i * 4); - /* Put the device into reset state */ - writel(0, lpss->priv + LPSS_PRIV_RESETS); - return 0; } EXPORT_SYMBOL_GPL(intel_lpss_suspend); diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 60b60dc63dddbc610d0ac1cf02111066b4af5095..022c9374ce8bf1df02b481ac62daba32919b7e65 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -354,6 +354,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) clones[i]); } + put_device(dev); + return 0; } EXPORT_SYMBOL(mfd_clone_cell); diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index dbd907d7170ebe990cb63fede374624ea03b6fd4..691dab791f7af81d91ed3892a945c74c1f0d3fa1 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c @@ -46,9 +46,6 @@ static void rtsx_usb_sg_timed_out(unsigned long data) dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__); usb_sg_cancel(&ucr->current_sg); - - /* we know the cancellation is caused by time-out */ - ucr->current_sg.status = -ETIMEDOUT; } static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr, @@ -67,12 +64,15 @@ static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr, ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout); add_timer(&ucr->sg_timer); usb_sg_wait(&ucr->current_sg); - del_timer_sync(&ucr->sg_timer); + if (!del_timer_sync(&ucr->sg_timer)) + ret = -ETIMEDOUT; + else + ret = ucr->current_sg.status; if (act_len) *act_len = ucr->current_sg.bytes; - return 
ucr->current_sg.status; + return ret; } int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 222367cc8c815ba9214e9e9c25a8482f7967a772..524660510599cb43daa4b3fbb2f34b60da7b39ef 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, if (copy_from_user(sgl->lpage, user_addr + user_size - sgl->lpage_size, sgl->lpage_size)) { rc = -EFAULT; - goto err_out1; + goto err_out2; } } return 0; + err_out2: + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, + sgl->lpage_dma_addr); + sgl->lpage = NULL; + sgl->lpage_dma_addr = 0; err_out1: __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, sgl->fpage_dma_addr); + sgl->fpage = NULL; + sgl->fpage_dma_addr = 0; err_out: __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, sgl->sgl_dma_addr); + sgl->sgl = NULL; + sgl->sgl_dma_addr = 0; + sgl->sgl_size = 0; return -ENOMEM; } diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 020de5919c2111a0711e69e79d64f85bdbe1de0e..bdc7fcd80eca649a88e223c8f1bcddd0a1de3a41 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -151,7 +151,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = 0; bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + if (bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; goto err; diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index a77643954523aec43de661a491018d9b7c39df10..e598382317030283d4fb170924e640440e1d2a82 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -144,7 +144,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) mutex_lock(&bus->device_lock); if (!mei_cl_is_connected(cl)) { - rets = -EBUSY; + rets = 
-ENODEV; goto out; } } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index a8a68acd326752980e8b31a61c8b0b9a891018c8..a2661381ddfc6389e0391b11aab0c8faf028ef02 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -121,6 +121,10 @@ #define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */ #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ + +#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ +#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 25b1997a62cbc38626ffb3b0166efe649aed9e08..36333750c512533d7cee2d1f2dbf648c1efb6940 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1258,8 +1258,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) static bool mei_me_fw_type_sps(struct pci_dev *pdev) { u32 reg; - /* Read ME FW Status check for SPS Firmware */ - pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); + unsigned int devfn; + + /* + * Read ME FW Status register to check for SPS Firmware + * The SPS FW is only signaled in pci function 0 + */ + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®); /* if bits [19:16] = 15, running SPS Firmware */ return (reg & 0xf0000) == 0xf0000; } diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index bae680c648ffc9dcd188eefac028028f4f0a2d2f..396d75d9fb11cce88647e8343a7f595a465026c5 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -972,11 +972,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) hisr = mei_txe_br_reg_read(hw, HISR_REG); aliveness = mei_txe_aliveness_get(dev); - if (hhisr & IPC_HHIER_SEC && aliveness) + if (hhisr & IPC_HHIER_SEC && aliveness) { ipc_isr = mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG); - else + } else { ipc_isr = 0; + hhisr &= 
~IPC_HHIER_SEC; + } generated = generated || (hisr & HISR_INT_STS_MSK) || diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 80f9afcb13823282a9859e08a96c36f55901efa6..4ef189a7a2fb681d4dd5647edcd777698877bddb 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -207,7 +207,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, mutex_lock(&dev->device_lock); if (!mei_cl_is_connected(cl)) { - rets = -EBUSY; + rets = -ENODEV; goto out; } } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 27678d8154e074ea698952832aac64bb843032f9..01e20384ac4476b2e97a15233cea73d29849e51f 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -84,8 +84,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, /* required last entry */ {0, } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 60b02f28a8ffb71bfd569f6ca7e75c03e3726885..fb5ffde61e8b4a0a76882e93e98393e193bffed7 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2848,7 +2848,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_blk_data *md = mq->data; struct mmc_packed *packed = mqrq->packed; bool do_rel_wr, do_data_tag; - u32 *packed_cmd_hdr; + __le32 *packed_cmd_hdr; u8 hdr_blocks; u8 i = 1; @@ -4114,7 +4114,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { - if (mmc_card_mmc(card) || + if ((mmc_card_mmc(card) && + card->csd.mmca_vsn >= 
CSD_SPEC_VER_3) || (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT)) md->flags |= MMC_BLK_CMD23; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 253979b51c844f0fcde5c935d6245e5ea221056d..505712f0e1b0dda689f2fb7d5fd6ac6eecaa5cdb 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -25,7 +25,7 @@ enum mmc_packed_type { struct mmc_packed { struct list_head list; - u32 cmd_hdr[1024]; + __le32 cmd_hdr[1024]; unsigned int blocks; u8 nr_entries; u8 retries; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 26e57f3c52284d5380cb20be0c4bf1506252b782..f79e8377a2eed30076645475678560211ea9fea1 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -948,6 +948,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) pr_debug("%s: %d bytes transferred: %d\n", mmc_hostname(host), mrq->data->bytes_xfered, mrq->data->error); +#ifdef CONFIG_BLOCK if (mrq->lat_hist_enabled) { ktime_t completion; u_int64_t delta_us; @@ -959,6 +960,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) (mrq->data->flags & MMC_DATA_READ), delta_us); } +#endif trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data); } @@ -1709,11 +1711,13 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, } if (!err && areq) { +#ifdef CONFIG_BLOCK if (host->latency_hist_enabled) { areq->mrq->io_start = ktime_get(); areq->mrq->lat_hist_enabled = 1; } else areq->mrq->lat_hist_enabled = 0; +#endif trace_mmc_blk_rw_start(areq->mrq->cmd->opcode, areq->mrq->cmd->arg, areq->mrq->data); @@ -4415,6 +4419,7 @@ static void __exit mmc_exit(void) destroy_workqueue(workqueue); } +#ifdef CONFIG_BLOCK static ssize_t latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -4462,6 +4467,7 @@ mmc_latency_hist_sysfs_exit(struct mmc_host *host) { device_remove_file(&host->class_dev, &dev_attr_latency_hist); } +#endif subsys_initcall(mmc_init); module_exit(mmc_exit); diff --git 
a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 333f691a73c7d5fb66fae444fd8a860843927fae..e8294502a701768dd67783fc531deb9b4520f43c 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -883,7 +883,9 @@ int mmc_add_host(struct mmc_host *host) pr_err("%s: failed to create sysfs group with err %d\n", __func__, err); +#ifdef CONFIG_BLOCK mmc_latency_hist_sysfs_init(host); +#endif mmc_start_host(host); if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) @@ -915,7 +917,9 @@ void mmc_remove_host(struct mmc_host *host) sysfs_remove_group(&host->parent->kobj, &dev_attr_grp); sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp); +#ifdef CONFIG_BLOCK mmc_latency_hist_sysfs_exit(host); +#endif device_del(&host->class_dev); diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 7e1d13b68b062b4199e65d914c7cac464e2b0daf..7dcfb1d5034fba36f02405807387a049ba37dd28 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -59,12 +59,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev, host->pdata = pdev->dev.platform_data; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - /* Get registers' physical base address */ - host->phy_regs = (void *)(regs->start); host->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(host->regs)) return PTR_ERR(host->regs); + /* Get registers' physical base address */ + host->phy_regs = regs->start; + platform_set_drvdata(pdev, host); return dw_mci_probe(host); } diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 7a6cedbe48a837e7fd5800c9fe1da131d569df51..fb204ee6ff89289f57920cafdc54434defc08c5a 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -699,7 +699,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, int ret = 0; /* Set external dma config: burst size, burst width */ - cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset); + cfg.dst_addr = host->phy_regs + fifo_offset; 
cfg.src_addr = cfg.dst_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index d839147e591d24f5d5d0a97d389ea04ffbaa9883..44ecebd1ea8c1834a5d311fbe36ddfc8383e3ecd 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mmc); + spin_lock_init(&host->lock); + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, dev_name(&pdev->dev), host); if (ret) goto out_free_dma; - spin_lock_init(&host->lock); - ret = mmc_add_host(mmc); if (ret) goto out_free_dma; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 28a057fae0a196092c113af946bc5ef32629f28a..72bbb12fb938022da4e9720af05c305a415052f7 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -798,14 +798,16 @@ static int pxamci_probe(struct platform_device *pdev) gpio_direction_output(gpio_power, host->pdata->gpio_power_invert); } - if (gpio_is_valid(gpio_ro)) + if (gpio_is_valid(gpio_ro)) { ret = mmc_gpio_request_ro(mmc, gpio_ro); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); - goto out; - } else { - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? - 0 : MMC_CAP2_RO_ACTIVE_HIGH; + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", + gpio_ro); + goto out; + } else { + mmc->caps2 |= host->pdata->gpio_card_ro_invert ? 
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH; + } } if (gpio_is_valid(gpio_cd)) diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 6c71fc9f76c7ecab19a5a7ac24a53ab38a6125fd..da9f71b8deb0b3fa423437d16cbae53658fd4c50 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) dev_dbg(sdmmc_dev(host), "%s\n", __func__); mutex_lock(&ucr->dev_mutex); - if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) { - mutex_unlock(&ucr->dev_mutex); - return; - } - sd_set_power_mode(host, ios->power_mode); sd_set_bus_width(host, ios->bus_width); sd_set_timing(host, ios->timing, &host->ddr_mode); @@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work) container_of(work, struct rtsx_usb_sdmmc, led_work); struct rtsx_ucr *ucr = host->ucr; + pm_runtime_get_sync(sdmmc_dev(host)); mutex_lock(&ucr->dev_mutex); if (host->led.brightness == LED_OFF) @@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work) rtsx_usb_turn_on_led(ucr); mutex_unlock(&ucr->dev_mutex); + pm_runtime_put(sdmmc_dev(host)); } #endif diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 43853306a6bbe86f1420cc9dd87902a429fc325e..21d2a4b8f7aec3b9a4a75475b70322f943d5aa20 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -774,7 +774,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) * host->clock is in Hz. target_timeout is in us. * Hence, us = 1000000 * cycles / Hz. Round up. 
*/ - val = 1000000 * data->timeout_clks; + val = 1000000ULL * data->timeout_clks; if (do_div(val, host->clock)) target_timeout++; target_timeout += val; diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c index 744ca5cacc9b2e8b6f10189dc169f29426cad9e9..f9fa3fad728e5e3b0687c5affadd7cf8d0b129b0 100644 --- a/drivers/mtd/maps/pmcmsp-flash.c +++ b/drivers/mtd/maps/pmcmsp-flash.c @@ -75,15 +75,15 @@ static int __init init_msp_flash(void) printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); - msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); + msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL); if (!msp_flash) return -ENOMEM; - msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); + msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL); if (!msp_parts) goto free_msp_flash; - msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); + msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL); if (!msp_maps) goto free_msp_parts; diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 142fc3d794637366cc4a4996b5e07f882d14b7a2..784c6e1a0391e92c90723e698d8bc148fe3e4916 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, info->mtd = mtd_concat_create(cdev, info->num_subdev, plat->name); - if (info->mtd == NULL) + if (info->mtd == NULL) { ret = -ENXIO; + goto err; + } } info->mtd->dev.parent = &pdev->dev; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index c72313d66cf6a0d3034ba82d519a3bd315d21c59..bc054a5ed7f8cb6b419b3df884915ce584e9dbdd 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode) unsigned long flags; u32 val; + /* Reset ECC hardware */ + davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET); + 
spin_lock_irqsave(&davinci_nand_lock, flags); /* Start 4-bit ECC calculation for read/write */ diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 990898b9dc7289f881ecf357573bbc1d79bbf6ad..bba7dd1b5ebf19f03be99c1aeb0a6f291e7fbae2 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -513,10 +513,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, unsigned long long ec = be64_to_cpu(ech->ec); unmap_peb(ai, pnum); dbg_bld("Adding PEB to free: %i", pnum); + if (err == UBI_IO_FF_BITFLIPS) - add_aeb(ai, free, pnum, ec, 1); - else - add_aeb(ai, free, pnum, ec, 0); + scrub = 1; + + add_aeb(ai, free, pnum, ec, scrub); continue; } else if (err == 0 || err == UBI_IO_BITFLIPS) { dbg_bld("Found non empty PEB:%i in pool", pnum); @@ -748,11 +749,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, fmvhdr->vol_type, be32_to_cpu(fmvhdr->last_eb_bytes)); - if (!av) - goto fail_bad; - if (PTR_ERR(av) == -EINVAL) { - ubi_err(ubi, "volume (ID %i) already exists", - fmvhdr->vol_id); + if (IS_ERR(av)) { + if (PTR_ERR(av) == -EEXIST) + ubi_err(ubi, "volume (ID %i) already exists", + fmvhdr->vol_id); + goto fail_bad; } diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 22a30bbaa1bd3d87de9dacf42b1bb9fdc66704ee..a907287860007cddef06ca293b1072745f9d9d0a 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -663,7 +663,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown) { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; - int vol_id = -1, lnum = -1; + int erase = 0, keep = 0, vol_id = -1, lnum = -1; #ifdef CONFIG_MTD_UBI_FASTMAP int anchor = wrk->anchor; #endif @@ -799,6 +799,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, e1->pnum); scrubbing = 1; goto out_not_moved; + } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) { + /* + * While a full scan would detect interrupted erasures + * at 
attach time we can face them here when attached from + * Fastmap. + */ + dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure", + e1->pnum); + erase = 1; + goto out_not_moved; } ubi_err(ubi, "error %d while reading VID header from PEB %d", @@ -834,6 +844,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * Target PEB had bit-flips or write error - torture it. */ torture = 1; + keep = 1; goto out_not_moved; } @@ -930,7 +941,7 @@ out_not_moved: ubi->erroneous_peb_count += 1; } else if (scrubbing) wl_tree_add(e1, &ubi->scrub); - else + else if (keep) wl_tree_add(e1, &ubi->used); if (dst_leb_clean) { wl_tree_add(e2, &ubi->free); @@ -951,6 +962,12 @@ out_not_moved: goto out_ro; } + if (erase) { + err = do_sync_erase(ubi, e1, vol_id, lnum, 1); + if (err) + goto out_ro; + } + mutex_unlock(&ubi->move_mutex); return 0; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b3d70a7a52620ff8d228b3b1a96a7f89fab081b3..5dca77e0ffed19879311b2d183b797a78927f1ec 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1317,9 +1317,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) slave_dev->name); } - /* already enslaved */ - if (slave_dev->flags & IFF_SLAVE) { - netdev_dbg(bond_dev, "Error: Device was already enslaved\n"); + /* already in-use? 
*/ + if (netdev_is_rx_handler_busy(slave_dev)) { + netdev_err(bond_dev, + "Error: Device is in use and cannot be enslaved\n"); return -EBUSY; } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index ad535a854e5cf28309a71547b49e411db85b385b..eab132778e67241cc5ba2a436af2c338cfef61b9 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -471,9 +472,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb); /* * CAN device restart for bus-off recovery */ -static void can_restart(unsigned long data) +static void can_restart(struct net_device *dev) { - struct net_device *dev = (struct net_device *)data; struct can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; @@ -513,6 +513,14 @@ restart: netdev_err(dev, "Error %d during restart", err); } +static void can_restart_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); + + can_restart(priv->dev); +} + int can_restart_now(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -526,8 +534,8 @@ int can_restart_now(struct net_device *dev) if (priv->state != CAN_STATE_BUS_OFF) return -EBUSY; - /* Runs as soon as possible in the timer context */ - mod_timer(&priv->restart_timer, jiffies); + cancel_delayed_work_sync(&priv->restart_work); + can_restart(dev); return 0; } @@ -548,8 +556,8 @@ void can_bus_off(struct net_device *dev) netif_carrier_off(dev); if (priv->restart_ms) - mod_timer(&priv->restart_timer, - jiffies + (priv->restart_ms * HZ) / 1000); + schedule_delayed_work(&priv->restart_work, + msecs_to_jiffies(priv->restart_ms)); } EXPORT_SYMBOL_GPL(can_bus_off); @@ -658,6 +666,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) return NULL; priv = netdev_priv(dev); + priv->dev = dev; if (echo_skb_max) { priv->echo_skb_max = 
echo_skb_max; @@ -667,7 +676,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) priv->state = CAN_STATE_STOPPED; - init_timer(&priv->restart_timer); + INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); return dev; } @@ -748,8 +757,6 @@ int open_candev(struct net_device *dev) if (!netif_carrier_ok(dev)) netif_carrier_on(dev); - setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); - return 0; } EXPORT_SYMBOL_GPL(open_candev); @@ -764,7 +771,7 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - del_timer_sync(&priv->restart_timer); + cancel_delayed_work_sync(&priv->restart_work); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 41c0fc9f3b1465d9dbbde6b5c7798b1e40cf43ef..16f7cadda5c32b430c0d25aea9746a93a063bd39 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device) struct flexcan_priv *priv = netdev_priv(dev); int err; - err = flexcan_chip_disable(priv); - if (err) - return err; - if (netif_running(dev)) { + err = flexcan_chip_disable(priv); + if (err) + return err; netif_stop_queue(dev); netif_device_detach(dev); } @@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); + int err; priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); + err = flexcan_chip_enable(priv); + if (err) + return err; } - return flexcan_chip_enable(priv); + return 0; } static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 6f946fedbb77c1943770b31665a0f5e1e6e4d889..0864f05633a29b2b865da5502a6f516253846729 100644 --- 
a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1137,6 +1137,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct ethtool_eee *p = &priv->port_sts[port].eee; u32 id_mode_dis = 0, port_mode; const char *str = NULL; u32 reg; @@ -1211,6 +1212,9 @@ force_link: reg |= DUPLX_MODE; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + + if (!phydev->is_pseudo_fixed_link) + p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); } static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 6bba1c98d764cf2b4222c82211677e30c448a4f0..c7994e37228438b664098bad041a5ae4f808fd01 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -187,8 +187,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \ u32 mask) \ { \ - intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ priv->irq##which##_mask &= ~(mask); \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ } \ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ u32 mask) \ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index c32f5d32f81187d5ca4e6baba780f8eca142d632..b56c9c581359ef56618ff9554f1d1902be7d5ce9 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -314,6 +314,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, u32 ctl; ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); + + /* preserve ONLY bits 16-17 from current hardware value */ + ctl &= BGMAC_DMA_RX_ADDREXT_MASK; + if (bgmac->core->id.rev >= 4) { ctl &= ~BGMAC_DMA_RX_BL_MASK; ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; @@ -324,7 +328,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, 
ctl &= ~BGMAC_DMA_RX_PT_MASK; ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; } - ctl &= BGMAC_DMA_RX_ADDREXT_MASK; ctl |= BGMAC_DMA_RX_ENABLE; ctl |= BGMAC_DMA_RX_PARITY_DISABLE; ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2e611dc5f16210393852110c7dda5dadf4dfc560..1c8123816745b81bc479ef52c3af123962d5319d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14819,6 +14819,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev, } offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); + if (!offset) { + DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); + goto out; + } DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); /* Read the table contents from nvram */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 0fb3f8de88e9e8f897ad16abc70db06b54c64bcb..91627561c58d0c2108b0b2716befe0717c0f72c0 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1168,6 +1168,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; unsigned int pkts_compl = 0; @@ -1195,7 +1196,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, pkts_compl++; dev->stats.tx_packets++; dev->stats.tx_bytes += tx_cb_ptr->skb->len; - dma_unmap_single(&dev->dev, + dma_unmap_single(kdev, dma_unmap_addr(tx_cb_ptr, dma_addr), dma_unmap_len(tx_cb_ptr, dma_len), DMA_TO_DEVICE); @@ -1203,7 +1204,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { dev->stats.tx_bytes += dma_unmap_len(tx_cb_ptr, dma_len); - 
dma_unmap_page(&dev->dev, + dma_unmap_page(kdev, dma_unmap_addr(tx_cb_ptr, dma_addr), dma_unmap_len(tx_cb_ptr, dma_len), DMA_TO_DEVICE); @@ -1754,6 +1755,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) { + struct device *kdev = &priv->pdev->dev; struct enet_cb *cb; int i; @@ -1761,7 +1763,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) cb = &priv->rx_cbs[i]; if (dma_unmap_addr(cb, dma_addr)) { - dma_unmap_single(&priv->dev->dev, + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), priv->rx_buf_len, DMA_FROM_DEVICE); dma_unmap_addr_set(cb, dma_addr, 0); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ca5ac5d6f4e6ca6d87ce029b7f4e06e8653e2bac..49056c33be74d05e19d2a60f6003b3c51fccbef0 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18142,14 +18142,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We needn't recover from permanent error */ - if (state == pci_channel_io_frozen) - tp->pcierr_recovery = true; - /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) goto done; + /* We needn't recover from permanent error */ + if (state == pci_channel_io_frozen) + tp->pcierr_recovery = true; + tg3_phy_stop(tp); tg3_netif_stop(tp); @@ -18246,7 +18246,7 @@ static void tg3_io_resume(struct pci_dev *pdev) rtnl_lock(); - if (!netif_running(netdev)) + if (!netdev || !netif_running(netdev)) goto done; tg3_full_lock(tp, 0); diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index afb10e326b4fc46043b4d3f4cddb7d17f1cc341c..fab35a5938987c757f5faa2cbf808d7f28c05922 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -170,7 +170,6 @@ #define NIC_QSET_SQ_0_7_DOOR (0x010838) #define NIC_QSET_SQ_0_7_STATUS 
(0x010840) #define NIC_QSET_SQ_0_7_DEBUG (0x010848) -#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860) #define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) #define NIC_QSET_RBDR_0_1_CFG (0x010C00) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index a12b2e38cf61221fc4de44f6395eb60de76825a5..ff1d777f3ed98af0a33c5dcc997ed9edb62254a8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -380,7 +380,10 @@ static void nicvf_get_regs(struct net_device *dev, p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q); p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q); p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q); - p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q); + /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which + * produces bus errors when read + */ + p[i++] = 0; p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q); reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3); p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f6147ffc7fbca76f4f6f512caa9e9612a32cf00a..ab716042bdd2245d9d05d4079430f573d88fe9a2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -944,11 +944,11 @@ fec_restart(struct net_device *ndev) * enet-mac reset will reset mac address registers too, * so need to reconfigure it. */ - if (fep->quirks & FEC_QUIRK_ENET_MAC) { - memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); - writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); - writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); - } + memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); + writel((__force u32)cpu_to_be32(temp_mac[0]), + fep->hwp + FEC_ADDR_LOW); + writel((__force u32)cpu_to_be32(temp_mac[1]), + fep->hwp + FEC_ADDR_HIGH); /* Clear any outstanding interrupt. 
*/ writel(0xffffffff, fep->hwp + FEC_IEVENT); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2d74c6e4d7b618fe3c5dd44d3ae93c2a433d3491..1cf715c72683b1dc2c7c4f758b136061e4bffb82 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -302,13 +302,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; - u16 len = le16_to_cpu(aq_desc->datalen); + u16 len; u8 *buf = (u8 *)buffer; u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; + len = le16_to_cpu(aq_desc->datalen); + i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2215bebe208e1150c8454d4c0b03474ba5ab5df6..4edbab6ca7ef42ff8820ee024a2381ea9e03d9e6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8595,7 +8595,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, - nlflags, 0, 0, filter_mask, NULL); + 0, 0, nlflags, filter_mask, NULL); } #define I40E_MAX_TUNNEL_HDR_LEN 80 @@ -10853,6 +10853,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, dev_info(&pdev->dev, "%s: error %d\n", __func__, error); + if (!pf) { + dev_info(&pdev->dev, + "Cannot recover - error happened during device probe\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + /* shutdown all operations */ if (!test_bit(__I40E_SUSPENDED, &pf->state)) { rtnl_lock(); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5606a043063e3a83c5532ea0ebccbaf6fca77259..4b62aa1f9ff881ce9e4855f40c11974473bf491f 
100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); static void sky2_shutdown(struct pci_dev *pdev) { + struct sky2_hw *hw = pci_get_drvdata(pdev); + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *ndev = hw->dev[port]; + + rtnl_lock(); + if (netif_running(ndev)) { + dev_close(ndev); + netif_device_detach(ndev); + } + rtnl_unlock(); + } sky2_suspend(&pdev->dev); pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 67e9633ea9c7cfa6aa2a6a93402d10a5d60da0ba..232191417b93ffad77f360c04236d128234765a2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2282,7 +2282,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) struct mlx4_en_dev *mdev = en_priv->mdev; u64 mac_u64 = mlx4_mac_to_u64(mac); - if (!is_valid_ether_addr(mac)) + if (is_multicast_ether_addr(mac)) return -EINVAL; return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 037fc4cdf5af675e811f5f50950816d8dfeaff51..cc199063612ac97bf504680707399f8d7a7e8dd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) return cmd->cmd_buf + (idx << cmd->log_stride); } -static u8 xor8_buf(void *buf, int len) +static u8 xor8_buf(void *buf, size_t offset, int len) { u8 *ptr = buf; u8 sum = 0; int i; + int end = len + offset; - for (i = 0; i < len; i++) + for (i = offset; i < end; i++) sum ^= ptr[i]; return sum; @@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, 
int len) static int verify_block_sig(struct mlx5_cmd_prot_block *block) { - if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); + int xor_len = sizeof(*block) - sizeof(block->data) - 1; + + if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) return -EINVAL; - if (xor8_buf(block, sizeof(*block)) != 0xff) + if (xor8_buf(block, 0, sizeof(*block)) != 0xff) return -EINVAL; return 0; } -static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, - int csum) +static void calc_block_sig(struct mlx5_cmd_prot_block *block) { - block->token = token; - if (csum) { - block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - - sizeof(block->data) - 2); - block->sig = ~xor8_buf(block, sizeof(*block) - 1); - } + int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2; + size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); + + block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len); + block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1); } -static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) +static void calc_chain_sig(struct mlx5_cmd_msg *msg) { struct mlx5_cmd_mailbox *next = msg->next; - - while (next) { - calc_block_sig(next->buf, token, csum); + int size = msg->len; + int blen = size - min_t(int, sizeof(msg->first.data), size); + int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) + / MLX5_CMD_DATA_BLOCK_SIZE; + int i = 0; + + for (i = 0; i < n && next; i++) { + calc_block_sig(next->buf); next = next->next; } } static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) { - ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); - calc_chain_sig(ent->in, ent->token, csum); - calc_chain_sig(ent->out, ent->token, csum); + ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); + if (csum) { + calc_chain_sig(ent->in); + calc_chain_sig(ent->out); + } } static void poll_timeout(struct mlx5_cmd_work_ent *ent) @@ -222,12 +231,17 @@ static 
int verify_signature(struct mlx5_cmd_work_ent *ent) struct mlx5_cmd_mailbox *next = ent->out->next; int err; u8 sig; + int size = ent->out->len; + int blen = size - min_t(int, sizeof(ent->out->first.data), size); + int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) + / MLX5_CMD_DATA_BLOCK_SIZE; + int i = 0; - sig = xor8_buf(ent->lay, sizeof(*ent->lay)); + sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); if (sig != 0xff) return -EINVAL; - while (next) { + for (i = 0; i < n && next; i++) { err = verify_block_sig(next->buf); if (err) return err; @@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work) spin_unlock_irqrestore(&cmd->alloc_lock, flags); } - ent->token = alloc_token(cmd); cmd->ent_arr[ent->idx] = ent; lay = get_inst(cmd, ent->idx); ent->lay = lay; @@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out) static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t callback, - void *context, int page_queue, u8 *status) + void *context, int page_queue, u8 *status, + u8 token) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; @@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (IS_ERR(ent)) return PTR_ERR(ent); + ent->token = token; + if (!callback) init_completion(&ent->done); @@ -844,7 +860,8 @@ static const struct file_operations fops = { .write = dbg_write, }; -static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, + u8 token) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_mailbox *next; @@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) memcpy(block->data, from, copy); from += copy; size -= copy; + block->token = token; next = next->next; } @@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev, } static struct 
mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, - gfp_t flags, int size) + gfp_t flags, int size, + u8 token) { struct mlx5_cmd_mailbox *tmp, *head = NULL; struct mlx5_cmd_prot_block *block; @@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, tmp->next = head; block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); block->block_num = cpu_to_be32(n - i - 1); + block->token = token; head = tmp; } msg->next = head; @@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, } if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); return msg; } @@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int err; u8 status = 0; u32 drv_synd; + u8 token; if (pci_channel_offline(dev->pdev) || dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { @@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, return err; } - err = mlx5_copy_to_msg(inb, in, in_size); + token = alloc_token(&dev->cmd); + + err = mlx5_copy_to_msg(inb, in, in_size, token); if (err) { mlx5_core_warn(dev, "err %d\n", err); goto out_in; } - outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, - pages_queue, &status); + pages_queue, &status, token); if (err) goto out_out; @@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev) INIT_LIST_HEAD(&cmd->cache.med.head); for (i = 0; i < NUM_LONG_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; @@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev) } for (i = 0; i < 
NUM_MED_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 36fc9427418f325b433349e08700801387b412ba..480f3dae0780deeea215848c44a1126a6ef94d15 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -832,7 +832,7 @@ static struct sh_eth_cpu_data r7s72100_data = { .ecsr_value = ECSR_ICD, .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xff7f009f, + .eesipr_value = 0xe77f009f, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0e2fc1a844ab2b2453681750368e1cf9d288e779..23a0388100834cfa59223788aee289bae0c42053 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -540,7 +540,7 @@ static inline void smc_rcv(struct net_device *dev) #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) #else -#define smc_special_trylock(lock, flags) (flags == flags) +#define smc_special_trylock(lock, flags) ((void)flags, true) #define smc_special_lock(lock, flags) do { flags = 0; } while (0) #define smc_special_unlock(lock, flags) do { flags = 0; } while (0) #endif @@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev) if (pd) { memcpy(&lp->cfg, pd, sizeof(lp->cfg)); lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); + + if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) { + dev_err(&pdev->dev, + "at least one of 8-bit or 16-bit access support is required.\n"); + ret = -ENXIO; + goto out_free_netdev; + } } #if IS_BUILTIN(CONFIG_OF) diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 
a3c129e1e40ac1e2c6785ff30fd4af41cb68ecb2..29df0465daf46c29780e0834f92c13b7c3f00155 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -36,6 +36,27 @@ #include #include +/* + * Any 16-bit access is performed with two 8-bit accesses if the hardware + * can't do it directly. Most registers are 16-bit so those are mandatory. + */ +#define SMC_outw_b(x, a, r) \ + do { \ + unsigned int __val16 = (x); \ + unsigned int __reg = (r); \ + SMC_outb(__val16, a, __reg); \ + SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \ + } while (0) + +#define SMC_inw_b(a, r) \ + ({ \ + unsigned int __val16; \ + unsigned int __reg = r; \ + __val16 = SMC_inb(a, __reg); \ + __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \ + __val16; \ + }) + /* * Define your architecture specific bus configuration parameters here. */ @@ -55,10 +76,30 @@ #define SMC_IO_SHIFT (lp->io_shift) #define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inw(a, r) \ + ({ \ + unsigned int __smc_r = r; \ + SMC_16BIT(lp) ? readw((a) + __smc_r) : \ + SMC_8BIT(lp) ? 
SMC_inw_b(a, __smc_r) : \ + ({ BUG(); 0; }); \ + }) + #define SMC_inl(a, r) readl((a) + (r)) #define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) \ + do { \ + unsigned int __v = v, __smc_r = r; \ + if (SMC_16BIT(lp)) \ + __SMC_outw(__v, a, __smc_r); \ + else if (SMC_8BIT(lp)) \ + SMC_outw_b(__v, a, __smc_r); \ + else \ + BUG(); \ + } while (0) + #define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l) +#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) @@ -66,7 +107,7 @@ #define SMC_IRQ_FLAGS (-1) /* from resource */ /* We actually can't write halfwords properly if not word aligned */ -static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) +static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg) { if ((machine_is_mainstone() || machine_is_stargate2() || machine_is_pxa_idp()) && reg & 2) { @@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, #if ! SMC_CAN_USE_16BIT -/* - * Any 16-bit access is performed with two 8-bit accesses if the hardware - * can't do it directly. Most registers are 16-bit so those are mandatory. 
- */ -#define SMC_outw(x, ioaddr, reg) \ - do { \ - unsigned int __val16 = (x); \ - SMC_outb( __val16, ioaddr, reg ); \ - SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ - } while (0) -#define SMC_inw(ioaddr, reg) \ - ({ \ - unsigned int __val16; \ - __val16 = SMC_inb( ioaddr, reg ); \ - __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ - __val16; \ - }) - +#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg) +#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg) #define SMC_insw(a, r, p, l) BUG() #define SMC_outsw(a, r, p, l) BUG() diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 69e31e2a68fcc5782f8553eabbfe588158765d17..f0961cbaf87ed610371db39d24b40da0d2488888 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head, skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); @@ -815,7 +815,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs4 = geneve->sock4; struct rtable *rt = NULL; - const struct iphdr *iip; /* interior IP header */ int err = -EINVAL; struct flowi4 fl4; __u8 tos, ttl; @@ -842,8 +841,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); skb_reset_mac_header(skb); - iip = ip_hdr(skb); - if (info) { const struct ip_tunnel_key *key = &info->key; u8 *opts = NULL; @@ -859,7 +856,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto err; - tos = ip_tunnel_ecn_encap(key->tos, iip, skb); + tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; } else { @@ -869,7 +866,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto err; - tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); + tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); ttl = geneve->ttl; if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) ttl = 1; @@ -903,7 +900,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; - const struct iphdr *iip; /* interior IP header */ int err = -EINVAL; struct flowi6 fl6; __u8 prio, ttl; @@ -927,8 +923,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); skb_reset_mac_header(skb); - iip = ip_hdr(skb); - if (info) { const struct ip_tunnel_key *key = &info->key; u8 *opts = NULL; @@ -945,7 +939,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto err; - prio = ip_tunnel_ecn_encap(key->tos, iip, skb); + prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; } else { udp_csum = false; @@ -954,7 +948,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto err; - prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb); + prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, ip_hdr(skb), skb); ttl = geneve->ttl; if (!ttl && ipv6_addr_is_multicast(&fl6.daddr)) ttl = 1; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 47cd306dbb3c4909ae95feb586f0b89198126995..bba0ca786aaac22bf5afc2d6acd5b584d0ecb958 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -640,8 +640,10 @@ phy_err: int phy_start_interrupts(struct phy_device *phydev) { atomic_set(&phydev->irq_disable, 0); - if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", - phydev) < 0) { + if 
(request_irq(phydev->irq, phy_interrupt, + IRQF_SHARED, + "phy_interrupt", + phydev) < 0) { pr_warn("%s: Can't get IRQ %d (PHY)\n", phydev->bus->name, phydev->irq); phydev->irq = PHY_POLL; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 935e0b45e15149189de9d3c87d365304f98accf2..dd7b7d64c90af1c56b5b83b7db42e8bd0b3230e7 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -862,10 +862,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) goto drop; - if (skb->sk && sk_fullsock(skb->sk)) { - sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); - sw_tx_timestamp(skb); - } + skb_tx_timestamp(skb); /* Orphan the skb - required as we might hang on to it * for indefinite time. diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f94ab786088fc8fd7f848edeaedd4d2aff160606..0e2a19e589238ef3dcdd04cd7c691095385c9173 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1465,6 +1465,11 @@ static void virtnet_free_queues(struct virtnet_info *vi) netif_napi_del(&vi->rq[i].napi); } + /* We called napi_hash_del() before netif_napi_del(), + * we need to respect an RCU grace period before freeing vi->rq + */ + synchronize_net(); + kfree(vi->rq); kfree(vi->sq); } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 003780901628760e733b07b0fc7cf43e87e373f2..6fa8e165878e1d9968e2665e0e7bcd2ca0406e25 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, } } - pp = eth_gro_receive(head, skb); + pp = call_gro_receive(eth_gro_receive, head, skb); out: skb_gro_remcsum_cleanup(skb, &grc); diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index f1ead7c2882329120527cf5cd06f39fdafd31623..b8ef3780c2ac083fb177594bfc85bc4849a67ea8 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -21,7 +21,7 
@@ /* * Support for Copy Engine hardware, which is mainly used for - * communication between Host and Target over a PCIe interconnect. + * communication between Host and Target over a PCIe/SNOC/AHB interconnect. */ /* @@ -32,7 +32,7 @@ * Each ring consists of a number of descriptors which specify * an address, length, and meta-data. * - * Typically, one side of the PCIe interconnect (Host or Target) + * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target) * controls one ring and the other side controls the other ring. * The source side chooses when to initiate a transfer and it * chooses what to send (buffer address, length). The destination @@ -62,70 +62,95 @@ static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ar->bus_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); } static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ar->bus_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + DST_WR_INDEX_ADDRESS); } static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ar->bus_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); } static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ar->bus_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + SR_WR_INDEX_ADDRESS); } static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return 
ar->bus_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + CURRENT_SRRI_ADDRESS); } static inline void ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ar->bus_write32(ar, shadow_sr_wr_ind_addr(ar, ce_ctrl_addr), n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, shadow_sr_wr_ind_addr(ar, + ce_ctrl_addr), n); } static inline void ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ar->bus_write32(ar, shadow_dst_wr_ind_addr(ar, ce_ctrl_addr), n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, shadow_dst_wr_ind_addr(ar, + ce_ctrl_addr), + n); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int addr) { - ar->bus_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ar->bus_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); } static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ar->bus_read32((ar), + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 ctrl1_addr = ar_opaque->bus_ops->read32((ar), (ce_ctrl_addr) + CE_CTRL1_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) | CE_CTRL1_DMAX_LENGTH_SET(n)); } @@ -134,9 +159,11 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k 
*ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ar->bus_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 ctrl1_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + CE_CTRL1_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)); } @@ -145,9 +172,11 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ar->bus_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 ctrl1_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + CE_CTRL1_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)); } @@ -155,30 +184,40 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ar->bus_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + return ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + CURRENT_DRRI_ADDRESS); } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, u32 addr) { - ar->bus_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ar->bus_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); } 
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ar->bus_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); - ar->bus_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + SRC_WATERMARK_ADDRESS); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, (addr & ~SRC_WATERMARK_HIGH_MASK) | SRC_WATERMARK_HIGH_SET(n)); } @@ -187,9 +226,11 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ar->bus_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + SRC_WATERMARK_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, (addr & ~SRC_WATERMARK_LOW_MASK) | SRC_WATERMARK_LOW_SET(n)); } @@ -198,9 +239,11 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ar->bus_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + DST_WATERMARK_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, (addr & ~DST_WATERMARK_HIGH_MASK) | DST_WATERMARK_HIGH_SET(n)); } @@ -209,9 +252,11 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ar->bus_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + + DST_WATERMARK_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, + 
ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, (addr & ~DST_WATERMARK_LOW_MASK) | DST_WATERMARK_LOW_SET(n)); } @@ -219,50 +264,55 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ar->bus_read32(ar, + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 host_ie_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + HOST_IE_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, host_ie_addr | HOST_IE_COPY_COMPLETE_MASK); } static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ar->bus_read32(ar, + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 host_ie_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + HOST_IE_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK); } static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ar->bus_read32(ar, + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 host_ie_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + HOST_IE_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, host_ie_addr & ~CE_WATERMARK_MASK); } static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 misc_ie_addr = ar->bus_read32(ar, + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 misc_ie_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + MISC_IE_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, misc_ie_addr | CE_ERROR_MASK); } static inline void 
ath10k_ce_error_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 misc_ie_addr = ar->bus_read32(ar, + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + u32 misc_ie_addr = ar_opaque->bus_ops->read32(ar, ce_ctrl_addr + MISC_IE_ADDRESS); - ar->bus_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, misc_ie_addr & ~CE_ERROR_MASK); } @@ -270,7 +320,9 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int mask) { - ar->bus_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + + ar_opaque->bus_ops->write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); } u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr) @@ -422,10 +474,11 @@ exit: void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); struct ath10k_ce_ring *src_ring = pipe->src_ring; u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar->ce_lock); + lockdep_assert_held(&ar_opaque->ce_lock); /* * This function must be called only if there is an incomplete @@ -453,12 +506,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, unsigned int flags) { struct ath10k *ar = ce_state->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -466,13 +520,14 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int delta; - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, 
pipe->src_ring->write_index, pipe->src_ring->sw_index - 1); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return delta; } @@ -480,12 +535,13 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; unsigned int sw_index = dest_ring->sw_index; - lockdep_assert_held(&ar->ce_lock); + lockdep_assert_held(&ar_opaque->ce_lock); return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } @@ -494,6 +550,7 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, dma_addr_t paddr) { struct ath10k *ar = pipe->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; @@ -502,7 +559,7 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar->ce_lock); + lockdep_assert_held(&ar_opaque->ce_lock); if ((pipe->id != 5) && CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) @@ -542,11 +599,12 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, dma_addr_t paddr) { struct ath10k *ar = pipe->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -609,13 +667,14 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int *nbytesp) { struct ath10k *ar = ce_state->ar; + struct 
bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = ath10k_ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, nbytesp); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -630,6 +689,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; + struct bus_opaque *ar_opaque; dest_ring = ce_state->dest_ring; @@ -637,8 +697,9 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; + ar_opaque = ath10k_bus_priv(ar); - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); nentries_mask = dest_ring->nentries_mask; sw_index = dest_ring->sw_index; @@ -666,7 +727,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -734,6 +795,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; + struct bus_opaque *ar_opaque; src_ring = ce_state->src_ring; @@ -741,8 +803,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; + ar_opaque = ath10k_bus_priv(ar); - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -773,7 +836,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -782,12 +845,13 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp) { struct ath10k *ar = ce_state->ar; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ret; - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); ret = ath10k_ce_completed_send_next_nolock(ce_state, 
per_transfer_contextp); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); return ret; } @@ -800,17 +864,17 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, */ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { - struct ath10k_ce_pipe *ce_state = - ((struct ath10k_ce_pipe *)ar->ce_states + ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; u32 ctrl_addr = ce_state->ctrl_addr; - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); /* Clear the copy-complete interrupts that will be handled here. */ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, HOST_IS_COPY_COMPLETE_MASK); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); if (ce_state->recv_cb) ce_state->recv_cb(ce_state); @@ -818,7 +882,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) if (ce_state->send_cb) ce_state->send_cb(ce_state); - spin_lock_bh(&ar->ce_lock); + spin_lock_bh(&ar_opaque->ce_lock); /* * Misc CE interrupts are not being handled, but still need @@ -826,7 +890,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) */ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); - spin_unlock_bh(&ar->ce_lock); + spin_unlock_bh(&ar_opaque->ce_lock); } /* @@ -840,11 +904,12 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) int ce_id; u32 intr_summary; struct ath10k_ce_pipe *ce_state; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); if (ar->target_version == ATH10K_HW_WCN3990) intr_summary = 0xFFF; else - intr_summary = CE_INTERRUPT_SUMMARY(ar); + intr_summary = CE_INTERRUPT_SUMMARY(ar, ar_opaque); for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { if (intr_summary & (1 << ce_id)) @@ -853,7 +918,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) /* no intr pending on this CE */ continue; - ce_state = ((struct ath10k_ce_pipe 
*)ar->ce_states + ce_id); + ce_state = &ar_opaque->ce_states[ce_id]; if (ce_state->send_cb || ce_state->recv_cb) ath10k_ce_per_engine_service(ar, ce_id); } @@ -899,42 +964,47 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) void ath10k_ce_enable_interrupts(struct ath10k *ar) { + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ce_id; + struct ath10k_ce_pipe *ce_state; /* Skip the last copy engine, CE7 the diagnostic window, as that * uses polling and isn't initialized for interrupts. */ - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) - ath10k_ce_per_engine_handler_adjust( - ((struct ath10k_ce_pipe *)ar->ce_states + ce_id)); + for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) { + ce_state = &ar_opaque->ce_states[ce_id]; + ath10k_ce_per_engine_handler_adjust(ce_state); + } } void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id) { u32 offset; u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); offset = HOST_IE_ADDRESS + ctrl_addr; - ar->bus_write32(ar, offset, 1); - ar->bus_read32(ar, offset); + ar_opaque->bus_ops->write32(ar, offset, 1); + ar_opaque->bus_ops->read32(ar, offset); } void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id) { u32 offset; u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); offset = HOST_IE_ADDRESS + ctrl_addr; - ar->bus_write32(ar, offset, 0); - ar->bus_read32(ar, offset); + ar_opaque->bus_ops->write32(ar, offset, 0); + ar_opaque->bus_ops->read32(ar, offset); } static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_ce_pipe *ce_state = - ((struct ath10k_ce_pipe *)ar->ce_states + ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, 
ce_id); @@ -970,8 +1040,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_ce_pipe *ce_state = - ((struct ath10k_ce_pipe *)ar->ce_states + ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -1178,8 +1248,8 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, const struct ce_attr *attr) { - struct ath10k_ce_pipe *ce_state = - ((struct ath10k_ce_pipe *)ar->ce_states + ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; int ret; /* @@ -1235,8 +1305,8 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { - struct ath10k_ce_pipe *ce_state = - ((struct ath10k_ce_pipe *)ar->ce_states + ce_id); + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); + struct ath10k_ce_pipe *ce_state = &ar_opaque->ce_states[ce_id]; if (ce_state->src_ring) { kfree(ce_state->src_ring->shadow_base_unaligned); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 160a13e681df03f29f9bb553d85242d73aff40b6..f4fa86b93082db856f62d2da0eeffd892a3de78c 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -201,6 +201,24 @@ struct ce_attr; u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr); u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr); +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); +}; + +static inline struct bus_opaque *ath10k_bus_priv(struct ath10k *ar) +{ + return (struct bus_opaque *)ar->drv_priv; 
+} + +struct bus_opaque { + /* protects CE info */ + spinlock_t ce_lock; + const struct ath10k_bus_ops *bus_ops; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; +}; + /*==================Send====================*/ /* ath10k_ce_send flags */ @@ -692,9 +710,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 -#define CE_INTERRUPT_SUMMARY(ar) \ +#define CE_INTERRUPT_SUMMARY(ar, ar_opaque) \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ - ar->bus_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ + ar_opaque->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + \ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index bb2c5fb9a125c5095d6cf53b95bc62a6d60c1f7f..dc4cefb1177fb80df5a5b9e769aba39e4f034b8f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -923,10 +923,6 @@ struct ath10k { struct net_device napi_dev; struct napi_struct napi; - void (*bus_write32)(void *ar, u32 offset, u32 value); - u32 (*bus_read32)(void *ar, u32 offset); - spinlock_t ce_lock; /* lock for CE access */ - void *ce_states; struct fw_flag *fw_flags; /* set for bmi chip sets */ bool is_bmi; diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 861446a41066623e5e45ab40c3e5c26bed9d02e0..65723124985e0575f7af5fa151bb7ed6a003d3c8 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -74,9 +74,9 @@ struct ath10k_hif_ops { u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); - u32 (*read32)(void *ar, u32 address); + u32 (*read32)(struct ath10k *ar, u32 address); - void (*write32)(void *ar, u32 address, u32 value); + void (*write32)(struct ath10k *ar, u32 address, u32 value); /* Power up the device and enter BMI transfer mode for FW download */ int 
(*power_up)(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 072e008900e6a0509db9b49e8144a296f7d1d2c1..9e607b2fa2d4f0c60e9bac384cdd780c384aec9a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -669,18 +669,18 @@ static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) return val; } -inline void ath10k_pci_write32(void *ar, u32 offset, u32 value) +inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ar_pci->bus_ops->write32(ar, offset, value); + ar_pci->opaque_ctx.bus_ops->write32(ar, offset, value); } -inline u32 ath10k_pci_read32(void *ar, u32 offset) +inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - return ar_pci->bus_ops->read32(ar, offset); + return ar_pci->opaque_ctx.bus_ops->read32(ar, offset); } u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) @@ -780,9 +780,9 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); @@ -806,9 +806,9 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) if (!ce_pipe->dest_ring) return; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); @@ -886,7 +886,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - 
spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); ce_diag = ar_pci->ce_diag; @@ -987,7 +987,7 @@ done: dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return ret; } @@ -1044,7 +1044,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); ce_diag = ar_pci->ce_diag; @@ -1148,7 +1148,7 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return ret; } @@ -1351,7 +1351,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, unsigned int write_index; int err, i = 0; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ar_pci->opaque_ctx.ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -1397,14 +1397,14 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (err) goto err; - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ar_pci->opaque_ctx.ce_lock); return err; } @@ -1990,7 +1990,7 @@ static int ath10k_bus_get_num_banks(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - return ar_pci->bus_ops->get_num_banks(ar); + return ar_pci->opaque_ctx.bus_ops->get_num_banks(ar); } int ath10k_pci_init_config(struct ath10k *ar) @@ -2165,7 +2165,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar) for (i = 0; i < CE_COUNT; i++) { pipe = &ar_pci->pipe_info[i]; - pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->ce_hdl = &ar_pci->opaque_ctx.ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; @@ -2792,6 +2792,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) { struct ath10k *ar = 
container_of(ctx, struct ath10k, napi); int done = 0; + struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); if (ath10k_pci_has_fw_crashed(ar)) { ath10k_pci_fw_crashed_clear(ar); @@ -2814,7 +2815,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) * interrupts safer to check for pending interrupts for * immediate servicing. */ - if (CE_INTERRUPT_SUMMARY(ar)) { + if (CE_INTERRUPT_SUMMARY(ar, ar_opaque)) { napi_reschedule(ctx); goto out; } @@ -3132,7 +3133,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; - spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ar_pci->opaque_ctx.ce_lock); spin_lock_init(&ar_pci->ps_lock); setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, @@ -3243,7 +3244,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; - ar_pci->bus_ops = &ath10k_pci_bus_ops; + ar_pci->opaque_ctx.bus_ops = &ath10k_pci_bus_ops; ar_pci->pci_soft_reset = pci_soft_reset; ar_pci->pci_hard_reset = pci_hard_reset; @@ -3252,14 +3253,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar->id.subsystem_vendor = pdev->subsystem_vendor; ar->id.subsystem_device = pdev->subsystem_device; - spin_lock_init(&ar_pci->ce_lock); spin_lock_init(&ar_pci->ps_lock); - - ar->bus_write32 = ath10k_pci_write32; - ar->bus_read32 = ath10k_pci_read32; - ar->ce_lock = ar_pci->ce_lock; - ar->ce_states = ar_pci->ce_states; - setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 06d0bd3993d376c5432f353db8dd6e26eca82ae8..22730c700af38fb7d1c03b9c46c9f1f558010c36 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -155,12 +155,6 @@ struct ath10k_pci_supp_chip { u32 rev_id; }; -struct ath10k_bus_ops { - 
u32 (*read32)(struct ath10k *ar, u32 offset); - void (*write32)(struct ath10k *ar, u32 offset, u32 value); - int (*get_num_banks)(struct ath10k *ar); -}; - enum ath10k_pci_irq_mode { ATH10K_PCI_IRQ_AUTO = 0, ATH10K_PCI_IRQ_LEGACY = 1, @@ -168,6 +162,7 @@ enum ath10k_pci_irq_mode { }; struct ath10k_pci { + struct bus_opaque opaque_ctx; struct pci_dev *pdev; struct device *dev; struct ath10k *ar; @@ -182,11 +177,6 @@ struct ath10k_pci { /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; - /* FIXME: document what this really protects */ - spinlock_t ce_lock; - - /* Map CE id to ce_state */ - struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; struct timer_list rx_post_retry; /* Due to HW quirks it is recommended to disable ASPM during device @@ -230,8 +220,6 @@ struct ath10k_pci { */ bool pci_ps; - const struct ath10k_bus_ops *bus_ops; - /* Chip specific pci reset routine used to do a safe reset */ int (*pci_soft_reset)(struct ath10k *ar); @@ -263,11 +251,11 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ #define DIAG_ACCESS_CE_TIMEOUT_MS 10 -void ath10k_pci_write32(void *ar, u32 offset, u32 value); +void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value); void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val); void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val); -u32 ath10k_pci_read32(void *ar, u32 offset); +u32 ath10k_pci_read32(struct ath10k *ar, u32 offset); u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr); u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr); diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 6c8797d5e5fcf179ce2f57af26a341eed7efdbca..081e44b3277a96215432142ce63c8be12175c8a4 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -413,9 +413,9 @@ static struct ath10k_shadow_reg_cfg 
target_shadow_reg_cfg_map[] = { { 11, WCN3990_DST_WR_INDEX_OFFSET}, }; -void ath10k_snoc_write32(void *ar, u32 offset, u32 value) +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) { - struct ath10k_snoc *ar_snoc = ath10k_snoc_priv((struct ath10k *)ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); if (!ar_snoc) return; @@ -423,9 +423,9 @@ void ath10k_snoc_write32(void *ar, u32 offset, u32 value) iowrite32(value, ar_snoc->mem + offset); } -u32 ath10k_snoc_read32(void *ar, u32 offset) +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) { - struct ath10k_snoc *ar_snoc = ath10k_snoc_priv((struct ath10k *)ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); u32 val; if (!ar_snoc) @@ -462,9 +462,9 @@ static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; - spin_lock_bh(&ar_snoc->ce_lock); + spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); - spin_unlock_bh(&ar_snoc->ce_lock); + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); @@ -488,9 +488,9 @@ static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe) if (!ce_pipe->dest_ring) return; - spin_lock_bh(&ar_snoc->ce_lock); + spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); - spin_unlock_bh(&ar_snoc->ce_lock); + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); while (num--) { ret = __ath10k_snoc_rx_post_buf(pipe); if (ret) { @@ -638,7 +638,7 @@ static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, snoc_pipe = &ar_snoc->pipe_info[pipe_id]; ce_pipe = snoc_pipe->ce_hdl; src_ring = ce_pipe->src_ring; - spin_lock_bh(&ar_snoc->ce_lock); + spin_lock_bh(&ar_snoc->opaque_ctx.ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -678,14 +678,14 @@ static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (err) goto err; - 
spin_unlock_bh(&ar_snoc->ce_lock); + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); - spin_unlock_bh(&ar_snoc->ce_lock); + spin_unlock_bh(&ar_snoc->opaque_ctx.ce_lock); return err; } @@ -882,7 +882,7 @@ static int ath10k_snoc_alloc_pipes(struct ath10k *ar) for (i = 0; i < CE_COUNT; i++) { pipe = &ar_snoc->pipe_info[i]; - pipe->ce_hdl = &ar_snoc->ce_states[i]; + pipe->ce_hdl = &ar_snoc->opaque_ctx.ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; @@ -1184,6 +1184,11 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .write32 = ath10k_snoc_write32, }; +static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, +}; + static int ath10k_snoc_probe(struct platform_device *pdev) { int ret; @@ -1210,11 +1215,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ar); ar_snoc->ar = ar; - spin_lock_init(&ar_snoc->ce_lock); - ar->bus_write32 = ath10k_snoc_write32; - ar->bus_read32 = ath10k_snoc_read32; - ar->ce_lock = ar_snoc->ce_lock; - ar->ce_states = ar_snoc->ce_states; + spin_lock_init(&ar_snoc->opaque_ctx.ce_lock); + ar_snoc->opaque_ctx.bus_ops = &ath10k_snoc_bus_ops; ath10k_snoc_resource_init(ar); ar->target_version = ATH10K_HW_WCN3990; diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index 1754a3e91a0030b3f12e3e1e19c30f71825d2db4..0a5f5bff37ec2b86681998ed7961574e18766773 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -103,6 +103,7 @@ struct ath10k_target_info { * @is_driver_probed: flag to indicate driver state */ struct ath10k_snoc { + struct bus_opaque opaque_ctx; struct platform_device *dev; struct ath10k *ar; void __iomem *mem; @@ -110,9 +111,6 @@ struct ath10k_snoc { struct ath10k_target_info target_info; size_t mem_len; struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX]; - /* protects 
CE info */ - spinlock_t ce_lock; - struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; struct timer_list rx_post_retry; u32 ce_irqs[CE_COUNT_MAX]; u32 *vaddr_rri_on_ddr; @@ -191,10 +189,10 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) return (struct ath10k_snoc *)ar->drv_priv; } -void ath10k_snoc_write32(void *ar, u32 offset, u32 value); +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value); void ath10k_snoc_soc_write32(struct ath10k *ar, u32 addr, u32 val); void ath10k_snoc_reg_write32(struct ath10k *ar, u32 addr, u32 val); -u32 ath10k_snoc_read32(void *ar, u32 offset); +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset); u32 ath10k_snoc_soc_read32(struct ath10k *ar, u32 addr); u32 ath10k_snoc_reg_read32(struct ath10k *ar, u32 addr); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 8b4561e8ce1aacf0ff2523cad1069d67bf2ca15f..ef493271c7129307ecb3de7acebbd8a8d8f760bf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4176,7 +4176,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah)) ar9003_hw_internal_regulator_apply(ah); ar9003_hw_apply_tuning_caps(ah); - ar9003_hw_apply_minccapwr_thresh(ah, chan); + ar9003_hw_apply_minccapwr_thresh(ah, is2ghz); ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); ar9003_hw_thermometer_apply(ah); ar9003_hw_thermo_cal_apply(ah); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 1bdeacf7b25754fbe72a29009e5ea03dac58ce29..bc70ce62bc03ce5e82844afc11ad268426a0cd3a 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -869,8 +869,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE); - hw->wiphy->iface_combinations = 
if_comb; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + hw->wiphy->iface_combinations = if_comb; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 5cc0ddc254ebe0c9bd816069e01027f2313d27b6..7be31f27266ce1e31c2e0d60dca4e544da82eaeb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1550,13 +1550,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); int ret = 0; - if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_ASSOC) { + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { ret = ath9k_sta_add(hw, vif, sta); ath_dbg(common, CONFIG, "Add station: %pM\n", sta->addr); - } else if (old_state == IEEE80211_STA_ASSOC && - new_state == IEEE80211_STA_AUTH) { + } else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) { ret = ath9k_sta_remove(hw, vif, sta); ath_dbg(common, CONFIG, "Remove station: %pM\n", sta->addr); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 75ae474367f9c3ea1edd79867fa56513a1ee0112..fe6d5ab7f95b3147e6044271734f7bf1b21b6fb7 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -705,9 +705,6 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); - /* Enable fix for PCIe HW bug, set "No snoop" for RX transactions */ - wil_s(wil, RGF_DMA_PEDI_DIF, BIT_DMA_WR_CMD_ATTR_NO_SNOOP); - wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 
410a6645d316ec1c2cf5d09b54847e918dde0045..59cef6c69fe86a99623b8dc490d7c38c34bb581f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -726,8 +726,10 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, return -ENOMEM; err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, glom_skb); - if (err) + if (err) { + brcmu_pkt_buf_free_skb(glom_skb); goto done; + } skb_queue_walk(pktq, skb) { memcpy(skb->data, glom_skb->data, skb->len); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index deb5f78dcacc0d9ef188360d300a7b4f1bcfba19..70a6985334d5b518f517848b5f969cec80b3c5f2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -2408,7 +2408,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) WL_BSS_INFO_MAX); if (err) { brcmf_err("Failed to get bss info (%d)\n", err); - return; + goto out_kfree; } si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period); @@ -2420,6 +2420,9 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; + +out_kfree: + kfree(buf); } static s32 @@ -4099,7 +4102,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, (u8 *)&settings->beacon.head[ie_offset], settings->beacon.head_len - ie_offset, WLAN_EID_SSID); - if (!ssid_ie) + if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN) return -EINVAL; memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 796f5f9d5d5a45bc621b0fd7e880a112aee1067a..b7df576bb84d9bfe5f4134bad4323ab48b8c9ed8 
100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub) pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, DMA_FROM_DEVICE); - if (dma_mapping_error(di->dmadev, pa)) + if (dma_mapping_error(di->dmadev, pa)) { + brcmu_pkt_buf_free_skb(p); return false; + } /* save the free packet pointer */ di->rxp[rxout] = p; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c index dd9162722495333df13296c80d89c20ea9058d74..0ab865de14918cfd8939133eabf790d765ff5b07 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c @@ -87,7 +87,7 @@ void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel, u16 chanspec) { - struct tx_power power; + struct tx_power power = { }; u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id; /* Clear previous settings */ diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c index 93bdf684babe36392daa84eca81e7d52a529d8a5..ae047ab7a4dfa225908430a14255e2285d0dd888 100644 --- a/drivers/net/wireless/iwlegacy/3945.c +++ b/drivers/net/wireless/iwlegacy/3945.c @@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il) int txq_id; /* Tx queues */ - if (il->txq) + if (il->txq) { for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) if (txq_id == IL39_CMD_QUEUE_NUM) il_cmd_queue_free(il); else il_tx_queue_free(il, txq_id); + } /* free tx queue structure */ il_free_txq_mem(il); diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c index 20e6aa9107009ccc33246190534cbc7674c54d03..c148085742a00caf29fcb0c007d9521c6c96ba3c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/iwlwifi/dvm/calib.c @@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, /* bound gain by 2 bits value max, 3rd 
bit is sign */ data->delta_gain_code[i] = min(abs(delta_g), - (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); if (delta_g < 0) /* diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 610c442c7ab2f8194f4c4777c44070eacab322a4..9584f950fd2f819a5add840fb74a33388ef83ee8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -935,7 +935,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) } mvm->fw_dbg_conf = conf_id; - return ret; + + return 0; } static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index f96ab2f4b90ebc2b3522764705f30fd816413c1b..ce12717e656ad771350c8b50a5e820e9d4da63a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -3992,8 +3992,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, if (idx != 0) return -ENOENT; - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; mutex_lock(&mvm->mutex); @@ -4039,8 +4039,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return; /* if beacon filtering isn't on mac80211 does it anyway */ diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index b0f59fdd287c787cbb0f65ca319aebfadb850d31..d7d72adb6343d4cdddf61fbc40d000a4a075e069 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c @@ -215,7 +215,7 @@ 
static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { - .state = cpu_to_le32(SF_FULL_ON), + .state = cpu_to_le32(new_state), }; struct ieee80211_sta *sta; int ret = 0; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index d58c094f2f044a67f7a4bb2d0fab0b3784ad7131..f7e6a09926dd4a939c87af63fb2cf08cbbc0baf8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -475,48 +475,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = { MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #ifdef CONFIG_ACPI -#define SPL_METHOD "SPLC" -#define SPL_DOMAINTYPE_MODULE BIT(0) -#define SPL_DOMAINTYPE_WIFI BIT(1) -#define SPL_DOMAINTYPE_WIGIG BIT(2) -#define SPL_DOMAINTYPE_RFEM BIT(3) +#define ACPI_SPLC_METHOD "SPLC" +#define ACPI_SPLC_DOMAIN_WIFI (0x07) -static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) +static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc) { - union acpi_object *limits, *domain_type, *power_limit; - - if (splx->type != ACPI_TYPE_PACKAGE || - splx->package.count != 2 || - splx->package.elements[0].type != ACPI_TYPE_INTEGER || - splx->package.elements[0].integer.value != 0) { - IWL_ERR(trans, "Unsupported splx structure\n"); + union acpi_object *data_pkg, *dflt_pwr_limit; + int i; + + /* We need at least two elements, one for the revision and one + * for the data itself. Also check that the revision is + * supported (currently only revision 0). + */ + if (splc->type != ACPI_TYPE_PACKAGE || + splc->package.count < 2 || + splc->package.elements[0].type != ACPI_TYPE_INTEGER || + splc->package.elements[0].integer.value != 0) { + IWL_DEBUG_INFO(trans, + "Unsupported structure returned by the SPLC method. 
Ignoring.\n"); return 0; } - limits = &splx->package.elements[1]; - if (limits->type != ACPI_TYPE_PACKAGE || - limits->package.count < 2 || - limits->package.elements[0].type != ACPI_TYPE_INTEGER || - limits->package.elements[1].type != ACPI_TYPE_INTEGER) { - IWL_ERR(trans, "Invalid limits element\n"); - return 0; + /* loop through all the packages to find the one for WiFi */ + for (i = 1; i < splc->package.count; i++) { + union acpi_object *domain; + + data_pkg = &splc->package.elements[i]; + + /* Skip anything that is not a package with the right + * amount of elements (i.e. at least 2 integers). + */ + if (data_pkg->type != ACPI_TYPE_PACKAGE || + data_pkg->package.count < 2 || + data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || + data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + continue; + + domain = &data_pkg->package.elements[0]; + if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI) + break; + + data_pkg = NULL; } - domain_type = &limits->package.elements[0]; - power_limit = &limits->package.elements[1]; - if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { - IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); + if (!data_pkg) { + IWL_DEBUG_INFO(trans, + "No element for the WiFi domain returned by the SPLC method.\n"); return 0; } - return power_limit->integer.value; + dflt_pwr_limit = &data_pkg->package.elements[1]; + return dflt_pwr_limit->integer.value; } static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) { acpi_handle pxsx_handle; acpi_handle handle; - struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; pxsx_handle = ACPI_HANDLE(&pdev->dev); @@ -527,23 +543,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) } /* Get the method's handle */ - status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); + status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD, + 
&handle); if (ACPI_FAILURE(status)) { - IWL_DEBUG_INFO(trans, "SPL method not found\n"); + IWL_DEBUG_INFO(trans, "SPLC method not found\n"); return; } /* Call SPLC with no arguments */ - status = acpi_evaluate_object(handle, NULL, NULL, &splx); + status = acpi_evaluate_object(handle, NULL, NULL, &splc); if (ACPI_FAILURE(status)) { IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); return; } - trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); + trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer); IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", trans->dflt_pwr_limit); - kfree(splx.pointer); + kfree(splc.pointer); } #else /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index a8c8a4a7420b53d02a798bcc261f408f77440ace..8dfe6b2bc703186080ccb510a03b40135346053c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1508,9 +1508,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* start the TFD with the scratchbuf */ scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); - memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); + memcpy(&txq->scratchbufs[idx], &out_cmd->hdr, scratch_size); iwl_pcie_txq_build_tfd(trans, txq, - iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), + iwl_pcie_get_scratchbuf_dma(txq, idx), scratch_size, true); /* map first command fragment, if any remains */ diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 4073116e6e9fa8915c9fd4d35d932ccd5f3f2a08..c3331d6201c364e45101c0de2c19cc051813af44 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2144,8 +2144,9 @@ done: is_scanning_required = 1; } else { mwifiex_dbg(priv->adapter, MSG, - "info: trying to associate to '%s' bssid %pM\n", - (char *)req_ssid.ssid, bss->bssid); + "info: trying to associate to '%.*s' bssid 
%pM\n", + req_ssid.ssid_len, (char *)req_ssid.ssid, + bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; } @@ -2202,8 +2203,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(adapter, INFO, - "info: Trying to associate to %s and bssid %pM\n", - (char *)sme->ssid, sme->bssid); + "info: Trying to associate to %.*s and bssid %pM\n", + (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); @@ -2333,8 +2334,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(priv->adapter, MSG, - "info: trying to join to %s and bssid %pM\n", - (char *)params->ssid, params->bssid); + "info: trying to join to %.*s and bssid %pM\n", + params->ssid_len, (char *)params->ssid, params->bssid); mwifiex_set_ibss_params(priv, params); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 3cda1f956f0b1654ec438291e22f416ada279e17..6378dfd3b4e864da8b347d4e308defe989129db9 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -661,9 +661,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN, sizeof(priv->assoc_rsp_buf)); - memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size); - assoc_rsp->a_id = cpu_to_le16(aid); + memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size); if (status_code) { priv->adapter->dbg.num_cmd_assoc_failure++; diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 5be34118e0af22b69392f5caf07f9eef5672c7fe..f67e7e5b13e192189c3f649a3aceb9ac50d6d57f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( return 
&rtl_regdom_no_midband; case COUNTRY_CODE_IC: return &rtl_regdom_11; - case COUNTRY_CODE_ETSI: case COUNTRY_CODE_TELEC_NETGEAR: return &rtl_regdom_60_64; + case COUNTRY_CODE_ETSI: case COUNTRY_CODE_SPAIN: case COUNTRY_CODE_FRANCE: case COUNTRY_CODE_ISRAEL: @@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan) return COUNTRY_CODE_WORLD_WIDE_13; case 0x22: return COUNTRY_CODE_IC; + case 0x25: + return COUNTRY_CODE_ETSI; case 0x32: return COUNTRY_CODE_TELEC_NETGEAR; case 0x41: diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index ccb07a1b153db27f5cadb57d1152d3fdd4b4e8b8..23e53780728beb766b77ddb4313722fd4b13753c 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -352,7 +352,7 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; - u8 conn_id; + int conn_id; int r = 0; if (info->otp_version >= info->otp_patch_version) @@ -423,7 +423,7 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; - u8 conn_id; + int conn_id; int r = 0; if (info->ram_version >= info->ram_patch_version) diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 83deda4bb4d6d52d4b529b71712f71114aac1445..6f9563a9648852e48929a39b29982a7bbef52c0e 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c @@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy) return -ENOMEM; bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + if (bytes_recv < 0 || bytes_recv < if_version_length) { pr_err("Could not read IF version\n"); r = -EIO; goto err; diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index cffd4d31d3c1b17cdf70574f2d4e32d89926b259..22528d051845b586846efb7996b6c860c9e17bd6 100644 --- a/drivers/of/of_reserved_mem.c 
+++ b/drivers/of/of_reserved_mem.c @@ -127,8 +127,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, } /* Need adjust the alignment to satisfy the CMA requirement */ - if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) - align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); + if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) { + unsigned long order = + max_t(unsigned long, MAX_ORDER - 1, pageblock_order); + + align = max(align, (phys_addr_t)PAGE_SIZE << order); + } prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); if (prop) { diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 182224acedbe79c18dbc357cb45763afef40e387..58f1419a68aef0b7fd1e7eef352400a875f7ad94 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c @@ -283,20 +283,6 @@ out: return 0; } -static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) -{ - while (1) { - if (!pci_is_pcie(dev)) - break; - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - return dev; - if (!dev->bus->self) - break; - dev = dev->bus->self; - } - return NULL; -} - static int find_aer_device_iter(struct device *device, void *data) { struct pcie_device **result = data; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 9757cf9037a2b0e9e917ac454b0f107e0e3b3b04..b5843c255263df2d04c12f36e819b60bab47369f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1415,6 +1415,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) dev_warn(&dev->dev, "PCI-X settings not supported\n"); } +static bool pcie_root_rcb_set(struct pci_dev *dev) +{ + struct pci_dev *rp = pcie_find_root_port(dev); + u16 lnkctl; + + if (!rp) + return false; + + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_RCB) + return true; + + return false; +} + static void program_hpp_type2(struct pci_dev *dev, 
struct hpp_type2 *hpp) { int pos; @@ -1444,9 +1459,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (pcie_cap_has_lnkctl(dev)) + if (pcie_cap_has_lnkctl(dev)) { + + /* + * If the Root Port supports Read Completion Boundary of + * 128, set RCB to 128. Otherwise, clear it. + */ + hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; + hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; + if (pcie_root_rcb_set(dev)) + hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); + } /* Find Advanced Error Reporting Enhanced Capability */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 42774bc39786c9785a7f4dd3e8055266d2001f9c..254192b5dad1a7afece5731806e85e844398827d 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3136,6 +3136,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); static void quirk_no_pm_reset(struct pci_dev *dev) { diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 13b79438af1cfa6f851c9944b890a2cc537879d8..4c75b4d392c662cf17afb99ea3f4fc2718dc03b9 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -972,6 +972,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) if (i > 0 && spi != using_spi) { pr_err("PPI/SPI IRQ type mismatch for %s!\n", dn->name); + of_node_put(dn); kfree(irqs); return -EINVAL; } diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 
4e377599d26622f25025119849ed7b76b78be594..a009ae34c5ef7f37c7131ec25e7d32abeb91a7f4 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1564,12 +1564,15 @@ static int chv_pinctrl_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int chv_pinctrl_suspend(struct device *dev) +static int chv_pinctrl_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&chv_lock, flags); + pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); for (i = 0; i < pctrl->community->npins; i++) { @@ -1590,15 +1593,20 @@ static int chv_pinctrl_suspend(struct device *dev) ctx->padctrl1 = readl(reg); } + raw_spin_unlock_irqrestore(&chv_lock, flags); + return 0; } -static int chv_pinctrl_resume(struct device *dev) +static int chv_pinctrl_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&chv_lock, flags); + /* * Mask all interrupts before restoring per-pin configuration * registers because we don't know in which state BIOS left them @@ -1643,12 +1651,15 @@ static int chv_pinctrl_resume(struct device *dev) chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); + raw_spin_unlock_irqrestore(&chv_lock, flags); + return 0; } #endif static const struct dev_pm_ops chv_pinctrl_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq, + chv_pinctrl_resume_noirq) }; static const struct acpi_device_id chv_pinctrl_acpi_match[] = { diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index b3235fd2950c6624a12477e452331e41dd95e92c..271cca63e9bd237f96a878f4dfe7c69b3f07f3aa 
100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -1002,7 +1002,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) atmel_pioctrl->irqs[i] = res->start; irq_set_chained_handler(res->start, atmel_gpio_irq_handler); irq_set_handler_data(res->start, atmel_pioctrl); - dev_dbg(dev, "bank %i: hwirq=%u\n", i, res->start); + dev_dbg(dev, "bank %i: irq=%pr\n", i, res); } atmel_pioctrl->irq_domain = irq_domain_add_linear(dev->of_node, diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 6b1a47f8c0969208406d1dcc662f21389a0a2027..98a459b1c095a68fe526ff04309e9138bfe7aa34 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = { PADS_FUNCTION_SELECT2, 12, 0x3), MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, PADS_FUNCTION_SELECT2, 14, 0x3), - MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, + MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, PADS_FUNCTION_SELECT2, 16, 0x3), - MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, + MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, PADS_FUNCTION_SELECT2, 18, 0x3), - MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, + MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, PADS_FUNCTION_SELECT2, 20, 0x3), - MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG, + MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG, PADS_FUNCTION_SELECT2, 22, 0x3), - MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG, + MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG, PADS_FUNCTION_SELECT2, 24, 0x3), - MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5, + MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5, PADS_FUNCTION_SELECT2, 26, 0x3), PIN_GROUP(TCK, "tck"), PIN_GROUP(TRSTN, "trstn"), diff --git 
a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c index 55083d278bb1eb36e38772fc36206f41a1460138..51fbf85301be61277c1410a309d5515655a20288 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c @@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ + SUNXI_FUNCTION(0x2, "uart1"), /* RTS */ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ + SUNXI_FUNCTION(0x2, "uart1"), /* CTS */ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), SUNXI_FUNCTION(0x0, "gpio_in"), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c index 8b381d69df8632c806f45bde5a5d1e6645b35d40..584cdedea7a42eb213d23397fc68ee909447b710 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c @@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ + SUNXI_FUNCTION(0x2, "uart1"), /* RTS */ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ + SUNXI_FUNCTION(0x2, "uart1"), /* CTS */ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), SUNXI_FUNCTION(0x0, "gpio_in"), diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c index 
589872cc8adbbd93bfd94a4b32c58f461b12b890..a19c29c79b0adc1756888bcd0751111aaee522be 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c @@ -73,6 +73,12 @@ static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev, case UNIPHIER_PIN_PULL_DOWN: pull_dir = "DOWN"; break; + case UNIPHIER_PIN_PULL_UP_FIXED: + pull_dir = "UP(FIXED)"; + break; + case UNIPHIER_PIN_PULL_DOWN_FIXED: + pull_dir = "DOWN(FIXED)"; + break; case UNIPHIER_PIN_PULL_NONE: pull_dir = "NONE"; break; diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile index d3487125838cdce457c2f7acefdd9052b92cbde1..277a820ee4e11c2477060eb7af79936a2a5a4f15 100644 --- a/drivers/platform/goldfish/Makefile +++ b/drivers/platform/goldfish/Makefile @@ -2,4 +2,5 @@ # Makefile for Goldfish platform specific drivers # obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o -obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o +obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe_all.o +goldfish_pipe_all-objs := goldfish_pipe.o goldfish_pipe_v2.o diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 3215a33cf4fe516e7af76215945ecb3be5ccfcd8..fd1452e283522d0e79943e9fa85db57351a32221 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -15,51 +15,11 @@ * */ -/* This source file contains the implementation of a special device driver - * that intends to provide a *very* fast communication channel between the - * guest system and the QEMU emulator. - * - * Usage from the guest is simply the following (error handling simplified): - * - * int fd = open("/dev/qemu_pipe",O_RDWR); - * .... write() or read() through the pipe. - * - * This driver doesn't deal with the exact protocol used during the session. - * It is intended to be as simple as something like: - * - * // do this _just_ after opening the fd to connect to a specific - * // emulator service. 
- * const char* msg = ""; - * if (write(fd, msg, strlen(msg)+1) < 0) { - * ... could not connect to service - * close(fd); - * } - * - * // after this, simply read() and write() to communicate with the - * // service. Exact protocol details left as an exercise to the reader. - * - * This driver is very fast because it doesn't copy any data through - * intermediate buffers, since the emulator is capable of translating - * guest user addresses into host ones. - * - * Note that we must however ensure that each user page involved in the - * exchange is properly mapped during a transfer. +/* This source file contains the implementation of the legacy version of + * a goldfish pipe device driver. See goldfish_pipe_v2.c for the current + * version. */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "goldfish_pipe.h" /* * IMPORTANT: The following constants must match the ones used and defined @@ -109,29 +69,15 @@ #define PIPE_WAKE_READ (1 << 1) /* pipe can now be read from */ #define PIPE_WAKE_WRITE (1 << 2) /* pipe can now be written to */ -struct access_params { - unsigned long channel; - u32 size; - unsigned long address; - u32 cmd; - u32 result; - /* reserved for future extension */ - u32 flags; -}; +#define MAX_PAGES_TO_GRAB 32 -/* The global driver data. Holds a reference to the i/o page used to - * communicate with the emulator, and a wake queue for blocked tasks - * waiting to be awoken. - */ -struct goldfish_pipe_dev { - spinlock_t lock; - unsigned char __iomem *base; - struct access_params *aps; - int irq; - u32 version; -}; +#define DEBUG 0 -static struct goldfish_pipe_dev pipe_dev[1]; +#if DEBUG +#define DPRINT(...) { printk(KERN_ERR __VA_ARGS__); } +#else +#define DPRINT(...) 
+#endif /* This data type models a given pipe instance */ struct goldfish_pipe { @@ -141,6 +87,15 @@ struct goldfish_pipe { wait_queue_head_t wake_queue; }; +struct access_params { + unsigned long channel; + u32 size; + unsigned long address; + u32 cmd; + u32 result; + /* reserved for future extension */ + u32 flags; +}; /* Bit flags for the 'flags' field */ enum { @@ -231,8 +186,10 @@ static int setup_access_params_addr(struct platform_device *pdev, if (valid_batchbuffer_addr(dev, aps)) { dev->aps = aps; return 0; - } else + } else { + devm_kfree(&pdev->dev, aps); return -1; + } } /* A value that will not be set by qemu emulator */ @@ -269,6 +226,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, struct goldfish_pipe *pipe = filp->private_data; struct goldfish_pipe_dev *dev = pipe->dev; unsigned long address, address_end; + struct page* pages[MAX_PAGES_TO_GRAB] = {}; int count = 0, ret = -EINVAL; /* If the emulator already closed the pipe, no need to go further */ @@ -292,45 +250,62 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, address_end = address + bufflen; while (address < address_end) { - unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE; - unsigned long next = page_end < address_end ? page_end - : address_end; - unsigned long avail = next - address; - int status, wakeBit; - - struct page *page; - - /* Either vaddr or paddr depending on the device version */ - unsigned long xaddr; + unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE; + unsigned long next, avail; + int status, wakeBit, page_i, num_contiguous_pages; + long first_page, last_page, requested_pages; + unsigned long xaddr, xaddr_prev, xaddr_i; /* - * We grab the pages on a page-by-page basis in case user - * space gives us a potentially huge buffer but the read only - * returns a small amount, then there's no need to pin that - * much memory to the process. 
+ * Attempt to grab multiple physically contiguous pages. */ - down_read(¤t->mm->mmap_sem); - ret = get_user_pages(current, current->mm, address, 1, - !is_write, 0, &page, NULL); - up_read(¤t->mm->mmap_sem); - if (ret < 0) + first_page = address & PAGE_MASK; + last_page = (address_end - 1) & PAGE_MASK; + requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1; + if (requested_pages > MAX_PAGES_TO_GRAB) { + requested_pages = MAX_PAGES_TO_GRAB; + } + ret = get_user_pages_fast(first_page, requested_pages, + !is_write, pages); + + DPRINT("%s: requested pages: %d %d %p\n", __FUNCTION__, + ret, requested_pages, first_page); + if (ret == 0) { + DPRINT("%s: error: (requested pages == 0) (wanted %d)\n", + __FUNCTION__, requested_pages); + mutex_unlock(&pipe->lock); return ret; + } + if (ret < 0) { + DPRINT("%s: (requested pages < 0) %d \n", + __FUNCTION__, requested_pages); + mutex_unlock(&pipe->lock); + return ret; + } - if (dev->version) { - /* Device version 1 or newer (qemu-android) expects the - * physical address. */ - xaddr = page_to_phys(page) | (address & ~PAGE_MASK); - } else { - /* Device version 0 (classic emulator) expects the - * virtual address. */ - xaddr = address; + xaddr = page_to_phys(pages[0]) | (address & ~PAGE_MASK); + xaddr_prev = xaddr; + num_contiguous_pages = ret == 0 ? 0 : 1; + for (page_i = 1; page_i < ret; page_i++) { + xaddr_i = page_to_phys(pages[page_i]) | (address & ~PAGE_MASK); + if (xaddr_i == xaddr_prev + PAGE_SIZE) { + page_end += PAGE_SIZE; + xaddr_prev = xaddr_i; + num_contiguous_pages++; + } else { + DPRINT("%s: discontinuous page boundary: %d pages instead\n", + __FUNCTION__, page_i); + break; + } } + next = page_end < address_end ? page_end : address_end; + avail = next - address; /* Now, try to transfer the bytes in the current page */ spin_lock_irqsave(&dev->lock, irq_flags); if (access_with_param(dev, - is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER, - xaddr, avail, pipe, &status)) { + is_write ? 
CMD_WRITE_BUFFER : CMD_READ_BUFFER, + xaddr, avail, pipe, &status)) { gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL, dev->base + PIPE_REG_CHANNEL_HIGH); writel(avail, dev->base + PIPE_REG_SIZE); @@ -343,9 +318,13 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, } spin_unlock_irqrestore(&dev->lock, irq_flags); - if (status > 0 && !is_write) - set_page_dirty(page); - put_page(page); + for (page_i = 0; page_i < ret; page_i++) { + if (status > 0 && !is_write && + page_i < num_contiguous_pages) { + set_page_dirty(pages[page_i]); + } + put_page(pages[page_i]); + } if (status > 0) { /* Correct transfer */ count += status; @@ -367,7 +346,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, */ if (status != PIPE_ERROR_AGAIN) pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n", - status, is_write ? "write" : "read"); + status, is_write ? "write" : "read"); ret = 0; break; } @@ -377,7 +356,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, * non-blocking mode, just return the error code. */ if (status != PIPE_ERROR_AGAIN || - (filp->f_flags & O_NONBLOCK) != 0) { + (filp->f_flags & O_NONBLOCK) != 0) { ret = goldfish_pipe_error_convert(status); break; } @@ -391,7 +370,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, /* Tell the emulator we're going to wait for a wake event */ goldfish_cmd(pipe, - is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ); + is_write ? 
CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ); /* Unlock the pipe, then wait for the wake signal */ mutex_unlock(&pipe->lock); @@ -399,22 +378,16 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, while (test_bit(wakeBit, &pipe->flags)) { if (wait_event_interruptible( pipe->wake_queue, - !test_bit(wakeBit, &pipe->flags))) { - ret = -ERESTARTSYS; - break; - } + !test_bit(wakeBit, &pipe->flags))) + return -ERESTARTSYS; - if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) { - ret = -EIO; - break; - } + if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) + return -EIO; } /* Try to re-acquire the lock */ - if (mutex_lock_interruptible(&pipe->lock)) { - ret = -ERESTARTSYS; - break; - } + if (mutex_lock_interruptible(&pipe->lock)) + return -ERESTARTSYS; } mutex_unlock(&pipe->lock); @@ -543,6 +516,8 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) pipe->dev = dev; mutex_init(&pipe->lock); + DPRINT("%s: call. pipe_dev pipe_dev=0x%lx new_pipe_addr=0x%lx file=0x%lx\n", __FUNCTION__, pipe_dev, pipe, file); + // spin lock init, write head of list, i guess init_waitqueue_head(&pipe->wake_queue); /* @@ -565,6 +540,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp) { struct goldfish_pipe *pipe = filp->private_data; + DPRINT("%s: call. 
pipe=0x%lx file=0x%lx\n", __FUNCTION__, pipe, filp); /* The guest is closing the channel, so tell the emulator right now */ goldfish_cmd(pipe, CMD_CLOSE); kfree(pipe); @@ -581,96 +557,33 @@ static const struct file_operations goldfish_pipe_fops = { .release = goldfish_pipe_release, }; -static struct miscdevice goldfish_pipe_device = { +static struct miscdevice goldfish_pipe_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "goldfish_pipe", .fops = &goldfish_pipe_fops, }; -static int goldfish_pipe_probe(struct platform_device *pdev) +int goldfish_pipe_device_init_v1(struct platform_device *pdev) { - int err; - struct resource *r; struct goldfish_pipe_dev *dev = pipe_dev; - - /* not thread safe, but this should not happen */ - WARN_ON(dev->base != NULL); - - spin_lock_init(&dev->lock); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL || resource_size(r) < PAGE_SIZE) { - dev_err(&pdev->dev, "can't allocate i/o page\n"); - return -EINVAL; - } - dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); - if (dev->base == NULL) { - dev_err(&pdev->dev, "ioremap failed\n"); - return -EINVAL; - } - - r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (r == NULL) { - err = -EINVAL; - goto error; - } - dev->irq = r->start; - - err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt, + int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt, IRQF_SHARED, "goldfish_pipe", dev); if (err) { - dev_err(&pdev->dev, "unable to allocate IRQ\n"); - goto error; + dev_err(&pdev->dev, "unable to allocate IRQ for v1\n"); + return err; } - err = misc_register(&goldfish_pipe_device); + err = misc_register(&goldfish_pipe_dev); if (err) { - dev_err(&pdev->dev, "unable to register device\n"); - goto error; + dev_err(&pdev->dev, "unable to register v1 device\n"); + return err; } - setup_access_params_addr(pdev, dev); - /* Although the pipe device in the classic Android emulator does not - * recognize the 'version' register, it won't treat 
this as an error - * either and will simply return 0, which is fine. */ - dev->version = readl(dev->base + PIPE_REG_VERSION); + setup_access_params_addr(pdev, dev); return 0; - -error: - dev->base = NULL; - return err; } -static int goldfish_pipe_remove(struct platform_device *pdev) +void goldfish_pipe_device_deinit_v1(struct platform_device *pdev) { - struct goldfish_pipe_dev *dev = pipe_dev; - misc_deregister(&goldfish_pipe_device); - dev->base = NULL; - return 0; + misc_deregister(&goldfish_pipe_dev); } - -static const struct acpi_device_id goldfish_pipe_acpi_match[] = { - { "GFSH0003", 0 }, - { }, -}; -MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match); - -static const struct of_device_id goldfish_pipe_of_match[] = { - { .compatible = "generic,android-pipe", }, - {}, -}; -MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match); - -static struct platform_driver goldfish_pipe = { - .probe = goldfish_pipe_probe, - .remove = goldfish_pipe_remove, - .driver = { - .name = "goldfish_pipe", - .of_match_table = goldfish_pipe_of_match, - .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match), - } -}; - -module_platform_driver(goldfish_pipe); -MODULE_AUTHOR("David Turner "); -MODULE_LICENSE("GPL"); diff --git a/drivers/platform/goldfish/goldfish_pipe.h b/drivers/platform/goldfish/goldfish_pipe.h new file mode 100644 index 0000000000000000000000000000000000000000..9b75a51dba24e9bfcc63eadf8efa1e32439de266 --- /dev/null +++ b/drivers/platform/goldfish/goldfish_pipe.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef GOLDFISH_PIPE_H +#define GOLDFISH_PIPE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Initialize the legacy version of the pipe device driver */ +int goldfish_pipe_device_init_v1(struct platform_device *pdev); + +/* Deinitialize the legacy version of the pipe device driver */ +void goldfish_pipe_device_deinit_v1(struct platform_device *pdev); + +/* Forward declarations for the device struct */ +struct goldfish_pipe; +struct goldfish_pipe_device_buffers; + +/* The global driver data. Holds a reference to the i/o page used to + * communicate with the emulator, and a wake queue for blocked tasks + * waiting to be awoken. + */ +struct goldfish_pipe_dev { + /* + * Global device spinlock. Protects the following members: + * - pipes, pipes_capacity + * - [*pipes, *pipes + pipes_capacity) - array data + * - first_signalled_pipe, + * goldfish_pipe::prev_signalled, + * goldfish_pipe::next_signalled, + * goldfish_pipe::signalled_flags - all singnalled-related fields, + * in all allocated pipes + * - open_command_params - PIPE_CMD_OPEN-related buffers + * + * It looks like a lot of different fields, but the trick is that the only + * operation that happens often is the signalled pipes array manipulation. + * That's why it's OK for now to keep the rest of the fields under the same + * lock. If we notice too much contention because of PIPE_CMD_OPEN, + * then we should add a separate lock there. 
+ */ + spinlock_t lock; + + /* + * Array of the pipes of |pipes_capacity| elements, + * indexed by goldfish_pipe::id + */ + struct goldfish_pipe **pipes; + u32 pipes_capacity; + + /* Pointers to the buffers host uses for interaction with this driver */ + struct goldfish_pipe_dev_buffers *buffers; + + /* Head of a doubly linked list of signalled pipes */ + struct goldfish_pipe *first_signalled_pipe; + + /* Some device-specific data */ + int irq; + int version; + unsigned char __iomem *base; + + /* v1-specific access parameters */ + struct access_params *aps; +}; + +extern struct goldfish_pipe_dev pipe_dev[1]; + +#endif /* GOLDFISH_PIPE_H */ diff --git a/drivers/platform/goldfish/goldfish_pipe_v2.c b/drivers/platform/goldfish/goldfish_pipe_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..ad373ed36555233215349eb804dd344a4020290a --- /dev/null +++ b/drivers/platform/goldfish/goldfish_pipe_v2.c @@ -0,0 +1,889 @@ +/* + * Copyright (C) 2012 Intel, Inc. + * Copyright (C) 2013 Intel, Inc. + * Copyright (C) 2014 Linaro Limited + * Copyright (C) 2011-2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* This source file contains the implementation of a special device driver + * that intends to provide a *very* fast communication channel between the + * guest system and the QEMU emulator. + * + * Usage from the guest is simply the following (error handling simplified): + * + * int fd = open("/dev/qemu_pipe",O_RDWR); + * .... write() or read() through the pipe. 
+ * + * This driver doesn't deal with the exact protocol used during the session. + * It is intended to be as simple as something like: + * + * // do this _just_ after opening the fd to connect to a specific + * // emulator service. + * const char* msg = ""; + * if (write(fd, msg, strlen(msg)+1) < 0) { + * ... could not connect to service + * close(fd); + * } + * + * // after this, simply read() and write() to communicate with the + * // service. Exact protocol details left as an exercise to the reader. + * + * This driver is very fast because it doesn't copy any data through + * intermediate buffers, since the emulator is capable of translating + * guest user addresses into host ones. + * + * Note that we must however ensure that each user page involved in the + * exchange is properly mapped during a transfer. + */ + +#include "goldfish_pipe.h" + + +/* + * Update this when something changes in the driver's behavior so the host + * can benefit from knowing it + */ +enum { + PIPE_DRIVER_VERSION = 2, + PIPE_CURRENT_DEVICE_VERSION = 2 +}; + +/* + * IMPORTANT: The following constants must match the ones used and defined + * in external/qemu/hw/goldfish_pipe.c in the Android source tree. 
+ */ + +/* List of bitflags returned in status of CMD_POLL command */ +enum PipePollFlags { + PIPE_POLL_IN = 1 << 0, + PIPE_POLL_OUT = 1 << 1, + PIPE_POLL_HUP = 1 << 2 +}; + +/* Possible status values used to signal errors - see goldfish_pipe_error_convert */ +enum PipeErrors { + PIPE_ERROR_INVAL = -1, + PIPE_ERROR_AGAIN = -2, + PIPE_ERROR_NOMEM = -3, + PIPE_ERROR_IO = -4 +}; + +/* Bit-flags used to signal events from the emulator */ +enum PipeWakeFlags { + PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */ + PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */ + PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */ +}; + +/* Bit flags for the 'flags' field */ +enum PipeFlagsBits { + BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */ + BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */ + BIT_WAKE_ON_READ = 2, /* want to be woken on reads */ +}; + +enum PipeRegs { + PIPE_REG_CMD = 0, + + PIPE_REG_SIGNAL_BUFFER_HIGH = 4, + PIPE_REG_SIGNAL_BUFFER = 8, + PIPE_REG_SIGNAL_BUFFER_COUNT = 12, + + PIPE_REG_OPEN_BUFFER_HIGH = 20, + PIPE_REG_OPEN_BUFFER = 24, + + PIPE_REG_VERSION = 36, + + PIPE_REG_GET_SIGNALLED = 48, +}; + +enum PipeCmdCode { + PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */ + PIPE_CMD_CLOSE, + PIPE_CMD_POLL, + PIPE_CMD_WRITE, + PIPE_CMD_WAKE_ON_WRITE, + PIPE_CMD_READ, + PIPE_CMD_WAKE_ON_READ, + + /* + * TODO(zyy): implement a deferred read/write execution to allow parallel + * processing of pipe operations on the host. 
+ */ + PIPE_CMD_WAKE_ON_DONE_IO, +}; + +enum { + MAX_BUFFERS_PER_COMMAND = 336, + MAX_SIGNALLED_PIPES = 64, + INITIAL_PIPES_CAPACITY = 64 +}; + +struct goldfish_pipe_dev; +struct goldfish_pipe; +struct goldfish_pipe_command; + +/* A per-pipe command structure, shared with the host */ +struct goldfish_pipe_command { + s32 cmd; /* PipeCmdCode, guest -> host */ + s32 id; /* pipe id, guest -> host */ + s32 status; /* command execution status, host -> guest */ + s32 reserved; /* to pad to 64-bit boundary */ + union { + /* Parameters for PIPE_CMD_{READ,WRITE} */ + struct { + u32 buffers_count; /* number of buffers, guest -> host */ + s32 consumed_size; /* number of consumed bytes, host -> guest */ + u64 ptrs[MAX_BUFFERS_PER_COMMAND]; /* buffer pointers, guest -> host */ + u32 sizes[MAX_BUFFERS_PER_COMMAND]; /* buffer sizes, guest -> host */ + } rw_params; + }; +}; + +/* A single signalled pipe information */ +struct signalled_pipe_buffer { + u32 id; + u32 flags; +}; + +/* Parameters for the PIPE_CMD_OPEN command */ +struct open_command_param { + u64 command_buffer_ptr; + u32 rw_params_max_count; +}; + +/* Device-level set of buffers shared with the host */ +struct goldfish_pipe_dev_buffers { + struct open_command_param open_command_params; + struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES]; +}; + +/* This data type models a given pipe instance */ +struct goldfish_pipe { + u32 id; /* pipe ID - index into goldfish_pipe_dev::pipes array */ + unsigned long flags; /* The wake flags pipe is waiting for + * Note: not protected with any lock, uses atomic operations + * and barriers to make it thread-safe. 
+ */ + unsigned long signalled_flags; /* wake flags host have signalled, + * - protected by goldfish_pipe_dev::lock */ + + struct goldfish_pipe_command *command_buffer; /* A pointer to command buffer */ + + /* doubly linked list of signalled pipes, protected by goldfish_pipe_dev::lock */ + struct goldfish_pipe *prev_signalled; + struct goldfish_pipe *next_signalled; + + /* + * A pipe's own lock. Protects the following: + * - *command_buffer - makes sure a command can safely write its parameters + * to the host and read the results back. + */ + struct mutex lock; + + wait_queue_head_t wake_queue; /* A wake queue for sleeping until host signals an event */ + struct goldfish_pipe_dev *dev; /* Pointer to the parent goldfish_pipe_dev instance */ +}; + +struct goldfish_pipe_dev pipe_dev[1] = {}; + +static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) +{ + pipe->command_buffer->cmd = cmd; + pipe->command_buffer->status = PIPE_ERROR_INVAL; /* failure by default */ + writel(pipe->id, pipe->dev->base + PIPE_REG_CMD); + return pipe->command_buffer->status; +} + +static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) +{ + int status; + if (mutex_lock_interruptible(&pipe->lock)) + return PIPE_ERROR_IO; + status = goldfish_cmd_locked(pipe, cmd); + mutex_unlock(&pipe->lock); + return status; +} + +/* + * This function converts an error code returned by the emulator through + * the PIPE_REG_STATUS i/o register into a valid negative errno value. 
+ */ +static int goldfish_pipe_error_convert(int status) +{ + switch (status) { + case PIPE_ERROR_AGAIN: + return -EAGAIN; + case PIPE_ERROR_NOMEM: + return -ENOMEM; + case PIPE_ERROR_IO: + return -EIO; + default: + return -EINVAL; + } +} + +static int pin_user_pages(unsigned long first_page, unsigned long last_page, + unsigned last_page_size, int is_write, + struct page *pages[MAX_BUFFERS_PER_COMMAND], unsigned *iter_last_page_size) +{ + int ret; + int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1; + if (requested_pages > MAX_BUFFERS_PER_COMMAND) { + requested_pages = MAX_BUFFERS_PER_COMMAND; + *iter_last_page_size = PAGE_SIZE; + } else { + *iter_last_page_size = last_page_size; + } + + ret = get_user_pages_fast( + first_page, requested_pages, !is_write, pages); + if (ret <= 0) + return -EFAULT; + if (ret < requested_pages) + *iter_last_page_size = PAGE_SIZE; + return ret; + +} + +static void release_user_pages(struct page **pages, int pages_count, + int is_write, s32 consumed_size) +{ + int i; + for (i = 0; i < pages_count; i++) { + if (!is_write && consumed_size > 0) { + set_page_dirty(pages[i]); + } + put_page(pages[i]); + } +} + +/* Populate the call parameters, merging adjacent pages together */ +static void populate_rw_params( + struct page **pages, int pages_count, + unsigned long address, unsigned long address_end, + unsigned long first_page, unsigned long last_page, + unsigned iter_last_page_size, int is_write, + struct goldfish_pipe_command *command) +{ + /* + * Process the first page separately - it's the only page that + * needs special handling for its start address. + */ + unsigned long xaddr = page_to_phys(pages[0]); + unsigned long xaddr_prev = xaddr; + int buffer_idx = 0; + int i = 1; + int size_on_page = first_page == last_page + ? 
(int)(address_end - address) + : (PAGE_SIZE - (address & ~PAGE_MASK)); + command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK)); + command->rw_params.sizes[0] = size_on_page; + for (; i < pages_count; ++i) { + xaddr = page_to_phys(pages[i]); + size_on_page = (i == pages_count - 1) ? iter_last_page_size : PAGE_SIZE; + if (xaddr == xaddr_prev + PAGE_SIZE) { + command->rw_params.sizes[buffer_idx] += size_on_page; + } else { + ++buffer_idx; + command->rw_params.ptrs[buffer_idx] = (u64)xaddr; + command->rw_params.sizes[buffer_idx] = size_on_page; + } + xaddr_prev = xaddr; + } + command->rw_params.buffers_count = buffer_idx + 1; +} + +static int transfer_max_buffers(struct goldfish_pipe* pipe, + unsigned long address, unsigned long address_end, int is_write, + unsigned long last_page, unsigned int last_page_size, + s32* consumed_size, int* status) +{ + struct page *pages[MAX_BUFFERS_PER_COMMAND]; + unsigned long first_page = address & PAGE_MASK; + unsigned int iter_last_page_size; + int pages_count = pin_user_pages(first_page, last_page, + last_page_size, is_write, + pages, &iter_last_page_size); + if (pages_count < 0) + return pages_count; + + /* Serialize access to the pipe command buffers */ + if (mutex_lock_interruptible(&pipe->lock)) + return -ERESTARTSYS; + + populate_rw_params(pages, pages_count, address, address_end, + first_page, last_page, iter_last_page_size, is_write, + pipe->command_buffer); + + /* Transfer the data */ + *status = goldfish_cmd_locked(pipe, + is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); + + *consumed_size = pipe->command_buffer->rw_params.consumed_size; + + mutex_unlock(&pipe->lock); + + release_user_pages(pages, pages_count, is_write, *consumed_size); + + return 0; +} + +static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write) +{ + u32 wakeBit = is_write ? 
BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; + set_bit(wakeBit, &pipe->flags); + + /* Tell the emulator we're going to wait for a wake event */ + (void)goldfish_cmd(pipe, + is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ); + + while (test_bit(wakeBit, &pipe->flags)) { + if (wait_event_interruptible( + pipe->wake_queue, + !test_bit(wakeBit, &pipe->flags))) + return -ERESTARTSYS; + + if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) + return -EIO; + } + + return 0; +} + +static ssize_t goldfish_pipe_read_write(struct file *filp, + char __user *buffer, size_t bufflen, int is_write) +{ + struct goldfish_pipe *pipe = filp->private_data; + int count = 0, ret = -EINVAL; + unsigned long address, address_end, last_page; + unsigned int last_page_size; + + /* If the emulator already closed the pipe, no need to go further */ + if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))) + return -EIO; + /* Null reads or writes succeeds */ + if (unlikely(bufflen == 0)) + return 0; + /* Check the buffer range for access */ + if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ, + buffer, bufflen))) + return -EFAULT; + + address = (unsigned long)buffer; + address_end = address + bufflen; + last_page = (address_end - 1) & PAGE_MASK; + last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1; + + while (address < address_end) { + s32 consumed_size; + int status; + ret = transfer_max_buffers(pipe, address, address_end, is_write, + last_page, last_page_size, &consumed_size, &status); + if (ret < 0) + break; + + if (consumed_size > 0) { + /* No matter what's the status, we've transfered something */ + count += consumed_size; + address += consumed_size; + } + if (status > 0) + continue; + if (status == 0) { + /* EOF */ + ret = 0; + break; + } + if (count > 0) { + /* + * An error occured, but we already transfered + * something on one of the previous iterations. + * Just return what we already copied and log this + * err. 
+ */ + if (status != PIPE_ERROR_AGAIN) + pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n", + status, is_write ? "write" : "read"); + break; + } + + /* + * If the error is not PIPE_ERROR_AGAIN, or if we are in + * non-blocking mode, just return the error code. + */ + if (status != PIPE_ERROR_AGAIN || (filp->f_flags & O_NONBLOCK) != 0) { + ret = goldfish_pipe_error_convert(status); + break; + } + + status = wait_for_host_signal(pipe, is_write); + if (status < 0) + return status; + } + + if (count > 0) + return count; + return ret; +} + +static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, + size_t bufflen, loff_t *ppos) +{ + return goldfish_pipe_read_write(filp, buffer, bufflen, /* is_write */ 0); +} + +static ssize_t goldfish_pipe_write(struct file *filp, + const char __user *buffer, size_t bufflen, + loff_t *ppos) +{ + return goldfish_pipe_read_write(filp, + /* cast away the const */(char __user *)buffer, bufflen, + /* is_write */ 1); +} + +static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait) +{ + struct goldfish_pipe *pipe = filp->private_data; + unsigned int mask = 0; + int status; + + poll_wait(filp, &pipe->wake_queue, wait); + + status = goldfish_cmd(pipe, PIPE_CMD_POLL); + if (status < 0) { + return -ERESTARTSYS; + } + + if (status & PIPE_POLL_IN) + mask |= POLLIN | POLLRDNORM; + if (status & PIPE_POLL_OUT) + mask |= POLLOUT | POLLWRNORM; + if (status & PIPE_POLL_HUP) + mask |= POLLHUP; + if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) + mask |= POLLERR; + + return mask; +} + +static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev, + u32 id, u32 flags) +{ + struct goldfish_pipe *pipe; + + BUG_ON(id >= dev->pipes_capacity); + + pipe = dev->pipes[id]; + if (!pipe) + return; + pipe->signalled_flags |= flags; + + if (pipe->prev_signalled || pipe->next_signalled + || dev->first_signalled_pipe == pipe) + return; /* already in the list */ + pipe->next_signalled = dev->first_signalled_pipe; + if 
(dev->first_signalled_pipe) { + dev->first_signalled_pipe->prev_signalled = pipe; + } + dev->first_signalled_pipe = pipe; +} + +static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev, + struct goldfish_pipe *pipe) { + if (pipe->prev_signalled) + pipe->prev_signalled->next_signalled = pipe->next_signalled; + if (pipe->next_signalled) + pipe->next_signalled->prev_signalled = pipe->prev_signalled; + if (pipe == dev->first_signalled_pipe) + dev->first_signalled_pipe = pipe->next_signalled; + pipe->prev_signalled = NULL; + pipe->next_signalled = NULL; +} + +static struct goldfish_pipe *signalled_pipes_pop_front(struct goldfish_pipe_dev *dev, + int *wakes) +{ + struct goldfish_pipe *pipe; + unsigned long flags; + spin_lock_irqsave(&dev->lock, flags); + + pipe = dev->first_signalled_pipe; + if (pipe) { + *wakes = pipe->signalled_flags; + pipe->signalled_flags = 0; + /* + * This is an optimized version of signalled_pipes_remove_locked() - + * we want to make it as fast as possible to wake the sleeping pipe + * operations faster + */ + dev->first_signalled_pipe = pipe->next_signalled; + if (dev->first_signalled_pipe) + dev->first_signalled_pipe->prev_signalled = NULL; + pipe->next_signalled = NULL; + } + + spin_unlock_irqrestore(&dev->lock, flags); + return pipe; +} + +static void goldfish_interrupt_task(unsigned long unused) +{ + struct goldfish_pipe_dev *dev = pipe_dev; + /* Iterate over the signalled pipes and wake them one by one */ + struct goldfish_pipe *pipe; + int wakes; + while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) { + if (wakes & PIPE_WAKE_CLOSED) { + pipe->flags = 1 << BIT_CLOSED_ON_HOST; + } else { + if (wakes & PIPE_WAKE_READ) + clear_bit(BIT_WAKE_ON_READ, &pipe->flags); + if (wakes & PIPE_WAKE_WRITE) + clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags); + } + /* + * wake_up_interruptible() implies a write barrier, so don't explicitly + * add another one here. 
+ */ + wake_up_interruptible(&pipe->wake_queue); + } +} +DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0); + +/* + * The general idea of the interrupt handling: + * + * 1. device raises an interrupt if there's at least one signalled pipe + * 2. IRQ handler reads the signalled pipes and their count from the device + * 3. device writes them into a shared buffer and returns the count + * it only resets the IRQ if it has returned all signalled pipes, + * otherwise it leaves it raised, so IRQ handler will be called + * again for the next chunk + * 4. IRQ handler adds all returned pipes to the device's signalled pipes list + * 5. IRQ handler launches a tasklet to process the signalled pipes from the + * list in a separate context + */ +static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id) +{ + u32 count; + u32 i; + unsigned long flags; + struct goldfish_pipe_dev *dev = dev_id; + if (dev != pipe_dev) + return IRQ_NONE; + + /* Request the signalled pipes from the device */ + spin_lock_irqsave(&dev->lock, flags); + + count = readl(dev->base + PIPE_REG_GET_SIGNALLED); + if (count == 0) { + spin_unlock_irqrestore(&dev->lock, flags); + return IRQ_NONE; + } + if (count > MAX_SIGNALLED_PIPES) + count = MAX_SIGNALLED_PIPES; + + for (i = 0; i < count; ++i) + signalled_pipes_add_locked(dev, + dev->buffers->signalled_pipe_buffers[i].id, + dev->buffers->signalled_pipe_buffers[i].flags); + + spin_unlock_irqrestore(&dev->lock, flags); + + tasklet_schedule(&goldfish_interrupt_tasklet); + return IRQ_HANDLED; +} + +static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev) +{ + int id; + for (id = 0; id < dev->pipes_capacity; ++id) + if (!dev->pipes[id]) + return id; + + { + /* Reallocate the array */ + u32 new_capacity = 2 * dev->pipes_capacity; + struct goldfish_pipe **pipes = + kcalloc(new_capacity, sizeof(*pipes), + GFP_ATOMIC); + if (!pipes) + return -ENOMEM; + memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity); + 
kfree(dev->pipes); + dev->pipes = pipes; + id = dev->pipes_capacity; + dev->pipes_capacity = new_capacity; + } + return id; +} + +/** + * goldfish_pipe_open - open a channel to the AVD + * @inode: inode of device + * @file: file struct of opener + * + * Create a new pipe link between the emulator and the use application. + * Each new request produces a new pipe. + * + * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit + * right now so this is fine. A move to 64bit will need this addressing + */ +static int goldfish_pipe_open(struct inode *inode, struct file *file) +{ + struct goldfish_pipe_dev *dev = pipe_dev; + unsigned long flags; + int id; + int status; + + /* Allocate new pipe kernel object */ + struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL); + if (pipe == NULL) + return -ENOMEM; + + pipe->dev = dev; + mutex_init(&pipe->lock); + init_waitqueue_head(&pipe->wake_queue); + + /* + * Command buffer needs to be allocated on its own page to make sure it is + * physically contiguous in host's address space. + */ + pipe->command_buffer = + (struct goldfish_pipe_command*)__get_free_page(GFP_KERNEL); + if (!pipe->command_buffer) { + status = -ENOMEM; + goto err_pipe; + } + + spin_lock_irqsave(&dev->lock, flags); + + id = get_free_pipe_id_locked(dev); + if (id < 0) { + status = id; + goto err_id_locked; + } + + dev->pipes[id] = pipe; + pipe->id = id; + pipe->command_buffer->id = id; + + /* Now tell the emulator we're opening a new pipe. 
*/ + dev->buffers->open_command_params.rw_params_max_count = + MAX_BUFFERS_PER_COMMAND; + dev->buffers->open_command_params.command_buffer_ptr = + (u64)(unsigned long)__pa(pipe->command_buffer); + status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN); + spin_unlock_irqrestore(&dev->lock, flags); + if (status < 0) + goto err_cmd; + /* All is done, save the pipe into the file's private data field */ + file->private_data = pipe; + return 0; + +err_cmd: + spin_lock_irqsave(&dev->lock, flags); + dev->pipes[id] = NULL; +err_id_locked: + spin_unlock_irqrestore(&dev->lock, flags); + free_page((unsigned long)pipe->command_buffer); +err_pipe: + kfree(pipe); + return status; +} + +static int goldfish_pipe_release(struct inode *inode, struct file *filp) +{ + unsigned long flags; + struct goldfish_pipe *pipe = filp->private_data; + struct goldfish_pipe_dev *dev = pipe->dev; + + /* The guest is closing the channel, so tell the emulator right now */ + (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE); + + spin_lock_irqsave(&dev->lock, flags); + dev->pipes[pipe->id] = NULL; + signalled_pipes_remove_locked(dev, pipe); + spin_unlock_irqrestore(&dev->lock, flags); + + filp->private_data = NULL; + free_page((unsigned long)pipe->command_buffer); + kfree(pipe); + return 0; +} + +static const struct file_operations goldfish_pipe_fops = { + .owner = THIS_MODULE, + .read = goldfish_pipe_read, + .write = goldfish_pipe_write, + .poll = goldfish_pipe_poll, + .open = goldfish_pipe_open, + .release = goldfish_pipe_release, +}; + +static struct miscdevice goldfish_pipe_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "goldfish_pipe", + .fops = &goldfish_pipe_fops, +}; + +static int goldfish_pipe_device_init_v2(struct platform_device *pdev) +{ + char *page; + struct goldfish_pipe_dev *dev = pipe_dev; + int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt, + IRQF_SHARED, "goldfish_pipe", dev); + if (err) { + dev_err(&pdev->dev, "unable to allocate IRQ for v2\n"); + return err; + } + + err = 
misc_register(&goldfish_pipe_dev); + if (err) { + dev_err(&pdev->dev, "unable to register v2 device\n"); + return err; + } + + dev->first_signalled_pipe = NULL; + dev->pipes_capacity = INITIAL_PIPES_CAPACITY; + dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), GFP_KERNEL); + if (!dev->pipes) + return -ENOMEM; + + /* + * We're going to pass two buffers, open_command_params and + * signalled_pipe_buffers, to the host. This means each of those buffers + * needs to be contained in a single physical page. The easiest choice is + * to just allocate a page and place the buffers in it. + */ + BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE); + page = (char*)__get_free_page(GFP_KERNEL); + if (!page) { + kfree(dev->pipes); + return -ENOMEM; + } + dev->buffers = (struct goldfish_pipe_dev_buffers*)page; + + /* Send the buffer addresses to the host */ + { + u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers); + writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); + writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_SIGNAL_BUFFER); + writel((u32)MAX_SIGNALLED_PIPES, dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); + + paddr = __pa(&dev->buffers->open_command_params); + writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_OPEN_BUFFER_HIGH); + writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_OPEN_BUFFER); + } + return 0; +} + +static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev) { + struct goldfish_pipe_dev *dev = pipe_dev; + misc_deregister(&goldfish_pipe_dev); + kfree(dev->pipes); + free_page((unsigned long)dev->buffers); +} + +static int goldfish_pipe_probe(struct platform_device *pdev) +{ + int err; + struct resource *r; + struct goldfish_pipe_dev *dev = pipe_dev; + + BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE); + + /* not thread safe, but this should not happen */ + WARN_ON(dev->base != NULL); + + spin_lock_init(&dev->lock); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r 
== NULL || resource_size(r) < PAGE_SIZE) { + dev_err(&pdev->dev, "can't allocate i/o page\n"); + return -EINVAL; + } + dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); + if (dev->base == NULL) { + dev_err(&pdev->dev, "ioremap failed\n"); + return -EINVAL; + } + + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (r == NULL) { + err = -EINVAL; + goto error; + } + dev->irq = r->start; + + /* + * Exchange the versions with the host device + * + * Note: v1 driver used to not report its version, so we write it before + * reading device version back: this allows the host implementation to + * detect the old driver (if there was no version write before read). + */ + writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION); + dev->version = readl(dev->base + PIPE_REG_VERSION); + if (dev->version < PIPE_CURRENT_DEVICE_VERSION) { + /* initialize the old device version */ + err = goldfish_pipe_device_init_v1(pdev); + } else { + /* Host device supports the new interface */ + err = goldfish_pipe_device_init_v2(pdev); + } + if (!err) + return 0; + +error: + dev->base = NULL; + return err; +} + +static int goldfish_pipe_remove(struct platform_device *pdev) +{ + struct goldfish_pipe_dev *dev = pipe_dev; + if (dev->version < PIPE_CURRENT_DEVICE_VERSION) + goldfish_pipe_device_deinit_v1(pdev); + else + goldfish_pipe_device_deinit_v2(pdev); + dev->base = NULL; + return 0; +} + +static const struct acpi_device_id goldfish_pipe_acpi_match[] = { + { "GFSH0003", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match); + +static const struct of_device_id goldfish_pipe_of_match[] = { + { .compatible = "google,android-pipe", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match); + +static struct platform_driver goldfish_pipe_driver = { + .probe = goldfish_pipe_probe, + .remove = goldfish_pipe_remove, + .driver = { + .name = "goldfish_pipe", + .of_match_table = goldfish_pipe_of_match, + .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match), + } 
+}; + +module_platform_driver(goldfish_pipe_driver); +MODULE_AUTHOR("David Turner "); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index fde087f072261fd7e86554247d70920535c28ed2..d4b0dd9910e1e901d5a7a1a162afd2790091d490 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -1581,7 +1581,7 @@ bail: static int ipa_init_smem_region(int memory_region_size, int memory_region_offset) { - struct ipa_hw_imm_cmd_dma_shared_mem cmd; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; struct ipa_desc desc; struct ipa_mem_buffer mem; int rc; @@ -1590,7 +1590,6 @@ static int ipa_init_smem_region(int memory_region_size, return 0; memset(&desc, 0, sizeof(desc)); - memset(&cmd, 0, sizeof(cmd)); memset(&mem, 0, sizeof(mem)); mem.size = memory_region_size; @@ -1602,13 +1601,22 @@ static int ipa_init_smem_region(int memory_region_size, } memset(mem.base, 0, mem.size); - cmd.size = mem.size; - cmd.system_addr = mem.phys_base; - cmd.local_addr = ipa_ctx->smem_restricted_bytes + + + cmd = kzalloc(sizeof(*cmd), + GFP_KERNEL); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->size = mem.size; + cmd->system_addr = mem.phys_base; + cmd->local_addr = ipa_ctx->smem_restricted_bytes + memory_region_offset; desc.opcode = IPA_DMA_SHARED_MEM; - desc.pyld = &cmd; - desc.len = sizeof(cmd); + desc.pyld = cmd; + desc.len = sizeof(*cmd); desc.type = IPA_IMM_CMD_DESC; rc = ipa_send_cmd(1, &desc); @@ -1617,6 +1625,8 @@ static int ipa_init_smem_region(int memory_region_size, rc = -EFAULT; } + kfree(cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); @@ -2153,7 +2163,7 @@ int _ipa_init_sram_v2(void) { u32 *ipa_sram_mmio; unsigned long phys_addr; - struct ipa_hw_imm_cmd_dma_shared_mem cmd = {0}; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; struct ipa_desc desc = {0}; 
struct ipa_mem_buffer mem; int rc = 0; @@ -2193,11 +2203,18 @@ int _ipa_init_sram_v2(void) } memset(mem.base, 0, mem.size); - cmd.size = mem.size; - cmd.system_addr = mem.phys_base; - cmd.local_addr = IPA_STATUS_CLEAR_OFST; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->size = mem.size; + cmd->system_addr = mem.phys_base; + cmd->local_addr = IPA_STATUS_CLEAR_OFST; desc.opcode = IPA_DMA_SHARED_MEM; - desc.pyld = &cmd; + desc.pyld = (void *)cmd; desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc.type = IPA_IMM_CMD_DESC; @@ -2206,6 +2223,8 @@ int _ipa_init_sram_v2(void) rc = -EFAULT; } + kfree(cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return rc; } @@ -2294,7 +2313,7 @@ int _ipa_init_hdr_v2(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_hdr_init_local cmd; + struct ipa_hdr_init_local *cmd = NULL; int rc = 0; mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); @@ -2306,13 +2325,20 @@ int _ipa_init_hdr_v2(void) } memset(mem.base, 0, mem.size); - cmd.hdr_table_src_addr = mem.phys_base; - cmd.size_hdr_table = mem.size; - cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) { + IPAERR("Failed to alloc header init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->hdr_table_src_addr = mem.phys_base; + cmd->size_hdr_table = mem.size; + cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(modem_hdr_ofst); desc.opcode = IPA_HDR_INIT_LOCAL; - desc.pyld = &cmd; + desc.pyld = (void *)cmd; desc.len = sizeof(struct ipa_hdr_init_local); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); @@ -2322,6 +2348,8 @@ int _ipa_init_hdr_v2(void) rc = -EFAULT; } + kfree(cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, 
mem.base, mem.phys_base); return rc; } @@ -2330,8 +2358,8 @@ int _ipa_init_hdr_v2_5(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_hdr_init_local cmd = { 0 }; - struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd = { 0 }; + struct ipa_hdr_init_local *cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, @@ -2342,25 +2370,34 @@ int _ipa_init_hdr_v2_5(void) } memset(mem.base, 0, mem.size); - cmd.hdr_table_src_addr = mem.phys_base; - cmd.size_hdr_table = mem.size; - cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) { + IPAERR("Failed to alloc header init command object\n"); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + return -ENOMEM; + } + + cmd->hdr_table_src_addr = mem.phys_base; + cmd->size_hdr_table = mem.size; + cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(modem_hdr_ofst); desc.opcode = IPA_HDR_INIT_LOCAL; - desc.pyld = &cmd; + desc.pyld = (void *)cmd; desc.len = sizeof(struct ipa_hdr_init_local); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); if (ipa_send_cmd(1, &desc)) { IPAERR("fail to send immediate command\n"); - dma_free_coherent(ipa_ctx->pdev, - mem.size, mem.base, + kfree(cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return -EFAULT; } + kfree(cmd); dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) + @@ -2374,18 +2411,29 @@ int _ipa_init_hdr_v2_5(void) memset(mem.base, 0, mem.size); memset(&desc, 0, sizeof(desc)); - dma_cmd.system_addr = mem.phys_base; - dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes + + dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL); + if (dma_cmd == NULL) { + IPAERR("Failed to alloc immediate command 
object\n"); + dma_free_coherent(ipa_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -ENOMEM; + } + + dma_cmd->system_addr = mem.phys_base; + dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(modem_hdr_proc_ctx_ofst); - dma_cmd.size = mem.size; + dma_cmd->size = mem.size; desc.opcode = IPA_DMA_SHARED_MEM; - desc.pyld = &dma_cmd; + desc.pyld = (void *)dma_cmd; desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); if (ipa_send_cmd(1, &desc)) { IPAERR("fail to send immediate command\n"); + kfree(dma_cmd); dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, @@ -2395,8 +2443,9 @@ int _ipa_init_hdr_v2_5(void) ipa_write_reg(ipa_ctx->mmio, IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST, - dma_cmd.local_addr); + dma_cmd->local_addr); + kfree(dma_cmd); dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return 0; @@ -2412,7 +2461,7 @@ int _ipa_init_rt4_v2(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_ip_v4_routing_init v4_cmd; + struct ipa_ip_v4_routing_init *v4_cmd = NULL; u32 *entry; int i; int rc = 0; @@ -2437,15 +2486,22 @@ int _ipa_init_rt4_v2(void) entry++; } + v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL); + if (v4_cmd == NULL) { + IPAERR("Failed to alloc v4 routing init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + desc.opcode = IPA_IP_V4_ROUTING_INIT; - v4_cmd.ipv4_rules_addr = mem.phys_base; - v4_cmd.size_ipv4_rules = mem.size; - v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes + + v4_cmd->ipv4_rules_addr = mem.phys_base; + v4_cmd->size_ipv4_rules = mem.size; + v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v4_rt_ofst); IPADBG("putting Routing IPv4 rules to phys 0x%x", - v4_cmd.ipv4_addr); + v4_cmd->ipv4_addr); - desc.pyld = &v4_cmd; + desc.pyld = (void *)v4_cmd; desc.len = sizeof(struct ipa_ip_v4_routing_init); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, 
mem.phys_base, mem.size); @@ -2455,6 +2511,8 @@ int _ipa_init_rt4_v2(void) rc = -EFAULT; } + kfree(v4_cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return rc; } @@ -2463,7 +2521,7 @@ int _ipa_init_rt6_v2(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_ip_v6_routing_init v6_cmd; + struct ipa_ip_v6_routing_init *v6_cmd = NULL; u32 *entry; int i; int rc = 0; @@ -2488,15 +2546,22 @@ int _ipa_init_rt6_v2(void) entry++; } + v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL); + if (v6_cmd == NULL) { + IPAERR("Failed to alloc v6 routing init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + desc.opcode = IPA_IP_V6_ROUTING_INIT; - v6_cmd.ipv6_rules_addr = mem.phys_base; - v6_cmd.size_ipv6_rules = mem.size; - v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes + + v6_cmd->ipv6_rules_addr = mem.phys_base; + v6_cmd->size_ipv6_rules = mem.size; + v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v6_rt_ofst); IPADBG("putting Routing IPv6 rules to phys 0x%x", - v6_cmd.ipv6_addr); + v6_cmd->ipv6_addr); - desc.pyld = &v6_cmd; + desc.pyld = (void *)v6_cmd; desc.len = sizeof(struct ipa_ip_v6_routing_init); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); @@ -2506,6 +2571,8 @@ int _ipa_init_rt6_v2(void) rc = -EFAULT; } + kfree(v6_cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return rc; } @@ -2514,7 +2581,7 @@ int _ipa_init_flt4_v2(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_ip_v4_filter_init v4_cmd; + struct ipa_ip_v4_filter_init *v4_cmd = NULL; u32 *entry; int i; int rc = 0; @@ -2537,15 +2604,22 @@ int _ipa_init_flt4_v2(void) entry++; } + v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL); + if (v4_cmd == NULL) { + IPAERR("Failed to alloc v4 fliter init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + desc.opcode = IPA_IP_V4_FILTER_INIT; - v4_cmd.ipv4_rules_addr = 
mem.phys_base; - v4_cmd.size_ipv4_rules = mem.size; - v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes + + v4_cmd->ipv4_rules_addr = mem.phys_base; + v4_cmd->size_ipv4_rules = mem.size; + v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v4_flt_ofst); IPADBG("putting Filtering IPv4 rules to phys 0x%x", - v4_cmd.ipv4_addr); + v4_cmd->ipv4_addr); - desc.pyld = &v4_cmd; + desc.pyld = (void *)v4_cmd; desc.len = sizeof(struct ipa_ip_v4_filter_init); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); @@ -2555,6 +2629,8 @@ int _ipa_init_flt4_v2(void) rc = -EFAULT; } + kfree(v4_cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return rc; } @@ -2563,7 +2639,7 @@ int _ipa_init_flt6_v2(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_ip_v6_filter_init v6_cmd; + struct ipa_ip_v6_filter_init *v6_cmd = NULL; u32 *entry; int i; int rc = 0; @@ -2586,15 +2662,22 @@ int _ipa_init_flt6_v2(void) entry++; } + v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL); + if (v6_cmd == NULL) { + IPAERR("Failed to alloc v6 fliter init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + desc.opcode = IPA_IP_V6_FILTER_INIT; - v6_cmd.ipv6_rules_addr = mem.phys_base; - v6_cmd.size_ipv6_rules = mem.size; - v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes + + v6_cmd->ipv6_rules_addr = mem.phys_base; + v6_cmd->size_ipv6_rules = mem.size; + v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v6_flt_ofst); IPADBG("putting Filtering IPv6 rules to phys 0x%x", - v6_cmd.ipv6_addr); + v6_cmd->ipv6_addr); - desc.pyld = &v6_cmd; + desc.pyld = (void *)v6_cmd; desc.len = sizeof(struct ipa_ip_v6_filter_init); desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); @@ -2604,6 +2687,8 @@ int _ipa_init_flt6_v2(void) rc = -EFAULT; } + kfree(v6_cmd); +fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); return rc; } diff --git 
a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c index 51f34f0d51013cdc25119ff2c74676fb864d74a4..e23de3f266132316af7effddcb990ffb99a69569 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -266,8 +266,8 @@ int __ipa_commit_hdr_v2(void) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; - struct ipa_hdr_init_system cmd; - struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd; + struct ipa_hdr_init_system *cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; int rc = -EFAULT; if (ipa_generate_hdr_hw_tbl(&mem)) { @@ -279,14 +279,21 @@ int __ipa_commit_hdr_v2(void) if (mem.size > IPA_MEM_PART(apps_hdr_size)) { IPAERR("tbl too big, needed %d avail %d\n", mem.size, IPA_MEM_PART(apps_hdr_size)); - goto end; + goto fail_send_cmd; } else { - dma_cmd.system_addr = mem.phys_base; - dma_cmd.size = mem.size; - dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes + + dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC); + if (dma_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + dma_cmd->system_addr = mem.phys_base; + dma_cmd->size = mem.size; + dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(apps_hdr_ofst); desc.opcode = IPA_DMA_SHARED_MEM; - desc.pyld = &dma_cmd; + desc.pyld = (void *)dma_cmd; desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); } @@ -294,11 +301,17 @@ int __ipa_commit_hdr_v2(void) if (mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { IPAERR("tbl too big, needed %d avail %d\n", mem.size, IPA_MEM_PART(apps_hdr_size_ddr)); - goto end; + goto fail_send_cmd; } else { - cmd.hdr_table_addr = mem.phys_base; + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (cmd == NULL) { + IPAERR("fail to alloc hdr init cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + cmd->hdr_table_addr = mem.phys_base; desc.opcode = IPA_HDR_INIT_SYSTEM; - desc.pyld = &cmd; + desc.pyld = (void *)cmd; desc.len = 
sizeof(struct ipa_hdr_init_system); } } @@ -311,6 +324,10 @@ int __ipa_commit_hdr_v2(void) else rc = 0; + kfree(dma_cmd); + kfree(cmd); + +fail_send_cmd: if (ipa_ctx->hdr_tbl_lcl) { dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); @@ -322,6 +339,9 @@ int __ipa_commit_hdr_v2(void) ipa_ctx->hdr_mem.base, ipa_ctx->hdr_mem.phys_base); ipa_ctx->hdr_mem = mem; + } else { + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); } } @@ -335,10 +355,10 @@ int __ipa_commit_hdr_v2_5(void) struct ipa_mem_buffer hdr_mem; struct ipa_mem_buffer ctx_mem; struct ipa_mem_buffer aligned_ctx_mem; - struct ipa_hdr_init_system hdr_init_cmd = {0}; - struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_hdr = {0}; - struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_ctx = {0}; - struct ipa_register_write reg_write_cmd = {0}; + struct ipa_hdr_init_system *hdr_init_cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL; + struct ipa_register_write *reg_write_cmd = NULL; int rc = -EFAULT; u32 proc_ctx_size; u32 proc_ctx_ofst; @@ -361,15 +381,21 @@ int __ipa_commit_hdr_v2_5(void) if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) { IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, IPA_MEM_PART(apps_hdr_size)); - goto end; + goto fail_send_cmd1; } else { - dma_cmd_hdr.system_addr = hdr_mem.phys_base; - dma_cmd_hdr.size = hdr_mem.size; - dma_cmd_hdr.local_addr = + dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC); + if (dma_cmd_hdr == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + dma_cmd_hdr->system_addr = hdr_mem.phys_base; + dma_cmd_hdr->size = hdr_mem.size; + dma_cmd_hdr->local_addr = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(apps_hdr_ofst); desc[0].opcode = IPA_DMA_SHARED_MEM; - desc[0].pyld = &dma_cmd_hdr; + desc[0].pyld = (void *)dma_cmd_hdr; desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); } @@ -377,11 +403,18 @@ int 
__ipa_commit_hdr_v2_5(void) if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, IPA_MEM_PART(apps_hdr_size_ddr)); - goto end; + goto fail_send_cmd1; } else { - hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base; + hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd), + GFP_ATOMIC); + if (hdr_init_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + hdr_init_cmd->hdr_table_addr = hdr_mem.phys_base; desc[0].opcode = IPA_HDR_INIT_SYSTEM; - desc[0].pyld = &hdr_init_cmd; + desc[0].pyld = (void *)hdr_init_cmd; desc[0].len = sizeof(struct ipa_hdr_init_system); } } @@ -395,15 +428,22 @@ int __ipa_commit_hdr_v2_5(void) IPAERR("tbl too big needed %d avail %d\n", aligned_ctx_mem.size, proc_ctx_size); - goto end; + goto fail_send_cmd1; } else { - dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base; - dma_cmd_ctx.size = aligned_ctx_mem.size; - dma_cmd_ctx.local_addr = + dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx), + GFP_ATOMIC); + if (dma_cmd_ctx == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + dma_cmd_ctx->system_addr = aligned_ctx_mem.phys_base; + dma_cmd_ctx->size = aligned_ctx_mem.size; + dma_cmd_ctx->local_addr = ipa_ctx->smem_restricted_bytes + proc_ctx_ofst; desc[1].opcode = IPA_DMA_SHARED_MEM; - desc[1].pyld = &dma_cmd_ctx; + desc[1].pyld = (void *)dma_cmd_ctx; desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); } @@ -413,15 +453,23 @@ int __ipa_commit_hdr_v2_5(void) IPAERR("tbl too big, needed %d avail %d\n", aligned_ctx_mem.size, proc_ctx_size_ddr); - goto end; + goto fail_send_cmd1; } else { - reg_write_cmd.offset = IPA_SYS_PKT_PROC_CNTXT_BASE_OFST; - reg_write_cmd.value = aligned_ctx_mem.phys_base; - reg_write_cmd.value_mask = + reg_write_cmd = kzalloc(sizeof(*reg_write_cmd), + GFP_ATOMIC); + if (reg_write_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + 
reg_write_cmd->offset = + IPA_SYS_PKT_PROC_CNTXT_BASE_OFST; + reg_write_cmd->value = aligned_ctx_mem.phys_base; + reg_write_cmd->value_mask = ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1); - desc[1].pyld = ®_write_cmd; + desc[1].pyld = (void *)reg_write_cmd; desc[1].opcode = IPA_REGISTER_WRITE; - desc[1].len = sizeof(reg_write_cmd); + desc[1].len = sizeof(*reg_write_cmd); } } desc[1].type = IPA_IMM_CMD_DESC; @@ -432,22 +480,16 @@ int __ipa_commit_hdr_v2_5(void) else rc = 0; - if (ipa_ctx->hdr_tbl_lcl) { - dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, hdr_mem.base, - hdr_mem.phys_base); - } else { - if (!rc) { - if (ipa_ctx->hdr_mem.phys_base) - dma_free_coherent(ipa_ctx->pdev, - ipa_ctx->hdr_mem.size, - ipa_ctx->hdr_mem.base, - ipa_ctx->hdr_mem.phys_base); - ipa_ctx->hdr_mem = hdr_mem; - } - } +fail_send_cmd1: + + kfree(dma_cmd_hdr); + kfree(hdr_init_cmd); + kfree(dma_cmd_ctx); + kfree(reg_write_cmd); if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { - dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, ctx_mem.base, + dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, + ctx_mem.base, ctx_mem.phys_base); } else { if (!rc) { @@ -457,9 +499,31 @@ int __ipa_commit_hdr_v2_5(void) ipa_ctx->hdr_proc_ctx_mem.base, ipa_ctx->hdr_proc_ctx_mem.phys_base); ipa_ctx->hdr_proc_ctx_mem = ctx_mem; + } else { + dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, + ctx_mem.base, + ctx_mem.phys_base); } } + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, + hdr_mem.base, + hdr_mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_mem.phys_base) + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + ipa_ctx->hdr_mem = hdr_mem; + } else { + dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, + hdr_mem.base, + hdr_mem.phys_base); + } + } end: return rc; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index 
069c5cbcf4f3633edc6ca6dbbed558cb7f8f4f3f..f5dea76764f879535f8ba84078487230fe10e01f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -696,8 +696,8 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip) struct ipa_desc desc[2]; struct ipa_mem_buffer body; struct ipa_mem_buffer head; - struct ipa_hw_imm_cmd_dma_shared_mem cmd1 = {0}; - struct ipa_hw_imm_cmd_dma_shared_mem cmd2 = {0}; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL; u16 avail; u32 num_modem_rt_index; int rc = 0; @@ -747,34 +747,50 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip) goto fail_send_cmd; } - cmd1.size = head.size; - cmd1.system_addr = head.phys_base; - cmd1.local_addr = local_addr1; + cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + GFP_KERNEL); + if (cmd1 == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd1->size = head.size; + cmd1->system_addr = head.phys_base; + cmd1->local_addr = local_addr1; desc[0].opcode = IPA_DMA_SHARED_MEM; - desc[0].pyld = &cmd1; + desc[0].pyld = (void *)cmd1; desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc[0].type = IPA_IMM_CMD_DESC; if (lcl) { - cmd2.size = body.size; - cmd2.system_addr = body.phys_base; - cmd2.local_addr = local_addr2; + cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + GFP_KERNEL); + if (cmd2 == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + + cmd2->size = body.size; + cmd2->system_addr = body.phys_base; + cmd2->local_addr = local_addr2; desc[1].opcode = IPA_DMA_SHARED_MEM; - desc[1].pyld = &cmd2; + desc[1].pyld = (void *)cmd2; desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc[1].type = IPA_IMM_CMD_DESC; if (ipa_send_cmd(2, desc)) { IPAERR("fail to send immediate command\n"); rc = -EFAULT; - goto fail_send_cmd; + goto fail_send_cmd2; } } else { if 
(ipa_send_cmd(1, desc)) { IPAERR("fail to send immediate command\n"); rc = -EFAULT; - goto fail_send_cmd; + goto fail_send_cmd1; } } @@ -785,6 +801,11 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip) IPA_DUMP_BUFF(body.base, body.phys_base, body.size); } __ipa_reap_sys_rt_tbls(ip); + +fail_send_cmd2: + kfree(cmd2); +fail_send_cmd1: + kfree(cmd1); fail_send_cmd: dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base); if (body.size) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h index 154045fe4f56379696a2e2c18374cd90291d91dc..8f85d4ede6b1e381a0cff403bef16d89d38b87ed 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h @@ -322,11 +322,13 @@ struct ipahal_imm_cmd_dma_task_32b_addr { /* * struct ipahal_imm_cmd_pyld - Immediate cmd payload information * @len: length of the buffer + * @reserved: padding bytes to make data buffer aligned * @data: buffer contains the immediate command payload. 
Buffer goes * back to back with this structure */ struct ipahal_imm_cmd_pyld { u16 len; + u16 reserved; u8 data[0]; }; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c index 72cc4764e7aa86b308aa229bff4ee59f5b84e69f..053a581b1d262e122311f0ed77510429c2dc16f6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -2728,7 +2728,7 @@ static int ipa_fltrt_alloc_init_tbl_hdr( params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, params->nhash_hdr.size, ¶ms->nhash_hdr.phys_base, GFP_KERNEL); - if (!params->nhash_hdr.size) { + if (!params->nhash_hdr.base) { IPAHAL_ERR("fail to alloc DMA buff of size %d\n", params->nhash_hdr.size); goto nhash_alloc_fail; diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c index 88a213b1b241de9c3592ba1aad729d006829fa47..96c4671f994ff036b71bb9b9405e5bfbf3ea32c3 100644 --- a/drivers/platform/msm/mhi_uci/mhi_uci.c +++ b/drivers/platform/msm/mhi_uci/mhi_uci.c @@ -27,18 +27,17 @@ #include #include #include +#include #define MHI_DEV_NODE_NAME_LEN 13 -#define MHI_MAX_NR_OF_CLIENTS 23 -#define MHI_SOFTWARE_CLIENT_START 0 #define MHI_SOFTWARE_CLIENT_LIMIT 23 -#define MHI_MAX_SOFTWARE_CHANNELS 46 #define TRE_TYPICAL_SIZE 0x1000 #define TRE_MAX_SIZE 0xFFFF -#define MHI_UCI_IPC_LOG_PAGES (100) +#define MHI_UCI_IPC_LOG_PAGES (25) #define MAX_NR_TRBS_PER_CHAN 10 #define DEVICE_NAME "mhi" +#define MHI_UCI_DRIVER_NAME "mhi_uci" #define CTRL_MAGIC 0x4C525443 enum UCI_DBG_LEVEL { @@ -58,8 +57,6 @@ enum UCI_DBG_LEVEL mhi_uci_ipc_log_lvl = UCI_DBG_VERBOSE; enum UCI_DBG_LEVEL mhi_uci_ipc_log_lvl = UCI_DBG_ERROR; #endif -void *mhi_uci_ipc_log; - struct __packed rs232_ctrl_msg { u32 preamble; u32 msg_id; @@ -103,11 +100,12 @@ struct chan_attr { }; struct uci_client { - u32 client_index; u32 out_chan; u32 in_chan; u32 out_chan_state; u32 in_chan_state; + struct 
chan_attr in_attr; + struct chan_attr out_attr; struct mhi_client_handle *out_handle; struct mhi_client_handle *in_handle; size_t pending_data; @@ -126,22 +124,29 @@ struct uci_client { struct mhi_uci_ctxt_t *uci_ctxt; struct mutex in_chan_lock; struct mutex out_chan_lock; + void *uci_ipc_log; }; struct mhi_uci_ctxt_t { - struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS]; + struct list_head node; + struct platform_dev *pdev; struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT]; struct mhi_client_info_t client_info; - dev_t start_ctrl_nr; - struct mhi_client_handle *ctrl_handle; + dev_t dev_t; struct mutex ctrl_mutex; - struct cdev cdev[MHI_MAX_SOFTWARE_CHANNELS]; - struct class *mhi_uci_class; - u32 ctrl_chan_id; + struct cdev cdev[MHI_SOFTWARE_CLIENT_LIMIT]; + struct uci_client *ctrl_client; atomic_t mhi_disabled; atomic_t mhi_enable_notif_wq_active; }; +struct mhi_uci_drv_ctxt { + struct list_head head; + struct mutex list_lock; + struct class *mhi_uci_class; + void *mhi_uci_ipc_log; +}; + #define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2) #define CTRL_MSG_ID @@ -219,13 +224,13 @@ struct mhi_uci_ctxt_t { (_PKT)->size = new_val; \ }; -#define uci_log(_msg_lvl, _msg, ...) do { \ +#define uci_log(uci_ipc_log, _msg_lvl, _msg, ...) 
do { \ if (_msg_lvl >= mhi_uci_msg_lvl) { \ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ } \ - if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \ - ipc_log_string(mhi_uci_ipc_log, \ - "[%s] " _msg, __func__, ##__VA_ARGS__); \ + if (uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \ + ipc_log_string(uci_ipc_log, \ + "[%s] " _msg, __func__, ##__VA_ARGS__); \ } \ } while (0) @@ -246,20 +251,20 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait); static long mhi_uci_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -static struct mhi_uci_ctxt_t uci_ctxt; +static struct mhi_uci_drv_ctxt mhi_uci_drv_ctxt; -static int mhi_init_inbound(struct uci_client *client_handle, - enum MHI_CLIENT_CHANNEL chan) +static int mhi_init_inbound(struct uci_client *client_handle) { int ret_val = 0; u32 i = 0; - struct chan_attr *chan_attributes = - &uci_ctxt.chan_attrib[chan]; + struct chan_attr *chan_attributes = &client_handle->in_attr; void *data_loc = NULL; size_t buf_size = chan_attributes->max_packet_size; if (client_handle == NULL) { - uci_log(UCI_DBG_ERROR, "Bad Input data, quitting\n"); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_ERROR, + "Bad Input data, quitting\n"); return -EINVAL; } chan_attributes->nr_trbs = @@ -270,12 +275,17 @@ static int mhi_init_inbound(struct uci_client *client_handle, if (!client_handle->in_buf_list) return -ENOMEM; - uci_log(UCI_DBG_INFO, "Channel %d supports %d desc\n", - i, chan_attributes->nr_trbs); + uci_log(client_handle->uci_ipc_log, + UCI_DBG_INFO, "Channel %d supports %d desc\n", + chan_attributes->chan_id, + chan_attributes->nr_trbs); for (i = 0; i < chan_attributes->nr_trbs; ++i) { data_loc = kmalloc(buf_size, GFP_KERNEL); - uci_log(UCI_DBG_INFO, "Allocated buffer %p size %zd\n", - data_loc, buf_size); + uci_log(client_handle->uci_ipc_log, + UCI_DBG_INFO, + "Allocated buffer %p size %zd\n", + data_loc, + buf_size); if (data_loc == NULL) return -ENOMEM; 
client_handle->in_buf_list[i] = data_loc; @@ -283,9 +293,11 @@ static int mhi_init_inbound(struct uci_client *client_handle, data_loc, buf_size, MHI_EOT); if (0 != ret_val) { kfree(data_loc); - uci_log(UCI_DBG_ERROR, + uci_log(client_handle->uci_ipc_log, + UCI_DBG_ERROR, "Failed insertion for chan %d, ret %d\n", - chan, ret_val); + chan_attributes->chan_id, + ret_val); break; } } @@ -325,7 +337,8 @@ static int mhi_uci_send_packet(struct mhi_client_handle **client_handle, if (is_uspace_buf) { data_loc = kmalloc(data_to_insert_now, GFP_KERNEL); if (NULL == data_loc) { - uci_log(UCI_DBG_ERROR, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Failed to allocate memory 0x%zx\n", data_to_insert_now); return -ENOMEM; @@ -343,13 +356,15 @@ static int mhi_uci_send_packet(struct mhi_client_handle **client_handle, flags = MHI_EOT; if (data_left_to_insert - data_to_insert_now > 0) flags |= MHI_CHAIN | MHI_EOB; - uci_log(UCI_DBG_VERBOSE, - "At trb i = %d/%d, chain = %d, eob = %d, addr 0x%p chan %d\n", - i, nr_avail_trbs, - flags & MHI_CHAIN, - flags & MHI_EOB, - data_loc, - uci_handle->out_chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "At trb i = %d/%d, chain = %d, eob = %d, addr 0x%p chan %d\n", + i, + nr_avail_trbs, + flags & MHI_CHAIN, + flags & MHI_EOB, + data_loc, + uci_handle->out_chan); ret_val = mhi_queue_xfer(*client_handle, data_loc, data_to_insert_now, flags); @@ -375,25 +390,34 @@ static int mhi_uci_send_status_cmd(struct uci_client *client) { struct rs232_ctrl_msg *rs232_pkt = NULL; struct uci_client *uci_ctrl_handle; - u32 ctrl_chan_id = uci_ctxt.ctrl_chan_id; - + struct mhi_uci_ctxt_t *uci_ctxt = client->uci_ctxt; int ret_val = 0; size_t pkt_size = sizeof(struct rs232_ctrl_msg); u32 amount_sent; - uci_ctrl_handle = &uci_ctxt.client_handles[ctrl_chan_id/2]; + if (!uci_ctxt->ctrl_client) { + uci_log(client->uci_ipc_log, + UCI_DBG_INFO, + "Control channel is not defined\n"); + return -EIO; + } + + uci_ctrl_handle = uci_ctxt->ctrl_client; 
mutex_lock(&uci_ctrl_handle->out_chan_lock); if (!atomic_read(&uci_ctrl_handle->mhi_disabled) && !uci_ctrl_handle->out_chan_state) { - uci_log(UCI_DBG_INFO, - "Opening outbound control channel %d\n", - uci_ctrl_handle->out_chan); + uci_log(uci_ctrl_handle->uci_ipc_log, + UCI_DBG_INFO, + "Opening outbound control channel %d for chan:%d\n", + uci_ctrl_handle->out_chan, + client->out_chan); ret_val = mhi_open_channel(uci_ctrl_handle->out_handle); if (0 != ret_val) { - uci_log(UCI_DBG_CRITICAL, + uci_log(uci_ctrl_handle->uci_ipc_log, + UCI_DBG_CRITICAL, "Could not open chan %d, for sideband ctrl\n", - client->out_chan); + uci_ctrl_handle->out_chan); ret_val = -EIO; goto error_open; } @@ -405,7 +429,8 @@ static int mhi_uci_send_status_cmd(struct uci_client *client) ret_val = -ENOMEM; goto error_open; } - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_ctrl_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Received request to send msg for chan %d\n", client->out_chan); rs232_pkt->preamble = CTRL_MAGIC; @@ -423,9 +448,11 @@ static int mhi_uci_send_status_cmd(struct uci_client *client) pkt_size, 0); if (pkt_size != amount_sent) { - uci_log(UCI_DBG_INFO, - "Failed to send signal on chan %d, ret : %d\n", - client->out_chan, ret_val); + uci_log(uci_ctrl_handle->uci_ipc_log, + UCI_DBG_INFO, + "Failed to send signal for chan %d, ret : %d\n", + client->out_chan, + ret_val); goto error; } error_open: @@ -451,15 +478,20 @@ static int mhi_uci_tiocm_set(struct uci_client *client_ctxt, u32 set, u32 clear) client_ctxt->local_tiocm |= status_set; client_ctxt->local_tiocm &= ~status_clear; - uci_log(UCI_DBG_VERBOSE, + uci_log(client_ctxt->uci_ipc_log, + UCI_DBG_VERBOSE, "Old TIOCM0x%x for chan %d, Current TIOCM 0x%x\n", - old_status, client_ctxt->out_chan, client_ctxt->local_tiocm); + old_status, + client_ctxt->out_chan, + client_ctxt->local_tiocm); mutex_unlock(&client_ctxt->uci_ctxt->ctrl_mutex); if (client_ctxt->local_tiocm != old_status) { - uci_log(UCI_DBG_VERBOSE, + 
uci_log(client_ctxt->uci_ipc_log, + UCI_DBG_VERBOSE, "Setting TIOCM to 0x%x for chan %d\n", - client_ctxt->local_tiocm, client_ctxt->out_chan); + client_ctxt->local_tiocm, + client_ctxt->out_chan); return mhi_uci_send_status_cmd(client_ctxt); } return 0; @@ -474,26 +506,35 @@ static long mhi_uci_ctl_ioctl(struct file *file, unsigned int cmd, uci_handle = file->private_data; if (uci_handle == NULL) { - uci_log(UCI_DBG_VERBOSE, - "Invalid handle for client.\n"); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_VERBOSE, + "Invalid handle for client\n"); return -ENODEV; } - uci_log(UCI_DBG_VERBOSE, - "Attempting to dtr cmd 0x%x arg 0x%lx for chan %d\n", - cmd, arg, uci_handle->out_chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Attempting to dtr cmd 0x%x arg 0x%lx for chan %d\n", + cmd, + arg, + uci_handle->out_chan); switch (cmd) { case TIOCMGET: - uci_log(UCI_DBG_VERBOSE, - "Returning 0x%x mask\n", uci_handle->local_tiocm); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Returning 0x%x mask\n", + uci_handle->local_tiocm); ret_val = uci_handle->local_tiocm; break; case TIOCMSET: if (0 != copy_from_user(&set_val, (void *)arg, sizeof(set_val))) return -ENOMEM; - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Attempting to set cmd 0x%x arg 0x%x for chan %d\n", - cmd, set_val, uci_handle->out_chan); + cmd, + set_val, + uci_handle->out_chan); ret_val = mhi_uci_tiocm_set(uci_handle, set_val, ~set_val); break; default: @@ -506,28 +547,36 @@ static long mhi_uci_ctl_ioctl(struct file *file, unsigned int cmd, static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; - struct uci_client *uci_handle = NULL; - uci_handle = file->private_data; + struct uci_client *uci_handle = file->private_data; + struct mhi_uci_ctxt_t *uci_ctxt; if (uci_handle == NULL) return -ENODEV; + + uci_ctxt = uci_handle->uci_ctxt; poll_wait(file, &uci_handle->read_wq, wait); poll_wait(file, 
&uci_handle->write_wq, wait); if (atomic_read(&uci_handle->avail_pkts) > 0) { - uci_log(UCI_DBG_VERBOSE, - "Client can read chan %d\n", uci_handle->in_chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Client can read chan %d\n", + uci_handle->in_chan); mask |= POLLIN | POLLRDNORM; } - if (!atomic_read(&uci_ctxt.mhi_disabled) && + if (!atomic_read(&uci_ctxt->mhi_disabled) && (mhi_get_free_desc(uci_handle->out_handle) > 0)) { - uci_log(UCI_DBG_VERBOSE, - "Client can write chan %d\n", uci_handle->out_chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Client can write chan %d\n", + uci_handle->out_chan); mask |= POLLOUT | POLLWRNORM; } - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Client attempted to poll chan %d, returning mask 0x%x\n", - uci_handle->in_chan, mask); + uci_handle->in_chan, + mask); return mask; } @@ -535,10 +584,12 @@ static int open_client_mhi_channels(struct uci_client *uci_client) { int ret_val = 0; int r = 0; - uci_log(UCI_DBG_INFO, - "Starting channels %d %d.\n", - uci_client->out_chan, - uci_client->in_chan); + + uci_log(uci_client->uci_ipc_log, + UCI_DBG_INFO, + "Starting channels %d %d\n", + uci_client->out_chan, + uci_client->in_chan); mutex_lock(&uci_client->out_chan_lock); mutex_lock(&uci_client->in_chan_lock); ret_val = mhi_open_channel(uci_client->out_handle); @@ -553,21 +604,26 @@ static int open_client_mhi_channels(struct uci_client *uci_client) ret_val = mhi_open_channel(uci_client->in_handle); if (ret_val != 0) { - uci_log(UCI_DBG_ERROR, - "Failed to open chan %d, ret 0x%x\n", - uci_client->out_chan, ret_val); + uci_log(uci_client->uci_ipc_log, + UCI_DBG_ERROR, + "Failed to open chan %d, ret 0x%x\n", + uci_client->out_chan, + ret_val); goto handle_in_err; } - uci_log(UCI_DBG_INFO, - "Initializing inbound chan %d.\n", - uci_client->in_chan); + uci_log(uci_client->uci_ipc_log, + UCI_DBG_INFO, + "Initializing inbound chan %d\n", + uci_client->in_chan); 
uci_client->in_chan_state = 1; - ret_val = mhi_init_inbound(uci_client, uci_client->in_chan); + ret_val = mhi_init_inbound(uci_client); if (0 != ret_val) { - uci_log(UCI_DBG_ERROR, - "Failed to init inbound 0x%x, ret 0x%x\n", - uci_client->in_chan, ret_val); + uci_log(uci_client->uci_ipc_log, + UCI_DBG_ERROR, + "Failed to init inbound 0x%x, ret 0x%x\n", + uci_client->in_chan, + ret_val); } mutex_unlock(&uci_client->in_chan_lock); @@ -583,37 +639,54 @@ handle_not_rdy_err: return r; } -static int mhi_uci_client_open(struct inode *mhi_inode, +static int mhi_uci_client_open(struct inode *inode, struct file *file_handle) { struct uci_client *uci_handle = NULL; + struct mhi_uci_ctxt_t *uci_ctxt = NULL, *itr; int r = 0; - int client_id = iminor(mhi_inode); - uci_handle = &uci_ctxt.client_handles[client_id]; - - if (NULL == uci_handle) + int client_id = iminor(inode); + int major = imajor(inode); + + /* Find the uci ctxt from major */ + mutex_lock(&mhi_uci_drv_ctxt.list_lock); + list_for_each_entry(itr, &mhi_uci_drv_ctxt.head, node) { + if (MAJOR(itr->dev_t) == major) { + uci_ctxt = itr; + break; + } + } + mutex_unlock(&mhi_uci_drv_ctxt.list_lock); + if (!uci_ctxt || client_id >= MHI_SOFTWARE_CLIENT_LIMIT) return -EINVAL; + uci_handle = &uci_ctxt->client_handles[client_id]; if (atomic_read(&uci_handle->mhi_disabled)) { - uci_log(UCI_DBG_INFO, - "MHI is still recovering from SSR, client %d\n", + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_INFO, + "MHI channel still disable for, client %d\n", client_id); msleep(500); return -EAGAIN; } - uci_log(UCI_DBG_INFO, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_INFO, "Client opened device node 0x%x, ref count 0x%x\n", - iminor(mhi_inode), atomic_read(&uci_handle->ref_count)); + client_id, + atomic_read(&uci_handle->ref_count)); if (1 == atomic_add_return(1, &uci_handle->ref_count)) { - uci_handle->uci_ctxt = &uci_ctxt; - uci_log(UCI_DBG_INFO, - "Opening channels client %d\n", client_id); + uci_log(uci_handle->uci_ipc_log, + 
UCI_DBG_INFO, + "Opening channels client %d\n", + client_id); r = open_client_mhi_channels(uci_handle); if (r) - uci_log(UCI_DBG_INFO, - "Failed to open channels ret %d\n", r); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_INFO, + "Failed to open channels ret %d\n", + r); } file_handle->private_data = uci_handle; return r; @@ -623,7 +696,6 @@ static int mhi_uci_client_release(struct inode *mhi_inode, struct file *file_handle) { struct uci_client *uci_handle = file_handle->private_data; - struct mhi_uci_ctxt_t *uci_ctxt; u32 nr_in_bufs = 0; int in_chan = 0; int i = 0; @@ -632,21 +704,22 @@ static int mhi_uci_client_release(struct inode *mhi_inode, if (uci_handle == NULL) return -EINVAL; - uci_ctxt = uci_handle->uci_ctxt; - in_chan = iminor(mhi_inode) + 1; - nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs; - buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size; + in_chan = uci_handle->in_attr.chan_id; + nr_in_bufs = uci_handle->in_attr.nr_trbs; + buf_size = uci_handle->in_attr.max_packet_size; if (atomic_sub_return(1, &uci_handle->ref_count) == 0) { - uci_log(UCI_DBG_ERROR, - "Last client left, closing channel 0x%x\n", - iminor(mhi_inode)); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, + "Last client left, closing channel 0x%x\n", + in_chan); if (atomic_read(&uci_handle->out_pkt_pend_ack)) - uci_log(UCI_DBG_CRITICAL, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_CRITICAL, "Still waiting on %d acks!, chan %d\n", atomic_read(&uci_handle->out_pkt_pend_ack), - iminor(mhi_inode)); + uci_handle->out_attr.chan_id); mhi_close_channel(uci_handle->out_handle); mhi_close_channel(uci_handle->in_handle); @@ -659,7 +732,8 @@ static int mhi_uci_client_release(struct inode *mhi_inode, kfree(uci_handle->in_buf_list); atomic_set(&uci_handle->avail_pkts, 0); } else { - uci_log(UCI_DBG_ERROR, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, "Client close chan %d, ref count 0x%x\n", iminor(mhi_inode), atomic_read(&uci_handle->ref_count)); @@ -689,17 +763,23 @@ static 
ssize_t mhi_uci_client_read(struct file *file, char __user *buf, mutex = &uci_handle->in_chan_lock; chan = uci_handle->in_chan; mutex_lock(mutex); - buf_size = uci_ctxt.chan_attrib[chan].max_packet_size; + buf_size = uci_handle->in_attr.max_packet_size; + result.buf_addr = NULL; - uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n", chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Client attempted read on chan %d\n", + chan); do { if (!uci_handle->pkt_loc && - !atomic_read(&uci_ctxt.mhi_disabled)) { + !atomic_read(&uci_handle->uci_ctxt->mhi_disabled)) { ret_val = mhi_poll_inbound(client_handle, &result); if (ret_val) { - uci_log(UCI_DBG_ERROR, - "Failed to poll inbound ret %d avail pkt %d\n", - ret_val, atomic_read(&uci_handle->avail_pkts)); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, + "Failed to poll inbound ret %d avail pkt %d\n", + ret_val, + atomic_read(&uci_handle->avail_pkts)); } if (result.buf_addr) uci_handle->pkt_loc = result.buf_addr; @@ -707,23 +787,27 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, uci_handle->pkt_loc = 0; uci_handle->pkt_size = result.bytes_xferd; *bytes_pending = uci_handle->pkt_size; - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Got pkt size 0x%zx at addr 0x%lx, chan %d\n", uci_handle->pkt_size, - (uintptr_t)result.buf_addr, chan); + (uintptr_t)result.buf_addr, + chan); } if ((*bytes_pending == 0 || uci_handle->pkt_loc == 0) && (atomic_read(&uci_handle->avail_pkts) <= 0)) { /* If nothing was copied yet, wait for data */ - uci_log(UCI_DBG_VERBOSE, - "No data avail_pkts %d, chan %d\n", - atomic_read(&uci_handle->avail_pkts), - chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "No data avail_pkts %d, chan %d\n", + atomic_read(&uci_handle->avail_pkts), + chan); ret_val = wait_event_interruptible( uci_handle->read_wq, (atomic_read(&uci_handle->avail_pkts) > 0)); if (ret_val == -ERESTARTSYS) { - uci_log(UCI_DBG_ERROR, + 
uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, "Exit signal caught\n"); goto error; } @@ -732,7 +816,8 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, 0 == uci_handle->pkt_size && 0 == uci_handle->pkt_loc && uci_handle->mhi_status == -ENETRESET) { - uci_log(UCI_DBG_ERROR, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, "Detected pending reset, reporting chan %d\n", chan); atomic_dec(&uci_handle->avail_pkts); @@ -743,23 +828,24 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, } else if (atomic_read(&uci_handle->avail_pkts) && uci_handle->pkt_size != 0 && uci_handle->pkt_loc != 0) { - uci_log(UCI_DBG_VERBOSE, - "Got packet: avail pkts %d phy_adr 0x%p, chan %d\n", - atomic_read(&uci_handle->avail_pkts), - result.buf_addr, - chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Got packet: avail pkts %d phy_adr 0x%p, chan %d\n", + atomic_read(&uci_handle->avail_pkts), + result.buf_addr, + chan); break; /* * MHI did not return a valid packet, but we have one * which we did not finish returning to user */ } else { - uci_log(UCI_DBG_CRITICAL, - "chan %d err: avail pkts %d phy_adr 0x%p mhi_stat%d\n", - chan, - atomic_read(&uci_handle->avail_pkts), - result.buf_addr, - uci_handle->mhi_status); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_CRITICAL, + "chan %d err: avail pkts %d mhi_stat%d\n", + chan, + atomic_read(&uci_handle->avail_pkts), + uci_handle->mhi_status); return -EIO; } } while (!uci_handle->pkt_loc); @@ -775,9 +861,12 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, bytes_copied = *bytes_pending; *bytes_pending = 0; - uci_log(UCI_DBG_VERBOSE, - "Copied 0x%zx of 0x%x, chan %d\n", - bytes_copied, (u32)*bytes_pending, chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Copied 0x%zx of 0x%x, chan %d\n", + bytes_copied, + (u32)*bytes_pending, + chan); } else { addr_offset = uci_handle->pkt_size - *bytes_pending; if (copy_to_user(buf, @@ -789,39 +878,50 @@ static 
ssize_t mhi_uci_client_read(struct file *file, char __user *buf, } bytes_copied = uspace_buf_size; *bytes_pending -= uspace_buf_size; - uci_log(UCI_DBG_VERBOSE, - "Copied 0x%zx of 0x%x,chan %d\n", - bytes_copied, - (u32)*bytes_pending, - chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Copied 0x%zx of 0x%x,chan %d\n", + bytes_copied, + (u32)*bytes_pending, + chan); } /* We finished with this buffer, map it back */ if (*bytes_pending == 0) { - uci_log(UCI_DBG_VERBOSE, "Pkt loc %p ,chan %d\n", - uci_handle->pkt_loc, chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Pkt loc %p ,chan %d\n", + uci_handle->pkt_loc, + chan); memset(uci_handle->pkt_loc, 0, buf_size); atomic_dec(&uci_handle->avail_pkts); - uci_log(UCI_DBG_VERBOSE, - "Decremented avail pkts avail 0x%x\n", - atomic_read(&uci_handle->avail_pkts)); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Decremented avail pkts avail 0x%x\n", + atomic_read(&uci_handle->avail_pkts)); ret_val = mhi_queue_xfer(client_handle, uci_handle->pkt_loc, buf_size, MHI_EOT); if (0 != ret_val) { - uci_log(UCI_DBG_ERROR, - "Failed to recycle element\n"); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, + "Failed to recycle element\n"); ret_val = -EIO; goto error; } uci_handle->pkt_loc = 0; } - uci_log(UCI_DBG_VERBOSE, - "Returning 0x%zx bytes, 0x%x bytes left\n", - bytes_copied, (u32)*bytes_pending); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "Returning 0x%zx bytes, 0x%x bytes left\n", + bytes_copied, + (u32)*bytes_pending); mutex_unlock(mutex); return bytes_copied; error: mutex_unlock(mutex); - uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_ERROR, + "Returning %d\n", + ret_val); return ret_val; } @@ -846,9 +946,10 @@ static ssize_t mhi_uci_client_write(struct file *file, ret_val = mhi_uci_send_packet(&uci_handle->out_handle, (void *)buf, count, 1); if (!ret_val) { - uci_log(UCI_DBG_VERBOSE, - "No descriptors available, did we poll, 
chan %d?\n", - chan); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, + "No descriptors available, did we poll, chan %d?\n", + chan); mutex_unlock(&uci_handle->out_chan_lock); ret_val = wait_event_interruptible( @@ -857,8 +958,9 @@ static ssize_t mhi_uci_client_write(struct file *file, mutex_lock(&uci_handle->out_chan_lock); if (-ERESTARTSYS == ret_val) { goto sys_interrupt; - uci_log(UCI_DBG_WARNING, - "Waitqueue cancelled by system\n"); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_WARNING, + "Waitqueue cancelled by system\n"); } } } @@ -867,60 +969,79 @@ sys_interrupt: return ret_val; } -static int uci_init_client_attributes(struct mhi_uci_ctxt_t - *uci_ctxt) +static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt, + struct device_node *of_node) { - u32 i = 0; - struct chan_attr *chan_attrib = NULL; - size_t max_packet_size = TRE_TYPICAL_SIZE; - - for (i = 0; i < MHI_MAX_SOFTWARE_CHANNELS; ++i) { - max_packet_size = TRE_TYPICAL_SIZE; - chan_attrib = &uci_ctxt->chan_attrib[i]; - switch (i) { - case MHI_CLIENT_SAHARA_IN: - max_packet_size = TRE_MAX_SIZE; - case MHI_CLIENT_SAHARA_OUT: - case MHI_CLIENT_LOOPBACK_OUT: - case MHI_CLIENT_LOOPBACK_IN: - case MHI_CLIENT_EFS_OUT: - case MHI_CLIENT_EFS_IN: - case MHI_CLIENT_QMI_OUT: - case MHI_CLIENT_QMI_IN: - case MHI_CLIENT_IP_CTRL_0_OUT: - case MHI_CLIENT_IP_CTRL_0_IN: - case MHI_CLIENT_IP_CTRL_1_OUT: - case MHI_CLIENT_IP_CTRL_1_IN: - case MHI_CLIENT_BL_OUT: - case MHI_CLIENT_BL_IN: - case MHI_CLIENT_DUN_OUT: - case MHI_CLIENT_DUN_IN: - case MHI_CLIENT_TF_OUT: - case MHI_CLIENT_TF_IN: + int num_rows, ret_val = 0; + int i, dir; + u32 ctrl_chan = -1; + u32 *chan_info, *itr; + const char *prop_name = "qcom,mhi-uci-channels"; + + ret_val = of_property_read_u32(of_node, "qcom,mhi-uci-ctrlchan", + &ctrl_chan); + if (ret_val) { + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_INFO, + "Could not find property 'qcom,mhi-uci-ctrlchan'\n"); + } + + num_rows = of_property_count_elems_of_size(of_node, 
prop_name, + sizeof(u32) * 4); + /* At least one pair of channels should exist */ + if (num_rows < 1) { + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_CRITICAL, + "Missing or invalid property 'qcom,mhi-uci-channels'\n"); + return -ENODEV; + } + + if (num_rows > MHI_SOFTWARE_CLIENT_LIMIT) + num_rows = MHI_SOFTWARE_CLIENT_LIMIT; + + chan_info = kmalloc_array(num_rows, 4 * sizeof(*chan_info), GFP_KERNEL); + if (!chan_info) + return -ENOMEM; + + ret_val = of_property_read_u32_array(of_node, prop_name, chan_info, + num_rows * 4); + if (ret_val) + goto error_dts; + + for (i = 0, itr = chan_info; i < num_rows; i++) { + struct uci_client *client = &uci_ctxt->client_handles[i]; + struct chan_attr *chan_attrib; + + for (dir = 0; dir < 2; dir++) { + chan_attrib = (dir) ? + &client->in_attr : &client->out_attr; chan_attrib->uci_ownership = 1; - break; - default: - chan_attrib->uci_ownership = 0; - break; - } - if (chan_attrib->uci_ownership) { - chan_attrib->chan_id = i; - chan_attrib->max_packet_size = max_packet_size; + chan_attrib->chan_id = *itr++; + chan_attrib->max_packet_size = *itr++; + if (dir == 0) + chan_attrib->dir = MHI_DIR_OUT; + else + chan_attrib->dir = MHI_DIR_IN; + + if (chan_attrib->chan_id == ctrl_chan) + uci_ctxt->ctrl_client = client; } - if (i % 2 == 0) - chan_attrib->dir = MHI_DIR_OUT; - else - chan_attrib->dir = MHI_DIR_IN; } - return 0; + +error_dts: + kfree(chan_info); + return ret_val; } static int process_mhi_disabled_notif_sync(struct uci_client *uci_handle) { - uci_log(UCI_DBG_INFO, "Entered.\n"); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_INFO, + "Entered.\n"); if (uci_handle->mhi_status != -ENETRESET) { - uci_log(UCI_DBG_CRITICAL, - "Setting reset for chan %d.\n", + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_CRITICAL, + "Setting reset for chan %d\n", uci_handle->out_chan); uci_handle->pkt_size = 0; uci_handle->pkt_loc = NULL; @@ -932,39 +1053,53 @@ static int process_mhi_disabled_notif_sync(struct uci_client *uci_handle) 
uci_handle->in_chan_state = 0; wake_up(&uci_handle->read_wq); } else { - uci_log(UCI_DBG_CRITICAL, - "Chan %d state already reset.\n", + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_CRITICAL, + "Chan %d state already reset\n", uci_handle->out_chan); } - uci_log(UCI_DBG_INFO, "Exited.\n"); + uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO, "Exited\n"); return 0; } -static void process_rs232_state(struct mhi_result *result) +static void process_rs232_state(struct uci_client *ctrl_client, + struct mhi_result *result) { struct rs232_ctrl_msg *rs232_pkt; - struct uci_client *client; + struct uci_client *client = NULL; + struct mhi_uci_ctxt_t *uci_ctxt = ctrl_client->uci_ctxt; u32 msg_id; - int ret_val; + int ret_val, i; u32 chan; - mutex_lock(&uci_ctxt.ctrl_mutex); + mutex_lock(&uci_ctxt->ctrl_mutex); if (result->transaction_status != 0) { - uci_log(UCI_DBG_ERROR, + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_ERROR, "Non successful transfer code 0x%x\n", - result->transaction_status); + result->transaction_status); goto error_bad_xfer; } if (result->bytes_xferd != sizeof(struct rs232_ctrl_msg)) { - uci_log(UCI_DBG_ERROR, - "Buffer is of wrong size is: 0x%zx: expected 0x%zx\n", - result->bytes_xferd, sizeof(struct rs232_ctrl_msg)); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_ERROR, + "Buffer is of wrong size is: 0x%zx: expected 0x%zx\n", + result->bytes_xferd, + sizeof(struct rs232_ctrl_msg)); goto error_size; } rs232_pkt = result->buf_addr; MHI_GET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, chan); - client = &uci_ctxt.client_handles[chan / 2]; + for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) + if (chan == uci_ctxt->client_handles[i].out_chan || + chan == uci_ctxt->client_handles[i].in_chan) { + client = &uci_ctxt->client_handles[i]; + break; + } + /* No valid channel found */ + if (!client) + goto error_bad_xfer; MHI_GET_CTRL_MSG_ID(CTRL_MSG_ID, rs232_pkt, msg_id); client->local_tiocm = 0; @@ -982,35 +1117,38 @@ static void process_rs232_state(struct 
mhi_result *result) error_bad_xfer: error_size: memset(rs232_pkt, 0, sizeof(struct rs232_ctrl_msg)); - ret_val = mhi_queue_xfer(client->in_handle, - result->buf_addr, - result->bytes_xferd, - result->flags); + ret_val = mhi_queue_xfer(ctrl_client->in_handle, + result->buf_addr, + result->bytes_xferd, + result->flags); if (0 != ret_val) { - uci_log(UCI_DBG_ERROR, - "Failed to recycle ctrl msg buffer\n"); + uci_log(ctrl_client->uci_ipc_log, + UCI_DBG_ERROR, + "Failed to recycle ctrl msg buffer\n"); } - mutex_unlock(&uci_ctxt.ctrl_mutex); + mutex_unlock(&uci_ctxt->ctrl_mutex); } static void parse_inbound_ack(struct uci_client *uci_handle, struct mhi_result *result) { atomic_inc(&uci_handle->avail_pkts); - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Received cb on chan %d, avail pkts: 0x%x\n", uci_handle->in_chan, atomic_read(&uci_handle->avail_pkts)); wake_up(&uci_handle->read_wq); - if (uci_handle->in_chan == MHI_CLIENT_IP_CTRL_1_IN) - process_rs232_state(result); + if (uci_handle == uci_handle->uci_ctxt->ctrl_client) + process_rs232_state(uci_handle, result); } static void parse_outbound_ack(struct uci_client *uci_handle, struct mhi_result *result) { kfree(result->buf_addr); - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Received ack on chan %d, pending acks: 0x%x\n", uci_handle->out_chan, atomic_read(&uci_handle->out_pkt_pend_ack)); @@ -1021,88 +1159,96 @@ static void parse_outbound_ack(struct uci_client *uci_handle, static void uci_xfer_cb(struct mhi_cb_info *cb_info) { - int chan_nr; struct uci_client *uci_handle = NULL; - u32 client_index; struct mhi_result *result; if (!cb_info || !cb_info->result) { - uci_log(UCI_DBG_CRITICAL, "Bad CB info from MHI.\n"); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_CRITICAL, + "Bad CB info from MHI\n"); return; } - chan_nr = (uintptr_t)cb_info->result->user_data; - client_index = CHAN_TO_CLIENT(chan_nr); - uci_handle = 
&uci_ctxt.client_handles[client_index]; - + uci_handle = cb_info->result->user_data; switch (cb_info->cb_reason) { case MHI_CB_MHI_ENABLED: atomic_set(&uci_handle->mhi_disabled, 0); break; case MHI_CB_MHI_DISABLED: atomic_set(&uci_handle->mhi_disabled, 1); - uci_log(UCI_DBG_INFO, "MHI disabled CB received.\n"); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_INFO, + "MHI disabled CB received\n"); process_mhi_disabled_notif_sync(uci_handle); break; case MHI_CB_XFER: if (!cb_info->result) { - uci_log(UCI_DBG_CRITICAL, - "Failed to obtain mhi result from CB.\n"); + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_CRITICAL, + "Failed to obtain mhi result from CB\n"); return; } result = cb_info->result; - chan_nr = (uintptr_t)result->user_data; - client_index = chan_nr / 2; - uci_handle = - &uci_ctxt.client_handles[client_index]; - if (chan_nr % 2) + if (cb_info->chan % 2) parse_inbound_ack(uci_handle, result); else parse_outbound_ack(uci_handle, result); break; default: - uci_log(UCI_DBG_VERBOSE, + uci_log(uci_handle->uci_ipc_log, + UCI_DBG_VERBOSE, "Cannot handle cb reason 0x%x\n", cb_info->cb_reason); } } -static int mhi_register_client(struct uci_client *mhi_client, int index) +static int mhi_register_client(struct uci_client *mhi_client) { int ret_val = 0; - uci_log(UCI_DBG_INFO, "Setting up workqueues.\n"); + uci_log(mhi_client->uci_ipc_log, + UCI_DBG_INFO, + "Setting up workqueues\n"); init_waitqueue_head(&mhi_client->read_wq); init_waitqueue_head(&mhi_client->write_wq); - mhi_client->out_chan = index * 2; - mhi_client->in_chan = index * 2 + 1; - mhi_client->client_index = index; + mhi_client->out_chan = mhi_client->out_attr.chan_id; + mhi_client->in_chan = mhi_client->in_attr.chan_id; mutex_init(&mhi_client->in_chan_lock); mutex_init(&mhi_client->out_chan_lock); atomic_set(&mhi_client->mhi_disabled, 1); - uci_log(UCI_DBG_INFO, "Registering chan %d.\n", mhi_client->out_chan); + uci_log(mhi_client->uci_ipc_log, + UCI_DBG_INFO, + "Registering chan %d\n", + 
mhi_client->out_chan); ret_val = mhi_register_channel(&mhi_client->out_handle, mhi_client->out_chan, 0, - &uci_ctxt.client_info, - (void *)(uintptr_t)(mhi_client->out_chan)); + &mhi_client->uci_ctxt->client_info, + mhi_client); if (0 != ret_val) - uci_log(UCI_DBG_ERROR, + uci_log(mhi_client->uci_ipc_log, + UCI_DBG_ERROR, "Failed to init outbound chan 0x%x, ret 0x%x\n", - mhi_client->out_chan, ret_val); + mhi_client->out_chan, + ret_val); - uci_log(UCI_DBG_INFO, "Registering chan %d.\n", mhi_client->in_chan); + uci_log(mhi_client->uci_ipc_log, + UCI_DBG_INFO, + "Registering chan %d\n", + mhi_client->in_chan); ret_val = mhi_register_channel(&mhi_client->in_handle, mhi_client->in_chan, 0, - &uci_ctxt.client_info, - (void *)(uintptr_t)(mhi_client->in_chan)); + &mhi_client->uci_ctxt->client_info, + mhi_client); if (0 != ret_val) - uci_log(UCI_DBG_ERROR, + uci_log(mhi_client->uci_ipc_log, + UCI_DBG_ERROR, "Failed to init inbound chan 0x%x, ret 0x%x\n", - mhi_client->in_chan, ret_val); + mhi_client->in_chan, + ret_val); return 0; } @@ -1115,119 +1261,215 @@ static const struct file_operations mhi_uci_client_fops = { .unlocked_ioctl = mhi_uci_ctl_ioctl, }; -static int mhi_uci_init(void) +static int mhi_uci_probe(struct platform_device *pdev) { - u32 i = 0; - int ret_val = 0; - struct uci_client *mhi_client = NULL; - s32 r = 0; - mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES, - "mhi-uci", 0); - if (mhi_uci_ipc_log == NULL) { - uci_log(UCI_DBG_WARNING, - "Failed to create IPC logging context\n"); - } - uci_log(UCI_DBG_INFO, "Setting up work queues.\n"); - uci_ctxt.client_info.mhi_client_cb = uci_xfer_cb; + struct mhi_uci_ctxt_t *uci_ctxt; + int ret_val; + int i; + char node_name[16]; + + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_INFO, + "Entered with pdev:%p\n", + pdev); + + if (pdev->dev.of_node == NULL) + return -ENODEV; + + pdev->id = of_alias_get_id(pdev->dev.of_node, "mhi_uci"); + if (pdev->id < 0) + return -ENODEV; + + uci_ctxt = 
devm_kzalloc(&pdev->dev, + sizeof(*uci_ctxt), + GFP_KERNEL); + if (!uci_ctxt) + return -ENOMEM; - mutex_init(&uci_ctxt.ctrl_mutex); + uci_ctxt->client_info.mhi_client_cb = uci_xfer_cb; + mutex_init(&uci_ctxt->ctrl_mutex); - uci_log(UCI_DBG_INFO, "Setting up channel attributes.\n"); - ret_val = uci_init_client_attributes(&uci_ctxt); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_INFO, + "Setting up channel attributes\n"); + ret_val = uci_init_client_attributes(uci_ctxt, + pdev->dev.of_node); if (ret_val) { - uci_log(UCI_DBG_ERROR, - "Failed to init client attributes\n"); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_ERROR, + "Failed to init client attributes\n"); return -EIO; } - uci_ctxt.ctrl_chan_id = MHI_CLIENT_IP_CTRL_1_OUT; - uci_log(UCI_DBG_INFO, "Registering for MHI events.\n"); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_INFO, + "Registering for MHI events\n"); for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; ++i) { - if (uci_ctxt.chan_attrib[i * 2].uci_ownership) { - mhi_client = &uci_ctxt.client_handles[i]; - r = mhi_register_client(mhi_client, i); - if (r) { - uci_log(UCI_DBG_CRITICAL, + struct uci_client *uci_client = &uci_ctxt->client_handles[i]; + + uci_client->uci_ctxt = uci_ctxt; + if (uci_client->in_attr.uci_ownership) { + ret_val = mhi_register_client(uci_client); + if (ret_val) { + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_CRITICAL, "Failed to reg client %d ret %d\n", - r, i); + ret_val, + i); + + return -EIO; } + snprintf(node_name, sizeof(node_name), "mhi-uci%d", + uci_client->out_attr.chan_id); + uci_client->uci_ipc_log = ipc_log_context_create + (MHI_UCI_IPC_LOG_PAGES, + node_name, + 0); } } - uci_log(UCI_DBG_INFO, "Allocating char devices.\n"); - r = alloc_chrdev_region(&uci_ctxt.start_ctrl_nr, - 0, MHI_MAX_SOFTWARE_CHANNELS, - DEVICE_NAME); - - if (IS_ERR_VALUE(r)) { - uci_log(UCI_DBG_ERROR, - "Failed to alloc char devs, ret 0x%x\n", r); + + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_INFO, + "Allocating 
char devices\n"); + ret_val = alloc_chrdev_region(&uci_ctxt->dev_t, + 0, + MHI_SOFTWARE_CLIENT_LIMIT, + DEVICE_NAME); + if (IS_ERR_VALUE(ret_val)) { + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_ERROR, + "Failed to alloc char devs, ret 0x%x\n", ret_val); goto failed_char_alloc; } - uci_log(UCI_DBG_INFO, "Creating class\n"); - uci_ctxt.mhi_uci_class = class_create(THIS_MODULE, - DEVICE_NAME); - if (IS_ERR(uci_ctxt.mhi_uci_class)) { - uci_log(UCI_DBG_ERROR, - "Failed to instantiate class, ret 0x%x\n", r); - r = -ENOMEM; - goto failed_class_add; - } - uci_log(UCI_DBG_INFO, "Setting up device nodes.\n"); + uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log, + UCI_DBG_INFO, + "Setting up device nodes. for dev_t: 0x%x major:0x%x\n", + uci_ctxt->dev_t, + MAJOR(uci_ctxt->dev_t)); for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; ++i) { - if (uci_ctxt.chan_attrib[i*2].uci_ownership) { - cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops); - uci_ctxt.cdev[i].owner = THIS_MODULE; - r = cdev_add(&uci_ctxt.cdev[i], - uci_ctxt.start_ctrl_nr + i , 1); - if (IS_ERR_VALUE(r)) { - uci_log(UCI_DBG_ERROR, + struct uci_client *uci_client = &uci_ctxt->client_handles[i]; + + if (uci_client->in_attr.uci_ownership) { + cdev_init(&uci_ctxt->cdev[i], &mhi_uci_client_fops); + uci_ctxt->cdev[i].owner = THIS_MODULE; + ret_val = cdev_add(&uci_ctxt->cdev[i], + uci_ctxt->dev_t + i, 1); + if (IS_ERR_VALUE(ret_val)) { + uci_log(uci_client->uci_ipc_log, + UCI_DBG_ERROR, "Failed to add cdev %d, ret 0x%x\n", - i, r); + i, ret_val); goto failed_char_add; } - uci_ctxt.client_handles[i].dev = - device_create(uci_ctxt.mhi_uci_class, NULL, - uci_ctxt.start_ctrl_nr + i, - NULL, DEVICE_NAME "_pipe_%d", - i * 2); - - if (IS_ERR(uci_ctxt.client_handles[i].dev)) { - uci_log(UCI_DBG_ERROR, - "Failed to add cdev %d\n", i); - cdev_del(&uci_ctxt.cdev[i]); + uci_client->dev = + device_create(mhi_uci_drv_ctxt.mhi_uci_class, + NULL, + uci_ctxt->dev_t + i, + NULL, + DEVICE_NAME "_pipe_%d", + uci_client->out_chan); + if 
(IS_ERR(uci_client->dev)) { + uci_log(uci_client->uci_ipc_log, + UCI_DBG_ERROR, + "Failed to add cdev %d\n", i); + cdev_del(&uci_ctxt->cdev[i]); + ret_val = -EIO; goto failed_device_create; } } } + platform_set_drvdata(pdev, uci_ctxt); + mutex_lock(&mhi_uci_drv_ctxt.list_lock); + list_add_tail(&uci_ctxt->node, &mhi_uci_drv_ctxt.head); + mutex_unlock(&mhi_uci_drv_ctxt.list_lock); return 0; failed_char_add: failed_device_create: while (--i >= 0) { - cdev_del(&uci_ctxt.cdev[i]); - device_destroy(uci_ctxt.mhi_uci_class, - MKDEV(MAJOR(uci_ctxt.start_ctrl_nr), i * 2)); + cdev_del(&uci_ctxt->cdev[i]); + device_destroy(mhi_uci_drv_ctxt.mhi_uci_class, + MKDEV(MAJOR(uci_ctxt->dev_t), i)); }; - class_destroy(uci_ctxt.mhi_uci_class); -failed_class_add: - unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr), - MHI_MAX_SOFTWARE_CHANNELS); + + unregister_chrdev_region(MAJOR(uci_ctxt->dev_t), + MHI_SOFTWARE_CLIENT_LIMIT); failed_char_alloc: - return r; -} -static void __exit mhi_uci_exit(void) + return ret_val; +}; + +static int mhi_uci_remove(struct platform_device *pdev) { + struct mhi_uci_ctxt_t *uci_ctxt = platform_get_drvdata(pdev); int i; + for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; ++i) { - cdev_del(&uci_ctxt.cdev[i]); - device_destroy(uci_ctxt.mhi_uci_class, - MKDEV(MAJOR(uci_ctxt.start_ctrl_nr), i * 2)); + struct uci_client *uci_client = &uci_ctxt->client_handles[i]; + + uci_client->uci_ctxt = uci_ctxt; + if (uci_client->in_attr.uci_ownership) { + mhi_deregister_channel(uci_client->out_handle); + mhi_deregister_channel(uci_client->in_handle); + cdev_del(&uci_ctxt->cdev[i]); + device_destroy(mhi_uci_drv_ctxt.mhi_uci_class, + MKDEV(MAJOR(uci_ctxt->dev_t), i)); + } } - class_destroy(uci_ctxt.mhi_uci_class); - unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr), - MHI_MAX_SOFTWARE_CHANNELS); + + unregister_chrdev_region(MAJOR(uci_ctxt->dev_t), + MHI_SOFTWARE_CLIENT_LIMIT); + + mutex_lock(&mhi_uci_drv_ctxt.list_lock); + list_del(&uci_ctxt->node); + 
mutex_unlock(&mhi_uci_drv_ctxt.list_lock); + return 0; +}; + +static const struct of_device_id mhi_uci_match_table[] = { + {.compatible = "qcom,mhi-uci"}, + {}, +}; + +static struct platform_driver mhi_uci_driver = { + .probe = mhi_uci_probe, + .remove = mhi_uci_remove, + .driver = { + .name = MHI_UCI_DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = mhi_uci_match_table, + }, +}; + +static int mhi_uci_init(void) +{ + mhi_uci_drv_ctxt.mhi_uci_ipc_log = + ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES, + "mhi-uci", + 0); + if (mhi_uci_drv_ctxt.mhi_uci_ipc_log == NULL) { + uci_log(NULL, + UCI_DBG_WARNING, + "Failed to create IPC logging context"); + } + + mhi_uci_drv_ctxt.mhi_uci_class = + class_create(THIS_MODULE, DEVICE_NAME); + + if (IS_ERR(mhi_uci_drv_ctxt.mhi_uci_class)) + return -ENODEV; + + mutex_init(&mhi_uci_drv_ctxt.list_lock); + INIT_LIST_HEAD(&mhi_uci_drv_ctxt.head); + + return platform_driver_register(&mhi_uci_driver); +} + +static void __exit mhi_uci_exit(void) +{ + class_destroy(mhi_uci_drv_ctxt.mhi_uci_class); + platform_driver_unregister(&mhi_uci_driver); } module_exit(mhi_uci_exit); diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index feac4576b837101c35cf7442ca07ee47496633fe..2df07ee8f3c33e9f5e92fd776554bd44e614e836 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -24,14 +24,15 @@ #include #include #include +#include MODULE_AUTHOR("Azael Avalos"); MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); MODULE_LICENSE("GPL"); -#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" +#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" -MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); +MODULE_ALIAS("wmi:"WMI_EVENT_GUID); static struct input_dev *toshiba_wmi_input_dev; @@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } +static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { + { + .ident = 
"Toshiba laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + }, + }, + {} +}; + static int __init toshiba_wmi_input_setup(void) { acpi_status status; @@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void) if (err) goto err_free_dev; - status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, + status = wmi_install_notify_handler(WMI_EVENT_GUID, toshiba_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; @@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void) return 0; err_remove_notifier: - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); + wmi_remove_notify_handler(WMI_EVENT_GUID); err_free_keymap: sparse_keymap_free(toshiba_wmi_input_dev); err_free_dev: @@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void) static void toshiba_wmi_input_destroy(void) { - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); + wmi_remove_notify_handler(WMI_EVENT_GUID); sparse_keymap_free(toshiba_wmi_input_dev); input_unregister_device(toshiba_wmi_input_dev); } @@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void) { int ret; - if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) + if (!wmi_has_guid(WMI_EVENT_GUID) || + !dmi_check_system(toshiba_wmi_dmi_table)) return -ENODEV; ret = toshiba_wmi_input_setup(); @@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void) static void __exit toshiba_wmi_exit(void) { - if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) + if (wmi_has_guid(WMI_EVENT_GUID)) toshiba_wmi_input_destroy(); } diff --git a/drivers/power/bq24257_charger.c b/drivers/power/bq24257_charger.c index 1fea2c7ef97feda56cfc71f9b1ab82e7d3d35351..6fc31bdc639bc54ac0fbd9008740d19912391330 100644 --- a/drivers/power/bq24257_charger.c +++ b/drivers/power/bq24257_charger.c @@ -1068,6 +1068,12 @@ static int bq24257_probe(struct i2c_client *client, return ret; } + ret = bq24257_power_supply_init(bq); + if (ret < 0) { + dev_err(dev, "Failed to register power supply\n"); + return ret; + } + ret = devm_request_threaded_irq(dev, client->irq, 
NULL, bq24257_irq_handler_thread, IRQF_TRIGGER_FALLING | @@ -1078,12 +1084,6 @@ static int bq24257_probe(struct i2c_client *client, return ret; } - ret = bq24257_power_supply_init(bq); - if (ret < 0) { - dev_err(dev, "Failed to register power supply\n"); - return ret; - } - ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group); if (ret < 0) { dev_err(dev, "Can't create sysfs entries\n"); diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c index a50bb988c69a0bbd49cbf455ee3304c066599bbb..f5c525e4482a18b533ad80c09b1e8a7da8be06a8 100644 --- a/drivers/power/goldfish_battery.c +++ b/drivers/power/goldfish_battery.c @@ -24,6 +24,7 @@ #include #include #include +#include struct goldfish_battery_data { void __iomem *reg_base; @@ -227,11 +228,25 @@ static int goldfish_battery_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_battery_of_match[] = { + { .compatible = "google,goldfish-battery", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_battery_of_match); + +static const struct acpi_device_id goldfish_battery_acpi_match[] = { + { "GFSH0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match); + static struct platform_driver goldfish_battery_device = { .probe = goldfish_battery_probe, .remove = goldfish_battery_remove, .driver = { - .name = "goldfish-battery" + .name = "goldfish-battery", + .of_match_table = goldfish_battery_of_match, + .acpi_match_table = ACPI_PTR(goldfish_battery_acpi_match), } }; module_platform_driver(goldfish_battery_device); diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 9c65f134d4474d843ffa907051191e2d8c484a24..da7a75f824891200f9db4c9dd4d274c5125d3dd8 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, } static inline void max17042_read_model_data(struct max17042_chip *chip, - u8 
addr, u32 *data, int size) + u8 addr, u16 *data, int size) { struct regmap *map = chip->regmap; int i; + u32 tmp; - for (i = 0; i < size; i++) - regmap_read(map, addr + i, &data[i]); + for (i = 0; i < size; i++) { + regmap_read(map, addr + i, &tmp); + data[i] = (u16)tmp; + } } static inline int max17042_model_data_compare(struct max17042_chip *chip, @@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip) { int ret; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); if (!temp_data) @@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip) ret = max17042_model_data_compare( chip, chip->pdata->config_data->cell_char_tbl, - (u16 *)temp_data, + temp_data, table_size); max10742_lock_model(chip); @@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) { int i; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; int ret = 0; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); diff --git a/drivers/power/qcom/apm.c b/drivers/power/qcom/apm.c index 6181b08a41a5aa19fb3af8b275fadc0e62993f9b..b5ae7f4ade031e14c236137ced7d42192311eec4 100644 --- a/drivers/power/qcom/apm.c +++ b/drivers/power/qcom/apm.c @@ -27,6 +27,7 @@ #include #include #include +#include /* * VDD_APCC @@ -79,7 +80,6 @@ #define MSM_APM_DRIVER_NAME "qcom,msm-apm" -asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); enum { CLOCK_ASSERT_ENABLE, diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c index 9ab7f562a83ba6538054e1e39a8b758489a4f8d7..f69387e12c1e545a3cedcb126247050401c8996a 100644 --- a/drivers/power/reset/hisi-reboot.c +++ b/drivers/power/reset/hisi-reboot.c @@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev) if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { 
pr_err("failed to find reboot-offset property\n"); + iounmap(base); return -EINVAL; } err = register_restart_handler(&hisi_restart_nb); - if (err) + if (err) { dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", err); + iounmap(base); + } return err; } diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig index b919c688e6278e486e73a56300a9d87dd65417ec..47b2017386729361bad31b0593dd3f9a8d69d029 100644 --- a/drivers/power/supply/qcom/Kconfig +++ b/drivers/power/supply/qcom/Kconfig @@ -1,23 +1,5 @@ menu "Qualcomm Technologies Inc Charger and Fuel Gauge support" -config QPNP_SMBCHARGER - tristate "QPNP SMB Charger driver" - depends on MFD_SPMI_PMIC - help - Say Y here to enable the dual path switch mode battery charger which - supports USB detection and battery charging up to 3A. - The driver also offers relevant information to userspace via the - power supply framework. - -config QPNP_FG - tristate "QPNP fuel gauge driver" - depends on MFD_SPMI_PMIC - help - Say Y here to enable the Fuel Gauge driver. This adds support for - battery fuel gauging and state of charge of battery connected to the - fuel gauge. The state of charge is reported through a BMS power - supply property and also sends uevents when the capacity is updated. 
- config QPNP_FG_GEN3 tristate "QPNP GEN3 fuel gauge driver" depends on MFD_SPMI_PMIC diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile index a3aff858c1901f201778f9f5260c4f9f83faf426..dfa83f2304b26f8ef38ae4fae19e02cfe2305253 100644 --- a/drivers/power/supply/qcom/Makefile +++ b/drivers/power/supply/qcom/Makefile @@ -1,5 +1,3 @@ -obj-$(CONFIG_QPNP_SMBCHARGER) += qpnp-smbcharger.o batterydata-lib.o pmic-voter.o -obj-$(CONFIG_QPNP_FG) += qpnp-fg.o obj-$(CONFIG_QPNP_FG_GEN3) += qpnp-fg-gen3.o fg-memif.o fg-util.o obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o pmic-voter.o obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c index 4973f91c8af122f10e0337a65c48e48dd2cc7d6d..5b320ca8e74eeb7afb4db0076a4c4c8418d43950 100644 --- a/drivers/power/supply/qcom/battery.c +++ b/drivers/power/supply/qcom/battery.c @@ -35,12 +35,15 @@ #define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER" #define PL_HW_ABSENT_VOTER "PL_HW_ABSENT_VOTER" #define PL_VOTER "PL_VOTER" +#define RESTRICT_CHG_VOTER "RESTRICT_CHG_VOTER" struct pl_data { int pl_mode; int slave_pct; int taper_pct; int slave_fcc_ua; + int restricted_current; + bool restricted_charging_enabled; struct votable *fcc_votable; struct votable *fv_votable; struct votable *pl_disable_votable; @@ -80,6 +83,8 @@ module_param_named(debug_mask, debug_mask, int, S_IRUSR | S_IWUSR); enum { VER = 0, SLAVE_PCT, + RESTRICT_CHG_ENABLE, + RESTRICT_CHG_CURRENT, }; /******* @@ -180,10 +185,78 @@ static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr, return count; } +/********************** +* RESTICTED CHARGIGNG * +***********************/ +static ssize_t restrict_chg_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct pl_data *chip = container_of(c, struct pl_data, + qcom_batt_class); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", + 
chip->restricted_charging_enabled); +} + +static ssize_t restrict_chg_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct pl_data *chip = container_of(c, struct pl_data, + qcom_batt_class); + unsigned long val; + + if (kstrtoul(ubuf, 10, &val)) + return -EINVAL; + + if (chip->restricted_charging_enabled == !!val) + goto no_change; + + chip->restricted_charging_enabled = !!val; + + vote(chip->fcc_votable, RESTRICT_CHG_VOTER, + chip->restricted_charging_enabled, + chip->restricted_current); + +no_change: + return count; +} + +static ssize_t restrict_cur_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct pl_data *chip = container_of(c, struct pl_data, + qcom_batt_class); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->restricted_current); +} + +static ssize_t restrict_cur_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct pl_data *chip = container_of(c, struct pl_data, + qcom_batt_class); + unsigned long val; + + if (kstrtoul(ubuf, 10, &val)) + return -EINVAL; + + chip->restricted_current = val; + + vote(chip->fcc_votable, RESTRICT_CHG_VOTER, + chip->restricted_charging_enabled, + chip->restricted_current); + + return count; +} + static struct class_attribute pl_attributes[] = { [VER] = __ATTR_RO(version), [SLAVE_PCT] = __ATTR(parallel_pct, S_IRUGO | S_IWUSR, slave_pct_show, slave_pct_store), + [RESTRICT_CHG_ENABLE] = __ATTR(restricted_charging, S_IRUGO | S_IWUSR, + restrict_chg_show, restrict_chg_store), + [RESTRICT_CHG_CURRENT] = __ATTR(restricted_current, S_IRUGO | S_IWUSR, + restrict_cur_show, restrict_cur_store), __ATTR_NULL, }; @@ -750,6 +823,7 @@ static int pl_determine_initial_status(struct pl_data *chip) return 0; } +#define DEFAULT_RESTRICTED_CURRENT_UA 1000000 static int pl_init(void) { struct pl_data *chip; @@ -759,6 +833,7 @@ static int pl_init(void) if (!chip) return -ENOMEM; chip->slave_pct = 50; + chip->restricted_current = 
DEFAULT_RESTRICTED_CURRENT_UA; chip->pl_ws = wakeup_source_register("qcom-battery"); if (!chip->pl_ws) diff --git a/drivers/power/supply/qcom/battery_current_limit.c b/drivers/power/supply/qcom/battery_current_limit.c index 022c665ad60b2f8af2b377175625bee4203ed7fb..410e64321ba6a394d43e414b585ceb4d3c14486a 100644 --- a/drivers/power/supply/qcom/battery_current_limit.c +++ b/drivers/power/supply/qcom/battery_current_limit.c @@ -288,23 +288,13 @@ static void soc_mitigate(struct work_struct *work) update_cpu_freq(); } -static int power_supply_callback(struct notifier_block *nb, - unsigned long event, void *data) +static int get_and_evaluate_battery_soc(void) { - struct power_supply *psy = data; static struct power_supply *batt_psy; union power_supply_propval ret = {0,}; int battery_percentage; enum bcl_threshold_state prev_soc_state; - if (gbcl->bcl_mode != BCL_DEVICE_ENABLED) { - pr_debug("BCL is not enabled\n"); - return NOTIFY_OK; - } - - if (strcmp(psy->desc->name, "battery")) - return NOTIFY_OK; - if (!batt_psy) batt_psy = power_supply_get_by_name("battery"); if (batt_psy) { @@ -328,6 +318,22 @@ static int power_supply_callback(struct notifier_block *nb, return NOTIFY_OK; } +static int power_supply_callback(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct power_supply *psy = data; + + if (gbcl->bcl_mode != BCL_DEVICE_ENABLED) { + pr_debug("BCL is not enabled\n"); + return NOTIFY_OK; + } + + if (strcmp(psy->desc->name, "battery")) + return NOTIFY_OK; + + return get_and_evaluate_battery_soc(); +} + static int bcl_get_battery_voltage(int *vbatt_mv) { static struct power_supply *psy; @@ -646,7 +652,7 @@ static void bcl_periph_mode_set(enum bcl_device_mode mode) * power state changes. Make sure we read the current SoC * and mitigate. 
*/ - power_supply_callback(&gbcl->psy_nb, 1, gbcl); + get_and_evaluate_battery_soc(); ret = power_supply_reg_notifier(&gbcl->psy_nb); if (ret < 0) { pr_err("Unable to register soc notifier rc = %d\n", diff --git a/drivers/power/supply/qcom/bcl_peripheral.c b/drivers/power/supply/qcom/bcl_peripheral.c index cae4967f1ef445f6a1185891056f1e08ac807d87..2d237f27b062c470cb408dc38703e025faa2f6f3 100644 --- a/drivers/power/supply/qcom/bcl_peripheral.c +++ b/drivers/power/supply/qcom/bcl_peripheral.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -482,8 +482,10 @@ static int bcl_access_monitor_enable(bool enable) if (enable == bcl_perph->enabled) goto access_exit; - if ((bcl_perph_version == BCL_PMI8998) && !hw_enabled && enable) + if ((bcl_perph_version == BCL_PMI8998) && !hw_enabled && enable) { bcl_lmh_dcvs_enable(); + hw_enabled = true; + } for (; i < BCL_PARAM_MAX; i++) { perph_data = &bcl_perph->param[i]; diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index c146654e438bf7a4d5b4fce631a5001968a42310..32a25b4e2c7ba9a9551e6897b921a0884295ffda 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -143,6 +143,7 @@ enum fg_sram_param_id { FG_SRAM_FULL_SOC, FG_SRAM_VOLTAGE_PRED, FG_SRAM_OCV, + FG_SRAM_ESR, FG_SRAM_RSLOW, FG_SRAM_ALG_FLAGS, FG_SRAM_CC_SOC, @@ -233,6 +234,7 @@ struct fg_dt_props { int esr_timer_awake; int esr_timer_asleep; int rconn_mohms; + int esr_clamp_mohms; int cl_start_soc; int cl_max_temp; int cl_min_temp; diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index d5c4f8ffaac375f3e3976927fd87184954cdfcd3..f2395b6ba4abc09b627c54d258dab152cd3483dd 100644 --- 
a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -358,7 +358,7 @@ int fg_write(struct fg_chip *chip, int addr, u8 *val, int len) return -ENXIO; mutex_lock(&chip->bus_lock); - sec_access = (addr & 0xFF00) > 0xD0; + sec_access = (addr & 0x00FF) > 0xD0; if (sec_access) { rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5); if (rc < 0) { @@ -398,7 +398,7 @@ int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val) return -ENXIO; mutex_lock(&chip->bus_lock); - sec_access = (addr & 0xFF00) > 0xD0; + sec_access = (addr & 0x00FF) > 0xD0; if (sec_access) { rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5); if (rc < 0) { diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 1fd092c550f336783ef8ccf9a4797c243b2082c9..39afc235fbc3c0794f97af75e824a83dc9220796 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -99,6 +99,8 @@ #define VOLTAGE_PRED_OFFSET 0 #define OCV_WORD 97 #define OCV_OFFSET 2 +#define ESR_WORD 99 +#define ESR_OFFSET 0 #define RSLOW_WORD 101 #define RSLOW_OFFSET 0 #define ACT_BATT_CAP_WORD 117 @@ -173,6 +175,8 @@ static struct fg_sram_param pmi8998_v1_sram_params[] = { 244141, 0, NULL, fg_decode_voltage_15b), PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL, fg_decode_voltage_15b), + PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default, + fg_decode_value_16b), PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL, fg_decode_value_16b), PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL, @@ -235,6 +239,8 @@ static struct fg_sram_param pmi8998_v2_sram_params[] = { 244141, 0, NULL, fg_decode_voltage_15b), PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL, fg_decode_voltage_15b), + PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default, + fg_decode_value_16b), PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL, 
fg_decode_value_16b), PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL, @@ -571,38 +577,11 @@ static int fg_get_battery_temp(struct fg_chip *chip, int *val) return 0; } -#define BATT_ESR_NUMR 244141 -#define BATT_ESR_DENR 1000 -static int fg_get_battery_esr(struct fg_chip *chip, int *val) -{ - int rc = 0; - u16 temp = 0; - u8 buf[2]; - - rc = fg_read(chip, BATT_INFO_ESR_LSB(chip), buf, 2); - if (rc < 0) { - pr_err("failed to read addr=0x%04x, rc=%d\n", - BATT_INFO_ESR_LSB(chip), rc); - return rc; - } - - if (chip->wa_flags & PMI8998_V1_REV_WA) - temp = ((buf[0] & ESR_MSB_MASK) << 8) | - (buf[1] & ESR_LSB_MASK); - else - temp = ((buf[1] & ESR_MSB_MASK) << 8) | - (buf[0] & ESR_LSB_MASK); - - pr_debug("buf: %x %x temp: %x\n", buf[0], buf[1], temp); - *val = div_u64((u64)temp * BATT_ESR_NUMR, BATT_ESR_DENR); - return 0; -} - static int fg_get_battery_resistance(struct fg_chip *chip, int *val) { int rc, esr_uohms, rslow_uohms; - rc = fg_get_battery_esr(chip, &esr_uohms); + rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms); if (rc < 0) { pr_err("failed to get ESR, rc=%d\n", rc); return rc; @@ -1631,7 +1610,7 @@ static int fg_rconn_config(struct fg_chip *chip) return 0; } - rc = fg_get_battery_esr(chip, &esr_uohms); + rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms); if (rc < 0) { pr_err("failed to get ESR, rc=%d\n", rc); return rc; @@ -2744,6 +2723,39 @@ out: return rc; } +static int fg_esr_validate(struct fg_chip *chip) +{ + int rc, esr_uohms; + u8 buf[2]; + + if (chip->dt.esr_clamp_mohms <= 0) + return 0; + + rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms); + if (rc < 0) { + pr_err("failed to get ESR, rc=%d\n", rc); + return rc; + } + + if (esr_uohms >= chip->dt.esr_clamp_mohms * 1000) { + pr_debug("ESR %d is > ESR_clamp\n", esr_uohms); + return 0; + } + + esr_uohms = chip->dt.esr_clamp_mohms * 1000; + fg_encode(chip->sp, FG_SRAM_ESR, esr_uohms, buf); + rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR].addr_word, + 
chip->sp[FG_SRAM_ESR].addr_byte, buf, + chip->sp[FG_SRAM_ESR].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR, rc=%d\n", rc); + return rc; + } + + fg_dbg(chip, FG_STATUS, "ESR clamped to %duOhms\n", esr_uohms); + return 0; +} + /* PSY CALLBACKS STAY HERE */ static int fg_psy_get_property(struct power_supply *psy, @@ -3371,6 +3383,10 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in updating maint_soc, rc=%d\n", rc); + rc = fg_esr_validate(chip); + if (rc < 0) + pr_err("Error in validating ESR, rc=%d\n", rc); + if (batt_psy_initialized(chip)) power_supply_changed(chip->batt_psy); @@ -3659,6 +3675,7 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip) #define DEFAULT_ESR_BROAD_FLT_UPCT 99610 #define DEFAULT_ESR_TIGHT_LT_FLT_UPCT 48829 #define DEFAULT_ESR_BROAD_LT_FLT_UPCT 148438 +#define DEFAULT_ESR_CLAMP_MOHMS 20 static int fg_parse_dt(struct fg_chip *chip) { struct device_node *child, *revid_node, *node = chip->dev->of_node; @@ -3972,6 +3989,12 @@ static int fg_parse_dt(struct fg_chip *chip) if (rc < 0) pr_err("Error in parsing slope limit coeffs, rc=%d\n", rc); + rc = of_property_read_u32(node, "qcom,fg-esr-clamp-mohms", &temp); + if (rc < 0) + chip->dt.esr_clamp_mohms = DEFAULT_ESR_CLAMP_MOHMS; + else + chip->dt.esr_clamp_mohms = temp; + return 0; } diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c deleted file mode 100644 index cfd2f64a9bb8d564e0b2c010ef47da7fc986f8e4..0000000000000000000000000000000000000000 --- a/drivers/power/supply/qcom/qpnp-fg.c +++ /dev/null @@ -1,7051 +0,0 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#define pr_fmt(fmt) "FG: %s: " fmt, __func__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Register offsets */ - -/* Interrupt offsets */ -#define INT_RT_STS(base) (base + 0x10) -#define INT_EN_CLR(base) (base + 0x16) - -/* SPMI Register offsets */ -#define SOC_MONOTONIC_SOC 0x09 -#define SOC_BOOT_MOD 0x50 -#define SOC_RESTART 0x51 - -#define REG_OFFSET_PERP_SUBTYPE 0x05 - -/* RAM register offsets */ -#define RAM_OFFSET 0x400 - -/* Bit/Mask definitions */ -#define FULL_PERCENT 0xFF -#define MAX_TRIES_SOC 5 -#define MA_MV_BIT_RES 39 -#define MSB_SIGN BIT(7) -#define IBAT_VBAT_MASK 0x7F -#define NO_OTP_PROF_RELOAD BIT(6) -#define REDO_FIRST_ESTIMATE BIT(3) -#define RESTART_GO BIT(0) -#define THERM_DELAY_MASK 0xE0 - -/* SUBTYPE definitions */ -#define FG_SOC 0x9 -#define FG_BATT 0xA -#define FG_ADC 0xB -#define FG_MEMIF 0xC - -#define QPNP_FG_DEV_NAME "qcom,qpnp-fg" -#define MEM_IF_TIMEOUT_MS 5000 -#define BUCKET_COUNT 8 -#define BUCKET_SOC_PCT (256 / BUCKET_COUNT) - -#define BCL_MA_TO_ADC(_current, _adc_val) { \ - _adc_val = (u8)((_current) * 100 / 976); \ -} - -/* Debug Flag Definitions */ -enum { - FG_SPMI_DEBUG_WRITES = BIT(0), /* Show SPMI writes */ - FG_SPMI_DEBUG_READS = BIT(1), /* Show SPMI reads */ - FG_IRQS = BIT(2), /* Show interrupts */ - FG_MEM_DEBUG_WRITES = BIT(3), /* Show SRAM writes */ - FG_MEM_DEBUG_READS = BIT(4), /* Show SRAM reads */ - FG_POWER_SUPPLY = BIT(5), /* Show POWER_SUPPLY */ - FG_STATUS = BIT(6), /* Show FG status changes */ - FG_AGING = BIT(7), /* Show FG aging algorithm */ -}; - 
-/* PMIC REVISIONS */ -#define REVID_RESERVED 0 -#define REVID_VARIANT 1 -#define REVID_ANA_MAJOR 2 -#define REVID_DIG_MAJOR 3 - -enum dig_major { - DIG_REV_1 = 0x1, - DIG_REV_2 = 0x2, - DIG_REV_3 = 0x3, -}; - -enum pmic_subtype { - PMI8994 = 10, - PMI8950 = 17, - PMI8996 = 19, - PMI8937 = 55, -}; - -enum wa_flags { - IADC_GAIN_COMP_WA = BIT(0), - USE_CC_SOC_REG = BIT(1), - PULSE_REQUEST_WA = BIT(2), - BCL_HI_POWER_FOR_CHGLED_WA = BIT(3) -}; - -enum current_sense_type { - INTERNAL_CURRENT_SENSE, - EXTERNAL_CURRENT_SENSE, -}; - -struct fg_mem_setting { - u16 address; - u8 offset; - int value; -}; - -struct fg_mem_data { - u16 address; - u8 offset; - unsigned int len; - int value; -}; - -struct fg_learning_data { - int64_t cc_uah; - int64_t learned_cc_uah; - int init_cc_pc_val; - bool active; - bool feedback_on; - struct mutex learning_lock; - ktime_t time_stamp; - /* configuration properties */ - int max_start_soc; - int max_increment; - int max_decrement; - int min_temp; - int max_temp; - int vbat_est_thr_uv; -}; - -struct fg_rslow_data { - u8 rslow_cfg; - u8 rslow_thr; - u8 rs_to_rslow[2]; - u8 rslow_comp[4]; - uint32_t chg_rs_to_rslow; - uint32_t chg_rslow_comp_c1; - uint32_t chg_rslow_comp_c2; - uint32_t chg_rslow_comp_thr; - bool active; - struct mutex lock; -}; - -struct fg_cyc_ctr_data { - bool en; - bool started[BUCKET_COUNT]; - u16 count[BUCKET_COUNT]; - u8 last_soc[BUCKET_COUNT]; - int id; - struct mutex lock; -}; - -struct fg_iadc_comp_data { - u8 dfl_gain_reg[2]; - bool gain_active; - int64_t dfl_gain; -}; - -struct fg_cc_soc_data { - int init_sys_soc; - int init_cc_soc; - int full_capacity; - int delta_soc; -}; - -/* FG_MEMIF setting index */ -enum fg_mem_setting_index { - FG_MEM_SOFT_COLD = 0, - FG_MEM_SOFT_HOT, - FG_MEM_HARD_COLD, - FG_MEM_HARD_HOT, - FG_MEM_RESUME_SOC, - FG_MEM_BCL_LM_THRESHOLD, - FG_MEM_BCL_MH_THRESHOLD, - FG_MEM_TERM_CURRENT, - FG_MEM_CHG_TERM_CURRENT, - FG_MEM_IRQ_VOLT_EMPTY, - FG_MEM_CUTOFF_VOLTAGE, - FG_MEM_VBAT_EST_DIFF, - 
FG_MEM_DELTA_SOC, - FG_MEM_BATT_LOW, - FG_MEM_THERM_DELAY, - FG_MEM_SETTING_MAX, -}; - -/* FG_MEMIF data index */ -enum fg_mem_data_index { - FG_DATA_BATT_TEMP = 0, - FG_DATA_OCV, - FG_DATA_VOLTAGE, - FG_DATA_CURRENT, - FG_DATA_BATT_ESR, - FG_DATA_BATT_ESR_COUNT, - FG_DATA_BATT_SOC, - FG_DATA_CC_CHARGE, - FG_DATA_VINT_ERR, - FG_DATA_CPRED_VOLTAGE, - /* values below this only gets read once per profile reload */ - FG_DATA_BATT_ID, - FG_DATA_BATT_ID_INFO, - FG_DATA_MAX, -}; - -#define SETTING(_idx, _address, _offset, _value) \ - [FG_MEM_##_idx] = { \ - .address = _address, \ - .offset = _offset, \ - .value = _value, \ - } \ - -static struct fg_mem_setting settings[FG_MEM_SETTING_MAX] = { - /* ID Address, Offset, Value*/ - SETTING(SOFT_COLD, 0x454, 0, 100), - SETTING(SOFT_HOT, 0x454, 1, 400), - SETTING(HARD_COLD, 0x454, 2, 50), - SETTING(HARD_HOT, 0x454, 3, 450), - SETTING(RESUME_SOC, 0x45C, 1, 0), - SETTING(BCL_LM_THRESHOLD, 0x47C, 2, 50), - SETTING(BCL_MH_THRESHOLD, 0x47C, 3, 752), - SETTING(TERM_CURRENT, 0x40C, 2, 250), - SETTING(CHG_TERM_CURRENT, 0x4F8, 2, 250), - SETTING(IRQ_VOLT_EMPTY, 0x458, 3, 3100), - SETTING(CUTOFF_VOLTAGE, 0x40C, 0, 3200), - SETTING(VBAT_EST_DIFF, 0x000, 0, 30), - SETTING(DELTA_SOC, 0x450, 3, 1), - SETTING(BATT_LOW, 0x458, 0, 4200), - SETTING(THERM_DELAY, 0x4AC, 3, 0), -}; - -#define DATA(_idx, _address, _offset, _length, _value) \ - [FG_DATA_##_idx] = { \ - .address = _address, \ - .offset = _offset, \ - .len = _length, \ - .value = _value, \ - } \ - -static struct fg_mem_data fg_data[FG_DATA_MAX] = { - /* ID Address, Offset, Length, Value*/ - DATA(BATT_TEMP, 0x550, 2, 2, -EINVAL), - DATA(OCV, 0x588, 3, 2, -EINVAL), - DATA(VOLTAGE, 0x5CC, 1, 2, -EINVAL), - DATA(CURRENT, 0x5CC, 3, 2, -EINVAL), - DATA(BATT_ESR, 0x554, 2, 2, -EINVAL), - DATA(BATT_ESR_COUNT, 0x558, 2, 2, -EINVAL), - DATA(BATT_SOC, 0x56C, 1, 3, -EINVAL), - DATA(CC_CHARGE, 0x570, 0, 4, -EINVAL), - DATA(VINT_ERR, 0x560, 0, 4, -EINVAL), - DATA(CPRED_VOLTAGE, 0x540, 0, 2, -EINVAL), 
- DATA(BATT_ID, 0x594, 1, 1, -EINVAL), - DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL), -}; - -static int fg_debug_mask; -module_param_named( - debug_mask, fg_debug_mask, int, S_IRUSR | S_IWUSR -); - -static int fg_sense_type = -EINVAL; -static int fg_restart; - -static int fg_est_dump; -module_param_named( - first_est_dump, fg_est_dump, int, S_IRUSR | S_IWUSR -); - -static char *fg_batt_type; -module_param_named( - battery_type, fg_batt_type, charp, S_IRUSR | S_IWUSR -); - -static int fg_sram_update_period_ms = 30000; -module_param_named( - sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR -); - -struct fg_irq { - int irq; - unsigned long disabled; -}; - -enum fg_soc_irq { - HIGH_SOC, - LOW_SOC, - FULL_SOC, - EMPTY_SOC, - DELTA_SOC, - FIRST_EST_DONE, - SW_FALLBK_OCV, - SW_FALLBK_NEW_BATT, - FG_SOC_IRQ_COUNT, -}; - -enum fg_batt_irq { - JEITA_SOFT_COLD, - JEITA_SOFT_HOT, - VBATT_LOW, - BATT_IDENTIFIED, - BATT_ID_REQ, - BATTERY_UNKNOWN, - BATT_MISSING, - BATT_MATCH, - FG_BATT_IRQ_COUNT, -}; - -enum fg_mem_if_irq { - FG_MEM_AVAIL, - TA_RCVRY_SUG, - FG_MEM_IF_IRQ_COUNT, -}; - -enum fg_batt_aging_mode { - FG_AGING_NONE, - FG_AGING_ESR, - FG_AGING_CC, -}; - -enum register_type { - MEM_INTF_CFG, - MEM_INTF_CTL, - MEM_INTF_ADDR_LSB, - MEM_INTF_RD_DATA0, - MEM_INTF_WR_DATA0, - MAX_ADDRESS, -}; - -struct register_offset { - u16 address[MAX_ADDRESS]; -}; - -static struct register_offset offset[] = { - [0] = { - /* CFG CTL LSB RD0 WD0 */ - .address = {0x40, 0x41, 0x42, 0x4C, 0x48}, - }, - [1] = { - /* CFG CTL LSB RD0 WD0 */ - .address = {0x50, 0x51, 0x61, 0x67, 0x63}, - }, -}; - -#define MEM_INTF_CFG(chip) \ - ((chip)->mem_base + (chip)->offset[MEM_INTF_CFG]) -#define MEM_INTF_CTL(chip) \ - ((chip)->mem_base + (chip)->offset[MEM_INTF_CTL]) -#define MEM_INTF_ADDR_LSB(chip) \ - ((chip)->mem_base + (chip)->offset[MEM_INTF_ADDR_LSB]) -#define MEM_INTF_RD_DATA0(chip) \ - ((chip)->mem_base + (chip)->offset[MEM_INTF_RD_DATA0]) -#define MEM_INTF_WR_DATA0(chip) \ - 
((chip)->mem_base + (chip)->offset[MEM_INTF_WR_DATA0]) - -struct fg_wakeup_source { - struct wakeup_source source; - unsigned long enabled; -}; - -static void fg_stay_awake(struct fg_wakeup_source *source) -{ - if (!__test_and_set_bit(0, &source->enabled)) { - __pm_stay_awake(&source->source); - pr_debug("enabled source %s\n", source->source.name); - } -} - -static void fg_relax(struct fg_wakeup_source *source) -{ - if (__test_and_clear_bit(0, &source->enabled)) { - __pm_relax(&source->source); - pr_debug("disabled source %s\n", source->source.name); - } -} - -#define THERMAL_COEFF_N_BYTES 6 -struct fg_chip { - struct device *dev; - struct platform_device *pdev; - struct regmap *regmap; - u8 pmic_subtype; - u8 pmic_revision[4]; - u8 revision[4]; - u16 soc_base; - u16 batt_base; - u16 mem_base; - u16 vbat_adc_addr; - u16 ibat_adc_addr; - u16 tp_rev_addr; - u32 wa_flag; - atomic_t memif_user_cnt; - struct fg_irq soc_irq[FG_SOC_IRQ_COUNT]; - struct fg_irq batt_irq[FG_BATT_IRQ_COUNT]; - struct fg_irq mem_irq[FG_MEM_IF_IRQ_COUNT]; - struct completion sram_access_granted; - struct completion sram_access_revoked; - struct completion batt_id_avail; - struct completion first_soc_done; - struct power_supply *bms_psy; - struct power_supply_desc bms_psy_d; - struct mutex rw_lock; - struct mutex sysfs_restart_lock; - struct delayed_work batt_profile_init; - struct work_struct dump_sram; - struct work_struct status_change_work; - struct work_struct cycle_count_work; - struct work_struct battery_age_work; - struct work_struct update_esr_work; - struct work_struct set_resume_soc_work; - struct work_struct rslow_comp_work; - struct work_struct sysfs_restart_work; - struct work_struct init_work; - struct work_struct charge_full_work; - struct work_struct gain_comp_work; - struct work_struct bcl_hi_power_work; - struct power_supply *batt_psy; - struct power_supply *usb_psy; - struct power_supply *dc_psy; - struct fg_wakeup_source memif_wakeup_source; - struct fg_wakeup_source 
profile_wakeup_source; - struct fg_wakeup_source empty_check_wakeup_source; - struct fg_wakeup_source resume_soc_wakeup_source; - struct fg_wakeup_source gain_comp_wakeup_source; - struct fg_wakeup_source capacity_learning_wakeup_source; - bool first_profile_loaded; - struct fg_wakeup_source update_temp_wakeup_source; - struct fg_wakeup_source update_sram_wakeup_source; - bool fg_restarting; - bool profile_loaded; - bool use_otp_profile; - bool battery_missing; - bool power_supply_registered; - bool sw_rbias_ctrl; - bool use_thermal_coefficients; - bool esr_strict_filter; - bool soc_empty; - bool charge_done; - bool resume_soc_lowered; - bool vbat_low_irq_enabled; - bool charge_full; - bool hold_soc_while_full; - bool input_present; - bool otg_present; - bool safety_timer_expired; - bool bad_batt_detection_en; - bool bcl_lpm_disabled; - bool charging_disabled; - struct delayed_work update_jeita_setting; - struct delayed_work update_sram_data; - struct delayed_work update_temp_work; - struct delayed_work check_empty_work; - char *batt_profile; - u8 thermal_coefficients[THERMAL_COEFF_N_BYTES]; - u32 cc_cv_threshold_mv; - unsigned int batt_profile_len; - unsigned int batt_max_voltage_uv; - const char *batt_type; - const char *batt_psy_name; - unsigned long last_sram_update_time; - unsigned long last_temp_update_time; - int64_t ocv_coeffs[12]; - int64_t cutoff_voltage; - int evaluation_current; - int ocv_junction_p1p2; - int ocv_junction_p2p3; - int nom_cap_uah; - int actual_cap_uah; - int status; - int prev_status; - int health; - enum fg_batt_aging_mode batt_aging_mode; - /* capacity learning */ - struct fg_learning_data learning_data; - struct alarm fg_cap_learning_alarm; - struct work_struct fg_cap_learning_work; - struct fg_cc_soc_data sw_cc_soc_data; - /* rslow compensation */ - struct fg_rslow_data rslow_comp; - /* cycle counter */ - struct fg_cyc_ctr_data cyc_ctr; - /* iadc compensation */ - struct fg_iadc_comp_data iadc_comp_data; - /* interleaved memory 
access */ - u16 *offset; - bool ima_supported; - bool init_done; - /* jeita hysteresis */ - bool jeita_hysteresis_support; - bool batt_hot; - bool batt_cold; - int cold_hysteresis; - int hot_hysteresis; - /* ESR pulse tuning */ - struct fg_wakeup_source esr_extract_wakeup_source; - struct work_struct esr_extract_config_work; - bool esr_extract_disabled; - bool imptr_pulse_slow_en; - bool esr_pulse_tune_en; -}; - -/* FG_MEMIF DEBUGFS structures */ -#define ADDR_LEN 4 /* 3 byte address + 1 space character */ -#define CHARS_PER_ITEM 3 /* Format is 'XX ' */ -#define ITEMS_PER_LINE 4 /* 4 data items per line */ -#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1) -#define MAX_REG_PER_TRANSACTION (8) - -static const char *DFS_ROOT_NAME = "fg_memif"; -static const mode_t DFS_MODE = S_IRUSR | S_IWUSR; -static const char *default_batt_type = "Unknown Battery"; -static const char *loading_batt_type = "Loading Battery Data"; -static const char *missing_batt_type = "Disconnected Battery"; - -/* Log buffer */ -struct fg_log_buffer { - size_t rpos; /* Current 'read' position in buffer */ - size_t wpos; /* Current 'write' position in buffer */ - size_t len; /* Length of the buffer */ - char data[0]; /* Log buffer */ -}; - -/* transaction parameters */ -struct fg_trans { - u32 cnt; /* Number of bytes to read */ - u16 addr; /* 12-bit address in SRAM */ - u32 offset; /* Offset of last read data + byte offset */ - struct fg_chip *chip; - struct fg_log_buffer *log; /* log buffer */ - u8 *data; /* fg data that is read */ - struct mutex memif_dfs_lock; /* Prevent thread concurrency */ -}; - -struct fg_dbgfs { - u32 cnt; - u32 addr; - struct fg_chip *chip; - struct dentry *root; - struct mutex lock; - struct debugfs_blob_wrapper help_msg; -}; - -static struct fg_dbgfs dbgfs_data = { - .lock = __MUTEX_INITIALIZER(dbgfs_data.lock), - .help_msg = { - .data = -"FG Debug-FS support\n" -"\n" -"Hierarchy schema:\n" -"/sys/kernel/debug/fg_memif\n" -" /help -- Static help 
text\n" -" /address -- Starting register address for reads or writes\n" -" /count -- Number of registers to read (only used for reads)\n" -" /data -- Initiates the SRAM read (formatted output)\n" -"\n", - }, -}; - -static const struct of_device_id fg_match_table[] = { - { .compatible = QPNP_FG_DEV_NAME, }, - {} -}; - -static char *fg_supplicants[] = { - "battery", - "bcl", - "fg_adc" -}; - -#define DEBUG_PRINT_BUFFER_SIZE 64 -static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len) -{ - int pos = 0; - int i; - - for (i = 0; i < buf_len; i++) { - pos += scnprintf(str + pos, str_len - pos, "%02X", buf[i]); - if (i < buf_len - 1) - pos += scnprintf(str + pos, str_len - pos, " "); - } -} - -static int fg_write(struct fg_chip *chip, u8 *val, u16 addr, int len) -{ - int rc = 0; - struct platform_device *pdev = chip->pdev; - char str[DEBUG_PRINT_BUFFER_SIZE]; - - if ((addr & 0xff00) == 0) { - pr_err("addr cannot be zero base=0x%02x sid=0x%02x rc=%d\n", - addr, to_spmi_device(pdev->dev.parent)->usid, rc); - return -EINVAL; - } - - rc = regmap_bulk_write(chip->regmap, addr, val, len); - if (rc) { - pr_err("write failed addr=0x%02x sid=0x%02x rc=%d\n", - addr, to_spmi_device(pdev->dev.parent)->usid, rc); - return rc; - } - - if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_WRITES)) { - str[0] = '\0'; - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len); - pr_info("write(0x%04X), sid=%d, len=%d; %s\n", - addr, to_spmi_device(pdev->dev.parent)->usid, len, - str); - } - - return rc; -} - -static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len) -{ - int rc = 0; - struct platform_device *pdev = chip->pdev; - char str[DEBUG_PRINT_BUFFER_SIZE]; - - if ((addr & 0xff00) == 0) { - pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n", - addr, to_spmi_device(pdev->dev.parent)->usid, rc); - return -EINVAL; - } - - rc = regmap_bulk_read(chip->regmap, addr, val, len); - if (rc) { - pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", addr, - 
to_spmi_device(pdev->dev.parent)->usid, rc); - return rc; - } - - if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_READS)) { - str[0] = '\0'; - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len); - pr_info("read(0x%04x), sid=%d, len=%d; %s\n", - addr, to_spmi_device(pdev->dev.parent)->usid, len, - str); - } - - return rc; -} - -static int fg_masked_write(struct fg_chip *chip, u16 addr, - u8 mask, u8 val, int len) -{ - int rc; - - rc = regmap_update_bits(chip->regmap, addr, mask, val); - if (rc) { - pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc); - return rc; - } - - return rc; -} - -#define RIF_MEM_ACCESS_REQ BIT(7) -static int fg_check_rif_mem_access(struct fg_chip *chip, bool *status) -{ - int rc; - u8 mem_if_sts; - - rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1); - if (rc) { - pr_err("failed to read rif_mem status rc=%d\n", rc); - return rc; - } - - *status = mem_if_sts & RIF_MEM_ACCESS_REQ; - return 0; -} - -static bool fg_check_sram_access(struct fg_chip *chip) -{ - int rc; - u8 mem_if_sts; - bool rif_mem_sts = false; - - rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1); - if (rc) { - pr_err("failed to read mem status rc=%d\n", rc); - return false; - } - - if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) - return false; - - rc = fg_check_rif_mem_access(chip, &rif_mem_sts); - if (rc) - return false; - - return rif_mem_sts; -} - -static inline int fg_assert_sram_access(struct fg_chip *chip) -{ - int rc; - u8 mem_if_sts; - - rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1); - if (rc) { - pr_err("failed to read mem status rc=%d\n", rc); - return rc; - } - - if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) { - pr_err("mem_avail not high: %02x\n", mem_if_sts); - return -EINVAL; - } - - rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1); - if (rc) { - pr_err("failed to read mem status rc=%d\n", rc); - return rc; - } - - if ((mem_if_sts & RIF_MEM_ACCESS_REQ) == 0) { - pr_err("mem_avail not high: %02x\n", mem_if_sts); - return -EINVAL; 
- } - - return 0; -} - -#define INTF_CTL_BURST BIT(7) -#define INTF_CTL_WR_EN BIT(6) -static int fg_config_access(struct fg_chip *chip, bool write, - bool burst) -{ - int rc; - u8 intf_ctl = 0; - - intf_ctl = (write ? INTF_CTL_WR_EN : 0) | (burst ? INTF_CTL_BURST : 0); - - rc = fg_write(chip, &intf_ctl, MEM_INTF_CTL(chip), 1); - if (rc) { - pr_err("failed to set mem access bit\n"); - return -EIO; - } - - return rc; -} - -static int fg_req_and_wait_access(struct fg_chip *chip, int timeout) -{ - int rc = 0, ret = 0; - bool tried_again = false; - - if (!fg_check_sram_access(chip)) { - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), - RIF_MEM_ACCESS_REQ, RIF_MEM_ACCESS_REQ, 1); - if (rc) { - pr_err("failed to set mem access bit\n"); - return -EIO; - } - fg_stay_awake(&chip->memif_wakeup_source); - } - -wait: - /* Wait for MEM_AVAIL IRQ. */ - ret = wait_for_completion_interruptible_timeout( - &chip->sram_access_granted, - msecs_to_jiffies(timeout)); - /* If we were interrupted wait again one more time. */ - if (ret == -ERESTARTSYS && !tried_again) { - tried_again = true; - goto wait; - } else if (ret <= 0) { - rc = -ETIMEDOUT; - pr_err("transaction timed out rc=%d\n", rc); - return rc; - } - - return rc; -} - -static int fg_release_access(struct fg_chip *chip) -{ - int rc; - - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), - RIF_MEM_ACCESS_REQ, 0, 1); - fg_relax(&chip->memif_wakeup_source); - reinit_completion(&chip->sram_access_granted); - - return rc; -} - -static void fg_release_access_if_necessary(struct fg_chip *chip) -{ - mutex_lock(&chip->rw_lock); - if (atomic_sub_return(1, &chip->memif_user_cnt) <= 0) { - fg_release_access(chip); - } - mutex_unlock(&chip->rw_lock); -} - -/* - * fg_mem_lock disallows the fuel gauge to release access until it has been - * released. - * - * an equal number of calls must be made to fg_mem_release for the fuel gauge - * driver to release the sram access. 
- */ -static void fg_mem_lock(struct fg_chip *chip) -{ - mutex_lock(&chip->rw_lock); - atomic_add_return(1, &chip->memif_user_cnt); - mutex_unlock(&chip->rw_lock); -} - -static void fg_mem_release(struct fg_chip *chip) -{ - fg_release_access_if_necessary(chip); -} - -static int fg_set_ram_addr(struct fg_chip *chip, u16 *address) -{ - int rc; - - rc = fg_write(chip, (u8 *) address, - chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], 2); - if (rc) { - pr_err("spmi write failed: addr=%03X, rc=%d\n", - chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], rc); - return rc; - } - - return rc; -} - -#define BUF_LEN 4 -static int fg_sub_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len, - int offset) -{ - int rc, total_len; - u8 *rd_data = val; - char str[DEBUG_PRINT_BUFFER_SIZE]; - - rc = fg_config_access(chip, 0, (len > 4)); - if (rc) - return rc; - - rc = fg_set_ram_addr(chip, &address); - if (rc) - return rc; - - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("length %d addr=%02X\n", len, address); - - total_len = len; - while (len > 0) { - if (!offset) { - rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip), - min(len, BUF_LEN)); - } else { - rc = fg_read(chip, rd_data, - MEM_INTF_RD_DATA0(chip) + offset, - min(len, BUF_LEN - offset)); - - /* manually set address to allow continous reads */ - address += BUF_LEN; - - rc = fg_set_ram_addr(chip, &address); - if (rc) - return rc; - } - if (rc) { - pr_err("spmi read failed: addr=%03x, rc=%d\n", - MEM_INTF_RD_DATA0(chip) + offset, rc); - return rc; - } - rd_data += (BUF_LEN - offset); - len -= (BUF_LEN - offset); - offset = 0; - } - - if (fg_debug_mask & FG_MEM_DEBUG_READS) { - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len); - pr_info("data: %s\n", str); - } - return rc; -} - -static int fg_conventional_mem_read(struct fg_chip *chip, u8 *val, u16 address, - int len, int offset, bool keep_access) -{ - int rc = 0, user_cnt = 0, orig_address = address; - - if (offset > 3) { - pr_err("offset too large 
%d\n", offset); - return -EINVAL; - } - - address = ((orig_address + offset) / 4) * 4; - offset = (orig_address + offset) % 4; - - user_cnt = atomic_add_return(1, &chip->memif_user_cnt); - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("user_cnt %d\n", user_cnt); - mutex_lock(&chip->rw_lock); - if (!fg_check_sram_access(chip)) { - rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS); - if (rc) - goto out; - } - - rc = fg_sub_mem_read(chip, val, address, len, offset); - -out: - user_cnt = atomic_sub_return(1, &chip->memif_user_cnt); - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("user_cnt %d\n", user_cnt); - - fg_assert_sram_access(chip); - - if (!keep_access && (user_cnt == 0) && !rc) { - rc = fg_release_access(chip); - if (rc) { - pr_err("failed to set mem access bit\n"); - rc = -EIO; - } - } - - mutex_unlock(&chip->rw_lock); - return rc; -} - -static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address, - int len, int offset, bool keep_access) -{ - int rc = 0, user_cnt = 0, sublen; - bool access_configured = false; - u8 *wr_data = val, word[4]; - char str[DEBUG_PRINT_BUFFER_SIZE]; - - if (address < RAM_OFFSET) - return -EINVAL; - - if (offset > 3) - return -EINVAL; - - address = ((address + offset) / 4) * 4; - offset = (address + offset) % 4; - - user_cnt = atomic_add_return(1, &chip->memif_user_cnt); - if (fg_debug_mask & FG_MEM_DEBUG_WRITES) - pr_info("user_cnt %d\n", user_cnt); - mutex_lock(&chip->rw_lock); - if (!fg_check_sram_access(chip)) { - rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS); - if (rc) - goto out; - } - - if (fg_debug_mask & FG_MEM_DEBUG_WRITES) { - pr_info("length %d addr=%02X offset=%d\n", - len, address, offset); - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, wr_data, len); - pr_info("writing: %s\n", str); - } - - while (len > 0) { - if (offset != 0) { - sublen = min(4 - offset, len); - rc = fg_sub_mem_read(chip, word, address, 4, 0); - if (rc) - goto out; - memcpy(word + offset, wr_data, sublen); - /* 
configure access as burst if more to write */ - rc = fg_config_access(chip, 1, (len - sublen) > 0); - if (rc) - goto out; - rc = fg_set_ram_addr(chip, &address); - if (rc) - goto out; - offset = 0; - access_configured = true; - } else if (len >= 4) { - if (!access_configured) { - rc = fg_config_access(chip, 1, len > 4); - if (rc) - goto out; - rc = fg_set_ram_addr(chip, &address); - if (rc) - goto out; - access_configured = true; - } - sublen = 4; - memcpy(word, wr_data, 4); - } else if (len > 0 && len < 4) { - sublen = len; - rc = fg_sub_mem_read(chip, word, address, 4, 0); - if (rc) - goto out; - memcpy(word, wr_data, sublen); - rc = fg_config_access(chip, 1, 0); - if (rc) - goto out; - rc = fg_set_ram_addr(chip, &address); - if (rc) - goto out; - access_configured = true; - } else { - pr_err("Invalid length: %d\n", len); - break; - } - rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip), 4); - if (rc) { - pr_err("spmi write failed: addr=%03x, rc=%d\n", - MEM_INTF_WR_DATA0(chip), rc); - goto out; - } - len -= sublen; - wr_data += sublen; - address += 4; - } - -out: - user_cnt = atomic_sub_return(1, &chip->memif_user_cnt); - if (fg_debug_mask & FG_MEM_DEBUG_WRITES) - pr_info("user_cnt %d\n", user_cnt); - - fg_assert_sram_access(chip); - - if (!keep_access && (user_cnt == 0) && !rc) { - rc = fg_release_access(chip); - if (rc) { - pr_err("failed to set mem access bit\n"); - rc = -EIO; - } - } - - mutex_unlock(&chip->rw_lock); - return rc; -} - -#define MEM_INTF_IMA_CFG 0x52 -#define MEM_INTF_IMA_OPR_STS 0x54 -#define MEM_INTF_IMA_ERR_STS 0x5F -#define MEM_INTF_IMA_EXP_STS 0x55 -#define MEM_INTF_IMA_HW_STS 0x56 -#define MEM_INTF_IMA_BYTE_EN 0x60 -#define IMA_ADDR_STBL_ERR BIT(7) -#define IMA_WR_ACS_ERR BIT(6) -#define IMA_RD_ACS_ERR BIT(5) -#define IMA_IACS_CLR BIT(2) -#define IMA_IACS_RDY BIT(1) -static int fg_check_ima_exception(struct fg_chip *chip) -{ - int rc = 0, ret = 0; - u8 err_sts, exp_sts = 0, hw_sts = 0; - - rc = fg_read(chip, &err_sts, - chip->mem_base + 
MEM_INTF_IMA_ERR_STS, 1); - if (rc) { - pr_err("failed to read beat count rc=%d\n", rc); - return rc; - } - - if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) { - u8 temp; - - fg_read(chip, &exp_sts, - chip->mem_base + MEM_INTF_IMA_EXP_STS, 1); - fg_read(chip, &hw_sts, - chip->mem_base + MEM_INTF_IMA_HW_STS, 1); - pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n", - err_sts, exp_sts, hw_sts); - rc = err_sts; - - /* clear the error */ - ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, - IMA_IACS_CLR, IMA_IACS_CLR, 1); - temp = 0x4; - ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1); - temp = 0x0; - ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1); - ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1); - ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, - IMA_IACS_CLR, 0, 1); - if (!ret) - return -EAGAIN; - else - pr_err("Error clearing IMA exception ret=%d\n", ret); - } - - return rc; -} - -static int fg_check_iacs_ready(struct fg_chip *chip) -{ - int rc = 0, timeout = 250; - u8 ima_opr_sts = 0; - - /* - * Additional delay to make sure IACS ready bit is set after - * Read/Write operation. 
- */ - - usleep_range(30, 35); - while (1) { - rc = fg_read(chip, &ima_opr_sts, - chip->mem_base + MEM_INTF_IMA_OPR_STS, 1); - if (!rc && (ima_opr_sts & IMA_IACS_RDY)) { - break; - } else { - if (!(--timeout) || rc) - break; - /* delay for iacs_ready to be asserted */ - usleep_range(5000, 7000); - } - } - - if (!timeout || rc) { - pr_err("IACS_RDY not set\n"); - /* perform IACS_CLR sequence */ - fg_check_ima_exception(chip); - return -EBUSY; - } - - return 0; -} - -#define IACS_SLCT BIT(5) -static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, - u16 address, int offset, int len) -{ - int rc = 0, i; - u8 *word = val, byte_enable = 0, num_bytes = 0; - - if (fg_debug_mask & FG_MEM_DEBUG_WRITES) - pr_info("length %d addr=%02X offset=%d\n", - len, address, offset); - - while (len > 0) { - num_bytes = (offset + len) > BUF_LEN ? - (BUF_LEN - offset) : len; - /* write to byte_enable */ - for (i = offset; i < (offset + num_bytes); i++) - byte_enable |= BIT(i); - - rc = fg_write(chip, &byte_enable, - chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1); - if (rc) { - pr_err("Unable to write to byte_en_reg rc=%d\n", - rc); - return rc; - } - /* write data */ - rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip) + offset, - num_bytes); - if (rc) { - pr_err("spmi write failed: addr=%03x, rc=%d\n", - MEM_INTF_WR_DATA0(chip) + offset, rc); - return rc; - } - /* - * The last-byte WR_DATA3 starts the write transaction. - * Write a dummy value to WR_DATA3 if it does not have - * valid data. This dummy data is not written to the - * SRAM as byte_en for WR_DATA3 is not set. 
- */ - if (!(byte_enable & BIT(3))) { - u8 dummy_byte = 0x0; - rc = fg_write(chip, &dummy_byte, - MEM_INTF_WR_DATA0(chip) + 3, 1); - if (rc) { - pr_err("Unable to write dummy-data to WR_DATA3 rc=%d\n", - rc); - return rc; - } - } - - rc = fg_check_iacs_ready(chip); - if (rc) { - pr_debug("IACS_RDY failed rc=%d\n", rc); - return rc; - } - - /* check for error condition */ - rc = fg_check_ima_exception(chip); - if (rc) { - pr_err("IMA transaction failed rc=%d", rc); - return rc; - } - - word += num_bytes; - len -= num_bytes; - offset = byte_enable = 0; - } - - return rc; -} - -static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address, - int offset, int len) -{ - int rc = 0, total_len; - u8 *rd_data = val, num_bytes; - char str[DEBUG_PRINT_BUFFER_SIZE]; - - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("length %d addr=%02X\n", len, address); - - total_len = len; - while (len > 0) { - num_bytes = (offset + len) > BUF_LEN ? (BUF_LEN - offset) : len; - rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip) + offset, - num_bytes); - if (rc) { - pr_err("spmi read failed: addr=%03x, rc=%d\n", - MEM_INTF_RD_DATA0(chip) + offset, rc); - return rc; - } - - rd_data += num_bytes; - len -= num_bytes; - offset = 0; - - rc = fg_check_iacs_ready(chip); - if (rc) { - pr_debug("IACS_RDY failed rc=%d\n", rc); - return rc; - } - - /* check for error condition */ - rc = fg_check_ima_exception(chip); - if (rc) { - pr_err("IMA transaction failed rc=%d", rc); - return rc; - } - - if (len && (len + offset) < BUF_LEN) { - /* move to single mode */ - u8 intr_ctl = 0; - - rc = fg_write(chip, &intr_ctl, MEM_INTF_CTL(chip), 1); - if (rc) { - pr_err("failed to move to single mode rc=%d\n", - rc); - return -EIO; - } - } - } - - if (fg_debug_mask & FG_MEM_DEBUG_READS) { - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len); - pr_info("data: %s\n", str); - } - - return rc; -} - -#define IMA_REQ_ACCESS (IACS_SLCT | RIF_MEM_ACCESS_REQ) -static int 
fg_interleaved_mem_config(struct fg_chip *chip, u8 *val, - u16 address, int len, int offset, int op) -{ - int rc = 0; - bool rif_mem_sts = true; - int time_count = 0; - - while (1) { - rc = fg_check_rif_mem_access(chip, &rif_mem_sts); - if (rc) - return rc; - - if (!rif_mem_sts) - break; - - if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)) - pr_info("RIF_MEM_ACCESS_REQ is not clear yet for IMA_%s\n", - op ? "write" : "read"); - - /* - * Try this no more than 4 times. If RIF_MEM_ACCESS_REQ is not - * clear, then return an error instead of waiting for it again. - */ - if (time_count > 4) { - pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n"); - return -ETIMEDOUT; - } - - /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */ - usleep_range(4000, 4100); - time_count++; - } - - /* configure for IMA access */ - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), - IMA_REQ_ACCESS, IMA_REQ_ACCESS, 1); - if (rc) { - pr_err("failed to set mem access bit rc = %d\n", rc); - return rc; - } - - /* configure for the read/write single/burst mode */ - rc = fg_config_access(chip, op, (offset + len) > 4); - if (rc) { - pr_err("failed to set configure memory access rc = %d\n", rc); - return rc; - } - - rc = fg_check_iacs_ready(chip); - if (rc) { - pr_debug("IACS_RDY failed rc=%d\n", rc); - return rc; - } - - /* write addresses to the register */ - rc = fg_set_ram_addr(chip, &address); - if (rc) { - pr_err("failed to set SRAM address rc = %d\n", rc); - return rc; - } - - rc = fg_check_iacs_ready(chip); - if (rc) - pr_debug("IACS_RDY failed rc=%d\n", rc); - - return rc; -} - -#define MEM_INTF_FG_BEAT_COUNT 0x57 -#define BEAT_COUNT_MASK 0x0F -#define RETRY_COUNT 3 -static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address, - int len, int offset) -{ - int rc = 0, orig_address = address; - u8 start_beat_count, end_beat_count, count = 0; - bool retry = false; - - if (offset > 3) { - pr_err("offset too large %d\n", offset); - return -EINVAL; - } 
- - fg_stay_awake(&chip->memif_wakeup_source); - address = ((orig_address + offset) / 4) * 4; - offset = (orig_address + offset) % 4; - - if (address < RAM_OFFSET) { - /* - * OTP memory reads need a conventional memory access, do a - * conventional read when SRAM offset < RAM_OFFSET. - */ - rc = fg_conventional_mem_read(chip, val, address, len, offset, - 0); - if (rc) - pr_err("Failed to read OTP memory %d\n", rc); - goto exit; - } - - mutex_lock(&chip->rw_lock); - -retry: - rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0); - if (rc) { - pr_err("failed to configure SRAM for IMA rc = %d\n", rc); - goto out; - } - - /* read the start beat count */ - rc = fg_read(chip, &start_beat_count, - chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1); - if (rc) { - pr_err("failed to read beat count rc=%d\n", rc); - goto out; - } - - /* read data */ - rc = __fg_interleaved_mem_read(chip, val, address, offset, len); - if (rc) { - if ((rc == -EAGAIN) && (count < RETRY_COUNT)) { - count++; - pr_err("IMA access failed retry_count = %d\n", count); - goto retry; - } else { - pr_err("failed to read SRAM address rc = %d\n", rc); - goto out; - } - } - - /* read the end beat count */ - rc = fg_read(chip, &end_beat_count, - chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1); - if (rc) { - pr_err("failed to read beat count rc=%d\n", rc); - goto out; - } - - start_beat_count &= BEAT_COUNT_MASK; - end_beat_count &= BEAT_COUNT_MASK; - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("Start beat_count = %x End beat_count = %x\n", - start_beat_count, end_beat_count); - if (start_beat_count != end_beat_count) { - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("Beat count do not match - retry transaction\n"); - retry = true; - } -out: - /* Release IMA access */ - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1); - if (rc) - pr_err("failed to reset IMA access bit rc = %d\n", rc); - - if (retry) { - retry = false; - goto retry; - } - mutex_unlock(&chip->rw_lock); - -exit: 
- fg_relax(&chip->memif_wakeup_source); - return rc; -} - -static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address, - int len, int offset) -{ - int rc = 0, orig_address = address; - u8 count = 0; - - if (address < RAM_OFFSET) - return -EINVAL; - - if (offset > 3) { - pr_err("offset too large %d\n", offset); - return -EINVAL; - } - - fg_stay_awake(&chip->memif_wakeup_source); - address = ((orig_address + offset) / 4) * 4; - offset = (orig_address + offset) % 4; - - mutex_lock(&chip->rw_lock); - -retry: - rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1); - if (rc) { - pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc); - goto out; - } - - /* write data */ - rc = __fg_interleaved_mem_write(chip, val, address, offset, len); - if (rc) { - if ((rc == -EAGAIN) && (count < RETRY_COUNT)) { - count++; - pr_err("IMA access failed retry_count = %d\n", count); - goto retry; - } else { - pr_err("failed to write SRAM address rc = %d\n", rc); - goto out; - } - } - -out: - /* Release IMA access */ - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1); - if (rc) - pr_err("failed to reset IMA access bit rc = %d\n", rc); - - mutex_unlock(&chip->rw_lock); - fg_relax(&chip->memif_wakeup_source); - return rc; -} - -static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address, - int len, int offset, bool keep_access) -{ - if (chip->ima_supported) - return fg_interleaved_mem_read(chip, val, address, - len, offset); - else - return fg_conventional_mem_read(chip, val, address, - len, offset, keep_access); -} - -static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address, - int len, int offset, bool keep_access) -{ - if (chip->ima_supported) - return fg_interleaved_mem_write(chip, val, address, - len, offset); - else - return fg_conventional_mem_write(chip, val, address, - len, offset, keep_access); -} - -static int fg_mem_masked_write(struct fg_chip *chip, u16 addr, - u8 mask, u8 val, u8 offset) -{ - int rc = 0; - u8 
reg[4]; - char str[DEBUG_PRINT_BUFFER_SIZE]; - - rc = fg_mem_read(chip, reg, addr, 4, 0, 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc); - return rc; - } - - reg[offset] &= ~mask; - reg[offset] |= val & mask; - - str[0] = '\0'; - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, reg, 4); - pr_debug("Writing %s address %03x, offset %d\n", str, addr, offset); - - rc = fg_mem_write(chip, reg, addr, 4, 0, 0); - if (rc) { - pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc); - return rc; - } - - return rc; -} - -static int soc_to_setpoint(int soc) -{ - return DIV_ROUND_CLOSEST(soc * 255, 100); -} - -static void batt_to_setpoint_adc(int vbatt_mv, u8 *data) -{ - int val; - /* Battery voltage is an offset from 0 V and LSB is 1/2^15. */ - val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000); - data[0] = val & 0xFF; - data[1] = val >> 8; - return; -} - -static u8 batt_to_setpoint_8b(int vbatt_mv) -{ - int val; - /* Battery voltage is an offset from 2.5 V and LSB is 5/2^9. */ - val = (vbatt_mv - 2500) * 512 / 1000; - return DIV_ROUND_CLOSEST(val, 5); -} - -static u8 therm_delay_to_setpoint(u32 delay_us) -{ - u8 val; - - if (delay_us < 2560) - val = 0; - else if (delay_us > 163840) - val = 7; - else - val = ilog2(delay_us / 10) - 7; - return val << 5; -} - -static int get_current_time(unsigned long *now_tm_sec) -{ - struct rtc_time tm; - struct rtc_device *rtc; - int rc; - - rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); - if (rtc == NULL) { - pr_err("%s: unable to open rtc device (%s)\n", - __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); - return -EINVAL; - } - - rc = rtc_read_time(rtc, &tm); - if (rc) { - pr_err("Error reading rtc device (%s) : %d\n", - CONFIG_RTC_HCTOSYS_DEVICE, rc); - goto close_time; - } - - rc = rtc_valid_tm(&tm); - if (rc) { - pr_err("Invalid RTC time (%s): %d\n", - CONFIG_RTC_HCTOSYS_DEVICE, rc); - goto close_time; - } - rtc_tm_to_time(&tm, now_tm_sec); - -close_time: - rtc_class_close(rtc); - return rc; -} - -#define BATTERY_SOC_REG 
0x56C -#define BATTERY_SOC_OFFSET 1 -#define FULL_PERCENT_3B 0xFFFFFF -static int get_battery_soc_raw(struct fg_chip *chip) -{ - int rc; - u8 buffer[3]; - - rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0); - if (rc) { - pr_err("Unable to read battery soc: %d\n", rc); - return 0; - } - return (int)(buffer[2] << 16 | buffer[1] << 8 | buffer[0]); -} - -#define COUNTER_IMPTR_REG 0X558 -#define COUNTER_PULSE_REG 0X55C -#define SOC_FULL_REG 0x564 -#define COUNTER_IMPTR_OFFSET 2 -#define COUNTER_PULSE_OFFSET 0 -#define SOC_FULL_OFFSET 3 -#define ESR_PULSE_RECONFIG_SOC 0xFFF971 -static int fg_configure_soc(struct fg_chip *chip) -{ - u32 batt_soc; - u8 cntr[2] = {0, 0}; - int rc = 0; - - mutex_lock(&chip->rw_lock); - atomic_add_return(1, &chip->memif_user_cnt); - mutex_unlock(&chip->rw_lock); - - /* Read Battery SOC */ - batt_soc = get_battery_soc_raw(chip); - - if (batt_soc > ESR_PULSE_RECONFIG_SOC) { - if (fg_debug_mask & FG_POWER_SUPPLY) - pr_info("Configuring soc registers batt_soc: %x\n", - batt_soc); - batt_soc = ESR_PULSE_RECONFIG_SOC; - rc = fg_mem_write(chip, (u8 *)&batt_soc, BATTERY_SOC_REG, 3, - BATTERY_SOC_OFFSET, 1); - if (rc) { - pr_err("failed to write BATT_SOC rc=%d\n", rc); - goto out; - } - - rc = fg_mem_write(chip, (u8 *)&batt_soc, SOC_FULL_REG, 3, - SOC_FULL_OFFSET, 1); - if (rc) { - pr_err("failed to write SOC_FULL rc=%d\n", rc); - goto out; - } - - rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 2, - COUNTER_IMPTR_OFFSET, 1); - if (rc) { - pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc); - goto out; - } - - rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2, - COUNTER_PULSE_OFFSET, 0); - if (rc) - pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc); - } -out: - fg_release_access_if_necessary(chip); - return rc; -} - -#define SOC_EMPTY BIT(3) -static bool fg_is_batt_empty(struct fg_chip *chip) -{ - u8 fg_soc_sts; - int rc; - - rc = fg_read(chip, &fg_soc_sts, - INT_RT_STS(chip->soc_base), 1); - if (rc) { - pr_err("spmi read failed: 
addr=%03X, rc=%d\n", - INT_RT_STS(chip->soc_base), rc); - return false; - } - - return (fg_soc_sts & SOC_EMPTY) != 0; -} - -static int get_monotonic_soc_raw(struct fg_chip *chip) -{ - u8 cap[2]; - int rc, tries = 0; - - while (tries < MAX_TRIES_SOC) { - rc = fg_read(chip, cap, - chip->soc_base + SOC_MONOTONIC_SOC, 2); - if (rc) { - pr_err("spmi read failed: addr=%03x, rc=%d\n", - chip->soc_base + SOC_MONOTONIC_SOC, rc); - return rc; - } - - if (cap[0] == cap[1]) - break; - - tries++; - } - - if (tries == MAX_TRIES_SOC) { - pr_err("shadow registers do not match\n"); - return -EINVAL; - } - - if (fg_debug_mask & FG_POWER_SUPPLY) - pr_info_ratelimited("raw: 0x%02x\n", cap[0]); - return cap[0]; -} - -#define EMPTY_CAPACITY 0 -#define DEFAULT_CAPACITY 50 -#define MISSING_CAPACITY 100 -#define FULL_CAPACITY 100 -#define FULL_SOC_RAW 0xFF -static int get_prop_capacity(struct fg_chip *chip) -{ - int msoc; - - if (chip->battery_missing) - return MISSING_CAPACITY; - if (!chip->profile_loaded && !chip->use_otp_profile) - return DEFAULT_CAPACITY; - if (chip->charge_full) - return FULL_CAPACITY; - if (chip->soc_empty) { - if (fg_debug_mask & FG_POWER_SUPPLY) - pr_info_ratelimited("capacity: %d, EMPTY\n", - EMPTY_CAPACITY); - return EMPTY_CAPACITY; - } - msoc = get_monotonic_soc_raw(chip); - if (msoc == 0) - return EMPTY_CAPACITY; - else if (msoc == FULL_SOC_RAW) - return FULL_CAPACITY; - return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2), - FULL_SOC_RAW - 2) + 1; -} - -#define HIGH_BIAS 3 -#define MED_BIAS BIT(1) -#define LOW_BIAS BIT(0) -static u8 bias_ua[] = { - [HIGH_BIAS] = 150, - [MED_BIAS] = 15, - [LOW_BIAS] = 5, -}; - -static int64_t get_batt_id(unsigned int battery_id_uv, u8 bid_info) -{ - u64 battery_id_ohm; - - if ((bid_info & 0x3) == 0) { - pr_err("can't determine battery id 0x%02x\n", bid_info); - return -EINVAL; - } - - battery_id_ohm = div_u64(battery_id_uv, bias_ua[bid_info & 0x3]); - - return battery_id_ohm; -} - -#define DEFAULT_TEMP_DEGC 250 -static int 
get_sram_prop_now(struct fg_chip *chip, unsigned int type)
{
	/*
	 * Return the cached SRAM value for @type.  FG_DATA_BATT_ID is
	 * converted from uV to ohms via get_batt_id(); all other entries
	 * are returned as cached by update_sram_data().
	 */
	if (fg_debug_mask & FG_POWER_SUPPLY)
		pr_info("addr 0x%02X, offset %d value %d\n",
			fg_data[type].address, fg_data[type].offset,
			fg_data[type].value);

	if (type == FG_DATA_BATT_ID)
		return get_batt_id(fg_data[type].value,
				fg_data[FG_DATA_BATT_ID_INFO].value);

	return fg_data[type].value;
}

#define MIN_TEMP_DEGC	-300
#define MAX_TEMP_DEGC	970
/* Return the cached JEITA threshold (decidegC) for settings[@type]. */
static int get_prop_jeita_temp(struct fg_chip *chip, unsigned int type)
{
	if (fg_debug_mask & FG_POWER_SUPPLY)
		pr_info("addr 0x%02X, offset %d\n", settings[type].address,
			settings[type].offset);

	return settings[type].value;
}

/*
 * Cache a new JEITA threshold (decidegC) and reschedule the worker that
 * writes all four thresholds to SRAM.
 */
static int set_prop_jeita_temp(struct fg_chip *chip,
		unsigned int type, int decidegc)
{
	int rc = 0;

	if (fg_debug_mask & FG_POWER_SUPPLY)
		pr_info("addr 0x%02X, offset %d temp%d\n",
			settings[type].address,
			settings[type].offset, decidegc);

	settings[type].value = decidegc;

	/* flush any in-flight update before queueing an immediate one */
	cancel_delayed_work_sync(
		&chip->update_jeita_setting);
	schedule_delayed_work(
		&chip->update_jeita_setting, 0);

	return rc;
}

#define EXTERNAL_SENSE_SELECT		0x4AC
#define EXTERNAL_SENSE_OFFSET		0x2
#define EXTERNAL_SENSE_BIT		BIT(2)
/* Select external (ext_sense_type != 0) or internal current sensing. */
static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type)
{
	int rc;

	rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
			EXTERNAL_SENSE_BIT,
			ext_sense_type ?
EXTERNAL_SENSE_BIT : 0, - EXTERNAL_SENSE_OFFSET); - if (rc) { - pr_err("failed to write profile rc=%d\n", rc); - return rc; - } - - return 0; -} - -#define EXPONENT_MASK 0xF800 -#define MANTISSA_MASK 0x3FF -#define SIGN BIT(10) -#define EXPONENT_SHIFT 11 -#define MICRO_UNIT 1000000ULL -static int64_t float_decode(u16 reg) -{ - int64_t final_val, exponent_val, mantissa_val; - int exponent, mantissa, n; - bool sign; - - exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT; - mantissa = (reg & MANTISSA_MASK); - sign = !!(reg & SIGN); - - pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign); - - mantissa_val = mantissa * MICRO_UNIT; - - n = exponent - 15; - if (n < 0) - exponent_val = MICRO_UNIT >> -n; - else - exponent_val = MICRO_UNIT << n; - - n = n - 10; - if (n < 0) - mantissa_val >>= -n; - else - mantissa_val <<= n; - - final_val = exponent_val + mantissa_val; - - if (sign) - final_val *= -1; - - return final_val; -} - -#define MIN_HALFFLOAT_EXP_N -15 -#define MAX_HALFFLOAT_EXP_N 16 -static int log2_floor(int64_t uval) -{ - int n = 0; - int64_t i = MICRO_UNIT; - - if (uval > i) { - while (uval > i && n > MIN_HALFFLOAT_EXP_N) { - i <<= 1; - n += 1; - } - if (uval < i) - n -= 1; - } else if (uval < i) { - while (uval < i && n < MAX_HALFFLOAT_EXP_N) { - i >>= 1; - n -= 1; - } - } - - return n; -} - -static int64_t exp2_int(int64_t n) -{ - int p = n - 1; - - if (p > 0) - return (2 * MICRO_UNIT) << p; - else - return (2 * MICRO_UNIT) >> abs(p); -} - -static u16 float_encode(int64_t uval) -{ - int sign = 0, n, exp, mantissa; - u16 half = 0; - - if (uval < 0) { - sign = 1; - uval = abs(uval); - } - n = log2_floor(uval); - exp = n + 15; - mantissa = div_s64(div_s64((uval - exp2_int(n)) * exp2_int(10 - n), - MICRO_UNIT) + MICRO_UNIT / 2, MICRO_UNIT); - - half = (mantissa & MANTISSA_MASK) | ((sign << 10) & SIGN) - | ((exp << 11) & EXPONENT_MASK); - - if (fg_debug_mask & FG_STATUS) - pr_info("uval = %lld, m = 0x%02x, sign = 0x%02x, exp = 0x%02x, half = 
0x%04x\n", - uval, mantissa, sign, exp, half); - return half; -} - -#define BATT_IDED BIT(3) -static int fg_is_batt_id_valid(struct fg_chip *chip) -{ - u8 fg_batt_sts; - int rc; - - rc = fg_read(chip, &fg_batt_sts, - INT_RT_STS(chip->batt_base), 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->batt_base), rc); - return rc; - } - - if (fg_debug_mask & FG_IRQS) - pr_info("fg batt sts 0x%x\n", fg_batt_sts); - - return (fg_batt_sts & BATT_IDED) ? 1 : 0; -} - -static int64_t twos_compliment_extend(int64_t val, int nbytes) -{ - int i; - int64_t mask; - - mask = 0x80LL << ((nbytes - 1) * 8); - if (val & mask) { - for (i = 8; i > nbytes; i--) { - mask = 0xFFLL << ((i - 1) * 8); - val |= mask; - } - } - - return val; -} - -#define LSB_24B_NUMRTR 596046 -#define LSB_24B_DENMTR 1000000 -#define LSB_16B_NUMRTR 152587 -#define LSB_16B_DENMTR 1000 -#define LSB_8B 9800 -#define TEMP_LSB_16B 625 -#define DECIKELVIN 2730 -#define SRAM_PERIOD_NO_ID_UPDATE_MS 100 -#define FULL_PERCENT_28BIT 0xFFFFFFF -static void update_sram_data(struct fg_chip *chip, int *resched_ms) -{ - int i, j, rc = 0; - u8 reg[4]; - int64_t temp; - int battid_valid = fg_is_batt_id_valid(chip); - - fg_stay_awake(&chip->update_sram_wakeup_source); - if (chip->fg_restarting) - goto resched; - - fg_mem_lock(chip); - for (i = 1; i < FG_DATA_MAX; i++) { - if (chip->profile_loaded && i >= FG_DATA_BATT_ID) - continue; - rc = fg_mem_read(chip, reg, fg_data[i].address, - fg_data[i].len, fg_data[i].offset, 0); - if (rc) { - pr_err("Failed to update sram data\n"); - break; - } - - temp = 0; - for (j = 0; j < fg_data[i].len; j++) - temp |= reg[j] << (8 * j); - - switch (i) { - case FG_DATA_OCV: - case FG_DATA_VOLTAGE: - case FG_DATA_CPRED_VOLTAGE: - fg_data[i].value = div_u64( - (u64)(u16)temp * LSB_16B_NUMRTR, - LSB_16B_DENMTR); - break; - case FG_DATA_CURRENT: - temp = twos_compliment_extend(temp, fg_data[i].len); - fg_data[i].value = div_s64( - (s64)temp * LSB_16B_NUMRTR, - 
LSB_16B_DENMTR); - break; - case FG_DATA_BATT_ESR: - fg_data[i].value = float_decode((u16) temp); - break; - case FG_DATA_BATT_ESR_COUNT: - fg_data[i].value = (u16)temp; - break; - case FG_DATA_BATT_ID: - if (battid_valid) - fg_data[i].value = reg[0] * LSB_8B; - break; - case FG_DATA_BATT_ID_INFO: - if (battid_valid) - fg_data[i].value = reg[0]; - break; - case FG_DATA_BATT_SOC: - fg_data[i].value = div64_s64((temp * 10000), - FULL_PERCENT_3B); - break; - case FG_DATA_CC_CHARGE: - temp = twos_compliment_extend(temp, fg_data[i].len); - fg_data[i].value = div64_s64( - temp * (int64_t)chip->nom_cap_uah, - FULL_PERCENT_28BIT); - break; - case FG_DATA_VINT_ERR: - temp = twos_compliment_extend(temp, fg_data[i].len); - fg_data[i].value = div64_s64(temp * chip->nom_cap_uah, - FULL_PERCENT_3B); - break; - }; - - if (fg_debug_mask & FG_MEM_DEBUG_READS) - pr_info("%d %lld %d\n", i, temp, fg_data[i].value); - } - fg_mem_release(chip); - - if (!rc) - get_current_time(&chip->last_sram_update_time); - -resched: - if (battid_valid) { - complete_all(&chip->batt_id_avail); - *resched_ms = fg_sram_update_period_ms; - } else { - *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS; - } - fg_relax(&chip->update_sram_wakeup_source); -} - -#define SRAM_TIMEOUT_MS 3000 -static void update_sram_data_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - update_sram_data.work); - int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret; - bool tried_again = false; - -wait: - /* Wait for MEMIF access revoked */ - ret = wait_for_completion_interruptible_timeout( - &chip->sram_access_revoked, - msecs_to_jiffies(SRAM_TIMEOUT_MS)); - - /* If we were interrupted wait again one more time. 
*/ - if (ret == -ERESTARTSYS && !tried_again) { - tried_again = true; - goto wait; - } else if (ret <= 0) { - pr_err("transaction timed out ret=%d\n", ret); - goto out; - } - update_sram_data(chip, &resched_ms); - -out: - schedule_delayed_work( - &chip->update_sram_data, - msecs_to_jiffies(resched_ms)); -} - -#define BATT_TEMP_OFFSET 3 -#define BATT_TEMP_CNTRL_MASK 0x17 -#define DISABLE_THERM_BIT BIT(0) -#define TEMP_SENSE_ALWAYS_BIT BIT(1) -#define TEMP_SENSE_CHARGE_BIT BIT(2) -#define FORCE_RBIAS_ON_BIT BIT(4) -#define BATT_TEMP_OFF DISABLE_THERM_BIT -#define BATT_TEMP_ON (FORCE_RBIAS_ON_BIT | TEMP_SENSE_ALWAYS_BIT | \ - TEMP_SENSE_CHARGE_BIT) -#define TEMP_PERIOD_UPDATE_MS 10000 -#define TEMP_PERIOD_TIMEOUT_MS 3000 -static void update_temp_data(struct work_struct *work) -{ - s16 temp; - u8 reg[2]; - bool tried_again = false; - int rc, ret, timeout = TEMP_PERIOD_TIMEOUT_MS; - struct fg_chip *chip = container_of(work, - struct fg_chip, - update_temp_work.work); - - if (chip->fg_restarting) - goto resched; - - fg_stay_awake(&chip->update_temp_wakeup_source); - if (chip->sw_rbias_ctrl) { - rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT, - BATT_TEMP_CNTRL_MASK, - BATT_TEMP_ON, - BATT_TEMP_OFFSET); - if (rc) { - pr_err("failed to write BATT_TEMP_ON rc=%d\n", rc); - goto out; - } - -wait: - /* Wait for MEMIF access revoked */ - ret = wait_for_completion_interruptible_timeout( - &chip->sram_access_revoked, - msecs_to_jiffies(timeout)); - - /* If we were interrupted wait again one more time. */ - if (ret == -ERESTARTSYS && !tried_again) { - tried_again = true; - goto wait; - } else if (ret <= 0) { - rc = -ETIMEDOUT; - pr_err("transaction timed out ret=%d\n", ret); - goto out; - } - } - - /* Read FG_DATA_BATT_TEMP now */ - rc = fg_mem_read(chip, reg, fg_data[0].address, - fg_data[0].len, fg_data[0].offset, - chip->sw_rbias_ctrl ? 
1 : 0);
	if (rc) {
		pr_err("Failed to update temp data\n");
		goto out;
	}

	/* raw LSB is TEMP_LSB_16B/1000 K; convert to deci-degC */
	temp = reg[0] | (reg[1] << 8);
	fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
		- DECIKELVIN;

	if (fg_debug_mask & FG_MEM_DEBUG_READS)
		pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);

	get_current_time(&chip->last_temp_update_time);

out:
	/* with SW Rbias control, turn thermal bias back off after the read */
	if (chip->sw_rbias_ctrl) {
		rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
				BATT_TEMP_CNTRL_MASK,
				BATT_TEMP_OFF,
				BATT_TEMP_OFFSET);
		if (rc)
			pr_err("failed to write BATT_TEMP_OFF rc=%d\n", rc);
	}
	fg_relax(&chip->update_temp_wakeup_source);

resched:
	schedule_delayed_work(
		&chip->update_temp_work,
		msecs_to_jiffies(TEMP_PERIOD_UPDATE_MS));
}

/*
 * Write the four cached JEITA thresholds (soft cold/hot etc.) to SRAM.
 * Each threshold is converted from decidegC to the register encoding
 * (degC + 30).
 */
static void update_jeita_setting(struct work_struct *work)
{
	struct fg_chip *chip = container_of(work,
				struct fg_chip,
				update_jeita_setting.work);
	u8 reg[4];
	int i, rc;

	for (i = 0; i < 4; i++)
		reg[i] = (settings[FG_MEM_SOFT_COLD + i].value / 10) + 30;

	rc = fg_mem_write(chip, reg, settings[FG_MEM_SOFT_COLD].address,
			4, settings[FG_MEM_SOFT_COLD].offset, 0);
	if (rc)
		pr_err("failed to update JEITA setting rc=%d\n", rc);
}

/* Program the resume-SoC threshold byte in SRAM. */
static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold)
{
	u16 address;
	int offset, rc;

	address = settings[FG_MEM_RESUME_SOC].address;
	offset = settings[FG_MEM_RESUME_SOC].offset;

	rc = fg_mem_masked_write(chip, address, 0xFF, threshold, offset);

	if (rc)
		pr_err("write failed rc=%d\n", rc);
	else
		pr_debug("setting resume-soc to %x\n", threshold);

	return rc;
}

#define VBATT_LOW_STS_BIT BIT(2)
/* Read the VBATT-low interrupt status bit into *vbatt_low_sts. */
static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
{
	int rc = 0;
	u8 fg_batt_sts;

	rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
	if (!rc)
		*vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
	return rc;
}

#define BATT_CYCLE_NUMBER_REG		0x5E8
#define BATT_CYCLE_OFFSET		0
/* Reload the per-bucket cycle counters from SRAM into the driver cache. */
static void restore_cycle_counter(struct fg_chip *chip)
{
	int
rc = 0, i, address;
	u8 data[2];

	fg_mem_lock(chip);
	for (i = 0; i < BUCKET_COUNT; i++) {
		/* each bucket's count is a little-endian u16 */
		address = BATT_CYCLE_NUMBER_REG + i * 2;
		rc = fg_mem_read(chip, (u8 *)&data, address, 2,
				BATT_CYCLE_OFFSET, 0);
		if (rc)
			pr_err("Failed to read BATT_CYCLE_NUMBER[%d] rc: %d\n",
				i, rc);
		else
			chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
	}
	fg_mem_release(chip);
}

/* Zero all cycle-counter state, both cached and persisted in SRAM. */
static void clear_cycle_counter(struct fg_chip *chip)
{
	int rc = 0, len, i;

	if (!chip->cyc_ctr.en)
		return;

	len = sizeof(chip->cyc_ctr.count);
	memset(chip->cyc_ctr.count, 0, len);
	for (i = 0; i < BUCKET_COUNT; i++) {
		chip->cyc_ctr.started[i] = false;
		chip->cyc_ctr.last_soc[i] = 0;
	}
	rc = fg_mem_write(chip, (u8 *)&chip->cyc_ctr.count,
			BATT_CYCLE_NUMBER_REG, len,
			BATT_CYCLE_OFFSET, 0);
	if (rc)
		pr_err("failed to write BATT_CYCLE_NUMBER rc=%d\n", rc);
}

/*
 * Increment bucket's cycle counter and persist it to SRAM; the cache is
 * only updated when the write succeeds.  Out-of-range buckets are ignored.
 */
static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
{
	int rc = 0, address;
	u16 cyc_count;
	u8 data[2];

	if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
		return 0;

	cyc_count = chip->cyc_ctr.count[bucket];
	cyc_count++;
	data[0] = cyc_count & 0xFF;
	data[1] = cyc_count >> 8;

	address = BATT_CYCLE_NUMBER_REG + bucket * 2;

	rc = fg_mem_write(chip, data, address, 2, BATT_CYCLE_OFFSET, 0);
	if (rc)
		pr_err("failed to write BATT_CYCLE_NUMBER[%d] rc=%d\n",
			bucket, rc);
	else
		chip->cyc_ctr.count[bucket] = cyc_count;
	return rc;
}

/*
 * Track charge cycles per SoC bucket: while charging, start counting for
 * the bucket the SoC falls in and store the previous bucket's counter;
 * when not charging, store counters for any buckets that gained SoC.
 */
static void update_cycle_count(struct work_struct *work)
{
	int rc = 0, bucket, i;
	u8 reg[3], batt_soc;
	struct fg_chip *chip = container_of(work,
				struct fg_chip,
				cycle_count_work);

	mutex_lock(&chip->cyc_ctr.lock);
	rc = fg_mem_read(chip, reg, BATTERY_SOC_REG, 3,
			BATTERY_SOC_OFFSET, 0);
	if (rc) {
		pr_err("Failed to read battery soc rc: %d\n", rc);
		goto out;
	}
	/* only the most-significant SoC byte is needed for bucketing */
	batt_soc = reg[2];

	if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
		/* Find out which bucket the SOC falls in */
		bucket = batt_soc / BUCKET_SOC_PCT;

		if (fg_debug_mask & FG_STATUS)
			pr_info("batt_soc: %x bucket: %d\n", reg[2], bucket);

		/*
		 * If we've started counting for the previous bucket,
		 * then store the counter for that bucket if the
		 * counter for current bucket is getting started.
		 */
		if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
			!chip->cyc_ctr.started[bucket]) {
			rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
			if (rc) {
				pr_err("Error in storing cycle_ctr rc: %d\n",
					rc);
				goto out;
			} else {
				chip->cyc_ctr.started[bucket - 1] = false;
				chip->cyc_ctr.last_soc[bucket - 1] = 0;
			}
		}
		if (!chip->cyc_ctr.started[bucket]) {
			chip->cyc_ctr.started[bucket] = true;
			chip->cyc_ctr.last_soc[bucket] = batt_soc;
		}
	} else {
		for (i = 0; i < BUCKET_COUNT; i++) {
			if (chip->cyc_ctr.started[i] &&
				batt_soc > chip->cyc_ctr.last_soc[i]) {
				rc = fg_inc_store_cycle_ctr(chip, i);
				if (rc)
					pr_err("Error in storing cycle_ctr rc: %d\n",
						rc);
				chip->cyc_ctr.last_soc[i] = 0;
			}
			chip->cyc_ctr.started[i] = false;
		}
	}
out:
	mutex_unlock(&chip->cyc_ctr.lock);
}

/* Return the cycle count of the user-selected bucket (cyc_ctr.id, 1-based). */
static int fg_get_cycle_count(struct fg_chip *chip)
{
	int count;

	if (!chip->cyc_ctr.en)
		return 0;

	if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
		return -EINVAL;

	mutex_lock(&chip->cyc_ctr.lock);
	count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
	mutex_unlock(&chip->cyc_ctr.lock);
	return count;
}

/* Encode @uval as a half-float into a 2-byte little-endian buffer. */
static void half_float_to_buffer(int64_t uval, u8 *buffer)
{
	u16 raw;

	raw = float_encode(uval);
	buffer[0] = (u8)(raw & 0xFF);
	buffer[1] = (u8)((raw >> 8) & 0xFF);
}

/* Decode a 2-byte little-endian half-float buffer. */
static int64_t half_float(u8 *buffer)
{
	u16 val;

	val = buffer[1] << 8 | buffer[0];
	return float_decode(val);
}

/* Decode a 2-byte voltage register into microvolts. */
static int voltage_2b(u8 *buffer)
{
	u16 val;

	val = buffer[1] << 8 | buffer[0];
	/* the range of voltage 2b is [-5V, 5V], so it will fit in an int */
	return (int)div_u64(((u64)val) * LSB_16B_NUMRTR, LSB_16B_DENMTR);
}

/* Decode a 2-byte capacity register (mAh) into uAh. */
static int bcap_uah_2b(u8 *buffer)
{
	u16
val;

	val = buffer[1] << 8 | buffer[0];
	return ((int)val) * 1000;
}

/*
 * Evaluate the battery-profile OCV polynomial for @soc (in 0.1% units),
 * selecting the coefficient segment by the profile's junction points.
 */
static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
{
	int64_t *coeffs;

	if (soc > chip->ocv_junction_p1p2 * 10)
		coeffs = chip->ocv_coeffs;
	else if (soc > chip->ocv_junction_p2p3 * 10)
		coeffs = chip->ocv_coeffs + 4;
	else
		coeffs = chip->ocv_coeffs + 8;
	/* the range of ocv will fit in a 32 bit int */
	return (int)(coeffs[0]
		+ div_s64(coeffs[1] * soc, 1000LL)
		+ div_s64(coeffs[2] * soc * soc, 1000000LL)
		+ div_s64(coeffs[3] * soc * soc * soc, 1000000000LL));
}

/*
 * Invert lookup_ocv_for_soc(): binary-search SoC (0.1% resolution) for the
 * value whose OCV is closest to @ocv.
 */
static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv)
{
	int64_t val;
	int soc = -EINVAL;
	/*
	 * binary search variables representing the valid start and end
	 * percentages to search
	 */
	int start = 0, end = 1000, mid;

	if (fg_debug_mask & FG_AGING)
		pr_info("target_ocv = %d\n", ocv);
	/* do a binary search for the closest soc to match the ocv */
	while (end - start > 1) {
		mid = (start + end) / 2;
		val = lookup_ocv_for_soc(chip, mid);
		if (fg_debug_mask & FG_AGING)
			pr_info("start = %d, mid = %d, end = %d, ocv = %lld\n",
				start, mid, end, val);
		if (ocv < val) {
			end = mid;
		} else if (ocv > val) {
			start = mid;
		} else {
			soc = mid;
			break;
		}
	}
	/*
	 * if the exact soc was not found and there are two or less values
	 * remaining, just compare them and see which one is closest to the ocv
	 */
	if (soc == -EINVAL) {
		if (abs(ocv - lookup_ocv_for_soc(chip, start))
			> abs(ocv - lookup_ocv_for_soc(chip, end)))
			soc = end;
		else
			soc = start;
	}
	if (fg_debug_mask & FG_AGING)
		pr_info("closest = %d, target_ocv = %d, ocv_found = %d\n",
			soc, ocv, lookup_ocv_for_soc(chip, soc));
	return soc;
}

#define ESR_ACTUAL_REG		0x554
#define BATTERY_ESR_REG		0x4F4
#define TEMP_RS_TO_RSLOW_REG	0x514
/*
 * Estimate the aged battery capacity (uAh) from the measured vs. profile
 * ESR: the extra ESR raises the cutoff OCV, and the SoC delta between the
 * two cutoffs is treated as unusable capacity.  Only runs in
 * FG_AGING_ESR mode and only when temperature (15.0-40.0 degC) and SoC
 * (25-75%) are within the trusted window.  Returns 0 (possibly without
 * updating *actual_capacity) or a negative errno.
 */
static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
{
	int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
	int64_t esr_actual, battery_esr, val;
	int soc_cutoff_aged, soc_cutoff_new, rc;
	int battery_soc, unusable_soc, batt_temp;
	u8 buffer[3];

	if (chip->batt_aging_mode != FG_AGING_ESR)
		return 0;

	if (chip->nom_cap_uah == 0) {
		if (fg_debug_mask & FG_AGING)
			pr_info("ocv coefficients not loaded, aborting\n");
		return 0;
	}
	fg_mem_lock(chip);

	batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
	if (batt_temp < 150 || batt_temp > 400) {
		if (fg_debug_mask & FG_AGING)
			pr_info("Battery temp (%d) out of range, aborting\n",
				(int)batt_temp);
		rc = 0;
		goto done;
	}

	battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
	if (battery_soc < 25 || battery_soc > 75) {
		if (fg_debug_mask & FG_AGING)
			pr_info("Battery SoC (%d) out of range, aborting\n",
				(int)battery_soc);
		rc = 0;
		goto done;
	}

	rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
	esr_actual = half_float(buffer);
	rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
	battery_esr = half_float(buffer);

	if (rc) {
		goto error_done;
	} else if (esr_actual < battery_esr) {
		if (fg_debug_mask & FG_AGING)
			pr_info("Batt ESR lower than ESR actual, aborting\n");
		rc = 0;
		goto done;
	}
	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, 0, 0);
	temp_rs_to_rslow = half_float(buffer);

	if (rc)
		goto error_done;

	fg_mem_release(chip);

	if (fg_debug_mask & FG_AGING) {
		pr_info("batt_soc = %d, cutoff_voltage = %lld, eval current = %d\n",
				battery_soc, chip->cutoff_voltage,
				chip->evaluation_current);
		pr_info("temp_rs_to_rslow = %lld, batt_esr = %lld, esr_actual = %lld\n",
				temp_rs_to_rslow, battery_esr, esr_actual);
	}

	/* calculate soc_cutoff_new */
	val = (1000000LL + temp_rs_to_rslow) * battery_esr;
	do_div(val, 1000000);
	ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
		+ chip->cutoff_voltage;

	/* calculate soc_cutoff_aged */
	val = (1000000LL + temp_rs_to_rslow) * esr_actual;
	do_div(val, 1000000);
	ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
		+ chip->cutoff_voltage;

	if (fg_debug_mask & FG_AGING)
		pr_info("ocv_cutoff_new = %lld, ocv_cutoff_aged = %lld\n",
				ocv_cutoff_new, ocv_cutoff_aged);

	soc_cutoff_new = lookup_soc_for_ocv(chip, ocv_cutoff_new);
	soc_cutoff_aged = lookup_soc_for_ocv(chip, ocv_cutoff_aged);

	if (fg_debug_mask & FG_AGING)
		pr_info("aged soc = %d, new soc = %d\n",
				soc_cutoff_aged, soc_cutoff_new);
	unusable_soc = soc_cutoff_aged - soc_cutoff_new;

	*actual_capacity = div64_s64(((int64_t)chip->nom_cap_uah)
				* (1000 - unusable_soc), 1000);
	if (fg_debug_mask & FG_AGING)
		pr_info("nom cap = %d, actual cap = %d\n",
				chip->nom_cap_uah, *actual_capacity);

	return rc;

error_done:
	pr_err("some register reads failed: %d\n", rc);
done:
	fg_mem_release(chip);
	return rc;
}

/* Work wrapper: refresh chip->actual_cap_uah from estimate_battery_age(). */
static void battery_age_work(struct work_struct *work)
{
	struct fg_chip *chip = container_of(work,
				struct fg_chip,
				battery_age_work);

	estimate_battery_age(chip, &chip->actual_cap_uah);
}

/* Properties exported through the power_supply class. */
static enum power_supply_property fg_power_props[] = {
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_CAPACITY_RAW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_OCV,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_CHARGE_NOW,
	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
	POWER_SUPPLY_PROP_CHARGE_FULL,
	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
	POWER_SUPPLY_PROP_TEMP,
	POWER_SUPPLY_PROP_COOL_TEMP,
	POWER_SUPPLY_PROP_WARM_TEMP,
	POWER_SUPPLY_PROP_RESISTANCE,
	POWER_SUPPLY_PROP_RESISTANCE_ID,
	POWER_SUPPLY_PROP_BATTERY_TYPE,
	POWER_SUPPLY_PROP_UPDATE_NOW,
	POWER_SUPPLY_PROP_ESR_COUNT,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
	POWER_SUPPLY_PROP_HI_POWER,
};

/* power_supply get_property callback: serve values from the SRAM cache. */
static int fg_power_get_property(struct power_supply *psy,
				       enum power_supply_property psp,
				       union
power_supply_propval *val)
{
	struct fg_chip *chip = power_supply_get_drvdata(psy);
	bool vbatt_low_sts;

	switch (psp) {
	case POWER_SUPPLY_PROP_BATTERY_TYPE:
		if (chip->battery_missing)
			val->strval = missing_batt_type;
		else if (chip->fg_restarting)
			val->strval = loading_batt_type;
		else
			val->strval = chip->batt_type;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = get_prop_capacity(chip);
		break;
	case POWER_SUPPLY_PROP_CAPACITY_RAW:
		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
		val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		val->intval = chip->batt_max_voltage_uv;
		break;
	case POWER_SUPPLY_PROP_TEMP:
		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
		break;
	case POWER_SUPPLY_PROP_COOL_TEMP:
		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
		break;
	case POWER_SUPPLY_PROP_WARM_TEMP:
		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
		break;
	case POWER_SUPPLY_PROP_RESISTANCE:
		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
		break;
	case POWER_SUPPLY_PROP_ESR_COUNT:
		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		val->intval = fg_get_cycle_count(chip);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
		val->intval = chip->cyc_ctr.id;
		break;
	case POWER_SUPPLY_PROP_RESISTANCE_ID:
		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
		break;
	case POWER_SUPPLY_PROP_UPDATE_NOW:
		val->intval = 0;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
		/* report "low" (1) when the status read itself fails */
		if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
			val->intval = (int)vbatt_low_sts;
		else
			val->intval = 1;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		val->intval = chip->nom_cap_uah;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
		val->intval = chip->learning_data.learned_cc_uah;
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW:
		val->intval = chip->learning_data.cc_uah;
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
		val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
		break;
	case POWER_SUPPLY_PROP_HI_POWER:
		val->intval = !!chip->bcl_lpm_disabled;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* FG cycle durations (ms) for 1..32 accumulation cycles. */
static int correction_times[] = {
	1470,
	2940,
	4410,
	5880,
	7350,
	8820,
	10290,
	11760,
	13230,
	14700,
	16170,
	17640,
	19110,
	20580,
	22050,
	23520,
	24990,
	26460,
	27930,
	29400,
	30870,
	32340,
	33810,
	35280,
	36750,
	38220,
	39690,
	41160,
	42630,
	44100,
	45570,
	47040,
};

/* Per-cycle-count correction factors, scaled by 10^6. */
static int correction_factors[] = {
	1000000,
	1007874,
	1015789,
	1023745,
	1031742,
	1039780,
	1047859,
	1055979,
	1064140,
	1072342,
	1080584,
	1088868,
	1097193,
	1105558,
	1113964,
	1122411,
	1130899,
	1139427,
	1147996,
	1156606,
	1165256,
	1173947,
	1182678,
	1191450,
	1200263,
	1209115,
	1218008,
	1226942,
	1235915,
	1244929,
	1253983,
	1263076,
};

#define FG_CONVERSION_FACTOR	(64198531LL)
/*
 * Convert the 24-bit filtered-current accumulator to uAh for the elapsed
 * @delta_ms, applying the correction factor for the number of FG cycles
 * that fit in that interval.
 */
static int iavg_3b_to_uah(u8 *buffer, int delta_ms)
{
	int64_t val, i_filtered;
	int i, correction_factor;

	for (i = 0; i < ARRAY_SIZE(correction_times); i++) {
		if (correction_times[i] > delta_ms)
			break;
	}
	if (i >= ARRAY_SIZE(correction_times)) {
		if (fg_debug_mask & FG_STATUS)
			pr_info("fuel gauge took more than 32 cycles\n");
		i = ARRAY_SIZE(correction_times) - 1;
	}
	correction_factor = correction_factors[i];
	if (fg_debug_mask & FG_STATUS)
		pr_info("delta_ms = %d, cycles = %d, correction = %d\n",
				delta_ms, i, correction_factor);
	val = buffer[2] << 16 | buffer[1] << 8 | buffer[0];
	/*
convert val from signed 24b to signed 64b */
	i_filtered = (val << 40) >> 40;
	val = i_filtered * correction_factor;
	/* round-to-nearest division by the conversion factor */
	val = div64_s64(val + FG_CONVERSION_FACTOR / 2, FG_CONVERSION_FACTOR);
	if (fg_debug_mask & FG_STATUS)
		pr_info("i_filtered = 0x%llx/%lld, cc_uah = %lld\n",
				i_filtered, i_filtered, val);

	return val;
}

/* Check whether battery temperature is inside the learning window. */
static bool fg_is_temperature_ok_for_learning(struct fg_chip *chip)
{
	int batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);

	if (batt_temp > chip->learning_data.max_temp
		|| batt_temp < chip->learning_data.min_temp) {
		if (fg_debug_mask & FG_AGING)
			pr_info("temp (%d) out of range [%d, %d], aborting\n",
					batt_temp,
					chip->learning_data.min_temp,
					chip->learning_data.max_temp);
		return false;
	}
	return true;
}

/* Abort the current capacity-learning session and reset its accumulator. */
static void fg_cap_learning_stop(struct fg_chip *chip)
{
	chip->learning_data.cc_uah = 0;
	chip->learning_data.active = false;
}

#define I_FILTERED_REG		0x584
/*
 * Periodic capacity-learning step (I_FILTERED method): read and clear the
 * filtered-current accumulator and subtract the converted charge from the
 * running cc_uah total.  Chips using the CC_SOC register workaround skip
 * this path entirely.
 */
static void fg_cap_learning_work(struct work_struct *work)
{
	struct fg_chip *chip = container_of(work,
				struct fg_chip,
				fg_cap_learning_work);
	u8 i_filtered[3], data[3];
	int rc, cc_uah, delta_ms;
	ktime_t now_kt, delta_kt;

	mutex_lock(&chip->learning_data.learning_lock);
	if (!chip->learning_data.active)
		goto fail;
	if (!fg_is_temperature_ok_for_learning(chip)) {
		fg_cap_learning_stop(chip);
		goto fail;
	}

	if (chip->wa_flag & USE_CC_SOC_REG) {
		mutex_unlock(&chip->learning_data.learning_lock);
		fg_relax(&chip->capacity_learning_wakeup_source);
		return;
	}

	fg_mem_lock(chip);

	rc = fg_mem_read(chip, i_filtered, I_FILTERED_REG, 3, 0, 0);
	if (rc) {
		pr_err("Failed to read i_filtered: %d\n", rc);
		fg_mem_release(chip);
		goto fail;
	}
	/* clear the accumulator so the next interval starts from zero */
	memset(data, 0, 3);
	rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
	if (rc) {
		pr_err("Failed to clear i_filtered: %d\n", rc);
		fg_mem_release(chip);
		goto fail;
	}
	fg_mem_release(chip);

	now_kt = ktime_get_boottime();
	delta_kt = ktime_sub(now_kt, chip->learning_data.time_stamp);
	chip->learning_data.time_stamp = now_kt;

	delta_ms = (int)div64_s64(ktime_to_ns(delta_kt), 1000000);

	cc_uah = iavg_3b_to_uah(i_filtered, delta_ms);
	chip->learning_data.cc_uah -= cc_uah;
	if (fg_debug_mask & FG_AGING)
		pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);

fail:
	mutex_unlock(&chip->learning_data.learning_lock);
	return;

}

#define CC_SOC_BASE_REG		0x5BC
#define CC_SOC_OFFSET		3
#define CC_SOC_MAGNITUDE_MASK	0x1FFFFFFF
#define CC_SOC_NEGATIVE_BIT	BIT(29)
/*
 * Read the coulomb-counter SoC register (30-bit signed: bit 29 is the
 * sign, low 29 bits the magnitude field) into *cc_soc.
 */
static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc)
{
	int rc;
	u8 reg[4];
	unsigned int temp, magnitude;

	rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
	if (rc) {
		pr_err("Failed to read CC_SOC_REG rc=%d\n", rc);
		return rc;
	}

	temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
	magnitude = temp & CC_SOC_MAGNITUDE_MASK;
	if (temp & CC_SOC_NEGATIVE_BIT)
		*cc_soc = -1 * (~magnitude + 1);
	else
		*cc_soc = magnitude;

	return 0;
}

/*
 * Finish a learning session using the CC_SOC register: convert the CC_SOC
 * delta since the session started into uAh (relative to the currently
 * learned capacity) and add it to the session accumulator.  Returns 0 on
 * success or a negative errno (the session is stopped on read failure).
 */
static int fg_cap_learning_process_full_data(struct fg_chip *chip)
{
	int cc_pc_val, rc = -EINVAL;
	unsigned int cc_soc_delta_pc;
	int64_t delta_cc_uah;

	if (!chip->learning_data.active)
		goto fail;

	if (!fg_is_temperature_ok_for_learning(chip)) {
		fg_cap_learning_stop(chip);
		goto fail;
	}

	rc = fg_get_cc_soc(chip, &cc_pc_val);
	if (rc) {
		pr_err("failed to get CC_SOC, stopping capacity learning\n");
		fg_cap_learning_stop(chip);
		goto fail;
	}

	cc_soc_delta_pc = DIV_ROUND_CLOSEST(
			abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
			* 100, FULL_PERCENT_28BIT);

	delta_cc_uah = div64_s64(
			chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
			100);
	chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;

	if (fg_debug_mask & FG_AGING)
		pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
				cc_pc_val, cc_soc_delta_pc,
				chip->learning_data.cc_uah);

	return 0;

fail:
	return rc;
}
- -#define FG_CAP_LEARNING_INTERVAL_NS 30000000000 -static enum alarmtimer_restart fg_cap_learning_alarm_cb(struct alarm *alarm, - ktime_t now) -{ - struct fg_chip *chip = container_of(alarm, struct fg_chip, - fg_cap_learning_alarm); - - if (chip->learning_data.active) { - if (fg_debug_mask & FG_AGING) - pr_info("alarm fired\n"); - schedule_work(&chip->fg_cap_learning_work); - alarm_forward_now(alarm, - ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS)); - return ALARMTIMER_RESTART; - } - if (fg_debug_mask & FG_AGING) - pr_info("alarm misfired\n"); - return ALARMTIMER_NORESTART; -} - -#define FG_AGING_STORAGE_REG 0x5E4 -#define ACTUAL_CAPACITY_REG 0x578 -#define MAH_TO_SOC_CONV_REG 0x4A0 -#define CC_SOC_COEFF_OFFSET 0 -#define ACTUAL_CAPACITY_OFFSET 2 -#define MAH_TO_SOC_CONV_CS_OFFSET 0 -static int fg_calc_and_store_cc_soc_coeff(struct fg_chip *chip, int16_t cc_mah) -{ - int rc; - int64_t cc_to_soc_coeff, mah_to_soc; - u8 data[2]; - - rc = fg_mem_write(chip, (u8 *)&cc_mah, ACTUAL_CAPACITY_REG, 2, - ACTUAL_CAPACITY_OFFSET, 0); - if (rc) { - pr_err("Failed to store actual capacity: %d\n", rc); - return rc; - } - - rc = fg_mem_read(chip, (u8 *)&data, MAH_TO_SOC_CONV_REG, 2, - MAH_TO_SOC_CONV_CS_OFFSET, 0); - if (rc) { - pr_err("Failed to read mah_to_soc_conv_cs: %d\n", rc); - } else { - mah_to_soc = data[1] << 8 | data[0]; - mah_to_soc *= MICRO_UNIT; - cc_to_soc_coeff = div64_s64(mah_to_soc, cc_mah); - half_float_to_buffer(cc_to_soc_coeff, data); - rc = fg_mem_write(chip, (u8 *)data, - ACTUAL_CAPACITY_REG, 2, - CC_SOC_COEFF_OFFSET, 0); - if (rc) - pr_err("Failed to write cc_soc_coeff_offset: %d\n", - rc); - else if (fg_debug_mask & FG_AGING) - pr_info("new cc_soc_coeff %lld [%x %x] saved to sram\n", - cc_to_soc_coeff, data[0], data[1]); - } - return rc; -} - -static void fg_cap_learning_load_data(struct fg_chip *chip) -{ - int16_t cc_mah; - int64_t old_cap = chip->learning_data.learned_cc_uah; - int rc; - - rc = fg_mem_read(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0); 
- if (rc) { - pr_err("Failed to load aged capacity: %d\n", rc); - } else { - chip->learning_data.learned_cc_uah = cc_mah * 1000; - if (fg_debug_mask & FG_AGING) - pr_info("learned capacity %lld-> %lld/%x uah\n", - old_cap, - chip->learning_data.learned_cc_uah, - cc_mah); - } -} - -static void fg_cap_learning_save_data(struct fg_chip *chip) -{ - int16_t cc_mah; - int rc; - - cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000); - - rc = fg_mem_write(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0); - if (rc) - pr_err("Failed to store aged capacity: %d\n", rc); - else if (fg_debug_mask & FG_AGING) - pr_info("learned capacity %lld uah (%d/0x%x uah) saved to sram\n", - chip->learning_data.learned_cc_uah, - cc_mah, cc_mah); - - if (chip->learning_data.feedback_on) { - rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah); - if (rc) - pr_err("Error in storing cc_soc_coeff, rc:%d\n", rc); - } -} - -static void fg_cap_learning_post_process(struct fg_chip *chip) -{ - int64_t max_inc_val, min_dec_val, old_cap; - - max_inc_val = chip->learning_data.learned_cc_uah - * (1000 + chip->learning_data.max_increment); - do_div(max_inc_val, 1000); - - min_dec_val = chip->learning_data.learned_cc_uah - * (1000 - chip->learning_data.max_decrement); - do_div(min_dec_val, 1000); - - old_cap = chip->learning_data.learned_cc_uah; - if (chip->learning_data.cc_uah > max_inc_val) - chip->learning_data.learned_cc_uah = max_inc_val; - else if (chip->learning_data.cc_uah < min_dec_val) - chip->learning_data.learned_cc_uah = min_dec_val; - else - chip->learning_data.learned_cc_uah = - chip->learning_data.cc_uah; - - fg_cap_learning_save_data(chip); - if (fg_debug_mask & FG_AGING) - pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n", - chip->learning_data.cc_uah, - old_cap, chip->learning_data.learned_cc_uah); -} - -static int get_vbat_est_diff(struct fg_chip *chip) -{ - return abs(fg_data[FG_DATA_VOLTAGE].value - - fg_data[FG_DATA_CPRED_VOLTAGE].value); -} - -#define 
CBITS_INPUT_FILTER_REG 0x4B4 -#define IBATTF_TAU_MASK 0x38 -#define IBATTF_TAU_99_S 0x30 -static int fg_cap_learning_check(struct fg_chip *chip) -{ - u8 data[4]; - int rc = 0, battery_soc, cc_pc_val; - int vbat_est_diff, vbat_est_thr_uv; - unsigned int cc_pc_100 = FULL_PERCENT_28BIT; - - mutex_lock(&chip->learning_data.learning_lock); - if (chip->status == POWER_SUPPLY_STATUS_CHARGING - && !chip->learning_data.active - && chip->batt_aging_mode == FG_AGING_CC) { - if (chip->learning_data.learned_cc_uah == 0) { - if (fg_debug_mask & FG_AGING) - pr_info("no capacity, aborting\n"); - goto fail; - } - - if (!fg_is_temperature_ok_for_learning(chip)) - goto fail; - - fg_mem_lock(chip); - if (!chip->learning_data.feedback_on) { - vbat_est_diff = get_vbat_est_diff(chip); - vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv; - if (vbat_est_diff >= vbat_est_thr_uv && - vbat_est_thr_uv > 0) { - if (fg_debug_mask & FG_AGING) - pr_info("vbat_est_diff (%d) < threshold (%d)\n", - vbat_est_diff, vbat_est_thr_uv); - fg_mem_release(chip); - fg_cap_learning_stop(chip); - goto fail; - } - } - battery_soc = get_battery_soc_raw(chip); - if (fg_debug_mask & FG_AGING) - pr_info("checking battery soc (%d vs %d)\n", - battery_soc * 100 / FULL_PERCENT_3B, - chip->learning_data.max_start_soc); - /* check if the battery is low enough to start soc learning */ - if (battery_soc * 100 / FULL_PERCENT_3B - > chip->learning_data.max_start_soc) { - if (fg_debug_mask & FG_AGING) - pr_info("battery soc too low (%d < %d), aborting\n", - battery_soc * 100 / FULL_PERCENT_3B, - chip->learning_data.max_start_soc); - fg_mem_release(chip); - fg_cap_learning_stop(chip); - goto fail; - } - - /* set the coulomb counter to a percentage of the capacity */ - chip->learning_data.cc_uah = div64_s64( - (chip->learning_data.learned_cc_uah * battery_soc), - FULL_PERCENT_3B); - - /* Use CC_SOC_REG based capacity learning */ - if (chip->wa_flag & USE_CC_SOC_REG) { - fg_mem_release(chip); - /* SW_CC_SOC based capacity 
learning */ - if (fg_get_cc_soc(chip, &cc_pc_val)) { - pr_err("failed to get CC_SOC, stop capacity learning\n"); - fg_cap_learning_stop(chip); - goto fail; - } - - chip->learning_data.init_cc_pc_val = cc_pc_val; - chip->learning_data.active = true; - if (fg_debug_mask & FG_AGING) - pr_info("SW_CC_SOC based learning init_CC_SOC=%d\n", - chip->learning_data.init_cc_pc_val); - } else { - rc = fg_mem_masked_write(chip, CBITS_INPUT_FILTER_REG, - IBATTF_TAU_MASK, IBATTF_TAU_99_S, 0); - if (rc) { - pr_err("Failed to write IF IBAT Tau: %d\n", - rc); - fg_mem_release(chip); - fg_cap_learning_stop(chip); - goto fail; - } - - /* clear the i_filtered register */ - memset(data, 0, 4); - rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0); - if (rc) { - pr_err("Failed to clear i_filtered: %d\n", rc); - fg_mem_release(chip); - fg_cap_learning_stop(chip); - goto fail; - } - fg_mem_release(chip); - chip->learning_data.time_stamp = ktime_get_boottime(); - chip->learning_data.active = true; - - if (fg_debug_mask & FG_AGING) - pr_info("cap learning started, soc = %d cc_uah = %lld\n", - battery_soc * 100 / FULL_PERCENT_3B, - chip->learning_data.cc_uah); - alarm_start_relative(&chip->fg_cap_learning_alarm, - ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS)); - } - } else if ((chip->status != POWER_SUPPLY_STATUS_CHARGING) - && chip->learning_data.active) { - if (fg_debug_mask & FG_AGING) - pr_info("capacity learning stopped\n"); - if (!(chip->wa_flag & USE_CC_SOC_REG)) - alarm_try_to_cancel(&chip->fg_cap_learning_alarm); - - if (chip->status == POWER_SUPPLY_STATUS_FULL) { - if (chip->wa_flag & USE_CC_SOC_REG) { - rc = fg_cap_learning_process_full_data(chip); - if (rc) { - fg_cap_learning_stop(chip); - goto fail; - } - /* reset SW_CC_SOC register to 100% */ - rc = fg_mem_write(chip, (u8 *)&cc_pc_100, - CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0); - if (rc) - pr_err("Failed to reset CC_SOC_REG rc=%d\n", - rc); - } - fg_cap_learning_post_process(chip); - } - - fg_cap_learning_stop(chip); - } - -fail: 
- mutex_unlock(&chip->learning_data.learning_lock); - return rc; -} - -static bool is_usb_present(struct fg_chip *chip) -{ - union power_supply_propval prop = {0,}; - if (!chip->usb_psy) - chip->usb_psy = power_supply_get_by_name("usb"); - - if (chip->usb_psy) - power_supply_get_property(chip->usb_psy, - POWER_SUPPLY_PROP_PRESENT, &prop); - return prop.intval != 0; -} - -static bool is_dc_present(struct fg_chip *chip) -{ - union power_supply_propval prop = {0,}; - if (!chip->dc_psy) - chip->dc_psy = power_supply_get_by_name("dc"); - - if (chip->dc_psy) - power_supply_get_property(chip->dc_psy, - POWER_SUPPLY_PROP_PRESENT, &prop); - return prop.intval != 0; -} - -static bool is_input_present(struct fg_chip *chip) -{ - return is_usb_present(chip) || is_dc_present(chip); -} - -static bool is_otg_present(struct fg_chip *chip) -{ - union power_supply_propval prop = {0,}; - - if (!chip->usb_psy) - chip->usb_psy = power_supply_get_by_name("usb"); - - if (chip->usb_psy) - power_supply_get_property(chip->usb_psy, - POWER_SUPPLY_PROP_USB_OTG, &prop); - return prop.intval != 0; -} - -static bool is_charger_available(struct fg_chip *chip) -{ - if (!chip->batt_psy_name) - return false; - - if (!chip->batt_psy) - chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name); - - if (!chip->batt_psy) - return false; - - return true; -} - -static int set_prop_enable_charging(struct fg_chip *chip, bool enable) -{ - int rc = 0; - union power_supply_propval ret = {enable, }; - - if (!is_charger_available(chip)) { - pr_err("Charger not available yet!\n"); - return -EINVAL; - } - - rc = power_supply_set_property(chip->batt_psy, - POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED, - &ret); - if (rc) { - pr_err("couldn't configure batt chg %d\n", rc); - return rc; - } - - chip->charging_disabled = !enable; - if (fg_debug_mask & FG_STATUS) - pr_info("%sabling charging\n", enable ? 
"en" : "dis"); - - return rc; -} - -#define MAX_BATTERY_CC_SOC_CAPACITY 150 -static void status_change_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - status_change_work); - unsigned long current_time = 0; - int cc_soc, rc, capacity = get_prop_capacity(chip); - - if (chip->esr_pulse_tune_en) { - fg_stay_awake(&chip->esr_extract_wakeup_source); - schedule_work(&chip->esr_extract_config_work); - } - - if (chip->status == POWER_SUPPLY_STATUS_FULL) { - if (capacity >= 99 && chip->hold_soc_while_full - && chip->health == POWER_SUPPLY_HEALTH_GOOD) { - if (fg_debug_mask & FG_STATUS) - pr_info("holding soc at 100\n"); - chip->charge_full = true; - } else if (fg_debug_mask & FG_STATUS) { - pr_info("terminated charging at %d/0x%02x\n", - capacity, get_monotonic_soc_raw(chip)); - } - } - if (chip->status == POWER_SUPPLY_STATUS_FULL || - chip->status == POWER_SUPPLY_STATUS_CHARGING) { - if (!chip->vbat_low_irq_enabled) { - enable_irq(chip->batt_irq[VBATT_LOW].irq); - enable_irq_wake(chip->batt_irq[VBATT_LOW].irq); - chip->vbat_low_irq_enabled = true; - } - if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100) - fg_configure_soc(chip); - } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) { - if (chip->vbat_low_irq_enabled) { - disable_irq_wake(chip->batt_irq[VBATT_LOW].irq); - disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); - chip->vbat_low_irq_enabled = false; - } - } - fg_cap_learning_check(chip); - schedule_work(&chip->update_esr_work); - - if (chip->wa_flag & USE_CC_SOC_REG) { - if (fg_get_cc_soc(chip, &cc_soc)) { - pr_err("failed to get CC_SOC\n"); - return; - } - } - - if (chip->prev_status != chip->status && chip->last_sram_update_time) { - get_current_time(¤t_time); - /* - * When charging status changes, update SRAM parameters if it - * was not updated before 5 seconds from now. 
- */ - if (chip->last_sram_update_time + 5 < current_time) { - cancel_delayed_work(&chip->update_sram_data); - schedule_delayed_work(&chip->update_sram_data, - msecs_to_jiffies(0)); - } - if (chip->cyc_ctr.en) - schedule_work(&chip->cycle_count_work); - if ((chip->wa_flag & USE_CC_SOC_REG) && - chip->bad_batt_detection_en && - chip->status == POWER_SUPPLY_STATUS_CHARGING) { - chip->sw_cc_soc_data.init_sys_soc = capacity; - chip->sw_cc_soc_data.init_cc_soc = cc_soc; - if (fg_debug_mask & FG_STATUS) - pr_info(" Init_sys_soc %d init_cc_soc %d\n", - chip->sw_cc_soc_data.init_sys_soc, - chip->sw_cc_soc_data.init_cc_soc); - } - } - if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en - && chip->safety_timer_expired) { - chip->sw_cc_soc_data.delta_soc = - DIV_ROUND_CLOSEST(abs(cc_soc - - chip->sw_cc_soc_data.init_cc_soc) - * 100, FULL_PERCENT_28BIT); - chip->sw_cc_soc_data.full_capacity = - chip->sw_cc_soc_data.delta_soc + - chip->sw_cc_soc_data.init_sys_soc; - pr_info("Init_sys_soc %d init_cc_soc %d cc_soc %d delta_soc %d full_capacity %d\n", - chip->sw_cc_soc_data.init_sys_soc, - chip->sw_cc_soc_data.init_cc_soc, cc_soc, - chip->sw_cc_soc_data.delta_soc, - chip->sw_cc_soc_data.full_capacity); - /* - * If sw_cc_soc capacity greater than 150, then it's a bad - * battery. else, reset timer and restart charging. - */ - if (chip->sw_cc_soc_data.full_capacity > - MAX_BATTERY_CC_SOC_CAPACITY) { - pr_info("Battery possibly damaged, do not restart charging\n"); - } else { - pr_info("Reset safety-timer and restart charging\n"); - rc = set_prop_enable_charging(chip, false); - if (rc) { - pr_err("failed to disable charging %d\n", rc); - return; - } - - chip->safety_timer_expired = false; - msleep(200); - - rc = set_prop_enable_charging(chip, true); - if (rc) { - pr_err("failed to enable charging %d\n", rc); - return; - } - } - } -} - -/* - * Check for change in the status of input or OTG and schedule - * IADC gain compensation work. 
- */ -static void check_gain_compensation(struct fg_chip *chip) -{ - bool input_present = is_input_present(chip); - bool otg_present = is_otg_present(chip); - - if ((chip->wa_flag & IADC_GAIN_COMP_WA) - && ((chip->input_present ^ input_present) - || (chip->otg_present ^ otg_present))) { - fg_stay_awake(&chip->gain_comp_wakeup_source); - chip->input_present = input_present; - chip->otg_present = otg_present; - cancel_work_sync(&chip->gain_comp_work); - schedule_work(&chip->gain_comp_work); - } -} - -static void fg_hysteresis_config(struct fg_chip *chip) -{ - int hard_hot = 0, hard_cold = 0; - - hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT); - hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD); - if (chip->health == POWER_SUPPLY_HEALTH_OVERHEAT && !chip->batt_hot) { - /* turn down the hard hot threshold */ - chip->batt_hot = true; - set_prop_jeita_temp(chip, FG_MEM_HARD_HOT, - hard_hot - chip->hot_hysteresis); - if (fg_debug_mask & FG_STATUS) - pr_info("hard hot hysteresis: old hot=%d, new hot=%d\n", - hard_hot, hard_hot - chip->hot_hysteresis); - } else if (chip->health == POWER_SUPPLY_HEALTH_COLD && - !chip->batt_cold) { - /* turn up the hard cold threshold */ - chip->batt_cold = true; - set_prop_jeita_temp(chip, FG_MEM_HARD_COLD, - hard_cold + chip->cold_hysteresis); - if (fg_debug_mask & FG_STATUS) - pr_info("hard cold hysteresis: old cold=%d, new cold=%d\n", - hard_cold, hard_cold + chip->hot_hysteresis); - } else if (chip->health != POWER_SUPPLY_HEALTH_OVERHEAT && - chip->batt_hot) { - /* restore the hard hot threshold */ - set_prop_jeita_temp(chip, FG_MEM_HARD_HOT, - hard_hot + chip->hot_hysteresis); - chip->batt_hot = !chip->batt_hot; - if (fg_debug_mask & FG_STATUS) - pr_info("restore hard hot threshold: old hot=%d, new hot=%d\n", - hard_hot, - hard_hot + chip->hot_hysteresis); - } else if (chip->health != POWER_SUPPLY_HEALTH_COLD && - chip->batt_cold) { - /* restore the hard cold threshold */ - set_prop_jeita_temp(chip, FG_MEM_HARD_COLD, - 
hard_cold - chip->cold_hysteresis); - chip->batt_cold = !chip->batt_cold; - if (fg_debug_mask & FG_STATUS) - pr_info("restore hard cold threshold: old cold=%d, new cold=%d\n", - hard_cold, - hard_cold - chip->cold_hysteresis); - } -} - -#define BATT_INFO_STS(base) (base + 0x09) -#define JEITA_HARD_HOT_RT_STS BIT(6) -#define JEITA_HARD_COLD_RT_STS BIT(5) -static int fg_init_batt_temp_state(struct fg_chip *chip) -{ - int rc = 0; - u8 batt_info_sts; - int hard_hot = 0, hard_cold = 0; - - /* - * read the batt_info_sts register to parse battery's - * initial status and do hysteresis config accordingly. - */ - rc = fg_read(chip, &batt_info_sts, - BATT_INFO_STS(chip->batt_base), 1); - if (rc) { - pr_err("failed to read batt info sts, rc=%d\n", rc); - return rc; - } - - hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT); - hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD); - chip->batt_hot = - (batt_info_sts & JEITA_HARD_HOT_RT_STS) ? true : false; - chip->batt_cold = - (batt_info_sts & JEITA_HARD_COLD_RT_STS) ? 
true : false; - if (chip->batt_hot || chip->batt_cold) { - if (chip->batt_hot) { - chip->health = POWER_SUPPLY_HEALTH_OVERHEAT; - set_prop_jeita_temp(chip, FG_MEM_HARD_HOT, - hard_hot - chip->hot_hysteresis); - } else { - chip->health = POWER_SUPPLY_HEALTH_COLD; - set_prop_jeita_temp(chip, FG_MEM_HARD_COLD, - hard_cold + chip->cold_hysteresis); - } - } - - return rc; -} - -static int fg_power_set_property(struct power_supply *psy, - enum power_supply_property psp, - const union power_supply_propval *val) -{ - struct fg_chip *chip = power_supply_get_drvdata(psy); - int rc = 0, unused; - - switch (psp) { - case POWER_SUPPLY_PROP_COOL_TEMP: - rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_COLD, val->intval); - break; - case POWER_SUPPLY_PROP_WARM_TEMP: - rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_HOT, val->intval); - break; - case POWER_SUPPLY_PROP_UPDATE_NOW: - if (val->intval) - update_sram_data(chip, &unused); - break; - case POWER_SUPPLY_PROP_STATUS: - chip->prev_status = chip->status; - chip->status = val->intval; - schedule_work(&chip->status_change_work); - check_gain_compensation(chip); - break; - case POWER_SUPPLY_PROP_HEALTH: - chip->health = val->intval; - if (chip->health == POWER_SUPPLY_HEALTH_GOOD) { - fg_stay_awake(&chip->resume_soc_wakeup_source); - schedule_work(&chip->set_resume_soc_work); - } - - if (chip->jeita_hysteresis_support) - fg_hysteresis_config(chip); - break; - case POWER_SUPPLY_PROP_CHARGE_DONE: - chip->charge_done = val->intval; - if (!chip->resume_soc_lowered) { - fg_stay_awake(&chip->resume_soc_wakeup_source); - schedule_work(&chip->set_resume_soc_work); - } - break; - case POWER_SUPPLY_PROP_CYCLE_COUNT_ID: - if ((val->intval > 0) && (val->intval <= BUCKET_COUNT)) { - chip->cyc_ctr.id = val->intval; - } else { - pr_err("rejecting invalid cycle_count_id = %d\n", - val->intval); - rc = -EINVAL; - } - break; - case POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED: - chip->safety_timer_expired = val->intval; - schedule_work(&chip->status_change_work); 
- break; - case POWER_SUPPLY_PROP_HI_POWER: - if (chip->wa_flag & BCL_HI_POWER_FOR_CHGLED_WA) { - chip->bcl_lpm_disabled = !!val->intval; - schedule_work(&chip->bcl_hi_power_work); - } - break; - default: - return -EINVAL; - }; - - return rc; -}; - -static int fg_property_is_writeable(struct power_supply *psy, - enum power_supply_property psp) -{ - switch (psp) { - case POWER_SUPPLY_PROP_COOL_TEMP: - case POWER_SUPPLY_PROP_WARM_TEMP: - case POWER_SUPPLY_PROP_CYCLE_COUNT_ID: - return 1; - default: - break; - } - - return 0; -} - -#define SRAM_DUMP_START 0x400 -#define SRAM_DUMP_LEN 0x200 -static void dump_sram(struct work_struct *work) -{ - int i, rc; - u8 *buffer, rt_sts; - char str[16]; - struct fg_chip *chip = container_of(work, - struct fg_chip, - dump_sram); - - buffer = devm_kzalloc(chip->dev, SRAM_DUMP_LEN, GFP_KERNEL); - if (buffer == NULL) { - pr_err("Can't allocate buffer\n"); - return; - } - - rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->soc_base), 1); - if (rc) - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->soc_base), rc); - else - pr_info("soc rt_sts: 0x%x\n", rt_sts); - - rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->batt_base), 1); - if (rc) - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->batt_base), rc); - else - pr_info("batt rt_sts: 0x%x\n", rt_sts); - - rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->mem_base), 1); - if (rc) - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->mem_base), rc); - else - pr_info("memif rt_sts: 0x%x\n", rt_sts); - - rc = fg_mem_read(chip, buffer, SRAM_DUMP_START, SRAM_DUMP_LEN, 0, 0); - if (rc) { - pr_err("dump failed: rc = %d\n", rc); - return; - } - - for (i = 0; i < SRAM_DUMP_LEN; i += 4) { - str[0] = '\0'; - fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buffer + i, 4); - pr_info("%03X %s\n", SRAM_DUMP_START + i, str); - } - devm_kfree(chip->dev, buffer); -} - -#define MAXRSCHANGE_REG 0x434 -#define ESR_VALUE_OFFSET 1 -#define ESR_STRICT_VALUE 0x4120391F391F3019 
-#define ESR_DEFAULT_VALUE 0x58CD4A6761C34A67 -static void update_esr_value(struct work_struct *work) -{ - union power_supply_propval prop = {0, }; - u64 esr_value; - int rc = 0; - struct fg_chip *chip = container_of(work, - struct fg_chip, - update_esr_work); - - if (!is_charger_available(chip)) - return; - - power_supply_get_property(chip->batt_psy, - POWER_SUPPLY_PROP_CHARGE_TYPE, &prop); - - if (!chip->esr_strict_filter) { - if ((prop.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER && - chip->status == POWER_SUPPLY_STATUS_CHARGING) || - (chip->status == POWER_SUPPLY_STATUS_FULL)) { - esr_value = ESR_STRICT_VALUE; - rc = fg_mem_write(chip, (u8 *)&esr_value, - MAXRSCHANGE_REG, 8, - ESR_VALUE_OFFSET, 0); - if (rc) - pr_err("failed to write strict ESR value rc=%d\n", - rc); - else - chip->esr_strict_filter = true; - } - } else if ((prop.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER && - chip->status == POWER_SUPPLY_STATUS_CHARGING) || - (chip->status == POWER_SUPPLY_STATUS_DISCHARGING)) { - esr_value = ESR_DEFAULT_VALUE; - rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8, - ESR_VALUE_OFFSET, 0); - if (rc) - pr_err("failed to write default ESR value rc=%d\n", rc); - else - chip->esr_strict_filter = false; - } -} - -#define TEMP_COUNTER_REG 0x580 -#define VBAT_FILTERED_OFFSET 1 -#define GAIN_REG 0x424 -#define GAIN_OFFSET 1 -#define K_VCOR_REG 0x484 -#define DEF_GAIN_OFFSET 2 -#define PICO_UNIT 0xE8D4A51000LL -#define ATTO_UNIT 0xDE0B6B3A7640000LL -#define VBAT_REF 3800000 - -/* - * IADC Gain compensation steps: - * If Input/OTG absent: - * - read VBAT_FILTERED, KVCOR, GAIN - * - calculate the gain compensation using following formula: - * gain = (1 + gain) * (1 + kvcor * (vbat_filtered - 3800000)) - 1; - * else - * - reset to the default gain compensation - */ -static void iadc_gain_comp_work(struct work_struct *work) -{ - u8 reg[4]; - int rc; - uint64_t vbat_filtered; - int64_t gain, kvcor, temp, numerator; - struct fg_chip *chip = container_of(work, struct 
fg_chip, - gain_comp_work); - bool input_present = is_input_present(chip); - bool otg_present = is_otg_present(chip); - - if (!chip->init_done) - goto done; - - if (!input_present && !otg_present) { - /* read VBAT_FILTERED */ - rc = fg_mem_read(chip, reg, TEMP_COUNTER_REG, 3, - VBAT_FILTERED_OFFSET, 0); - if (rc) { - pr_err("Failed to read VBAT: rc=%d\n", rc); - goto done; - } - temp = (reg[2] << 16) | (reg[1] << 8) | reg[0]; - vbat_filtered = div_u64((u64)temp * LSB_24B_NUMRTR, - LSB_24B_DENMTR); - - /* read K_VCOR */ - rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, 0, 0); - if (rc) { - pr_err("Failed to KVCOR rc=%d\n", rc); - goto done; - } - kvcor = half_float(reg); - - /* calculate gain */ - numerator = (MICRO_UNIT + chip->iadc_comp_data.dfl_gain) - * (PICO_UNIT + kvcor * (vbat_filtered - VBAT_REF)) - - ATTO_UNIT; - gain = div64_s64(numerator, PICO_UNIT); - - /* write back gain */ - half_float_to_buffer(gain, reg); - rc = fg_mem_write(chip, reg, GAIN_REG, 2, GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to write gain reg rc=%d\n", rc); - goto done; - } - - if (fg_debug_mask & FG_STATUS) - pr_info("IADC gain update [%x %x]\n", reg[1], reg[0]); - chip->iadc_comp_data.gain_active = true; - } else { - /* reset gain register */ - rc = fg_mem_write(chip, chip->iadc_comp_data.dfl_gain_reg, - GAIN_REG, 2, GAIN_OFFSET, 0); - if (rc) { - pr_err("unable to write gain comp: %d\n", rc); - goto done; - } - - if (fg_debug_mask & FG_STATUS) - pr_info("IADC gain reset [%x %x]\n", - chip->iadc_comp_data.dfl_gain_reg[1], - chip->iadc_comp_data.dfl_gain_reg[0]); - chip->iadc_comp_data.gain_active = false; - } - -done: - fg_relax(&chip->gain_comp_wakeup_source); -} - -#define BATT_MISSING_STS BIT(6) -static bool is_battery_missing(struct fg_chip *chip) -{ - int rc; - u8 fg_batt_sts; - - rc = fg_read(chip, &fg_batt_sts, - INT_RT_STS(chip->batt_base), 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->batt_base), rc); - return false; - } - - return 
(fg_batt_sts & BATT_MISSING_STS) ? true : false; -} - -#define SOC_FIRST_EST_DONE BIT(5) -static bool is_first_est_done(struct fg_chip *chip) -{ - int rc; - u8 fg_soc_sts; - - rc = fg_read(chip, &fg_soc_sts, - INT_RT_STS(chip->soc_base), 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->soc_base), rc); - return false; - } - - return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false; -} - -static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip) -{ - struct fg_chip *chip = _chip; - int rc; - bool vbatt_low_sts; - - if (fg_debug_mask & FG_IRQS) - pr_info("vbatt-low triggered\n"); - - if (chip->status == POWER_SUPPLY_STATUS_CHARGING) { - rc = fg_get_vbatt_status(chip, &vbatt_low_sts); - if (rc) { - pr_err("error in reading vbatt_status, rc:%d\n", rc); - goto out; - } - if (!vbatt_low_sts && chip->vbat_low_irq_enabled) { - if (fg_debug_mask & FG_IRQS) - pr_info("disabling vbatt_low irq\n"); - disable_irq_wake(chip->batt_irq[VBATT_LOW].irq); - disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); - chip->vbat_low_irq_enabled = false; - } - } - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); -out: - return IRQ_HANDLED; -} - -static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip) -{ - struct fg_chip *chip = _chip; - bool batt_missing = is_battery_missing(chip); - - if (batt_missing) { - chip->battery_missing = true; - chip->profile_loaded = false; - chip->batt_type = default_batt_type; - mutex_lock(&chip->cyc_ctr.lock); - if (fg_debug_mask & FG_IRQS) - pr_info("battery missing, clearing cycle counters\n"); - clear_cycle_counter(chip); - mutex_unlock(&chip->cyc_ctr.lock); - } else { - if (!chip->use_otp_profile) { - reinit_completion(&chip->batt_id_avail); - reinit_completion(&chip->first_soc_done); - schedule_delayed_work(&chip->batt_profile_init, 0); - cancel_delayed_work(&chip->update_sram_data); - schedule_delayed_work( - &chip->update_sram_data, - msecs_to_jiffies(0)); - } else { - 
chip->battery_missing = false; - } - } - - if (fg_debug_mask & FG_IRQS) - pr_info("batt-missing triggered: %s\n", - batt_missing ? "missing" : "present"); - - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - return IRQ_HANDLED; -} - -static irqreturn_t fg_mem_avail_irq_handler(int irq, void *_chip) -{ - struct fg_chip *chip = _chip; - u8 mem_if_sts; - int rc; - - rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1); - if (rc) { - pr_err("failed to read mem status rc=%d\n", rc); - return IRQ_HANDLED; - } - - if (fg_check_sram_access(chip)) { - if ((fg_debug_mask & FG_IRQS) - & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)) - pr_info("sram access granted\n"); - reinit_completion(&chip->sram_access_revoked); - complete_all(&chip->sram_access_granted); - } else { - if ((fg_debug_mask & FG_IRQS) - & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)) - pr_info("sram access revoked\n"); - complete_all(&chip->sram_access_revoked); - } - - if (!rc && (fg_debug_mask & FG_IRQS) - & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)) - pr_info("mem_if sts 0x%02x\n", mem_if_sts); - - return IRQ_HANDLED; -} - -static irqreturn_t fg_soc_irq_handler(int irq, void *_chip) -{ - struct fg_chip *chip = _chip; - u8 soc_rt_sts; - int rc; - - rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->soc_base), rc); - } - - if (fg_debug_mask & FG_IRQS) - pr_info("triggered 0x%x\n", soc_rt_sts); - - schedule_work(&chip->battery_age_work); - - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - - if (chip->rslow_comp.chg_rs_to_rslow > 0 && - chip->rslow_comp.chg_rslow_comp_c1 > 0 && - chip->rslow_comp.chg_rslow_comp_c2 > 0) - schedule_work(&chip->rslow_comp_work); - if (chip->cyc_ctr.en) - schedule_work(&chip->cycle_count_work); - schedule_work(&chip->update_esr_work); - if (chip->charge_full) - schedule_work(&chip->charge_full_work); - if (chip->wa_flag & 
IADC_GAIN_COMP_WA - && chip->iadc_comp_data.gain_active) { - fg_stay_awake(&chip->gain_comp_wakeup_source); - schedule_work(&chip->gain_comp_work); - } - - if (chip->wa_flag & USE_CC_SOC_REG - && chip->learning_data.active) { - fg_stay_awake(&chip->capacity_learning_wakeup_source); - schedule_work(&chip->fg_cap_learning_work); - } - - if (chip->esr_pulse_tune_en) { - fg_stay_awake(&chip->esr_extract_wakeup_source); - schedule_work(&chip->esr_extract_config_work); - } - - return IRQ_HANDLED; -} - -#define FG_EMPTY_DEBOUNCE_MS 1500 -static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip) -{ - struct fg_chip *chip = _chip; - u8 soc_rt_sts; - int rc; - - rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->soc_base), rc); - goto done; - } - - if (fg_debug_mask & FG_IRQS) - pr_info("triggered 0x%x\n", soc_rt_sts); - if (fg_is_batt_empty(chip)) { - fg_stay_awake(&chip->empty_check_wakeup_source); - schedule_delayed_work(&chip->check_empty_work, - msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS)); - } else { - chip->soc_empty = false; - } - -done: - return IRQ_HANDLED; -} - -static irqreturn_t fg_first_soc_irq_handler(int irq, void *_chip) -{ - struct fg_chip *chip = _chip; - - if (fg_debug_mask & FG_IRQS) - pr_info("triggered\n"); - - if (fg_est_dump) - schedule_work(&chip->dump_sram); - - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - - complete_all(&chip->first_soc_done); - - return IRQ_HANDLED; -} - -static void fg_external_power_changed(struct power_supply *psy) -{ - struct fg_chip *chip = power_supply_get_drvdata(psy); - - if (is_input_present(chip) && chip->rslow_comp.active && - chip->rslow_comp.chg_rs_to_rslow > 0 && - chip->rslow_comp.chg_rslow_comp_c1 > 0 && - chip->rslow_comp.chg_rslow_comp_c2 > 0) - schedule_work(&chip->rslow_comp_work); - if (!is_input_present(chip) && chip->resume_soc_lowered) { - 
fg_stay_awake(&chip->resume_soc_wakeup_source); - schedule_work(&chip->set_resume_soc_work); - } - if (!is_input_present(chip) && chip->charge_full) - schedule_work(&chip->charge_full_work); -} - -static void set_resume_soc_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - set_resume_soc_work); - int rc, resume_soc_raw; - - if (is_input_present(chip) && !chip->resume_soc_lowered) { - if (!chip->charge_done) - goto done; - resume_soc_raw = get_monotonic_soc_raw(chip) - - (0xFF - settings[FG_MEM_RESUME_SOC].value); - if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) { - rc = fg_set_resume_soc(chip, resume_soc_raw); - if (rc) { - pr_err("Couldn't set resume SOC for FG\n"); - goto done; - } - if (fg_debug_mask & FG_STATUS) { - pr_info("resume soc lowered to 0x%02x\n", - resume_soc_raw); - } - } else if (settings[FG_MEM_RESUME_SOC].value > 0) { - pr_err("bad resume soc 0x%02x\n", resume_soc_raw); - } - chip->charge_done = false; - chip->resume_soc_lowered = true; - } else if (chip->resume_soc_lowered && (!is_input_present(chip) - || chip->health == POWER_SUPPLY_HEALTH_GOOD)) { - resume_soc_raw = settings[FG_MEM_RESUME_SOC].value; - if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) { - rc = fg_set_resume_soc(chip, resume_soc_raw); - if (rc) { - pr_err("Couldn't set resume SOC for FG\n"); - goto done; - } - if (fg_debug_mask & FG_STATUS) { - pr_info("resume soc set to 0x%02x\n", - resume_soc_raw); - } - } else if (settings[FG_MEM_RESUME_SOC].value > 0) { - pr_err("bad resume soc 0x%02x\n", resume_soc_raw); - } - chip->resume_soc_lowered = false; - } -done: - fg_relax(&chip->resume_soc_wakeup_source); -} - - -#define OCV_COEFFS_START_REG 0x4C0 -#define OCV_JUNCTION_REG 0x4D8 -#define NOM_CAP_REG 0x4F4 -#define CUTOFF_VOLTAGE_REG 0x40C -#define RSLOW_CFG_REG 0x538 -#define RSLOW_CFG_OFFSET 2 -#define RSLOW_THRESH_REG 0x52C -#define RSLOW_THRESH_OFFSET 0 -#define TEMP_RS_TO_RSLOW_OFFSET 2 -#define RSLOW_COMP_REG 
0x528 -#define RSLOW_COMP_C1_OFFSET 0 -#define RSLOW_COMP_C2_OFFSET 2 -static int populate_system_data(struct fg_chip *chip) -{ - u8 buffer[24]; - int rc, i; - int16_t cc_mah; - - fg_mem_lock(chip); - rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0); - if (rc) { - pr_err("Failed to read ocv coefficients: %d\n", rc); - goto done; - } - for (i = 0; i < 12; i += 1) - chip->ocv_coeffs[i] = half_float(buffer + (i * 2)); - if (fg_debug_mask & FG_AGING) { - pr_info("coeffs1 = %lld %lld %lld %lld\n", - chip->ocv_coeffs[0], chip->ocv_coeffs[1], - chip->ocv_coeffs[2], chip->ocv_coeffs[3]); - pr_info("coeffs2 = %lld %lld %lld %lld\n", - chip->ocv_coeffs[4], chip->ocv_coeffs[5], - chip->ocv_coeffs[6], chip->ocv_coeffs[7]); - pr_info("coeffs3 = %lld %lld %lld %lld\n", - chip->ocv_coeffs[8], chip->ocv_coeffs[9], - chip->ocv_coeffs[10], chip->ocv_coeffs[11]); - } - rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0); - chip->ocv_junction_p1p2 = buffer[0] * 100 / 255; - rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0); - chip->ocv_junction_p2p3 = buffer[0] * 100 / 255; - if (rc) { - pr_err("Failed to read ocv junctions: %d\n", rc); - goto done; - } - rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0); - if (rc) { - pr_err("Failed to read nominal capacitance: %d\n", rc); - goto done; - } - chip->nom_cap_uah = bcap_uah_2b(buffer); - chip->actual_cap_uah = chip->nom_cap_uah; - if (chip->learning_data.learned_cc_uah == 0) { - chip->learning_data.learned_cc_uah = chip->nom_cap_uah; - fg_cap_learning_save_data(chip); - } else if (chip->learning_data.feedback_on) { - cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000); - rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah); - if (rc) - pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc); - } - rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0); - if (rc) { - pr_err("Failed to read cutoff voltage: %d\n", rc); - goto done; - } - chip->cutoff_voltage = voltage_2b(buffer); - if (fg_debug_mask 
& FG_AGING) - pr_info("cutoff_voltage = %lld, nom_cap_uah = %d p1p2 = %d, p2p3 = %d\n", - chip->cutoff_voltage, chip->nom_cap_uah, - chip->ocv_junction_p1p2, - chip->ocv_junction_p2p3); - - rc = fg_mem_read(chip, buffer, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, 0); - if (rc) { - pr_err("unable to read rslow cfg: %d\n", rc); - goto done; - } - chip->rslow_comp.rslow_cfg = buffer[0]; - rc = fg_mem_read(chip, buffer, RSLOW_THRESH_REG, 1, - RSLOW_THRESH_OFFSET, 0); - if (rc) { - pr_err("unable to read rslow thresh: %d\n", rc); - goto done; - } - chip->rslow_comp.rslow_thr = buffer[0]; - rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, - RSLOW_THRESH_OFFSET, 0); - if (rc) { - pr_err("unable to read rs to rslow: %d\n", rc); - goto done; - } - memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2); - rc = fg_mem_read(chip, buffer, RSLOW_COMP_REG, 4, - RSLOW_COMP_C1_OFFSET, 0); - if (rc) { - pr_err("unable to read rslow comp: %d\n", rc); - goto done; - } - memcpy(chip->rslow_comp.rslow_comp, buffer, 4); - -done: - fg_mem_release(chip); - return rc; -} - -#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5)) -#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3)) -#define RSLOW_THRESH_FULL_VAL 0xFF -static int fg_rslow_charge_comp_set(struct fg_chip *chip) -{ - int rc; - u8 buffer[2]; - - mutex_lock(&chip->rslow_comp.lock); - fg_mem_lock(chip); - - rc = fg_mem_masked_write(chip, RSLOW_CFG_REG, - RSLOW_CFG_MASK, RSLOW_CFG_ON_VAL, RSLOW_CFG_OFFSET); - if (rc) { - pr_err("unable to write rslow cfg: %d\n", rc); - goto done; - } - rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG, - 0xFF, RSLOW_THRESH_FULL_VAL, RSLOW_THRESH_OFFSET); - if (rc) { - pr_err("unable to write rslow thresh: %d\n", rc); - goto done; - } - - half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer); - rc = fg_mem_write(chip, buffer, - TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0); - if (rc) { - pr_err("unable to write rs to rslow: %d\n", rc); - goto done; - } - 
half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c1, buffer); - rc = fg_mem_write(chip, buffer, - RSLOW_COMP_REG, 2, RSLOW_COMP_C1_OFFSET, 0); - if (rc) { - pr_err("unable to write rslow comp: %d\n", rc); - goto done; - } - half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c2, buffer); - rc = fg_mem_write(chip, buffer, - RSLOW_COMP_REG, 2, RSLOW_COMP_C2_OFFSET, 0); - if (rc) { - pr_err("unable to write rslow comp: %d\n", rc); - goto done; - } - chip->rslow_comp.active = true; - if (fg_debug_mask & FG_STATUS) - pr_info("Activated rslow charge comp values\n"); - -done: - fg_mem_release(chip); - mutex_unlock(&chip->rslow_comp.lock); - return rc; -} - -#define RSLOW_CFG_ORIG_MASK (BIT(4) | BIT(5)) -static int fg_rslow_charge_comp_clear(struct fg_chip *chip) -{ - u8 reg; - int rc; - - mutex_lock(&chip->rslow_comp.lock); - fg_mem_lock(chip); - - reg = chip->rslow_comp.rslow_cfg & RSLOW_CFG_ORIG_MASK; - rc = fg_mem_masked_write(chip, RSLOW_CFG_REG, - RSLOW_CFG_MASK, reg, RSLOW_CFG_OFFSET); - if (rc) { - pr_err("unable to write rslow cfg: %d\n", rc); - goto done; - } - rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG, - 0xFF, chip->rslow_comp.rslow_thr, RSLOW_THRESH_OFFSET); - if (rc) { - pr_err("unable to write rslow thresh: %d\n", rc); - goto done; - } - - rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow, - TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0); - if (rc) { - pr_err("unable to write rs to rslow: %d\n", rc); - goto done; - } - rc = fg_mem_write(chip, chip->rslow_comp.rslow_comp, - RSLOW_COMP_REG, 4, RSLOW_COMP_C1_OFFSET, 0); - if (rc) { - pr_err("unable to write rslow comp: %d\n", rc); - goto done; - } - chip->rslow_comp.active = false; - if (fg_debug_mask & FG_STATUS) - pr_info("Cleared rslow charge comp values\n"); - -done: - fg_mem_release(chip); - mutex_unlock(&chip->rslow_comp.lock); - return rc; -} - -static void rslow_comp_work(struct work_struct *work) -{ - int battery_soc_1b; - struct fg_chip *chip = container_of(work, - struct fg_chip, - 
rslow_comp_work); - - battery_soc_1b = get_battery_soc_raw(chip) >> 16; - if (battery_soc_1b > chip->rslow_comp.chg_rslow_comp_thr - && chip->status == POWER_SUPPLY_STATUS_CHARGING) { - if (!chip->rslow_comp.active) - fg_rslow_charge_comp_set(chip); - } else { - if (chip->rslow_comp.active) - fg_rslow_charge_comp_clear(chip); - } -} - -#define MICROUNITS_TO_ADC_RAW(units) \ - div64_s64(units * LSB_16B_DENMTR, LSB_16B_NUMRTR) -static int update_chg_iterm(struct fg_chip *chip) -{ - u8 data[2]; - u16 converted_current_raw; - s64 current_ma = -settings[FG_MEM_CHG_TERM_CURRENT].value; - - converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000); - data[0] = cpu_to_le16(converted_current_raw) & 0xFF; - data[1] = cpu_to_le16(converted_current_raw) >> 8; - - if (fg_debug_mask & FG_STATUS) - pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n", - current_ma, converted_current_raw, data[0], data[1]); - return fg_mem_write(chip, data, - settings[FG_MEM_CHG_TERM_CURRENT].address, - 2, settings[FG_MEM_CHG_TERM_CURRENT].offset, 0); -} - -#define CC_CV_SETPOINT_REG 0x4F8 -#define CC_CV_SETPOINT_OFFSET 0 -static void update_cc_cv_setpoint(struct fg_chip *chip) -{ - int rc; - u8 tmp[2]; - - if (!chip->cc_cv_threshold_mv) - return; - batt_to_setpoint_adc(chip->cc_cv_threshold_mv, tmp); - rc = fg_mem_write(chip, tmp, CC_CV_SETPOINT_REG, 2, - CC_CV_SETPOINT_OFFSET, 0); - if (rc) { - pr_err("failed to write CC_CV_VOLT rc=%d\n", rc); - return; - } - if (fg_debug_mask & FG_STATUS) - pr_info("Wrote %x %x to address %x for CC_CV setpoint\n", - tmp[0], tmp[1], CC_CV_SETPOINT_REG); -} - -#define CBITS_INPUT_FILTER_REG 0x4B4 -#define CBITS_RMEAS1_OFFSET 1 -#define CBITS_RMEAS2_OFFSET 2 -#define CBITS_RMEAS1_DEFAULT_VAL 0x65 -#define CBITS_RMEAS2_DEFAULT_VAL 0x65 -#define IMPTR_FAST_TIME_SHIFT 1 -#define IMPTR_LONG_TIME_SHIFT (1 << 4) -#define IMPTR_PULSE_CTR_CHG 1 -#define IMPTR_PULSE_CTR_DISCHG (1 << 4) -static int fg_config_imptr_pulse(struct fg_chip *chip, bool 
slow) -{ - int rc; - u8 cntr[2] = {0, 0}; - u8 val; - - if (slow == chip->imptr_pulse_slow_en) { - if (fg_debug_mask & FG_STATUS) - pr_info("imptr_pulse_slow is %sabled already\n", - slow ? "en" : "dis"); - return 0; - } - - fg_mem_lock(chip); - - val = slow ? (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT) : - CBITS_RMEAS1_DEFAULT_VAL; - rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1, - CBITS_RMEAS1_OFFSET, 0); - if (rc) { - pr_err("unable to write cbits_rmeas1_offset rc=%d\n", rc); - goto done; - } - - val = slow ? (IMPTR_PULSE_CTR_CHG | IMPTR_PULSE_CTR_DISCHG) : - CBITS_RMEAS2_DEFAULT_VAL; - rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1, - CBITS_RMEAS2_OFFSET, 0); - if (rc) { - pr_err("unable to write cbits_rmeas2_offset rc=%d\n", rc); - goto done; - } - - if (slow) { - rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 4, - COUNTER_IMPTR_OFFSET, 0); - if (rc) { - pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc); - goto done; - } - - rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2, - COUNTER_PULSE_OFFSET, 0); - if (rc) { - pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc); - goto done; - } - } - - chip->imptr_pulse_slow_en = slow; - if (fg_debug_mask & FG_STATUS) - pr_info("imptr_pulse_slow is %sabled\n", slow ? "en" : "dis"); -done: - fg_mem_release(chip); - return rc; -} - -#define CURRENT_DELTA_MIN_REG 0x42C -#define CURRENT_DELTA_MIN_OFFSET 1 -#define SYS_CFG_1_REG 0x4AC -#define SYS_CFG_1_OFFSET 0 -#define CURRENT_DELTA_MIN_DEFAULT 0x16 -#define CURRENT_DELTA_MIN_500MA 0xCD -#define RSLOW_CFG_USE_FIX_RSER_VAL BIT(7) -#define ENABLE_ESR_PULSE_VAL BIT(3) -static int fg_config_esr_extract(struct fg_chip *chip, bool disable) -{ - int rc; - u8 val; - - if (disable == chip->esr_extract_disabled) { - if (fg_debug_mask & FG_STATUS) - pr_info("ESR extract already %sabled\n", - disable ? "dis" : "en"); - return 0; - } - - fg_mem_lock(chip); - - val = disable ? 
CURRENT_DELTA_MIN_500MA : - CURRENT_DELTA_MIN_DEFAULT; - rc = fg_mem_write(chip, &val, CURRENT_DELTA_MIN_REG, 1, - CURRENT_DELTA_MIN_OFFSET, 0); - if (rc) { - pr_err("unable to write curr_delta_min rc=%d\n", rc); - goto done; - } - - val = disable ? RSLOW_CFG_USE_FIX_RSER_VAL : 0; - rc = fg_mem_masked_write(chip, RSLOW_CFG_REG, - RSLOW_CFG_USE_FIX_RSER_VAL, val, RSLOW_CFG_OFFSET); - if (rc) { - pr_err("unable to write rslow cfg rc= %d\n", rc); - goto done; - } - - val = disable ? 0 : ENABLE_ESR_PULSE_VAL; - rc = fg_mem_masked_write(chip, SYS_CFG_1_REG, - ENABLE_ESR_PULSE_VAL, val, SYS_CFG_1_OFFSET); - if (rc) { - pr_err("unable to write sys_cfg_1 rc= %d\n", rc); - goto done; - } - - chip->esr_extract_disabled = disable; - if (fg_debug_mask & FG_STATUS) - pr_info("ESR extract is %sabled\n", disable ? "dis" : "en"); -done: - fg_mem_release(chip); - return rc; -} - -#define ESR_EXTRACT_STOP_SOC 2 -#define IMPTR_PULSE_CONFIG_SOC 5 -static void esr_extract_config_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, struct fg_chip, - esr_extract_config_work); - bool input_present = is_input_present(chip); - int capacity = get_prop_capacity(chip); - - if (input_present && capacity <= ESR_EXTRACT_STOP_SOC) { - fg_config_esr_extract(chip, true); - } else if (capacity > ESR_EXTRACT_STOP_SOC) { - fg_config_esr_extract(chip, false); - - if (capacity <= IMPTR_PULSE_CONFIG_SOC) - fg_config_imptr_pulse(chip, true); - else - fg_config_imptr_pulse(chip, false); - } - - fg_relax(&chip->esr_extract_wakeup_source); -} - -#define LOW_LATENCY BIT(6) -#define BATT_PROFILE_OFFSET 0x4C0 -#define PROFILE_INTEGRITY_REG 0x53C -#define PROFILE_INTEGRITY_BIT BIT(0) -#define FIRST_EST_DONE_BIT BIT(5) -#define MAX_TRIES_FIRST_EST 3 -#define FIRST_EST_WAIT_MS 2000 -#define PROFILE_LOAD_TIMEOUT_MS 5000 -static int fg_do_restart(struct fg_chip *chip, bool write_profile) -{ - int rc, ibat_ua; - u8 reg = 0; - u8 buf[2]; - bool tried_once = false; - - if (fg_debug_mask & 
FG_STATUS) - pr_info("restarting fuel gauge...\n"); - -try_again: - if (write_profile) { - if (!chip->charging_disabled) { - pr_err("Charging not yet disabled!\n"); - return -EINVAL; - } - - ibat_ua = get_sram_prop_now(chip, FG_DATA_CURRENT); - if (ibat_ua == -EINVAL) { - pr_err("SRAM not updated yet!\n"); - return ibat_ua; - } - - if (ibat_ua < 0) { - pr_warn("Charging enabled?, ibat_ua: %d\n", ibat_ua); - - if (!tried_once) { - cancel_delayed_work(&chip->update_sram_data); - schedule_delayed_work(&chip->update_sram_data, - msecs_to_jiffies(0)); - msleep(1000); - tried_once = true; - goto try_again; - } - } - } - - chip->fg_restarting = true; - /* - * save the temperature if the sw rbias control is active so that there - * is no gap of time when there is no valid temperature read after the - * restart - */ - if (chip->sw_rbias_ctrl) { - rc = fg_mem_read(chip, buf, - fg_data[FG_DATA_BATT_TEMP].address, - fg_data[FG_DATA_BATT_TEMP].len, - fg_data[FG_DATA_BATT_TEMP].offset, 0); - if (rc) { - pr_err("failed to read batt temp rc=%d\n", rc); - goto sub_and_fail; - } - } - /* - * release the sram access and configure the correct settings - * before re-requesting access. 
- */ - mutex_lock(&chip->rw_lock); - fg_release_access(chip); - - rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD, - NO_OTP_PROF_RELOAD, 0, 1); - if (rc) { - pr_err("failed to set no otp reload bit\n"); - goto unlock_and_fail; - } - - /* unset the restart bits so the fg doesn't continuously restart */ - reg = REDO_FIRST_ESTIMATE | RESTART_GO; - rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART, - reg, 0, 1); - if (rc) { - pr_err("failed to unset fg restart: %d\n", rc); - goto unlock_and_fail; - } - - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), - LOW_LATENCY, LOW_LATENCY, 1); - if (rc) { - pr_err("failed to set low latency access bit\n"); - goto unlock_and_fail; - } - mutex_unlock(&chip->rw_lock); - - /* read once to get a fg cycle in */ - rc = fg_mem_read(chip, ®, PROFILE_INTEGRITY_REG, 1, 0, 0); - if (rc) { - pr_err("failed to read profile integrity rc=%d\n", rc); - goto fail; - } - - /* - * If this is not the first time a profile has been loaded, sleep for - * 3 seconds to make sure the NO_OTP_RELOAD is cleared in memory - */ - if (chip->first_profile_loaded) - msleep(3000); - - mutex_lock(&chip->rw_lock); - fg_release_access(chip); - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), LOW_LATENCY, 0, 1); - if (rc) { - pr_err("failed to set low latency access bit\n"); - goto unlock_and_fail; - } - - atomic_add_return(1, &chip->memif_user_cnt); - mutex_unlock(&chip->rw_lock); - - if (write_profile) { - /* write the battery profile */ - rc = fg_mem_write(chip, chip->batt_profile, BATT_PROFILE_OFFSET, - chip->batt_profile_len, 0, 1); - if (rc) { - pr_err("failed to write profile rc=%d\n", rc); - goto sub_and_fail; - } - /* write the integrity bits and release access */ - rc = fg_mem_masked_write(chip, PROFILE_INTEGRITY_REG, - PROFILE_INTEGRITY_BIT, - PROFILE_INTEGRITY_BIT, 0); - if (rc) { - pr_err("failed to write profile rc=%d\n", rc); - goto sub_and_fail; - } - } - - /* decrement the user count so that memory access can be released */ - 
fg_release_access_if_necessary(chip); - - /* - * make sure that the first estimate has completed - * in case of a hotswap - */ - rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done, - msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS)); - if (rc <= 0) { - pr_err("transaction timed out rc=%d\n", rc); - rc = -ETIMEDOUT; - goto fail; - } - - /* - * reinitialize the completion so that the driver knows when the restart - * finishes - */ - reinit_completion(&chip->first_soc_done); - - if (chip->esr_pulse_tune_en) { - fg_stay_awake(&chip->esr_extract_wakeup_source); - schedule_work(&chip->esr_extract_config_work); - } - - /* - * set the restart bits so that the next fg cycle will not reload - * the profile - */ - rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD, - NO_OTP_PROF_RELOAD, NO_OTP_PROF_RELOAD, 1); - if (rc) { - pr_err("failed to set no otp reload bit\n"); - goto fail; - } - - reg = REDO_FIRST_ESTIMATE | RESTART_GO; - rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART, - reg, reg, 1); - if (rc) { - pr_err("failed to set fg restart: %d\n", rc); - goto fail; - } - - /* wait for the first estimate to complete */ - rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done, - msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS)); - if (rc <= 0) { - pr_err("transaction timed out rc=%d\n", rc); - rc = -ETIMEDOUT; - goto fail; - } - rc = fg_read(chip, ®, INT_RT_STS(chip->soc_base), 1); - if (rc) { - pr_err("spmi read failed: addr=%03X, rc=%d\n", - INT_RT_STS(chip->soc_base), rc); - goto fail; - } - if ((reg & FIRST_EST_DONE_BIT) == 0) - pr_err("Battery profile reloading failed, no first estimate\n"); - - rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD, - NO_OTP_PROF_RELOAD, 0, 1); - if (rc) { - pr_err("failed to set no otp reload bit\n"); - goto fail; - } - /* unset the restart bits so the fg doesn't continuously restart */ - reg = REDO_FIRST_ESTIMATE | RESTART_GO; - rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART, - reg, 0, 1); - if 
(rc) { - pr_err("failed to unset fg restart: %d\n", rc); - goto fail; - } - - /* restore the battery temperature reading here */ - if (chip->sw_rbias_ctrl) { - if (fg_debug_mask & FG_STATUS) - pr_info("reloaded 0x%02x%02x into batt temp", - buf[0], buf[1]); - rc = fg_mem_write(chip, buf, - fg_data[FG_DATA_BATT_TEMP].address, - fg_data[FG_DATA_BATT_TEMP].len, - fg_data[FG_DATA_BATT_TEMP].offset, 0); - if (rc) { - pr_err("failed to write batt temp rc=%d\n", rc); - goto fail; - } - } - - /* Enable charging now as the first estimate is done now */ - if (chip->charging_disabled) { - rc = set_prop_enable_charging(chip, true); - if (rc) - pr_err("Failed to enable charging, rc=%d\n", rc); - else - chip->charging_disabled = false; - } - - chip->fg_restarting = false; - - if (fg_debug_mask & FG_STATUS) - pr_info("done!\n"); - return 0; - -unlock_and_fail: - mutex_unlock(&chip->rw_lock); - goto fail; -sub_and_fail: - fg_release_access_if_necessary(chip); - goto fail; -fail: - chip->fg_restarting = false; - return -EINVAL; -} - -#define FG_PROFILE_LEN 128 -#define PROFILE_COMPARE_LEN 32 -#define THERMAL_COEFF_ADDR 0x444 -#define THERMAL_COEFF_OFFSET 0x2 -#define BATTERY_PSY_WAIT_MS 2000 -static int fg_batt_profile_init(struct fg_chip *chip) -{ - int rc = 0, ret, len, batt_id; - struct device_node *node = chip->pdev->dev.of_node; - struct device_node *batt_node, *profile_node; - const char *data, *batt_type_str; - bool tried_again = false, vbat_in_range, profiles_same; - u8 reg = 0; - -wait: - fg_stay_awake(&chip->profile_wakeup_source); - ret = wait_for_completion_interruptible_timeout(&chip->batt_id_avail, - msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS)); - /* If we were interrupted wait again one more time. 
*/ - if (ret == -ERESTARTSYS && !tried_again) { - tried_again = true; - pr_debug("interrupted, waiting again\n"); - goto wait; - } else if (ret <= 0) { - rc = -ETIMEDOUT; - pr_err("profile loading timed out rc=%d\n", rc); - goto no_profile; - } - - batt_node = of_find_node_by_name(node, "qcom,battery-data"); - if (!batt_node) { - pr_warn("No available batterydata, using OTP defaults\n"); - rc = 0; - goto no_profile; - } - - batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID); - batt_id /= 1000; - if (fg_debug_mask & FG_STATUS) - pr_info("battery id = %dKOhms\n", batt_id); - - profile_node = of_batterydata_get_best_profile(batt_node, batt_id, - fg_batt_type); - if (IS_ERR_OR_NULL(profile_node)) { - rc = PTR_ERR(profile_node); - pr_err("couldn't find profile handle %d\n", rc); - goto no_profile; - } - - /* read rslow compensation values if they're available */ - rc = of_property_read_u32(profile_node, "qcom,chg-rs-to-rslow", - &chip->rslow_comp.chg_rs_to_rslow); - if (rc) { - chip->rslow_comp.chg_rs_to_rslow = -EINVAL; - if (rc != -EINVAL) - pr_err("Could not read rs to rslow: %d\n", rc); - } - rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c1", - &chip->rslow_comp.chg_rslow_comp_c1); - if (rc) { - chip->rslow_comp.chg_rslow_comp_c1 = -EINVAL; - if (rc != -EINVAL) - pr_err("Could not read rslow comp c1: %d\n", rc); - } - rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c2", - &chip->rslow_comp.chg_rslow_comp_c2); - if (rc) { - chip->rslow_comp.chg_rslow_comp_c2 = -EINVAL; - if (rc != -EINVAL) - pr_err("Could not read rslow comp c2: %d\n", rc); - } - rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-thr", - &chip->rslow_comp.chg_rslow_comp_thr); - if (rc) { - chip->rslow_comp.chg_rslow_comp_thr = -EINVAL; - if (rc != -EINVAL) - pr_err("Could not read rslow comp thr: %d\n", rc); - } - - rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv", - &chip->batt_max_voltage_uv); - - if (rc) - pr_warn("couldn't find battery max 
voltage\n"); - - /* - * Only configure from profile if fg-cc-cv-threshold-mv is not - * defined in the charger device node. - */ - if (!of_find_property(chip->pdev->dev.of_node, - "qcom,fg-cc-cv-threshold-mv", NULL)) { - of_property_read_u32(profile_node, - "qcom,fg-cc-cv-threshold-mv", - &chip->cc_cv_threshold_mv); - } - - data = of_get_property(profile_node, "qcom,fg-profile-data", &len); - if (!data) { - pr_err("no battery profile loaded\n"); - rc = 0; - goto no_profile; - } - - if (len != FG_PROFILE_LEN) { - pr_err("battery profile incorrect size: %d\n", len); - rc = -EINVAL; - goto no_profile; - } - - rc = of_property_read_string(profile_node, "qcom,battery-type", - &batt_type_str); - if (rc) { - pr_err("Could not find battery data type: %d\n", rc); - rc = 0; - goto no_profile; - } - - if (!chip->batt_profile) - chip->batt_profile = devm_kzalloc(chip->dev, - sizeof(char) * len, GFP_KERNEL); - - if (!chip->batt_profile) { - pr_err("out of memory\n"); - rc = -ENOMEM; - goto no_profile; - } - - rc = fg_mem_read(chip, ®, PROFILE_INTEGRITY_REG, 1, 0, 1); - if (rc) { - pr_err("failed to read profile integrity rc=%d\n", rc); - goto no_profile; - } - - rc = fg_mem_read(chip, chip->batt_profile, BATT_PROFILE_OFFSET, - len, 0, 1); - if (rc) { - pr_err("failed to read profile rc=%d\n", rc); - goto no_profile; - } - - /* Check whether the charger is ready */ - if (!is_charger_available(chip)) - goto reschedule; - - /* Disable charging for a FG cycle before calculating vbat_in_range */ - if (!chip->charging_disabled) { - rc = set_prop_enable_charging(chip, false); - if (rc) - pr_err("Failed to disable charging, rc=%d\n", rc); - - goto reschedule; - } - - vbat_in_range = get_vbat_est_diff(chip) - < settings[FG_MEM_VBAT_EST_DIFF].value * 1000; - profiles_same = memcmp(chip->batt_profile, data, - PROFILE_COMPARE_LEN) == 0; - if (reg & PROFILE_INTEGRITY_BIT) { - fg_cap_learning_load_data(chip); - if (vbat_in_range && !fg_is_batt_empty(chip) && profiles_same) { - if 
(fg_debug_mask & FG_STATUS) - pr_info("Battery profiles same, using default\n"); - if (fg_est_dump) - schedule_work(&chip->dump_sram); - goto done; - } - } else { - pr_info("Battery profile not same, clearing data\n"); - clear_cycle_counter(chip); - chip->learning_data.learned_cc_uah = 0; - } - - if (fg_est_dump) - dump_sram(&chip->dump_sram); - - if ((fg_debug_mask & FG_STATUS) && !vbat_in_range) - pr_info("Vbat out of range: v_current_pred: %d, v:%d\n", - fg_data[FG_DATA_CPRED_VOLTAGE].value, - fg_data[FG_DATA_VOLTAGE].value); - - if ((fg_debug_mask & FG_STATUS) && fg_is_batt_empty(chip)) - pr_info("battery empty\n"); - - if ((fg_debug_mask & FG_STATUS) && !profiles_same) - pr_info("profiles differ\n"); - - if (fg_debug_mask & FG_STATUS) { - pr_info("Using new profile\n"); - print_hex_dump(KERN_INFO, "FG: loaded profile: ", - DUMP_PREFIX_NONE, 16, 1, - chip->batt_profile, len, false); - } - - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - - memcpy(chip->batt_profile, data, len); - - chip->batt_profile_len = len; - - if (fg_debug_mask & FG_STATUS) - print_hex_dump(KERN_INFO, "FG: new profile: ", - DUMP_PREFIX_NONE, 16, 1, chip->batt_profile, - chip->batt_profile_len, false); - - rc = fg_do_restart(chip, true); - if (rc) { - pr_err("restart failed: %d\n", rc); - goto no_profile; - } - - /* - * Only configure from profile if thermal-coefficients is not - * defined in the FG device node. 
- */ - if (!of_find_property(chip->pdev->dev.of_node, - "qcom,thermal-coefficients", NULL)) { - data = of_get_property(profile_node, - "qcom,thermal-coefficients", &len); - if (data && len == THERMAL_COEFF_N_BYTES) { - memcpy(chip->thermal_coefficients, data, len); - rc = fg_mem_write(chip, chip->thermal_coefficients, - THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES, - THERMAL_COEFF_OFFSET, 0); - if (rc) - pr_err("spmi write failed addr:%03x, ret:%d\n", - THERMAL_COEFF_ADDR, rc); - else if (fg_debug_mask & FG_STATUS) - pr_info("Battery thermal coefficients changed\n"); - } - } - -done: - if (chip->charging_disabled) { - rc = set_prop_enable_charging(chip, true); - if (rc) - pr_err("Failed to enable charging, rc=%d\n", rc); - else - chip->charging_disabled = false; - } - - if (fg_batt_type) - chip->batt_type = fg_batt_type; - else - chip->batt_type = batt_type_str; - chip->first_profile_loaded = true; - chip->profile_loaded = true; - chip->battery_missing = is_battery_missing(chip); - update_chg_iterm(chip); - update_cc_cv_setpoint(chip); - rc = populate_system_data(chip); - if (rc) { - pr_err("failed to read ocv properties=%d\n", rc); - return rc; - } - estimate_battery_age(chip, &chip->actual_cap_uah); - schedule_work(&chip->status_change_work); - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - fg_relax(&chip->profile_wakeup_source); - pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip), - fg_data[FG_DATA_VOLTAGE].value); - return rc; -no_profile: - if (chip->charging_disabled) { - rc = set_prop_enable_charging(chip, true); - if (rc) - pr_err("Failed to enable charging, rc=%d\n", rc); - else - chip->charging_disabled = false; - } - - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - fg_relax(&chip->profile_wakeup_source); - return rc; -reschedule: - schedule_delayed_work( - &chip->batt_profile_init, - msecs_to_jiffies(BATTERY_PSY_WAIT_MS)); - cancel_delayed_work(&chip->update_sram_data); - 
schedule_delayed_work( - &chip->update_sram_data, - msecs_to_jiffies(0)); - fg_relax(&chip->profile_wakeup_source); - return 0; -} - -static void check_empty_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - check_empty_work.work); - - if (fg_is_batt_empty(chip)) { - if (fg_debug_mask & FG_STATUS) - pr_info("EMPTY SOC high\n"); - chip->soc_empty = true; - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - } - fg_relax(&chip->empty_check_wakeup_source); -} - -static void batt_profile_init(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - batt_profile_init.work); - - if (fg_batt_profile_init(chip)) - pr_err("failed to initialize profile\n"); -} - -static void sysfs_restart_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - sysfs_restart_work); - int rc; - - rc = fg_do_restart(chip, false); - if (rc) - pr_err("fg restart failed: %d\n", rc); - mutex_lock(&chip->sysfs_restart_lock); - fg_restart = 0; - mutex_unlock(&chip->sysfs_restart_lock); -} - -#define SRAM_MONOTONIC_SOC_REG 0x574 -#define SRAM_MONOTONIC_SOC_OFFSET 2 -#define SRAM_RELEASE_TIMEOUT_MS 500 -static void charge_full_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - charge_full_work); - int rc; - u8 buffer[3]; - int bsoc; - int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value; - bool disable = false; - u8 reg; - - if (chip->status != POWER_SUPPLY_STATUS_FULL) { - if (fg_debug_mask & FG_STATUS) - pr_info("battery not full: %d\n", chip->status); - disable = true; - } - - fg_mem_lock(chip); - rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0); - if (rc) { - pr_err("Unable to read battery soc: %d\n", rc); - goto out; - } - if (buffer[2] <= resume_soc_raw) { - if (fg_debug_mask & FG_STATUS) - pr_info("bsoc = 0x%02x <= resume = 0x%02x\n", - buffer[2], resume_soc_raw); - disable = 
true; - } - if (!disable) - goto out; - - rc = fg_mem_write(chip, buffer, SOC_FULL_REG, 3, - SOC_FULL_OFFSET, 0); - if (rc) { - pr_err("failed to write SOC_FULL rc=%d\n", rc); - goto out; - } - /* force a full soc value into the monotonic in order to display 100 */ - buffer[0] = 0xFF; - buffer[1] = 0xFF; - rc = fg_mem_write(chip, buffer, SRAM_MONOTONIC_SOC_REG, 2, - SRAM_MONOTONIC_SOC_OFFSET, 0); - if (rc) { - pr_err("failed to write SOC_FULL rc=%d\n", rc); - goto out; - } - if (fg_debug_mask & FG_STATUS) { - bsoc = buffer[0] | buffer[1] << 8 | buffer[2] << 16; - pr_info("wrote %06x into soc full\n", bsoc); - } - fg_mem_release(chip); - /* - * wait one cycle to make sure the soc is updated before clearing - * the soc mask bit - */ - fg_mem_lock(chip); - fg_mem_read(chip, ®, PROFILE_INTEGRITY_REG, 1, 0, 0); -out: - fg_mem_release(chip); - if (disable) - chip->charge_full = false; -} - -static void update_bcl_thresholds(struct fg_chip *chip) -{ - u8 data[4]; - u8 mh_offset = 0, lm_offset = 0; - u16 address = 0; - int ret = 0; - - address = settings[FG_MEM_BCL_MH_THRESHOLD].address; - mh_offset = settings[FG_MEM_BCL_MH_THRESHOLD].offset; - lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset; - ret = fg_mem_read(chip, data, address, 4, 0, 1); - if (ret) - pr_err("Error reading BCL LM & MH threshold rc:%d\n", ret); - else - pr_debug("Old BCL LM threshold:%x MH threshold:%x\n", - data[lm_offset], data[mh_offset]); - BCL_MA_TO_ADC(settings[FG_MEM_BCL_MH_THRESHOLD].value, data[mh_offset]); - BCL_MA_TO_ADC(settings[FG_MEM_BCL_LM_THRESHOLD].value, data[lm_offset]); - - ret = fg_mem_write(chip, data, address, 4, 0, 0); - if (ret) - pr_err("spmi write failed. 
addr:%03x, ret:%d\n", - address, ret); - else - pr_debug("New BCL LM threshold:%x MH threshold:%x\n", - data[lm_offset], data[mh_offset]); -} - -static int disable_bcl_lpm(struct fg_chip *chip) -{ - u8 data[4]; - u8 lm_offset = 0; - u16 address = 0; - int rc = 0; - - address = settings[FG_MEM_BCL_LM_THRESHOLD].address; - lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset; - rc = fg_mem_read(chip, data, address, 4, 0, 1); - if (rc) { - pr_err("Error reading BCL LM & MH threshold rc:%d\n", rc); - return rc; - } - pr_debug("Old BCL LM threshold:%x\n", data[lm_offset]); - - /* Put BCL always above LPM */ - BCL_MA_TO_ADC(0, data[lm_offset]); - - rc = fg_mem_write(chip, data, address, 4, 0, 0); - if (rc) - pr_err("spmi write failed. addr:%03x, rc:%d\n", - address, rc); - else - pr_debug("New BCL LM threshold:%x\n", data[lm_offset]); - - return rc; -} - -static void bcl_hi_power_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, - struct fg_chip, - bcl_hi_power_work); - int rc; - - if (chip->bcl_lpm_disabled) { - rc = disable_bcl_lpm(chip); - if (rc) - pr_err("failed to disable bcl low mode %d\n", - rc); - } else { - update_bcl_thresholds(chip); - } -} - -#define VOLT_UV_TO_VOLTCMP8(volt_uv) \ - ((volt_uv - 2500000) / 9766) -static int update_irq_volt_empty(struct fg_chip *chip) -{ - u8 data; - int volt_mv = settings[FG_MEM_IRQ_VOLT_EMPTY].value; - - data = (u8)VOLT_UV_TO_VOLTCMP8(volt_mv * 1000); - - if (fg_debug_mask & FG_STATUS) - pr_info("voltage = %d, converted_raw = %04x\n", volt_mv, data); - return fg_mem_write(chip, &data, - settings[FG_MEM_IRQ_VOLT_EMPTY].address, 1, - settings[FG_MEM_IRQ_VOLT_EMPTY].offset, 0); -} - -static int update_cutoff_voltage(struct fg_chip *chip) -{ - u8 data[2]; - u16 converted_voltage_raw; - s64 voltage_mv = settings[FG_MEM_CUTOFF_VOLTAGE].value; - - converted_voltage_raw = (s16)MICROUNITS_TO_ADC_RAW(voltage_mv * 1000); - data[0] = cpu_to_le16(converted_voltage_raw) & 0xFF; - data[1] = 
cpu_to_le16(converted_voltage_raw) >> 8; - - if (fg_debug_mask & FG_STATUS) - pr_info("voltage = %lld, converted_raw = %04x, data = %02x %02x\n", - voltage_mv, converted_voltage_raw, data[0], data[1]); - return fg_mem_write(chip, data, settings[FG_MEM_CUTOFF_VOLTAGE].address, - 2, settings[FG_MEM_CUTOFF_VOLTAGE].offset, 0); -} - -static int update_iterm(struct fg_chip *chip) -{ - u8 data[2]; - u16 converted_current_raw; - s64 current_ma = -settings[FG_MEM_TERM_CURRENT].value; - - converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000); - data[0] = cpu_to_le16(converted_current_raw) & 0xFF; - data[1] = cpu_to_le16(converted_current_raw) >> 8; - - if (fg_debug_mask & FG_STATUS) - pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n", - current_ma, converted_current_raw, data[0], data[1]); - return fg_mem_write(chip, data, settings[FG_MEM_TERM_CURRENT].address, - 2, settings[FG_MEM_TERM_CURRENT].offset, 0); -} - -#define OF_READ_SETTING(type, qpnp_dt_property, retval, optional) \ -do { \ - if (retval) \ - break; \ - \ - retval = of_property_read_u32(chip->pdev->dev.of_node, \ - "qcom," qpnp_dt_property, \ - &settings[type].value); \ - \ - if ((retval == -EINVAL) && optional) \ - retval = 0; \ - else if (retval) \ - pr_err("Error reading " #qpnp_dt_property \ - " property rc = %d\n", rc); \ -} while (0) - -#define OF_READ_PROPERTY(store, qpnp_dt_property, retval, default_val) \ -do { \ - if (retval) \ - break; \ - \ - retval = of_property_read_u32(chip->pdev->dev.of_node, \ - "qcom," qpnp_dt_property, \ - &store); \ - \ - if (retval == -EINVAL) { \ - retval = 0; \ - store = default_val; \ - } else if (retval) { \ - pr_err("Error reading " #qpnp_dt_property \ - " property rc = %d\n", rc); \ - } \ -} while (0) - -#define DEFAULT_EVALUATION_CURRENT_MA 1000 -static int fg_of_init(struct fg_chip *chip) -{ - int rc = 0, sense_type, len = 0; - const char *data; - struct device_node *node = chip->pdev->dev.of_node; - u32 temp[2] = {0}; - - 
OF_READ_SETTING(FG_MEM_SOFT_HOT, "warm-bat-decidegc", rc, 1); - OF_READ_SETTING(FG_MEM_SOFT_COLD, "cool-bat-decidegc", rc, 1); - OF_READ_SETTING(FG_MEM_HARD_HOT, "hot-bat-decidegc", rc, 1); - OF_READ_SETTING(FG_MEM_HARD_COLD, "cold-bat-decidegc", rc, 1); - - if (of_find_property(node, "qcom,cold-hot-jeita-hysteresis", NULL)) { - int hard_hot = 0, soft_hot = 0, hard_cold = 0, soft_cold = 0; - - rc = of_property_read_u32_array(node, - "qcom,cold-hot-jeita-hysteresis", temp, 2); - if (rc) { - pr_err("Error reading cold-hot-jeita-hysteresis rc=%d\n", - rc); - return rc; - } - - chip->jeita_hysteresis_support = true; - chip->cold_hysteresis = temp[0]; - chip->hot_hysteresis = temp[1]; - hard_hot = settings[FG_MEM_HARD_HOT].value; - soft_hot = settings[FG_MEM_SOFT_HOT].value; - hard_cold = settings[FG_MEM_HARD_COLD].value; - soft_cold = settings[FG_MEM_SOFT_COLD].value; - if (((hard_hot - chip->hot_hysteresis) < soft_hot) || - ((hard_cold + chip->cold_hysteresis) > soft_cold)) { - chip->jeita_hysteresis_support = false; - pr_err("invalid hysteresis: hot_hysterresis = %d cold_hysteresis = %d\n", - chip->hot_hysteresis, chip->cold_hysteresis); - } else { - pr_debug("cold_hysteresis = %d, hot_hysteresis = %d\n", - chip->cold_hysteresis, chip->hot_hysteresis); - } - } - - OF_READ_SETTING(FG_MEM_BCL_LM_THRESHOLD, "bcl-lm-threshold-ma", - rc, 1); - OF_READ_SETTING(FG_MEM_BCL_MH_THRESHOLD, "bcl-mh-threshold-ma", - rc, 1); - OF_READ_SETTING(FG_MEM_TERM_CURRENT, "fg-iterm-ma", rc, 1); - OF_READ_SETTING(FG_MEM_CHG_TERM_CURRENT, "fg-chg-iterm-ma", rc, 1); - OF_READ_SETTING(FG_MEM_CUTOFF_VOLTAGE, "fg-cutoff-voltage-mv", rc, 1); - data = of_get_property(chip->pdev->dev.of_node, - "qcom,thermal-coefficients", &len); - if (data && len == THERMAL_COEFF_N_BYTES) { - memcpy(chip->thermal_coefficients, data, len); - chip->use_thermal_coefficients = true; - } - OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc", rc, 1); - settings[FG_MEM_RESUME_SOC].value = - 
DIV_ROUND_CLOSEST(settings[FG_MEM_RESUME_SOC].value - * FULL_SOC_RAW, FULL_CAPACITY); - OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc-raw", rc, 1); - OF_READ_SETTING(FG_MEM_IRQ_VOLT_EMPTY, "irq-volt-empty-mv", rc, 1); - OF_READ_SETTING(FG_MEM_VBAT_EST_DIFF, "vbat-estimate-diff-mv", rc, 1); - OF_READ_SETTING(FG_MEM_DELTA_SOC, "fg-delta-soc", rc, 1); - OF_READ_SETTING(FG_MEM_BATT_LOW, "fg-vbatt-low-threshold", rc, 1); - OF_READ_SETTING(FG_MEM_THERM_DELAY, "fg-therm-delay-us", rc, 1); - OF_READ_PROPERTY(chip->learning_data.max_increment, - "cl-max-increment-deciperc", rc, 5); - OF_READ_PROPERTY(chip->learning_data.max_decrement, - "cl-max-decrement-deciperc", rc, 100); - OF_READ_PROPERTY(chip->learning_data.max_temp, - "cl-max-temp-decidegc", rc, 450); - OF_READ_PROPERTY(chip->learning_data.min_temp, - "cl-min-temp-decidegc", rc, 150); - OF_READ_PROPERTY(chip->learning_data.max_start_soc, - "cl-max-start-capacity", rc, 15); - OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv, - "cl-vbat-est-thr-uv", rc, 40000); - OF_READ_PROPERTY(chip->evaluation_current, - "aging-eval-current-ma", rc, - DEFAULT_EVALUATION_CURRENT_MA); - OF_READ_PROPERTY(chip->cc_cv_threshold_mv, - "fg-cc-cv-threshold-mv", rc, 0); - if (of_property_read_bool(chip->pdev->dev.of_node, - "qcom,capacity-learning-on")) - chip->batt_aging_mode = FG_AGING_CC; - else if (of_property_read_bool(chip->pdev->dev.of_node, - "qcom,capacity-estimation-on")) - chip->batt_aging_mode = FG_AGING_ESR; - else - chip->batt_aging_mode = FG_AGING_NONE; - if (chip->batt_aging_mode == FG_AGING_CC) { - chip->learning_data.feedback_on - = of_property_read_bool(chip->pdev->dev.of_node, - "qcom,capacity-learning-feedback"); - } - if (fg_debug_mask & FG_AGING) - pr_info("battery aging mode: %d\n", chip->batt_aging_mode); - - /* Get the use-otp-profile property */ - chip->use_otp_profile = of_property_read_bool(chip->pdev->dev.of_node, - "qcom,use-otp-profile"); - chip->hold_soc_while_full - = 
of_property_read_bool(chip->pdev->dev.of_node, - "qcom,hold-soc-while-full"); - - sense_type = of_property_read_bool(chip->pdev->dev.of_node, - "qcom,ext-sense-type"); - if (rc == 0) { - if (fg_sense_type < 0) - fg_sense_type = sense_type; - - if (fg_debug_mask & FG_STATUS) { - if (fg_sense_type == INTERNAL_CURRENT_SENSE) - pr_info("Using internal sense\n"); - else if (fg_sense_type == EXTERNAL_CURRENT_SENSE) - pr_info("Using external sense\n"); - else - pr_info("Using default sense\n"); - } - } else { - rc = 0; - } - - chip->bad_batt_detection_en = of_property_read_bool(node, - "qcom,bad-battery-detection-enable"); - - chip->sw_rbias_ctrl = of_property_read_bool(node, - "qcom,sw-rbias-control"); - - chip->cyc_ctr.en = of_property_read_bool(node, - "qcom,cycle-counter-en"); - if (chip->cyc_ctr.en) - chip->cyc_ctr.id = 1; - - chip->esr_pulse_tune_en = of_property_read_bool(node, - "qcom,esr-pulse-tuning-en"); - - return rc; -} - -static int fg_init_irqs(struct fg_chip *chip) -{ - int rc = 0; - unsigned int base; - struct device_node *child; - u8 subtype; - struct platform_device *pdev = chip->pdev; - - if (of_get_available_child_count(pdev->dev.of_node) == 0) { - pr_err("no child nodes\n"); - return -ENXIO; - } - - for_each_available_child_of_node(pdev->dev.of_node, child) { - rc = of_property_read_u32(child, "reg", &base); - if (rc < 0) { - dev_err(&pdev->dev, - "Couldn't find reg in node = %s rc = %d\n", - child->full_name, rc); - return rc; - } - - if ((base == chip->vbat_adc_addr) || - (base == chip->ibat_adc_addr) || - (base == chip->tp_rev_addr)) - continue; - - rc = fg_read(chip, &subtype, - base + REG_OFFSET_PERP_SUBTYPE, 1); - if (rc) { - pr_err("Peripheral subtype read failed rc=%d\n", rc); - return rc; - } - - switch (subtype) { - case FG_SOC: - chip->soc_irq[FULL_SOC].irq = of_irq_get_byname(child, - "full-soc"); - if (chip->soc_irq[FULL_SOC].irq < 0) { - pr_err("Unable to get full-soc irq\n"); - return rc; - } - chip->soc_irq[EMPTY_SOC].irq = 
of_irq_get_byname(child, - "empty-soc"); - if (chip->soc_irq[EMPTY_SOC].irq < 0) { - pr_err("Unable to get low-soc irq\n"); - return rc; - } - chip->soc_irq[DELTA_SOC].irq = of_irq_get_byname(child, - "delta-soc"); - if (chip->soc_irq[DELTA_SOC].irq < 0) { - pr_err("Unable to get delta-soc irq\n"); - return rc; - } - chip->soc_irq[FIRST_EST_DONE].irq - = of_irq_get_byname(child, "first-est-done"); - if (chip->soc_irq[FIRST_EST_DONE].irq < 0) { - pr_err("Unable to get first-est-done irq\n"); - return rc; - } - - rc = devm_request_irq(chip->dev, - chip->soc_irq[FULL_SOC].irq, - fg_soc_irq_handler, IRQF_TRIGGER_RISING, - "full-soc", chip); - if (rc < 0) { - pr_err("Can't request %d full-soc: %d\n", - chip->soc_irq[FULL_SOC].irq, rc); - return rc; - } - rc = devm_request_irq(chip->dev, - chip->soc_irq[EMPTY_SOC].irq, - fg_empty_soc_irq_handler, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "empty-soc", chip); - if (rc < 0) { - pr_err("Can't request %d empty-soc: %d\n", - chip->soc_irq[EMPTY_SOC].irq, rc); - return rc; - } - rc = devm_request_irq(chip->dev, - chip->soc_irq[DELTA_SOC].irq, - fg_soc_irq_handler, IRQF_TRIGGER_RISING, - "delta-soc", chip); - if (rc < 0) { - pr_err("Can't request %d delta-soc: %d\n", - chip->soc_irq[DELTA_SOC].irq, rc); - return rc; - } - rc = devm_request_irq(chip->dev, - chip->soc_irq[FIRST_EST_DONE].irq, - fg_first_soc_irq_handler, IRQF_TRIGGER_RISING, - "first-est-done", chip); - if (rc < 0) { - pr_err("Can't request %d delta-soc: %d\n", - chip->soc_irq[FIRST_EST_DONE].irq, rc); - return rc; - } - - enable_irq_wake(chip->soc_irq[DELTA_SOC].irq); - enable_irq_wake(chip->soc_irq[FULL_SOC].irq); - enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq); - break; - case FG_MEMIF: - chip->mem_irq[FG_MEM_AVAIL].irq - = of_irq_get_byname(child, "mem-avail"); - if (chip->mem_irq[FG_MEM_AVAIL].irq < 0) { - pr_err("Unable to get mem-avail irq\n"); - return rc; - } - rc = devm_request_irq(chip->dev, - chip->mem_irq[FG_MEM_AVAIL].irq, - 
fg_mem_avail_irq_handler, - IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, - "mem-avail", chip); - if (rc < 0) { - pr_err("Can't request %d mem-avail: %d\n", - chip->mem_irq[FG_MEM_AVAIL].irq, rc); - return rc; - } - break; - case FG_BATT: - chip->batt_irq[BATT_MISSING].irq - = of_irq_get_byname(child, "batt-missing"); - if (chip->batt_irq[BATT_MISSING].irq < 0) { - pr_err("Unable to get batt-missing irq\n"); - rc = -EINVAL; - return rc; - } - rc = devm_request_threaded_irq(chip->dev, - chip->batt_irq[BATT_MISSING].irq, - NULL, - fg_batt_missing_irq_handler, - IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING | - IRQF_ONESHOT, - "batt-missing", chip); - if (rc < 0) { - pr_err("Can't request %d batt-missing: %d\n", - chip->batt_irq[BATT_MISSING].irq, rc); - return rc; - } - chip->batt_irq[VBATT_LOW].irq - = of_irq_get_byname(child, "vbatt-low"); - if (chip->batt_irq[VBATT_LOW].irq < 0) { - pr_err("Unable to get vbatt-low irq\n"); - rc = -EINVAL; - return rc; - } - rc = devm_request_irq(chip->dev, - chip->batt_irq[VBATT_LOW].irq, - fg_vbatt_low_handler, - IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, - "vbatt-low", chip); - if (rc < 0) { - pr_err("Can't request %d vbatt-low: %d\n", - chip->batt_irq[VBATT_LOW].irq, rc); - return rc; - } - disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); - chip->vbat_low_irq_enabled = false; - break; - case FG_ADC: - break; - default: - pr_err("subtype %d\n", subtype); - return -EINVAL; - } - } - - return rc; -} - -static void fg_cleanup(struct fg_chip *chip) -{ - cancel_delayed_work_sync(&chip->update_sram_data); - cancel_delayed_work_sync(&chip->update_temp_work); - cancel_delayed_work_sync(&chip->update_jeita_setting); - cancel_delayed_work_sync(&chip->check_empty_work); - cancel_delayed_work_sync(&chip->batt_profile_init); - alarm_try_to_cancel(&chip->fg_cap_learning_alarm); - cancel_work_sync(&chip->rslow_comp_work); - cancel_work_sync(&chip->set_resume_soc_work); - cancel_work_sync(&chip->fg_cap_learning_work); - 
cancel_work_sync(&chip->dump_sram); - cancel_work_sync(&chip->status_change_work); - cancel_work_sync(&chip->cycle_count_work); - cancel_work_sync(&chip->update_esr_work); - cancel_work_sync(&chip->sysfs_restart_work); - cancel_work_sync(&chip->gain_comp_work); - cancel_work_sync(&chip->init_work); - cancel_work_sync(&chip->charge_full_work); - cancel_work_sync(&chip->esr_extract_config_work); - mutex_destroy(&chip->rslow_comp.lock); - mutex_destroy(&chip->rw_lock); - mutex_destroy(&chip->cyc_ctr.lock); - mutex_destroy(&chip->learning_data.learning_lock); - mutex_destroy(&chip->sysfs_restart_lock); - wakeup_source_trash(&chip->resume_soc_wakeup_source.source); - wakeup_source_trash(&chip->empty_check_wakeup_source.source); - wakeup_source_trash(&chip->memif_wakeup_source.source); - wakeup_source_trash(&chip->profile_wakeup_source.source); - wakeup_source_trash(&chip->update_temp_wakeup_source.source); - wakeup_source_trash(&chip->update_sram_wakeup_source.source); - wakeup_source_trash(&chip->gain_comp_wakeup_source.source); - wakeup_source_trash(&chip->capacity_learning_wakeup_source.source); - wakeup_source_trash(&chip->esr_extract_wakeup_source.source); -} - -static int fg_remove(struct platform_device *pdev) -{ - struct fg_chip *chip = dev_get_drvdata(&pdev->dev); - - fg_cleanup(chip); - dev_set_drvdata(&pdev->dev, NULL); - return 0; -} - -static int fg_memif_data_open(struct inode *inode, struct file *file) -{ - struct fg_log_buffer *log; - struct fg_trans *trans; - u8 *data_buf; - - size_t logbufsize = SZ_4K; - size_t databufsize = SZ_4K; - - if (!dbgfs_data.chip) { - pr_err("Not initialized data\n"); - return -EINVAL; - } - - /* Per file "transaction" data */ - trans = kzalloc(sizeof(*trans), GFP_KERNEL); - if (!trans) { - pr_err("Unable to allocate memory for transaction data\n"); - return -ENOMEM; - } - - /* Allocate log buffer */ - log = kzalloc(logbufsize, GFP_KERNEL); - - if (!log) { - kfree(trans); - pr_err("Unable to allocate memory for log 
buffer\n"); - return -ENOMEM; - } - - log->rpos = 0; - log->wpos = 0; - log->len = logbufsize - sizeof(*log); - - /* Allocate data buffer */ - data_buf = kzalloc(databufsize, GFP_KERNEL); - - if (!data_buf) { - kfree(trans); - kfree(log); - pr_err("Unable to allocate memory for data buffer\n"); - return -ENOMEM; - } - - trans->log = log; - trans->data = data_buf; - trans->cnt = dbgfs_data.cnt; - trans->addr = dbgfs_data.addr; - trans->chip = dbgfs_data.chip; - trans->offset = trans->addr; - mutex_init(&trans->memif_dfs_lock); - - file->private_data = trans; - return 0; -} - -static int fg_memif_dfs_close(struct inode *inode, struct file *file) -{ - struct fg_trans *trans = file->private_data; - - if (trans && trans->log && trans->data) { - file->private_data = NULL; - mutex_destroy(&trans->memif_dfs_lock); - kfree(trans->log); - kfree(trans->data); - kfree(trans); - } - - return 0; -} - -/** - * print_to_log: format a string and place into the log buffer - * @log: The log buffer to place the result into. - * @fmt: The format string to use. - * @...: The arguments for the format string. - * - * The return value is the number of characters written to @log buffer - * not including the trailing '\0'. - */ -static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...) -{ - va_list args; - int cnt; - char *buf = &log->data[log->wpos]; - size_t size = log->len - log->wpos; - - va_start(args, fmt); - cnt = vscnprintf(buf, size, fmt, args); - va_end(args); - - log->wpos += cnt; - return cnt; -} - -/** - * write_next_line_to_log: Writes a single "line" of data into the log buffer - * @trans: Pointer to SRAM transaction data. - * @offset: SRAM address offset to start reading from. - * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read. - * - * The 'offset' is a 12-bit SRAM address. - * - * On a successful read, the pcnt is decremented by the number of data - * bytes read from the SRAM. 
When the cnt reaches 0, all requested bytes have - * been read. - */ -static int -write_next_line_to_log(struct fg_trans *trans, int offset, size_t *pcnt) -{ - int i, j; - u8 data[ITEMS_PER_LINE]; - struct fg_log_buffer *log = trans->log; - - int cnt = 0; - int padding = offset % ITEMS_PER_LINE; - int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt); - int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read); - - /* Buffer needs enough space for an entire line */ - if ((log->len - log->wpos) < MAX_LINE_LENGTH) - goto done; - - memcpy(data, trans->data + (offset - trans->addr), items_to_read); - - *pcnt -= items_to_read; - - /* Each line starts with the aligned offset (12-bit address) */ - cnt = print_to_log(log, "%3.3X ", offset & 0xfff); - if (cnt == 0) - goto done; - - /* If the offset is unaligned, add padding to right justify items */ - for (i = 0; i < padding; ++i) { - cnt = print_to_log(log, "-- "); - if (cnt == 0) - goto done; - } - - /* Log the data items */ - for (j = 0; i < items_to_log; ++i, ++j) { - cnt = print_to_log(log, "%2.2X ", data[j]); - if (cnt == 0) - goto done; - } - - /* If the last character was a space, then replace it with a newline */ - if (log->wpos > 0 && log->data[log->wpos - 1] == ' ') - log->data[log->wpos - 1] = '\n'; - -done: - return cnt; -} - -/** - * get_log_data - reads data from SRAM and saves to the log buffer - * @trans: Pointer to SRAM transaction data. - * - * Returns the number of "items" read or SPMI error code for read failures. 
- */ -static int get_log_data(struct fg_trans *trans) -{ - int cnt, rc; - int last_cnt; - int items_read; - int total_items_read = 0; - u32 offset = trans->offset; - size_t item_cnt = trans->cnt; - struct fg_log_buffer *log = trans->log; - - if (item_cnt == 0) - return 0; - - if (item_cnt > SZ_4K) { - pr_err("Reading too many bytes\n"); - return -EINVAL; - } - - rc = fg_mem_read(trans->chip, trans->data, - trans->addr, trans->cnt, 0, 0); - if (rc) { - pr_err("dump failed: rc = %d\n", rc); - return rc; - } - /* Reset the log buffer 'pointers' */ - log->wpos = log->rpos = 0; - - /* Keep reading data until the log is full */ - do { - last_cnt = item_cnt; - cnt = write_next_line_to_log(trans, offset, &item_cnt); - items_read = last_cnt - item_cnt; - offset += items_read; - total_items_read += items_read; - } while (cnt && item_cnt > 0); - - /* Adjust the transaction offset and count */ - trans->cnt = item_cnt; - trans->offset += total_items_read; - - return total_items_read; -} - -/** - * fg_memif_dfs_reg_read: reads value(s) from SRAM and fills user's buffer a - * byte array (coded as string) - * @file: file pointer - * @buf: where to put the result - * @count: maximum space available in @buf - * @ppos: starting position - * @return number of user bytes read, or negative error value - */ -static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct fg_trans *trans = file->private_data; - struct fg_log_buffer *log = trans->log; - size_t ret; - size_t len; - - mutex_lock(&trans->memif_dfs_lock); - /* Is the the log buffer empty */ - if (log->rpos >= log->wpos) { - if (get_log_data(trans) <= 0) { - len = 0; - goto unlock_mutex; - } - } - - len = min(count, log->wpos - log->rpos); - - ret = copy_to_user(buf, &log->data[log->rpos], len); - if (ret == len) { - pr_err("error copy sram register values to user\n"); - len = -EFAULT; - goto unlock_mutex; - } - - /* 'ret' is the number of bytes not copied */ - len -= ret; - - 
*ppos += len; - log->rpos += len; - -unlock_mutex: - mutex_unlock(&trans->memif_dfs_lock); - return len; -} - -/** - * fg_memif_dfs_reg_write: write user's byte array (coded as string) to SRAM. - * @file: file pointer - * @buf: user data to be written. - * @count: maximum space available in @buf - * @ppos: starting position - * @return number of user byte written, or negative error value - */ -static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - int bytes_read; - int data; - int pos = 0; - int cnt = 0; - u8 *values; - size_t ret = 0; - char *kbuf; - u32 offset; - - struct fg_trans *trans = file->private_data; - - mutex_lock(&trans->memif_dfs_lock); - offset = trans->offset; - - /* Make a copy of the user data */ - kbuf = kmalloc(count + 1, GFP_KERNEL); - if (!kbuf) { - ret = -ENOMEM; - goto unlock_mutex; - } - - ret = copy_from_user(kbuf, buf, count); - if (ret == count) { - pr_err("failed to copy data from user\n"); - ret = -EFAULT; - goto free_buf; - } - - count -= ret; - *ppos += count; - kbuf[count] = '\0'; - - /* Override the text buffer with the raw data */ - values = kbuf; - - /* Parse the data in the buffer. It should be a string of numbers */ - while ((pos < count) && - sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) { - /* - * We shouldn't be receiving a string of characters that - * exceeds a size of 5 to keep this functionally correct. - * Also, we should make sure that pos never gets overflowed - * beyond the limit. - */ - if (bytes_read > 5 || bytes_read > INT_MAX - pos) { - cnt = 0; - ret = -EINVAL; - break; - } - pos += bytes_read; - values[cnt++] = data & 0xff; - } - - if (!cnt) - goto free_buf; - - pr_info("address %x, count %d\n", offset, cnt); - /* Perform the write(s) */ - - ret = fg_mem_write(trans->chip, values, offset, - cnt, 0, 0); - if (ret) { - pr_err("SPMI write failed, err = %zu\n", ret); - } else { - ret = count; - trans->offset += cnt > 4 ? 
4 : cnt; - } - -free_buf: - kfree(kbuf); -unlock_mutex: - mutex_unlock(&trans->memif_dfs_lock); - return ret; -} - -static const struct file_operations fg_memif_dfs_reg_fops = { - .open = fg_memif_data_open, - .release = fg_memif_dfs_close, - .read = fg_memif_dfs_reg_read, - .write = fg_memif_dfs_reg_write, -}; - -/** - * fg_dfs_create_fs: create debugfs file system. - * @return pointer to root directory or NULL if failed to create fs - */ -static struct dentry *fg_dfs_create_fs(void) -{ - struct dentry *root, *file; - - pr_debug("Creating FG_MEM debugfs file-system\n"); - root = debugfs_create_dir(DFS_ROOT_NAME, NULL); - if (IS_ERR_OR_NULL(root)) { - pr_err("Error creating top level directory err:%ld", - (long)root); - if (PTR_ERR(root) == -ENODEV) - pr_err("debugfs is not enabled in the kernel"); - return NULL; - } - - dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data); - - file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg); - if (!file) { - pr_err("error creating help entry\n"); - goto err_remove_fs; - } - return root; - -err_remove_fs: - debugfs_remove_recursive(root); - return NULL; -} - -/** - * fg_dfs_get_root: return a pointer to FG debugfs root directory. - * @return a pointer to the existing directory, or if no root - * directory exists then create one. Directory is created with file that - * configures SRAM transaction, namely: address, and count. 
- * @returns valid pointer on success or NULL - */ -struct dentry *fg_dfs_get_root(void) -{ - if (dbgfs_data.root) - return dbgfs_data.root; - - if (mutex_lock_interruptible(&dbgfs_data.lock) < 0) - return NULL; - /* critical section */ - if (!dbgfs_data.root) { /* double checking idiom */ - dbgfs_data.root = fg_dfs_create_fs(); - } - mutex_unlock(&dbgfs_data.lock); - return dbgfs_data.root; -} - -/* - * fg_dfs_create: adds new fg_mem if debugfs entry - * @return zero on success - */ -int fg_dfs_create(struct fg_chip *chip) -{ - struct dentry *root; - struct dentry *file; - - root = fg_dfs_get_root(); - if (!root) - return -ENOENT; - - dbgfs_data.chip = chip; - - file = debugfs_create_u32("count", DFS_MODE, root, &(dbgfs_data.cnt)); - if (!file) { - pr_err("error creating 'count' entry\n"); - goto err_remove_fs; - } - - file = debugfs_create_x32("address", DFS_MODE, - root, &(dbgfs_data.addr)); - if (!file) { - pr_err("error creating 'address' entry\n"); - goto err_remove_fs; - } - - file = debugfs_create_file("data", DFS_MODE, root, &dbgfs_data, - &fg_memif_dfs_reg_fops); - if (!file) { - pr_err("error creating 'data' entry\n"); - goto err_remove_fs; - } - - return 0; - -err_remove_fs: - debugfs_remove_recursive(root); - return -ENOMEM; -} - -#define EXTERNAL_SENSE_OFFSET_REG 0x41C -#define EXT_OFFSET_TRIM_REG 0xF8 -#define SEC_ACCESS_REG 0xD0 -#define SEC_ACCESS_UNLOCK 0xA5 -#define BCL_TRIM_REV_FIXED 12 -static int bcl_trim_workaround(struct fg_chip *chip) -{ - u8 reg, rc; - - if (chip->tp_rev_addr == 0) - return 0; - - rc = fg_read(chip, ®, chip->tp_rev_addr, 1); - if (rc) { - pr_err("Failed to read tp reg, rc = %d\n", rc); - return rc; - } - if (reg >= BCL_TRIM_REV_FIXED) { - if (fg_debug_mask & FG_STATUS) - pr_info("workaround not applied, tp_rev = %d\n", reg); - return 0; - } - - rc = fg_mem_read(chip, ®, EXTERNAL_SENSE_OFFSET_REG, 1, 2, 0); - if (rc) { - pr_err("Failed to read ext sense offset trim, rc = %d\n", rc); - return rc; - } - rc = 
fg_masked_write(chip, chip->soc_base + SEC_ACCESS_REG, - SEC_ACCESS_UNLOCK, SEC_ACCESS_UNLOCK, 1); - - rc |= fg_masked_write(chip, chip->soc_base + EXT_OFFSET_TRIM_REG, - 0xFF, reg, 1); - if (rc) { - pr_err("Failed to write ext sense offset trim, rc = %d\n", rc); - return rc; - } - - return 0; -} - -#define FG_ALG_SYSCTL_1 0x4B0 -#define SOC_CNFG 0x450 -#define SOC_DELTA_OFFSET 3 -#define DELTA_SOC_PERCENT 1 -#define I_TERM_QUAL_BIT BIT(1) -#define PATCH_NEG_CURRENT_BIT BIT(3) -#define KI_COEFF_PRED_FULL_ADDR 0x408 -#define KI_COEFF_PRED_FULL_4_0_MSB 0x88 -#define KI_COEFF_PRED_FULL_4_0_LSB 0x00 -#define TEMP_FRAC_SHIFT_REG 0x4A4 -#define FG_ADC_CONFIG_REG 0x4B8 -#define FG_BCL_CONFIG_OFFSET 0x3 -#define BCL_FORCED_HPM_IN_CHARGE BIT(2) -static int fg_common_hw_init(struct fg_chip *chip) -{ - int rc; - int resume_soc_raw; - u8 val; - - update_iterm(chip); - update_cutoff_voltage(chip); - update_irq_volt_empty(chip); - update_bcl_thresholds(chip); - - resume_soc_raw = settings[FG_MEM_RESUME_SOC].value; - if (resume_soc_raw > 0) { - rc = fg_set_resume_soc(chip, resume_soc_raw); - if (rc) { - pr_err("Couldn't set resume SOC for FG\n"); - return rc; - } - } else { - pr_info("FG auto recharge threshold not specified in DT\n"); - } - - if (fg_sense_type >= 0) { - rc = set_prop_sense_type(chip, fg_sense_type); - if (rc) { - pr_err("failed to config sense type %d rc=%d\n", - fg_sense_type, rc); - return rc; - } - } - - rc = fg_mem_masked_write(chip, settings[FG_MEM_DELTA_SOC].address, 0xFF, - soc_to_setpoint(settings[FG_MEM_DELTA_SOC].value), - settings[FG_MEM_DELTA_SOC].offset); - if (rc) { - pr_err("failed to write delta soc rc=%d\n", rc); - return rc; - } - - rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF, - batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value), - settings[FG_MEM_BATT_LOW].offset); - if (rc) { - pr_err("failed to write Vbatt_low rc=%d\n", rc); - return rc; - } - - rc = fg_mem_masked_write(chip, settings[FG_MEM_THERM_DELAY].address, 
- THERM_DELAY_MASK, - therm_delay_to_setpoint(settings[FG_MEM_THERM_DELAY].value), - settings[FG_MEM_THERM_DELAY].offset); - if (rc) { - pr_err("failed to write therm_delay rc=%d\n", rc); - return rc; - } - - if (chip->use_thermal_coefficients) { - fg_mem_write(chip, chip->thermal_coefficients, - THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES, - THERMAL_COEFF_OFFSET, 0); - } - - if (!chip->sw_rbias_ctrl) { - rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT, - BATT_TEMP_CNTRL_MASK, - TEMP_SENSE_ALWAYS_BIT, - BATT_TEMP_OFFSET); - if (rc) { - pr_err("failed to write BATT_TEMP_OFFSET rc=%d\n", rc); - return rc; - } - } - - /* Read the cycle counter back from FG SRAM */ - if (chip->cyc_ctr.en) - restore_cycle_counter(chip); - - if (chip->esr_pulse_tune_en) { - rc = fg_mem_read(chip, &val, SYS_CFG_1_REG, 1, SYS_CFG_1_OFFSET, - 0); - if (rc) { - pr_err("unable to read sys_cfg_1: %d\n", rc); - return rc; - } - - if (!(val & ENABLE_ESR_PULSE_VAL)) - chip->esr_extract_disabled = true; - - if (fg_debug_mask & FG_STATUS) - pr_info("ESR extract is %sabled\n", - chip->esr_extract_disabled ? "dis" : "en"); - - rc = fg_mem_read(chip, &val, CBITS_INPUT_FILTER_REG, 1, - CBITS_RMEAS1_OFFSET, 0); - if (rc) { - pr_err("unable to read cbits_input_filter_reg: %d\n", - rc); - return rc; - } - - if (val & (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT)) - chip->imptr_pulse_slow_en = true; - - if (fg_debug_mask & FG_STATUS) - pr_info("imptr_pulse_slow is %sabled\n", - chip->imptr_pulse_slow_en ? "en" : "dis"); - - rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, - 0); - if (rc) { - pr_err("unable to read rslow cfg: %d\n", rc); - return rc; - } - - if (val & RSLOW_CFG_ON_VAL) - chip->rslow_comp.active = true; - - if (fg_debug_mask & FG_STATUS) - pr_info("rslow_comp active is %sabled\n", - chip->rslow_comp.active ? 
"en" : "dis"); - } - - return 0; -} - -static int fg_8994_hw_init(struct fg_chip *chip) -{ - int rc = 0; - u8 data[4]; - u64 esr_value; - - rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT, - PATCH_NEG_CURRENT_BIT, - PATCH_NEG_CURRENT_BIT, - EXTERNAL_SENSE_OFFSET); - if (rc) { - pr_err("failed to write patch current bit rc=%d\n", rc); - return rc; - } - - rc = bcl_trim_workaround(chip); - if (rc) { - pr_err("failed to redo bcl trim rc=%d\n", rc); - return rc; - } - - rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG, - BCL_FORCED_HPM_IN_CHARGE, - BCL_FORCED_HPM_IN_CHARGE, - FG_BCL_CONFIG_OFFSET); - if (rc) { - pr_err("failed to force hpm in charge rc=%d\n", rc); - return rc; - } - - fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, I_TERM_QUAL_BIT, 0, 0); - - data[0] = 0xA2; - data[1] = 0x12; - - rc = fg_mem_write(chip, data, TEMP_FRAC_SHIFT_REG, 2, 2, 0); - if (rc) { - pr_err("failed to write temp ocv constants rc=%d\n", rc); - return rc; - } - - data[0] = KI_COEFF_PRED_FULL_4_0_LSB; - data[1] = KI_COEFF_PRED_FULL_4_0_MSB; - fg_mem_write(chip, data, KI_COEFF_PRED_FULL_ADDR, 2, 2, 0); - - esr_value = ESR_DEFAULT_VALUE; - rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8, - ESR_VALUE_OFFSET, 0); - if (rc) - pr_err("failed to write default ESR value rc=%d\n", rc); - else - pr_debug("set default value to esr filter\n"); - - return 0; -} - -#define FG_USBID_CONFIG_OFFSET 0x2 -#define DISABLE_USBID_DETECT_BIT BIT(0) -static int fg_8996_hw_init(struct fg_chip *chip) -{ - int rc; - - rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG, - BCL_FORCED_HPM_IN_CHARGE, - BCL_FORCED_HPM_IN_CHARGE, - FG_BCL_CONFIG_OFFSET); - if (rc) { - pr_err("failed to force hpm in charge rc=%d\n", rc); - return rc; - } - - /* enable usbid conversions for PMi8996 V1.0 */ - if (chip->pmic_revision[REVID_DIG_MAJOR] == 1 - && chip->pmic_revision[REVID_ANA_MAJOR] == 0) { - rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG, - DISABLE_USBID_DETECT_BIT, - 0, FG_USBID_CONFIG_OFFSET); - if (rc) { - 
pr_err("failed to enable usbid conversions: %d\n", rc); - return rc; - } - } - - return rc; -} - -static int fg_8950_hw_init(struct fg_chip *chip) -{ - int rc; - - rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG, - BCL_FORCED_HPM_IN_CHARGE, - BCL_FORCED_HPM_IN_CHARGE, - FG_BCL_CONFIG_OFFSET); - if (rc) - pr_err("failed to force hpm in charge rc=%d\n", rc); - - return rc; -} - -static int fg_hw_init(struct fg_chip *chip) -{ - int rc = 0; - - rc = fg_common_hw_init(chip); - if (rc) { - pr_err("Unable to initialize FG HW rc=%d\n", rc); - return rc; - } - - /* add PMIC specific hw init */ - switch (chip->pmic_subtype) { - case PMI8994: - rc = fg_8994_hw_init(chip); - chip->wa_flag |= PULSE_REQUEST_WA; - break; - case PMI8996: - rc = fg_8996_hw_init(chip); - /* Setup workaround flag based on PMIC type */ - if (fg_sense_type == INTERNAL_CURRENT_SENSE) - chip->wa_flag |= IADC_GAIN_COMP_WA; - if (chip->pmic_revision[REVID_DIG_MAJOR] > 1) - chip->wa_flag |= USE_CC_SOC_REG; - - break; - case PMI8950: - case PMI8937: - rc = fg_8950_hw_init(chip); - /* Setup workaround flag based on PMIC type */ - chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA; - if (fg_sense_type == INTERNAL_CURRENT_SENSE) - chip->wa_flag |= IADC_GAIN_COMP_WA; - if (chip->pmic_revision[REVID_DIG_MAJOR] > 1) - chip->wa_flag |= USE_CC_SOC_REG; - - break; - } - if (rc) - pr_err("Unable to initialize PMIC specific FG HW rc=%d\n", rc); - - pr_debug("wa_flag=0x%x\n", chip->wa_flag); - - return rc; -} - -#define DIG_MINOR 0x0 -#define DIG_MAJOR 0x1 -#define ANA_MINOR 0x2 -#define ANA_MAJOR 0x3 -#define IACS_INTR_SRC_SLCT BIT(3) -static int fg_setup_memif_offset(struct fg_chip *chip) -{ - int rc; - - rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4); - if (rc) { - pr_err("Unable to read FG revision rc=%d\n", rc); - return rc; - } - - switch (chip->revision[DIG_MAJOR]) { - case DIG_REV_1: - case DIG_REV_2: - chip->offset = offset[0].address; - break; - case DIG_REV_3: - chip->offset = offset[1].address; 
- chip->ima_supported = true; - break; - default: - pr_err("Digital Major rev=%d not supported\n", - chip->revision[DIG_MAJOR]); - return -EINVAL; - } - - if (chip->ima_supported) { - /* - * Change the FG_MEM_INT interrupt to track IACS_READY - * condition instead of end-of-transaction. This makes sure - * that the next transaction starts only after the hw is ready. - */ - rc = fg_masked_write(chip, - chip->mem_base + MEM_INTF_IMA_CFG, IACS_INTR_SRC_SLCT, - IACS_INTR_SRC_SLCT, 1); - if (rc) { - pr_err("failed to configure interrupt source %d\n", rc); - return rc; - } - } - - return 0; -} - -static int fg_detect_pmic_type(struct fg_chip *chip) -{ - struct pmic_revid_data *pmic_rev_id; - struct device_node *revid_dev_node; - - revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node, - "qcom,pmic-revid", 0); - if (!revid_dev_node) { - pr_err("Missing qcom,pmic-revid property - driver failed\n"); - return -EINVAL; - } - - pmic_rev_id = get_revid_data(revid_dev_node); - if (IS_ERR_OR_NULL(pmic_rev_id)) { - pr_err("Unable to get pmic_revid rc=%ld\n", - PTR_ERR(pmic_rev_id)); - /* - * the revid peripheral must be registered, any failure - * here only indicates that the rev-id module has not - * probed yet. 
- */ - return -EPROBE_DEFER; - } - - switch (pmic_rev_id->pmic_subtype) { - case PMI8994: - case PMI8950: - case PMI8937: - case PMI8996: - chip->pmic_subtype = pmic_rev_id->pmic_subtype; - chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1; - chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2; - chip->pmic_revision[REVID_ANA_MAJOR] = pmic_rev_id->rev3; - chip->pmic_revision[REVID_DIG_MAJOR] = pmic_rev_id->rev4; - break; - default: - pr_err("PMIC subtype %d not supported\n", - pmic_rev_id->pmic_subtype); - return -EINVAL; - } - - return 0; -} - -#define INIT_JEITA_DELAY_MS 1000 - -static void delayed_init_work(struct work_struct *work) -{ - u8 reg[2]; - int rc; - struct fg_chip *chip = container_of(work, - struct fg_chip, - init_work); - - /* hold memory access until initialization finishes */ - fg_mem_lock(chip); - - rc = fg_hw_init(chip); - if (rc) { - pr_err("failed to hw init rc = %d\n", rc); - fg_mem_release(chip); - fg_cleanup(chip); - return; - } - /* release memory access before update_sram_data is called */ - fg_mem_release(chip); - - schedule_delayed_work( - &chip->update_jeita_setting, - msecs_to_jiffies(INIT_JEITA_DELAY_MS)); - - if (chip->last_sram_update_time == 0) - update_sram_data_work(&chip->update_sram_data.work); - - if (chip->last_temp_update_time == 0) - update_temp_data(&chip->update_temp_work.work); - - if (!chip->use_otp_profile) - schedule_delayed_work(&chip->batt_profile_init, 0); - - if (chip->wa_flag & IADC_GAIN_COMP_WA) { - /* read default gain config */ - rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to read default gain rc=%d\n", rc); - goto done; - } - - if (reg[1] || reg[0]) { - /* - * Default gain register has valid value: - * - write to gain register. 
- */ - rc = fg_mem_write(chip, reg, GAIN_REG, 2, - GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to write gain rc=%d\n", rc); - goto done; - } - } else { - /* - * Default gain register is invalid: - * - read gain register for default gain value - * - write to default gain register. - */ - rc = fg_mem_read(chip, reg, GAIN_REG, 2, - GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to read gain rc=%d\n", rc); - goto done; - } - rc = fg_mem_write(chip, reg, K_VCOR_REG, 2, - DEF_GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to write default gain rc=%d\n", - rc); - goto done; - } - } - - chip->iadc_comp_data.dfl_gain_reg[0] = reg[0]; - chip->iadc_comp_data.dfl_gain_reg[1] = reg[1]; - chip->iadc_comp_data.dfl_gain = half_float(reg); - chip->input_present = is_input_present(chip); - chip->otg_present = is_otg_present(chip); - chip->init_done = true; - - pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n", - reg[1], reg[0], chip->iadc_comp_data.dfl_gain); - } - - pr_debug("FG: HW_init success\n"); - - return; -done: - fg_cleanup(chip); -} - -static int fg_probe(struct platform_device *pdev) -{ - struct device *dev = &(pdev->dev); - struct fg_chip *chip; - struct device_node *child; - unsigned int base; - u8 subtype, reg; - int rc = 0; - struct power_supply_config bms_psy_cfg; - - if (!pdev) { - pr_err("no valid spmi pointer\n"); - return -ENODEV; - } - - if (!pdev->dev.of_node) { - pr_err("device node missing\n"); - return -ENODEV; - } - - chip = devm_kzalloc(dev, sizeof(struct fg_chip), GFP_KERNEL); - if (chip == NULL) { - pr_err("Can't allocate fg_chip\n"); - return -ENOMEM; - } - chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); - if (!chip->regmap) { - dev_err(&pdev->dev, "Couldn't get parent's regmap\n"); - return -EINVAL; - } - - chip->pdev = pdev; - chip->dev = &(pdev->dev); - - wakeup_source_init(&chip->empty_check_wakeup_source.source, - "qpnp_fg_empty_check"); - wakeup_source_init(&chip->memif_wakeup_source.source, - "qpnp_fg_memaccess"); - 
wakeup_source_init(&chip->profile_wakeup_source.source, - "qpnp_fg_profile"); - wakeup_source_init(&chip->update_temp_wakeup_source.source, - "qpnp_fg_update_temp"); - wakeup_source_init(&chip->update_sram_wakeup_source.source, - "qpnp_fg_update_sram"); - wakeup_source_init(&chip->resume_soc_wakeup_source.source, - "qpnp_fg_set_resume_soc"); - wakeup_source_init(&chip->gain_comp_wakeup_source.source, - "qpnp_fg_gain_comp"); - wakeup_source_init(&chip->capacity_learning_wakeup_source.source, - "qpnp_fg_cap_learning"); - wakeup_source_init(&chip->esr_extract_wakeup_source.source, - "qpnp_fg_esr_extract"); - mutex_init(&chip->rw_lock); - mutex_init(&chip->cyc_ctr.lock); - mutex_init(&chip->learning_data.learning_lock); - mutex_init(&chip->rslow_comp.lock); - mutex_init(&chip->sysfs_restart_lock); - INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting); - INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work); - INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data); - INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work); - INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init); - INIT_WORK(&chip->rslow_comp_work, rslow_comp_work); - INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work); - INIT_WORK(&chip->dump_sram, dump_sram); - INIT_WORK(&chip->status_change_work, status_change_work); - INIT_WORK(&chip->cycle_count_work, update_cycle_count); - INIT_WORK(&chip->battery_age_work, battery_age_work); - INIT_WORK(&chip->update_esr_work, update_esr_value); - INIT_WORK(&chip->set_resume_soc_work, set_resume_soc_work); - INIT_WORK(&chip->sysfs_restart_work, sysfs_restart_work); - INIT_WORK(&chip->init_work, delayed_init_work); - INIT_WORK(&chip->charge_full_work, charge_full_work); - INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work); - INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work); - INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work); - alarm_init(&chip->fg_cap_learning_alarm, 
ALARM_BOOTTIME, - fg_cap_learning_alarm_cb); - init_completion(&chip->sram_access_granted); - init_completion(&chip->sram_access_revoked); - complete_all(&chip->sram_access_revoked); - init_completion(&chip->batt_id_avail); - init_completion(&chip->first_soc_done); - dev_set_drvdata(&pdev->dev, chip); - - if (of_get_available_child_count(pdev->dev.of_node) == 0) { - pr_err("no child nodes\n"); - rc = -ENXIO; - goto of_init_fail; - } - - for_each_available_child_of_node(pdev->dev.of_node, child) { - rc = of_property_read_u32(child, "reg", &base); - if (rc < 0) { - dev_err(&pdev->dev, - "Couldn't find reg in node = %s rc = %d\n", - child->full_name, rc); - goto of_init_fail; - } - - if (strcmp("qcom,fg-adc-vbat", child->name) == 0) { - chip->vbat_adc_addr = base; - continue; - } else if (strcmp("qcom,fg-adc-ibat", child->name) == 0) { - chip->ibat_adc_addr = base; - continue; - } else if (strcmp("qcom,revid-tp-rev", child->name) == 0) { - chip->tp_rev_addr = base; - continue; - } - - rc = fg_read(chip, &subtype, - base + REG_OFFSET_PERP_SUBTYPE, 1); - if (rc) { - pr_err("Peripheral subtype read failed rc=%d\n", rc); - goto of_init_fail; - } - - switch (subtype) { - case FG_SOC: - chip->soc_base = base; - break; - case FG_MEMIF: - chip->mem_base = base; - break; - case FG_BATT: - chip->batt_base = base; - break; - default: - pr_err("Invalid peripheral subtype=0x%x\n", subtype); - rc = -EINVAL; - } - } - - rc = fg_detect_pmic_type(chip); - if (rc) { - pr_err("Unable to detect PMIC type rc=%d\n", rc); - return rc; - } - - rc = fg_setup_memif_offset(chip); - if (rc) { - pr_err("Unable to setup mem_if offsets rc=%d\n", rc); - goto of_init_fail; - } - - rc = fg_of_init(chip); - if (rc) { - pr_err("failed to parse devicetree rc%d\n", rc); - goto of_init_fail; - } - - if (chip->jeita_hysteresis_support) { - rc = fg_init_batt_temp_state(chip); - if (rc) { - pr_err("failed to get battery status rc%d\n", rc); - goto of_init_fail; - } - } - - /* check if the first estimate is 
already finished at this time */ - if (is_first_est_done(chip)) - complete_all(&chip->first_soc_done); - - reg = 0xFF; - rc = fg_write(chip, ®, INT_EN_CLR(chip->mem_base), 1); - if (rc) { - pr_err("failed to clear interrupts %d\n", rc); - goto of_init_fail; - } - - rc = fg_init_irqs(chip); - if (rc) { - pr_err("failed to request interrupts %d\n", rc); - goto cancel_work; - } - - chip->batt_type = default_batt_type; - - chip->bms_psy_d.name = "bms"; - chip->bms_psy_d.type = POWER_SUPPLY_TYPE_BMS; - chip->bms_psy_d.properties = fg_power_props; - chip->bms_psy_d.num_properties = ARRAY_SIZE(fg_power_props); - chip->bms_psy_d.get_property = fg_power_get_property; - chip->bms_psy_d.set_property = fg_power_set_property; - chip->bms_psy_d.external_power_changed = fg_external_power_changed; - chip->bms_psy_d.property_is_writeable = fg_property_is_writeable; - - bms_psy_cfg.drv_data = chip; - bms_psy_cfg.supplied_to = fg_supplicants; - bms_psy_cfg.num_supplicants = ARRAY_SIZE(fg_supplicants); - bms_psy_cfg.of_node = NULL; - chip->bms_psy = devm_power_supply_register(chip->dev, - &chip->bms_psy_d, - &bms_psy_cfg); - if (IS_ERR(chip->bms_psy)) { - pr_err("batt failed to register rc = %ld\n", - PTR_ERR(chip->bms_psy)); - goto of_init_fail; - } - chip->power_supply_registered = true; - /* - * Just initialize the batt_psy_name here. Power supply - * will be obtained later. 
- */ - chip->batt_psy_name = "battery"; - - if (chip->mem_base) { - rc = fg_dfs_create(chip); - if (rc < 0) { - pr_err("failed to create debugfs rc = %d\n", rc); - goto cancel_work; - } - } - - schedule_work(&chip->init_work); - - pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n", - chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR], - chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR], - chip->pmic_subtype); - - return rc; - -cancel_work: - cancel_delayed_work_sync(&chip->update_jeita_setting); - cancel_delayed_work_sync(&chip->update_sram_data); - cancel_delayed_work_sync(&chip->update_temp_work); - cancel_delayed_work_sync(&chip->check_empty_work); - cancel_delayed_work_sync(&chip->batt_profile_init); - alarm_try_to_cancel(&chip->fg_cap_learning_alarm); - cancel_work_sync(&chip->set_resume_soc_work); - cancel_work_sync(&chip->fg_cap_learning_work); - cancel_work_sync(&chip->dump_sram); - cancel_work_sync(&chip->status_change_work); - cancel_work_sync(&chip->cycle_count_work); - cancel_work_sync(&chip->update_esr_work); - cancel_work_sync(&chip->rslow_comp_work); - cancel_work_sync(&chip->sysfs_restart_work); - cancel_work_sync(&chip->gain_comp_work); - cancel_work_sync(&chip->init_work); - cancel_work_sync(&chip->charge_full_work); - cancel_work_sync(&chip->bcl_hi_power_work); - cancel_work_sync(&chip->esr_extract_config_work); -of_init_fail: - mutex_destroy(&chip->rslow_comp.lock); - mutex_destroy(&chip->rw_lock); - mutex_destroy(&chip->cyc_ctr.lock); - mutex_destroy(&chip->learning_data.learning_lock); - mutex_destroy(&chip->sysfs_restart_lock); - wakeup_source_trash(&chip->resume_soc_wakeup_source.source); - wakeup_source_trash(&chip->empty_check_wakeup_source.source); - wakeup_source_trash(&chip->memif_wakeup_source.source); - wakeup_source_trash(&chip->profile_wakeup_source.source); - wakeup_source_trash(&chip->update_temp_wakeup_source.source); - wakeup_source_trash(&chip->update_sram_wakeup_source.source); - 
wakeup_source_trash(&chip->gain_comp_wakeup_source.source); - wakeup_source_trash(&chip->capacity_learning_wakeup_source.source); - wakeup_source_trash(&chip->esr_extract_wakeup_source.source); - return rc; -} - -static void check_and_update_sram_data(struct fg_chip *chip) -{ - unsigned long current_time = 0, next_update_time, time_left; - - get_current_time(¤t_time); - - next_update_time = chip->last_temp_update_time - + (TEMP_PERIOD_UPDATE_MS / 1000); - - if (next_update_time > current_time) - time_left = next_update_time - current_time; - else - time_left = 0; - - schedule_delayed_work( - &chip->update_temp_work, msecs_to_jiffies(time_left * 1000)); - - next_update_time = chip->last_sram_update_time - + (fg_sram_update_period_ms / 1000); - - if (next_update_time > current_time) - time_left = next_update_time - current_time; - else - time_left = 0; - - schedule_delayed_work( - &chip->update_sram_data, msecs_to_jiffies(time_left * 1000)); -} - -static int fg_suspend(struct device *dev) -{ - struct fg_chip *chip = dev_get_drvdata(dev); - - if (!chip->sw_rbias_ctrl) - return 0; - - cancel_delayed_work(&chip->update_temp_work); - cancel_delayed_work(&chip->update_sram_data); - - return 0; -} - -static int fg_resume(struct device *dev) -{ - struct fg_chip *chip = dev_get_drvdata(dev); - - if (!chip->sw_rbias_ctrl) - return 0; - - check_and_update_sram_data(chip); - return 0; -} - -static const struct dev_pm_ops qpnp_fg_pm_ops = { - .suspend = fg_suspend, - .resume = fg_resume, -}; - -static int fg_sense_type_set(const char *val, const struct kernel_param *kp) -{ - int rc; - struct power_supply *bms_psy; - struct fg_chip *chip; - int old_fg_sense_type = fg_sense_type; - - rc = param_set_int(val, kp); - if (rc) { - pr_err("Unable to set fg_sense_type: %d\n", rc); - return rc; - } - - if (fg_sense_type != 0 && fg_sense_type != 1) { - pr_err("Bad value %d\n", fg_sense_type); - fg_sense_type = old_fg_sense_type; - return -EINVAL; - } - - if (fg_debug_mask & FG_STATUS) - 
pr_info("fg_sense_type set to %d\n", fg_sense_type); - - bms_psy = power_supply_get_by_name("bms"); - if (!bms_psy) { - pr_err("bms psy not found\n"); - return 0; - } - - chip = power_supply_get_drvdata(bms_psy); - rc = set_prop_sense_type(chip, fg_sense_type); - return rc; -} - -static struct kernel_param_ops fg_sense_type_ops = { - .set = fg_sense_type_set, - .get = param_get_int, -}; - -module_param_cb(sense_type, &fg_sense_type_ops, &fg_sense_type, 0644); - -static int fg_restart_set(const char *val, const struct kernel_param *kp) -{ - struct power_supply *bms_psy; - struct fg_chip *chip; - - bms_psy = power_supply_get_by_name("bms"); - if (!bms_psy) { - pr_err("bms psy not found\n"); - return 0; - } - chip = power_supply_get_drvdata(bms_psy); - - mutex_lock(&chip->sysfs_restart_lock); - if (fg_restart != 0) { - mutex_unlock(&chip->sysfs_restart_lock); - return 0; - } - fg_restart = 1; - mutex_unlock(&chip->sysfs_restart_lock); - - if (fg_debug_mask & FG_STATUS) - pr_info("fuel gauge restart initiated from sysfs...\n"); - - schedule_work(&chip->sysfs_restart_work); - return 0; -} - -static struct kernel_param_ops fg_restart_ops = { - .set = fg_restart_set, - .get = param_get_int, -}; - -module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644); - -static struct platform_driver fg_driver = { - .driver = { - .name = QPNP_FG_DEV_NAME, - .of_match_table = fg_match_table, - .pm = &qpnp_fg_pm_ops, - }, - .probe = fg_probe, - .remove = fg_remove, -}; - -static int __init fg_init(void) -{ - return platform_driver_register(&fg_driver); -} - -static void __exit fg_exit(void) -{ - return platform_driver_unregister(&fg_driver); -} - -module_init(fg_init); -module_exit(fg_exit); - -MODULE_DESCRIPTION("QPNP Fuel Gauge Driver"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:" QPNP_FG_DEV_NAME); diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c deleted file mode 100644 index 
6c1e58d046e8593347ac950d7a2580246d68cbce..0000000000000000000000000000000000000000 --- a/drivers/power/supply/qcom/qpnp-smbcharger.c +++ /dev/null @@ -1,8472 +0,0 @@ -/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#define pr_fmt(fmt) "SMBCHG: %s: " fmt, __func__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "pmic-voter.h" - -/* Mask/Bit helpers */ -#define _SMB_MASK(BITS, POS) \ - ((unsigned char)(((1 << (BITS)) - 1) << (POS))) -#define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \ - _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \ - (RIGHT_BIT_POS)) -/* Config registers */ -struct smbchg_regulator { - struct regulator_desc rdesc; - struct regulator_dev *rdev; -}; - -struct parallel_usb_cfg { - struct power_supply *psy; - int min_current_thr_ma; - int min_9v_current_thr_ma; - int allowed_lowering_ma; - int current_max_ma; - bool avail; - struct mutex lock; - int initial_aicl_ma; - ktime_t last_disabled; - bool enabled_once; -}; - -struct ilim_entry { - int vmin_uv; - int vmax_uv; - int icl_pt_ma; - int icl_lv_ma; - int icl_hv_ma; -}; - -struct ilim_map { - int num; - struct ilim_entry *entries; -}; - -struct smbchg_version_tables { - const int *dc_ilim_ma_table; - int dc_ilim_ma_len; - const int 
*usb_ilim_ma_table; - int usb_ilim_ma_len; - const int *iterm_ma_table; - int iterm_ma_len; - const int *fcc_comp_table; - int fcc_comp_len; - const int *aicl_rerun_period_table; - int aicl_rerun_period_len; - int rchg_thr_mv; -}; - -struct smbchg_chip { - struct device *dev; - struct platform_device *pdev; - struct regmap *regmap; - int schg_version; - - /* peripheral register address bases */ - u16 chgr_base; - u16 bat_if_base; - u16 usb_chgpth_base; - u16 dc_chgpth_base; - u16 otg_base; - u16 misc_base; - - int fake_battery_soc; - u8 revision[4]; - - /* configuration parameters */ - int iterm_ma; - int usb_max_current_ma; - int typec_current_ma; - int dc_max_current_ma; - int dc_target_current_ma; - int cfg_fastchg_current_ma; - int fastchg_current_ma; - int vfloat_mv; - int fastchg_current_comp; - int float_voltage_comp; - int resume_delta_mv; - int safety_time; - int prechg_safety_time; - int bmd_pin_src; - int jeita_temp_hard_limit; - int aicl_rerun_period_s; - bool use_vfloat_adjustments; - bool iterm_disabled; - bool bmd_algo_disabled; - bool soft_vfloat_comp_disabled; - bool chg_enabled; - bool charge_unknown_battery; - bool chg_inhibit_en; - bool chg_inhibit_source_fg; - bool low_volt_dcin; - bool cfg_chg_led_support; - bool cfg_chg_led_sw_ctrl; - bool vbat_above_headroom; - bool force_aicl_rerun; - bool hvdcp3_supported; - bool restricted_charging; - bool skip_usb_suspend_for_fake_battery; - bool hvdcp_not_supported; - bool otg_pinctrl; - u8 original_usbin_allowance; - struct parallel_usb_cfg parallel; - struct delayed_work parallel_en_work; - struct dentry *debug_root; - struct smbchg_version_tables tables; - - /* wipower params */ - struct ilim_map wipower_default; - struct ilim_map wipower_pt; - struct ilim_map wipower_div2; - struct qpnp_vadc_chip *vadc_dev; - bool wipower_dyn_icl_avail; - struct ilim_entry current_ilim; - struct mutex wipower_config; - bool wipower_configured; - struct qpnp_adc_tm_btm_param param; - - /* flash current prediction */ 
- int rpara_uohm; - int rslow_uohm; - int vled_max_uv; - - /* vfloat adjustment */ - int max_vbat_sample; - int n_vbat_samples; - - /* status variables */ - int wake_reasons; - int previous_soc; - int usb_online; - bool dc_present; - bool usb_present; - bool batt_present; - int otg_retries; - ktime_t otg_enable_time; - bool aicl_deglitch_short; - bool safety_timer_en; - bool aicl_complete; - bool usb_ov_det; - bool otg_pulse_skip_dis; - const char *battery_type; - enum power_supply_type usb_supply_type; - bool very_weak_charger; - bool parallel_charger_detected; - bool chg_otg_enabled; - bool flash_triggered; - bool flash_active; - bool icl_disabled; - u32 wa_flags; - int usb_icl_delta; - bool typec_dfp; - unsigned int usb_current_max; - unsigned int usb_health; - - /* jeita and temperature */ - bool batt_hot; - bool batt_cold; - bool batt_warm; - bool batt_cool; - unsigned int thermal_levels; - unsigned int therm_lvl_sel; - unsigned int *thermal_mitigation; - - /* irqs */ - int batt_hot_irq; - int batt_warm_irq; - int batt_cool_irq; - int batt_cold_irq; - int batt_missing_irq; - int vbat_low_irq; - int chg_hot_irq; - int chg_term_irq; - int taper_irq; - bool taper_irq_enabled; - struct mutex taper_irq_lock; - int recharge_irq; - int fastchg_irq; - int wdog_timeout_irq; - int power_ok_irq; - int dcin_uv_irq; - int usbin_uv_irq; - int usbin_ov_irq; - int src_detect_irq; - int otg_fail_irq; - int otg_oc_irq; - int aicl_done_irq; - int usbid_change_irq; - int chg_error_irq; - bool enable_aicl_wake; - - /* psy */ - struct power_supply_desc usb_psy_d; - struct power_supply *usb_psy; - struct power_supply_desc batt_psy_d; - struct power_supply *batt_psy; - struct power_supply_desc dc_psy_d; - struct power_supply *dc_psy; - struct power_supply *bms_psy; - struct power_supply *typec_psy; - int dc_psy_type; - const char *bms_psy_name; - const char *battery_psy_name; - - struct regulator *dpdm_reg; - struct smbchg_regulator otg_vreg; - struct smbchg_regulator ext_otg_vreg; - 
struct work_struct usb_set_online_work; - struct delayed_work vfloat_adjust_work; - struct delayed_work hvdcp_det_work; - spinlock_t sec_access_lock; - struct mutex therm_lvl_lock; - struct mutex usb_set_online_lock; - struct mutex pm_lock; - /* aicl deglitch workaround */ - unsigned long first_aicl_seconds; - int aicl_irq_count; - struct mutex usb_status_lock; - bool hvdcp_3_det_ignore_uv; - struct completion src_det_lowered; - struct completion src_det_raised; - struct completion usbin_uv_lowered; - struct completion usbin_uv_raised; - int pulse_cnt; - struct led_classdev led_cdev; - bool skip_usb_notification; - u32 vchg_adc_channel; - struct qpnp_vadc_chip *vchg_vadc_dev; - - /* voters */ - struct votable *fcc_votable; - struct votable *usb_icl_votable; - struct votable *dc_icl_votable; - struct votable *usb_suspend_votable; - struct votable *dc_suspend_votable; - struct votable *battchg_suspend_votable; - struct votable *hw_aicl_rerun_disable_votable; - struct votable *hw_aicl_rerun_enable_indirect_votable; - struct votable *aicl_deglitch_short_votable; - - /* extcon for VBUS / ID notification to USB */ - struct extcon_dev *extcon; -}; - -enum qpnp_schg { - QPNP_SCHG, - QPNP_SCHG_LITE, -}; - -static char *version_str[] = { - [QPNP_SCHG] = "SCHG", - [QPNP_SCHG_LITE] = "SCHG_LITE", -}; - -enum pmic_subtype { - PMI8994 = 10, - PMI8950 = 17, - PMI8996 = 19, - PMI8937 = 55, -}; - -enum smbchg_wa { - SMBCHG_AICL_DEGLITCH_WA = BIT(0), - SMBCHG_HVDCP_9V_EN_WA = BIT(1), - SMBCHG_USB100_WA = BIT(2), - SMBCHG_BATT_OV_WA = BIT(3), - SMBCHG_CC_ESR_WA = BIT(4), - SMBCHG_FLASH_ICL_DISABLE_WA = BIT(5), - SMBCHG_RESTART_WA = BIT(6), - SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA = BIT(7), -}; - -enum print_reason { - PR_REGISTER = BIT(0), - PR_INTERRUPT = BIT(1), - PR_STATUS = BIT(2), - PR_DUMP = BIT(3), - PR_PM = BIT(4), - PR_MISC = BIT(5), - PR_WIPOWER = BIT(6), - PR_TYPEC = BIT(7), -}; - -enum wake_reason { - PM_PARALLEL_CHECK = BIT(0), - PM_REASON_VFLOAT_ADJUST = BIT(1), - 
PM_ESR_PULSE = BIT(2), - PM_PARALLEL_TAPER = BIT(3), - PM_DETECT_HVDCP = BIT(4), -}; - -/* fcc_voters */ -#define ESR_PULSE_FCC_VOTER "ESR_PULSE_FCC_VOTER" -#define BATT_TYPE_FCC_VOTER "BATT_TYPE_FCC_VOTER" -#define RESTRICTED_CHG_FCC_VOTER "RESTRICTED_CHG_FCC_VOTER" - -/* ICL VOTERS */ -#define PSY_ICL_VOTER "PSY_ICL_VOTER" -#define THERMAL_ICL_VOTER "THERMAL_ICL_VOTER" -#define HVDCP_ICL_VOTER "HVDCP_ICL_VOTER" -#define USER_ICL_VOTER "USER_ICL_VOTER" -#define WEAK_CHARGER_ICL_VOTER "WEAK_CHARGER_ICL_VOTER" -#define SW_AICL_ICL_VOTER "SW_AICL_ICL_VOTER" -#define CHG_SUSPEND_WORKAROUND_ICL_VOTER "CHG_SUSPEND_WORKAROUND_ICL_VOTER" - -/* USB SUSPEND VOTERS */ -/* userspace has suspended charging altogether */ -#define USER_EN_VOTER "USER_EN_VOTER" -/* - * this specific path has been suspended through the power supply - * framework - */ -#define POWER_SUPPLY_EN_VOTER "POWER_SUPPLY_EN_VOTER" -/* - * the usb driver has suspended this path by setting a current limit - * of < 2MA - */ -#define USB_EN_VOTER "USB_EN_VOTER" -/* - * the thermal daemon can suspend a charge path when the system - * temperature levels rise - */ -#define THERMAL_EN_VOTER "THERMAL_EN_VOTER" -/* - * an external OTG supply is being used, suspend charge path so the - * charger does not accidentally try to charge from the external supply. 
- */ -#define OTG_EN_VOTER "OTG_EN_VOTER" -/* - * the charger is very weak, do not draw any current from it - */ -#define WEAK_CHARGER_EN_VOTER "WEAK_CHARGER_EN_VOTER" -/* - * fake battery voter, if battery id-resistance around 7.5 Kohm - */ -#define FAKE_BATTERY_EN_VOTER "FAKE_BATTERY_EN_VOTER" - -/* battchg_enable_voters */ - /* userspace has disabled battery charging */ -#define BATTCHG_USER_EN_VOTER "BATTCHG_USER_EN_VOTER" - /* battery charging disabled while loading battery profiles */ -#define BATTCHG_UNKNOWN_BATTERY_EN_VOTER "BATTCHG_UNKNOWN_BATTERY_EN_VOTER" - -/* hw_aicl_rerun_enable_indirect_voters */ -/* enabled via device tree */ -#define DEFAULT_CONFIG_HW_AICL_VOTER "DEFAULT_CONFIG_HW_AICL_VOTER" -/* Varb workaround voter */ -#define VARB_WORKAROUND_VOTER "VARB_WORKAROUND_VOTER" -/* SHUTDOWN workaround voter */ -#define SHUTDOWN_WORKAROUND_VOTER "SHUTDOWN_WORKAROUND_VOTER" - -/* hw_aicl_rerun_disable_voters */ -/* the results from enabling clients */ -#define HW_AICL_RERUN_ENABLE_INDIRECT_VOTER \ - "HW_AICL_RERUN_ENABLE_INDIRECT_VOTER" -/* Weak charger voter */ -#define WEAK_CHARGER_HW_AICL_VOTER "WEAK_CHARGER_HW_AICL_VOTER" - -/* aicl_short_deglitch_voters */ -/* Varb workaround voter */ -#define VARB_WORKAROUND_SHORT_DEGLITCH_VOTER \ - "VARB_WRKARND_SHORT_DEGLITCH_VOTER" -/* QC 2.0 */ -#define HVDCP_SHORT_DEGLITCH_VOTER "HVDCP_SHORT_DEGLITCH_VOTER" - -static const unsigned int smbchg_extcon_cable[] = { - EXTCON_USB, - EXTCON_USB_HOST, - EXTCON_NONE, -}; - -static int smbchg_debug_mask; -module_param_named( - debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR -); - -static int smbchg_parallel_en = 1; -module_param_named( - parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR -); - -static int smbchg_main_chg_fcc_percent = 50; -module_param_named( - main_chg_fcc_percent, smbchg_main_chg_fcc_percent, - int, S_IRUSR | S_IWUSR -); - -static int smbchg_main_chg_icl_percent = 60; -module_param_named( - main_chg_icl_percent, 
smbchg_main_chg_icl_percent, - int, S_IRUSR | S_IWUSR -); - -static int smbchg_default_hvdcp_icl_ma = 1800; -module_param_named( - default_hvdcp_icl_ma, smbchg_default_hvdcp_icl_ma, - int, S_IRUSR | S_IWUSR -); - -static int smbchg_default_hvdcp3_icl_ma = 3000; -module_param_named( - default_hvdcp3_icl_ma, smbchg_default_hvdcp3_icl_ma, - int, S_IRUSR | S_IWUSR -); - -static int smbchg_default_dcp_icl_ma = 1800; -module_param_named( - default_dcp_icl_ma, smbchg_default_dcp_icl_ma, - int, S_IRUSR | S_IWUSR -); - -static int wipower_dyn_icl_en; -module_param_named( - dynamic_icl_wipower_en, wipower_dyn_icl_en, - int, S_IRUSR | S_IWUSR -); - -static int wipower_dcin_interval = ADC_MEAS1_INTERVAL_2P0MS; -module_param_named( - wipower_dcin_interval, wipower_dcin_interval, - int, S_IRUSR | S_IWUSR -); - -#define WIPOWER_DEFAULT_HYSTERISIS_UV 250000 -static int wipower_dcin_hyst_uv = WIPOWER_DEFAULT_HYSTERISIS_UV; -module_param_named( - wipower_dcin_hyst_uv, wipower_dcin_hyst_uv, - int, S_IRUSR | S_IWUSR -); - -#define pr_smb(reason, fmt, ...) \ - do { \ - if (smbchg_debug_mask & (reason)) \ - pr_info(fmt, ##__VA_ARGS__); \ - else \ - pr_debug(fmt, ##__VA_ARGS__); \ - } while (0) - -#define pr_smb_rt(reason, fmt, ...) 
\ - do { \ - if (smbchg_debug_mask & (reason)) \ - pr_info_ratelimited(fmt, ##__VA_ARGS__); \ - else \ - pr_debug(fmt, ##__VA_ARGS__); \ - } while (0) - -static int smbchg_read(struct smbchg_chip *chip, u8 *val, - u16 addr, int count) -{ - int rc = 0; - struct platform_device *pdev = chip->pdev; - - if (addr == 0) { - dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n", - addr, to_spmi_device(pdev->dev.parent)->usid, rc); - return -EINVAL; - } - - rc = regmap_bulk_read(chip->regmap, addr, val, count); - if (rc) { - dev_err(chip->dev, "spmi read failed addr=0x%02x sid=0x%02x rc=%d\n", - addr, to_spmi_device(pdev->dev.parent)->usid, - rc); - return rc; - } - return 0; -} - -/* - * Writes a register to the specified by the base and limited by the bit mask - * - * Do not use this function for register writes if possible. Instead use the - * smbchg_masked_write function. - * - * The sec_access_lock must be held for all register writes and this function - * does not do that. If this function is used, please hold the spinlock or - * random secure access writes may fail. - */ -static int smbchg_masked_write_raw(struct smbchg_chip *chip, u16 base, u8 mask, - u8 val) -{ - int rc; - - rc = regmap_update_bits(chip->regmap, base, mask, val); - if (rc) { - dev_err(chip->dev, "spmi write failed: addr=%03X, rc=%d\n", - base, rc); - return rc; - } - - return 0; -} - -/* - * Writes a register to the specified by the base and limited by the bit mask - * - * This function holds a spin lock to ensure secure access register writes goes - * through. If the secure access unlock register is armed, any old register - * write can unarm the secure access unlock, causing the next write to fail. - * - * Note: do not use this for sec_access registers. 
Instead use the function - * below: smbchg_sec_masked_write - */ -static int smbchg_masked_write(struct smbchg_chip *chip, u16 base, u8 mask, - u8 val) -{ - unsigned long flags; - int rc; - - spin_lock_irqsave(&chip->sec_access_lock, flags); - rc = smbchg_masked_write_raw(chip, base, mask, val); - spin_unlock_irqrestore(&chip->sec_access_lock, flags); - - return rc; -} - -/* - * Unlocks sec access and writes to the register specified. - * - * This function holds a spin lock to exclude other register writes while - * the two writes are taking place. - */ -#define SEC_ACCESS_OFFSET 0xD0 -#define SEC_ACCESS_VALUE 0xA5 -#define PERIPHERAL_MASK 0xFF -static int smbchg_sec_masked_write(struct smbchg_chip *chip, u16 base, u8 mask, - u8 val) -{ - unsigned long flags; - int rc; - u16 peripheral_base = base & (~PERIPHERAL_MASK); - - spin_lock_irqsave(&chip->sec_access_lock, flags); - - rc = smbchg_masked_write_raw(chip, peripheral_base + SEC_ACCESS_OFFSET, - SEC_ACCESS_VALUE, SEC_ACCESS_VALUE); - if (rc) { - dev_err(chip->dev, "Unable to unlock sec_access: %d", rc); - goto out; - } - - rc = smbchg_masked_write_raw(chip, base, mask, val); - -out: - spin_unlock_irqrestore(&chip->sec_access_lock, flags); - return rc; -} - -static void smbchg_stay_awake(struct smbchg_chip *chip, int reason) -{ - int reasons; - - mutex_lock(&chip->pm_lock); - reasons = chip->wake_reasons | reason; - if (reasons != 0 && chip->wake_reasons == 0) { - pr_smb(PR_PM, "staying awake: 0x%02x (bit %d)\n", - reasons, reason); - pm_stay_awake(chip->dev); - } - chip->wake_reasons = reasons; - mutex_unlock(&chip->pm_lock); -} - -static void smbchg_relax(struct smbchg_chip *chip, int reason) -{ - int reasons; - - mutex_lock(&chip->pm_lock); - reasons = chip->wake_reasons & (~reason); - if (reasons == 0 && chip->wake_reasons != 0) { - pr_smb(PR_PM, "relaxing: 0x%02x (bit %d)\n", - reasons, reason); - pm_relax(chip->dev); - } - chip->wake_reasons = reasons; - mutex_unlock(&chip->pm_lock); -}; - -enum 
pwr_path_type { - UNKNOWN = 0, - PWR_PATH_BATTERY = 1, - PWR_PATH_USB = 2, - PWR_PATH_DC = 3, -}; - -#define PWR_PATH 0x08 -#define PWR_PATH_MASK 0x03 -static enum pwr_path_type smbchg_get_pwr_path(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + PWR_PATH, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read PWR_PATH rc = %d\n", rc); - return PWR_PATH_BATTERY; - } - - return reg & PWR_PATH_MASK; -} - -#define RID_STS 0xB -#define RID_MASK 0xF -#define IDEV_STS 0x8 -#define RT_STS 0x10 -#define USBID_MSB 0xE -#define USBIN_UV_BIT BIT(0) -#define USBIN_OV_BIT BIT(1) -#define USBIN_SRC_DET_BIT BIT(2) -#define FMB_STS_MASK SMB_MASK(3, 0) -#define USBID_GND_THRESHOLD 0x495 -static bool is_otg_present_schg(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - u8 usbid_reg[2]; - u16 usbid_val; - /* - * After the falling edge of the usbid change interrupt occurs, - * there may still be some time before the ADC conversion for USB RID - * finishes in the fuel gauge. In the worst case, this could be up to - * 15 ms. - * - * Sleep for 20 ms (minimum msleep time) to wait for the conversion to - * finish and the USB RID status register to be updated before trying - * to detect OTG insertions. - */ - - msleep(20); - - /* - * There is a problem with USBID conversions on PMI8994 revisions - * 2.0.0. As a workaround, check that the cable is not - * detected as factory test before enabling OTG. 
- */ - rc = smbchg_read(chip, ®, chip->misc_base + IDEV_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read IDEV_STS rc = %d\n", rc); - return false; - } - - if ((reg & FMB_STS_MASK) != 0) { - pr_smb(PR_STATUS, "IDEV_STS = %02x, not ground\n", reg); - return false; - } - - rc = smbchg_read(chip, usbid_reg, chip->usb_chgpth_base + USBID_MSB, 2); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read USBID rc = %d\n", rc); - return false; - } - usbid_val = (usbid_reg[0] << 8) | usbid_reg[1]; - - if (usbid_val > USBID_GND_THRESHOLD) { - pr_smb(PR_STATUS, "USBID = 0x%04x, too high to be ground\n", - usbid_val); - return false; - } - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RID_STS, 1); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't read usb rid status rc = %d\n", rc); - return false; - } - - pr_smb(PR_STATUS, "RID_STS = %02x\n", reg); - - return (reg & RID_MASK) == 0; -} - -#define RID_GND_DET_STS BIT(2) -static bool is_otg_present_schg_lite(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->otg_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't read otg RT status rc = %d\n", rc); - return false; - } - - return !!(reg & RID_GND_DET_STS); -} - -static bool is_otg_present(struct smbchg_chip *chip) -{ - if (chip->schg_version == QPNP_SCHG_LITE) - return is_otg_present_schg_lite(chip); - - return is_otg_present_schg(chip); -} - -#define USBIN_9V BIT(5) -#define USBIN_UNREG BIT(4) -#define USBIN_LV BIT(3) -#define DCIN_9V BIT(2) -#define DCIN_UNREG BIT(1) -#define DCIN_LV BIT(0) -#define INPUT_STS 0x0D -#define DCIN_UV_BIT BIT(0) -#define DCIN_OV_BIT BIT(1) -static bool is_dc_present(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->dc_chgpth_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read dc status rc = %d\n", rc); - return false; - } - - if ((reg & DCIN_UV_BIT) || (reg & DCIN_OV_BIT)) - return false; - - return true; -} - -static bool 
is_usb_present(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc); - return false; - } - if (!(reg & USBIN_SRC_DET_BIT) || (reg & USBIN_OV_BIT)) - return false; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + INPUT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc); - return false; - } - - return !!(reg & (USBIN_9V | USBIN_UNREG | USBIN_LV)); -} - -static char *usb_type_str[] = { - "SDP", /* bit 0 */ - "OTHER", /* bit 1 */ - "DCP", /* bit 2 */ - "CDP", /* bit 3 */ - "NONE", /* bit 4 error case */ -}; - -#define N_TYPE_BITS 4 -#define TYPE_BITS_OFFSET 4 - -static int get_type(u8 type_reg) -{ - unsigned long type = type_reg; - type >>= TYPE_BITS_OFFSET; - return find_first_bit(&type, N_TYPE_BITS); -} - -/* helper to return the string of USB type */ -static inline char *get_usb_type_name(int type) -{ - return usb_type_str[type]; -} - -static enum power_supply_type usb_type_enum[] = { - POWER_SUPPLY_TYPE_USB, /* bit 0 */ - POWER_SUPPLY_TYPE_USB_DCP, /* bit 1 */ - POWER_SUPPLY_TYPE_USB_DCP, /* bit 2 */ - POWER_SUPPLY_TYPE_USB_CDP, /* bit 3 */ - POWER_SUPPLY_TYPE_USB_DCP, /* bit 4 error case, report DCP */ -}; - -/* helper to return enum power_supply_type of USB type */ -static inline enum power_supply_type get_usb_supply_type(int type) -{ - return usb_type_enum[type]; -} - -static bool is_src_detect_high(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc); - return false; - } - return reg &= USBIN_SRC_DET_BIT; -} - -static void read_usb_type(struct smbchg_chip *chip, char **usb_type_name, - enum power_supply_type *usb_supply_type) -{ - int rc, type; - u8 reg; - - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "src det low\n"); - 
*usb_type_name = "Absent"; - *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN; - return; - } - - rc = smbchg_read(chip, ®, chip->misc_base + IDEV_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc); - *usb_type_name = "Other"; - *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN; - return; - } - type = get_type(reg); - *usb_type_name = get_usb_type_name(type); - *usb_supply_type = get_usb_supply_type(type); -} - -#define CHGR_STS 0x0E -#define BATT_LESS_THAN_2V BIT(4) -#define CHG_HOLD_OFF_BIT BIT(3) -#define CHG_TYPE_MASK SMB_MASK(2, 1) -#define CHG_TYPE_SHIFT 1 -#define BATT_NOT_CHG_VAL 0x0 -#define BATT_PRE_CHG_VAL 0x1 -#define BATT_FAST_CHG_VAL 0x2 -#define BATT_TAPER_CHG_VAL 0x3 -#define CHG_INHIBIT_BIT BIT(1) -#define BAT_TCC_REACHED_BIT BIT(7) -static int get_prop_batt_status(struct smbchg_chip *chip) -{ - int rc, status = POWER_SUPPLY_STATUS_DISCHARGING; - u8 reg = 0, chg_type; - bool charger_present, chg_inhibit; - - charger_present = is_usb_present(chip) | is_dc_present(chip) | - chip->hvdcp_3_det_ignore_uv; - if (!charger_present) - return POWER_SUPPLY_STATUS_DISCHARGING; - - rc = smbchg_read(chip, ®, chip->chgr_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc); - return POWER_SUPPLY_STATUS_UNKNOWN; - } - - if (reg & BAT_TCC_REACHED_BIT) - return POWER_SUPPLY_STATUS_FULL; - - chg_inhibit = reg & CHG_INHIBIT_BIT; - if (chg_inhibit) - return POWER_SUPPLY_STATUS_FULL; - - rc = smbchg_read(chip, ®, chip->chgr_base + CHGR_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc); - return POWER_SUPPLY_STATUS_UNKNOWN; - } - - if (reg & CHG_HOLD_OFF_BIT) { - /* - * when chg hold off happens the battery is - * not charging - */ - status = POWER_SUPPLY_STATUS_NOT_CHARGING; - goto out; - } - - chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT; - - if (chg_type == BATT_NOT_CHG_VAL && !chip->hvdcp_3_det_ignore_uv) - status = POWER_SUPPLY_STATUS_DISCHARGING; - else - 
status = POWER_SUPPLY_STATUS_CHARGING; -out: - pr_smb_rt(PR_MISC, "CHGR_STS = 0x%02x\n", reg); - return status; -} - -#define BAT_PRES_STATUS 0x08 -#define BAT_PRES_BIT BIT(7) -static int get_prop_batt_present(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->bat_if_base + BAT_PRES_STATUS, 1); - if (rc < 0) { - dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc); - return 0; - } - - return !!(reg & BAT_PRES_BIT); -} - -static int get_prop_charge_type(struct smbchg_chip *chip) -{ - int rc; - u8 reg, chg_type; - - rc = smbchg_read(chip, ®, chip->chgr_base + CHGR_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc); - return 0; - } - - chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT; - if (chg_type == BATT_NOT_CHG_VAL) - return POWER_SUPPLY_CHARGE_TYPE_NONE; - else if (chg_type == BATT_TAPER_CHG_VAL) - return POWER_SUPPLY_CHARGE_TYPE_TAPER; - else if (chg_type == BATT_FAST_CHG_VAL) - return POWER_SUPPLY_CHARGE_TYPE_FAST; - else if (chg_type == BATT_PRE_CHG_VAL) - return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; - - return POWER_SUPPLY_CHARGE_TYPE_NONE; -} - -static int set_property_on_fg(struct smbchg_chip *chip, - enum power_supply_property prop, int val) -{ - int rc; - union power_supply_propval ret = {0, }; - - if (!chip->bms_psy && chip->bms_psy_name) - chip->bms_psy = - power_supply_get_by_name((char *)chip->bms_psy_name); - if (!chip->bms_psy) { - pr_smb(PR_STATUS, "no bms psy found\n"); - return -EINVAL; - } - - ret.intval = val; - rc = power_supply_set_property(chip->bms_psy, prop, &ret); - if (rc) - pr_smb(PR_STATUS, - "bms psy does not allow updating prop %d rc = %d\n", - prop, rc); - - return rc; -} - -static int get_property_from_fg(struct smbchg_chip *chip, - enum power_supply_property prop, int *val) -{ - int rc; - union power_supply_propval ret = {0, }; - - if (!chip->bms_psy && chip->bms_psy_name) - chip->bms_psy = - power_supply_get_by_name((char *)chip->bms_psy_name); - if 
(!chip->bms_psy) { - pr_smb(PR_STATUS, "no bms psy found\n"); - return -EINVAL; - } - - rc = power_supply_get_property(chip->bms_psy, prop, &ret); - if (rc) { - pr_smb(PR_STATUS, - "bms psy doesn't support reading prop %d rc = %d\n", - prop, rc); - return rc; - } - - *val = ret.intval; - return rc; -} - -#define DEFAULT_BATT_CAPACITY 50 -static int get_prop_batt_capacity(struct smbchg_chip *chip) -{ - int capacity, rc; - - if (chip->fake_battery_soc >= 0) - return chip->fake_battery_soc; - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CAPACITY, &capacity); - if (rc) { - pr_smb(PR_STATUS, "Couldn't get capacity rc = %d\n", rc); - capacity = DEFAULT_BATT_CAPACITY; - } - return capacity; -} - -#define DEFAULT_BATT_TEMP 200 -static int get_prop_batt_temp(struct smbchg_chip *chip) -{ - int temp, rc; - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_TEMP, &temp); - if (rc) { - pr_smb(PR_STATUS, "Couldn't get temperature rc = %d\n", rc); - temp = DEFAULT_BATT_TEMP; - } - return temp; -} - -#define DEFAULT_BATT_CURRENT_NOW 0 -static int get_prop_batt_current_now(struct smbchg_chip *chip) -{ - int ua, rc; - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW, &ua); - if (rc) { - pr_smb(PR_STATUS, "Couldn't get current rc = %d\n", rc); - ua = DEFAULT_BATT_CURRENT_NOW; - } - return ua; -} - -#define DEFAULT_BATT_VOLTAGE_NOW 0 -static int get_prop_batt_voltage_now(struct smbchg_chip *chip) -{ - int uv, rc; - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_NOW, &uv); - if (rc) { - pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc); - uv = DEFAULT_BATT_VOLTAGE_NOW; - } - return uv; -} - -#define DEFAULT_BATT_VOLTAGE_MAX_DESIGN 4200000 -static int get_prop_batt_voltage_max_design(struct smbchg_chip *chip) -{ - int uv, rc; - - rc = get_property_from_fg(chip, - POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &uv); - if (rc) { - pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc); - uv = DEFAULT_BATT_VOLTAGE_MAX_DESIGN; - } - return uv; -} - -static 
int get_prop_batt_health(struct smbchg_chip *chip) -{ - if (chip->batt_hot) - return POWER_SUPPLY_HEALTH_OVERHEAT; - else if (chip->batt_cold) - return POWER_SUPPLY_HEALTH_COLD; - else if (chip->batt_warm) - return POWER_SUPPLY_HEALTH_WARM; - else if (chip->batt_cool) - return POWER_SUPPLY_HEALTH_COOL; - else - return POWER_SUPPLY_HEALTH_GOOD; -} - -static void get_property_from_typec(struct smbchg_chip *chip, - enum power_supply_property property, - union power_supply_propval *prop) -{ - int rc; - - rc = power_supply_get_property(chip->typec_psy, - property, prop); - if (rc) - pr_smb(PR_TYPEC, - "typec psy doesn't support reading prop %d rc = %d\n", - property, rc); -} - -static void update_typec_status(struct smbchg_chip *chip) -{ - union power_supply_propval type = {0, }; - union power_supply_propval capability = {0, }; - - get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type); - if (type.intval != POWER_SUPPLY_TYPE_UNKNOWN) { - get_property_from_typec(chip, - POWER_SUPPLY_PROP_CURRENT_CAPABILITY, - &capability); - chip->typec_current_ma = capability.intval; - pr_smb(PR_TYPEC, "SMB Type-C mode = %d, current=%d\n", - type.intval, capability.intval); - } else { - pr_smb(PR_TYPEC, - "typec detection not completed continuing with USB update\n"); - } -} - -/* - * finds the index of the closest value in the array. If there are two that - * are equally close, the lower index will be returned - */ -static int find_closest_in_array(const int *arr, int len, int val) -{ - int i, closest = 0; - - if (len == 0) - return closest; - for (i = 0; i < len; i++) - if (abs(val - arr[i]) < abs(val - arr[closest])) - closest = i; - - return closest; -} - -/* finds the index of the closest smaller value in the array. 
*/ -static int find_smaller_in_array(const int *table, int val, int len) -{ - int i; - - for (i = len - 1; i >= 0; i--) { - if (val >= table[i]) - break; - } - - return i; -} - -static const int iterm_ma_table_8994[] = { - 300, - 50, - 100, - 150, - 200, - 250, - 500, - 600 -}; - -static const int iterm_ma_table_8996[] = { - 300, - 50, - 100, - 150, - 200, - 250, - 400, - 500 -}; - -static const int usb_ilim_ma_table_8994[] = { - 300, - 400, - 450, - 475, - 500, - 550, - 600, - 650, - 700, - 900, - 950, - 1000, - 1100, - 1200, - 1400, - 1450, - 1500, - 1600, - 1800, - 1850, - 1880, - 1910, - 1930, - 1950, - 1970, - 2000, - 2050, - 2100, - 2300, - 2400, - 2500, - 3000 -}; - -static const int usb_ilim_ma_table_8996[] = { - 300, - 400, - 500, - 600, - 700, - 800, - 900, - 1000, - 1100, - 1200, - 1300, - 1400, - 1450, - 1500, - 1550, - 1600, - 1700, - 1800, - 1900, - 1950, - 2000, - 2050, - 2100, - 2200, - 2300, - 2400, - 2500, - 2600, - 2700, - 2800, - 2900, - 3000 -}; - -static int dc_ilim_ma_table_8994[] = { - 300, - 400, - 450, - 475, - 500, - 550, - 600, - 650, - 700, - 900, - 950, - 1000, - 1100, - 1200, - 1400, - 1450, - 1500, - 1600, - 1800, - 1850, - 1880, - 1910, - 1930, - 1950, - 1970, - 2000, -}; - -static int dc_ilim_ma_table_8996[] = { - 300, - 400, - 500, - 600, - 700, - 800, - 900, - 1000, - 1100, - 1200, - 1300, - 1400, - 1450, - 1500, - 1550, - 1600, - 1700, - 1800, - 1900, - 1950, - 2000, - 2050, - 2100, - 2200, - 2300, - 2400, -}; - -static const int fcc_comp_table_8994[] = { - 250, - 700, - 900, - 1200, -}; - -static const int fcc_comp_table_8996[] = { - 250, - 1100, - 1200, - 1500, -}; - -static const int aicl_rerun_period[] = { - 45, - 90, - 180, - 360, -}; - -static const int aicl_rerun_period_schg_lite[] = { - 3, /* 2.8s */ - 6, /* 5.6s */ - 11, /* 11.3s */ - 23, /* 22.5s */ - 45, - 90, - 180, - 360, -}; - -static void use_pmi8994_tables(struct smbchg_chip *chip) -{ - chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8994; - 
chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8994); - chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8994; - chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8994); - chip->tables.iterm_ma_table = iterm_ma_table_8994; - chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8994); - chip->tables.fcc_comp_table = fcc_comp_table_8994; - chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8994); - chip->tables.rchg_thr_mv = 200; - chip->tables.aicl_rerun_period_table = aicl_rerun_period; - chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period); -} - -static void use_pmi8996_tables(struct smbchg_chip *chip) -{ - chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8996; - chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8996); - chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8996; - chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8996); - chip->tables.iterm_ma_table = iterm_ma_table_8996; - chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8996); - chip->tables.fcc_comp_table = fcc_comp_table_8996; - chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8996); - chip->tables.rchg_thr_mv = 150; - chip->tables.aicl_rerun_period_table = aicl_rerun_period; - chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period); -} - -#define CMD_CHG_REG 0x42 -#define EN_BAT_CHG_BIT BIT(1) -static int smbchg_charging_en(struct smbchg_chip *chip, bool en) -{ - /* The en bit is configured active low */ - return smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG, - EN_BAT_CHG_BIT, en ? 
0 : EN_BAT_CHG_BIT); -} - -#define CMD_IL 0x40 -#define USBIN_SUSPEND_BIT BIT(4) -#define CURRENT_100_MA 100 -#define CURRENT_150_MA 150 -#define CURRENT_500_MA 500 -#define CURRENT_900_MA 900 -#define CURRENT_1500_MA 1500 -#define SUSPEND_CURRENT_MA 2 -#define ICL_OVERRIDE_BIT BIT(2) -static int smbchg_usb_suspend(struct smbchg_chip *chip, bool suspend) -{ - int rc; - - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - USBIN_SUSPEND_BIT, suspend ? USBIN_SUSPEND_BIT : 0); - if (rc < 0) - dev_err(chip->dev, "Couldn't set usb suspend rc = %d\n", rc); - return rc; -} - -#define DCIN_SUSPEND_BIT BIT(3) -static int smbchg_dc_suspend(struct smbchg_chip *chip, bool suspend) -{ - int rc = 0; - - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - DCIN_SUSPEND_BIT, suspend ? DCIN_SUSPEND_BIT : 0); - if (rc < 0) - dev_err(chip->dev, "Couldn't set dc suspend rc = %d\n", rc); - return rc; -} - -#define IL_CFG 0xF2 -#define DCIN_INPUT_MASK SMB_MASK(4, 0) -static int smbchg_set_dc_current_max(struct smbchg_chip *chip, int current_ma) -{ - int i; - u8 dc_cur_val; - - i = find_smaller_in_array(chip->tables.dc_ilim_ma_table, - current_ma, chip->tables.dc_ilim_ma_len); - - if (i < 0) { - dev_err(chip->dev, "Cannot find %dma current_table\n", - current_ma); - return -EINVAL; - } - - chip->dc_max_current_ma = chip->tables.dc_ilim_ma_table[i]; - dc_cur_val = i & DCIN_INPUT_MASK; - - pr_smb(PR_STATUS, "dc current set to %d mA\n", - chip->dc_max_current_ma); - return smbchg_sec_masked_write(chip, chip->dc_chgpth_base + IL_CFG, - DCIN_INPUT_MASK, dc_cur_val); -} - -#define AICL_WL_SEL_CFG 0xF5 -#define AICL_WL_SEL_MASK SMB_MASK(1, 0) -#define AICL_WL_SEL_SCHG_LITE_MASK SMB_MASK(2, 0) -static int smbchg_set_aicl_rerun_period_s(struct smbchg_chip *chip, - int period_s) -{ - int i; - u8 reg, mask; - - i = find_smaller_in_array(chip->tables.aicl_rerun_period_table, - period_s, chip->tables.aicl_rerun_period_len); - - if (i < 0) { - dev_err(chip->dev, "Cannot find 
%ds in aicl rerun period\n", - period_s); - return -EINVAL; - } - - if (chip->schg_version == QPNP_SCHG_LITE) - mask = AICL_WL_SEL_SCHG_LITE_MASK; - else - mask = AICL_WL_SEL_MASK; - - reg = i & mask; - - pr_smb(PR_STATUS, "aicl rerun period set to %ds\n", - chip->tables.aicl_rerun_period_table[i]); - return smbchg_sec_masked_write(chip, - chip->dc_chgpth_base + AICL_WL_SEL_CFG, - mask, reg); -} - -static struct power_supply *get_parallel_psy(struct smbchg_chip *chip) -{ - if (!chip->parallel.avail) - return NULL; - if (chip->parallel.psy) - return chip->parallel.psy; - chip->parallel.psy = power_supply_get_by_name("usb-parallel"); - if (!chip->parallel.psy) - pr_smb(PR_STATUS, "parallel charger not found\n"); - return chip->parallel.psy; -} - -static void smbchg_usb_update_online_work(struct work_struct *work) -{ - struct smbchg_chip *chip = container_of(work, - struct smbchg_chip, - usb_set_online_work); - bool user_enabled = !get_client_vote(chip->usb_suspend_votable, - USER_EN_VOTER); - int online; - - online = user_enabled && chip->usb_present && !chip->very_weak_charger; - - mutex_lock(&chip->usb_set_online_lock); - if (chip->usb_online != online) { - pr_smb(PR_MISC, "setting usb psy online = %d\n", online); - chip->usb_online = online; - power_supply_changed(chip->usb_psy); - } - mutex_unlock(&chip->usb_set_online_lock); -} - -#define CHGPTH_CFG 0xF4 -#define CFG_USB_2_3_SEL_BIT BIT(7) -#define CFG_USB_2 0 -#define CFG_USB_3 BIT(7) -#define USBIN_INPUT_MASK SMB_MASK(4, 0) -#define USBIN_MODE_CHG_BIT BIT(0) -#define USBIN_LIMITED_MODE 0 -#define USBIN_HC_MODE BIT(0) -#define USB51_MODE_BIT BIT(1) -#define USB51_100MA 0 -#define USB51_500MA BIT(1) -static int smbchg_set_high_usb_chg_current(struct smbchg_chip *chip, - int current_ma) -{ - int i, rc; - u8 usb_cur_val; - - if (current_ma == CURRENT_100_MA) { - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - CFG_USB_2_3_SEL_BIT, CFG_USB_2); - if (rc < 0) { - pr_err("Couldn't set 
CFG_USB_2 rc=%d\n", rc); - return rc; - } - - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT | USB51_MODE_BIT | ICL_OVERRIDE_BIT, - USBIN_LIMITED_MODE | USB51_100MA | ICL_OVERRIDE_BIT); - if (rc < 0) { - pr_err("Couldn't set ICL_OVERRIDE rc=%d\n", rc); - return rc; - } - - pr_smb(PR_STATUS, - "Forcing 100mA current limit\n"); - chip->usb_max_current_ma = CURRENT_100_MA; - return rc; - } - - i = find_smaller_in_array(chip->tables.usb_ilim_ma_table, - current_ma, chip->tables.usb_ilim_ma_len); - if (i < 0) { - dev_err(chip->dev, - "Cannot find %dma current_table using %d\n", - current_ma, CURRENT_150_MA); - - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - CFG_USB_2_3_SEL_BIT, CFG_USB_3); - rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT | USB51_MODE_BIT, - USBIN_LIMITED_MODE | USB51_100MA); - if (rc < 0) - dev_err(chip->dev, "Couldn't set %dmA rc=%d\n", - CURRENT_150_MA, rc); - else - chip->usb_max_current_ma = 150; - return rc; - } - - usb_cur_val = i & USBIN_INPUT_MASK; - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + IL_CFG, - USBIN_INPUT_MASK, usb_cur_val); - if (rc < 0) { - dev_err(chip->dev, "cannot write to config c rc = %d\n", rc); - return rc; - } - - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT, USBIN_HC_MODE); - if (rc < 0) - dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc); - chip->usb_max_current_ma = chip->tables.usb_ilim_ma_table[i]; - return rc; -} - -/* if APSD results are used - * if SDP is detected it will look at 500mA setting - * if set it will draw 500mA - * if unset it will draw 100mA - * if CDP/DCP it will look at 0x0C setting - * i.e. 
values in 0x41[1, 0] does not matter - */ -static int smbchg_set_usb_current_max(struct smbchg_chip *chip, - int current_ma) -{ - int rc = 0; - - /* - * if the battery is not present, do not allow the usb ICL to lower in - * order to avoid browning out the device during a hotswap. - */ - if (!chip->batt_present && current_ma < chip->usb_max_current_ma) { - pr_info_ratelimited("Ignoring usb current->%d, battery is absent\n", - current_ma); - return 0; - } - pr_smb(PR_STATUS, "USB current_ma = %d\n", current_ma); - - if (current_ma <= SUSPEND_CURRENT_MA) { - /* suspend the usb if current <= 2mA */ - rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, true, 0); - chip->usb_max_current_ma = 0; - goto out; - } else { - rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, false, 0); - } - - switch (chip->usb_supply_type) { - case POWER_SUPPLY_TYPE_USB: - if ((current_ma < CURRENT_150_MA) && - (chip->wa_flags & SMBCHG_USB100_WA)) - current_ma = CURRENT_150_MA; - - if (current_ma < CURRENT_150_MA) { - /* force 100mA */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - CFG_USB_2_3_SEL_BIT, CFG_USB_2); - if (rc < 0) { - pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc); - goto out; - } - rc = smbchg_masked_write(chip, - chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT | USB51_MODE_BIT, - USBIN_LIMITED_MODE | USB51_100MA); - if (rc < 0) { - pr_err("Couldn't set CMD_IL rc = %d\n", rc); - goto out; - } - chip->usb_max_current_ma = 100; - } - /* specific current values */ - if (current_ma == CURRENT_150_MA) { - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - CFG_USB_2_3_SEL_BIT, CFG_USB_3); - if (rc < 0) { - pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc); - goto out; - } - rc = smbchg_masked_write(chip, - chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT | USB51_MODE_BIT, - USBIN_LIMITED_MODE | USB51_100MA); - if (rc < 0) { - pr_err("Couldn't set CMD_IL rc = %d\n", rc); - goto out; - } - chip->usb_max_current_ma = 150; - } 
- if (current_ma == CURRENT_500_MA) { - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - CFG_USB_2_3_SEL_BIT, CFG_USB_2); - if (rc < 0) { - pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc); - goto out; - } - rc = smbchg_masked_write(chip, - chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT | USB51_MODE_BIT, - USBIN_LIMITED_MODE | USB51_500MA); - if (rc < 0) { - pr_err("Couldn't set CMD_IL rc = %d\n", rc); - goto out; - } - chip->usb_max_current_ma = 500; - } - if (current_ma == CURRENT_900_MA) { - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - CFG_USB_2_3_SEL_BIT, CFG_USB_3); - if (rc < 0) { - pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc); - goto out; - } - rc = smbchg_masked_write(chip, - chip->usb_chgpth_base + CMD_IL, - USBIN_MODE_CHG_BIT | USB51_MODE_BIT, - USBIN_LIMITED_MODE | USB51_500MA); - if (rc < 0) { - pr_err("Couldn't set CMD_IL rc = %d\n", rc); - goto out; - } - chip->usb_max_current_ma = 900; - } - break; - case POWER_SUPPLY_TYPE_USB_CDP: - if (current_ma < CURRENT_1500_MA) { - /* use override for CDP */ - rc = smbchg_masked_write(chip, - chip->usb_chgpth_base + CMD_IL, - ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT); - if (rc < 0) - pr_err("Couldn't set override rc = %d\n", rc); - } - /* fall through */ - default: - rc = smbchg_set_high_usb_chg_current(chip, current_ma); - if (rc < 0) - pr_err("Couldn't set %dmA rc = %d\n", current_ma, rc); - break; - } - -out: - pr_smb(PR_STATUS, "usb type = %d current set to %d mA\n", - chip->usb_supply_type, chip->usb_max_current_ma); - return rc; -} - -#define USBIN_HVDCP_STS 0x0C -#define USBIN_HVDCP_SEL_BIT BIT(4) -#define USBIN_HVDCP_SEL_9V_BIT BIT(1) -#define SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT BIT(2) -#define SCHG_LITE_USBIN_HVDCP_SEL_BIT BIT(0) -static int smbchg_get_min_parallel_current_ma(struct smbchg_chip *chip) -{ - int rc; - u8 reg, hvdcp_sel, hvdcp_sel_9v; - - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + USBIN_HVDCP_STS, 1); - if (rc < 0) { - 
dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc); - return 0; - } - if (chip->schg_version == QPNP_SCHG_LITE) { - hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT; - hvdcp_sel_9v = SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT; - } else { - hvdcp_sel = USBIN_HVDCP_SEL_BIT; - hvdcp_sel_9v = USBIN_HVDCP_SEL_9V_BIT; - } - - if ((reg & hvdcp_sel) && (reg & hvdcp_sel_9v)) - return chip->parallel.min_9v_current_thr_ma; - return chip->parallel.min_current_thr_ma; -} - -static bool is_hvdcp_present(struct smbchg_chip *chip) -{ - int rc; - u8 reg, hvdcp_sel; - - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + USBIN_HVDCP_STS, 1); - if (rc < 0) { - pr_err("Couldn't read hvdcp status rc = %d\n", rc); - return false; - } - - pr_smb(PR_STATUS, "HVDCP_STS = 0x%02x\n", reg); - /* - * If a valid HVDCP is detected, notify it to the usb_psy only - * if USB is still present. - */ - if (chip->schg_version == QPNP_SCHG_LITE) - hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT; - else - hvdcp_sel = USBIN_HVDCP_SEL_BIT; - - if ((reg & hvdcp_sel) && is_usb_present(chip)) - return true; - - return false; -} - -#define FCC_CFG 0xF2 -#define FCC_500MA_VAL 0x4 -#define FCC_MASK SMB_MASK(4, 0) -static int smbchg_set_fastchg_current_raw(struct smbchg_chip *chip, - int current_ma) -{ - int i, rc; - u8 cur_val; - - /* the fcc enumerations are the same as the usb currents */ - i = find_smaller_in_array(chip->tables.usb_ilim_ma_table, - current_ma, chip->tables.usb_ilim_ma_len); - if (i < 0) { - dev_err(chip->dev, - "Cannot find %dma current_table using %d\n", - current_ma, CURRENT_500_MA); - - rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG, - FCC_MASK, - FCC_500MA_VAL); - if (rc < 0) - dev_err(chip->dev, "Couldn't set %dmA rc=%d\n", - CURRENT_500_MA, rc); - else - chip->fastchg_current_ma = 500; - return rc; - } - - if (chip->tables.usb_ilim_ma_table[i] == chip->fastchg_current_ma) { - pr_smb(PR_STATUS, "skipping fastchg current request: %d\n", - chip->fastchg_current_ma); - return 0; - } - - 
cur_val = i & FCC_MASK; - rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG, - FCC_MASK, cur_val); - if (rc < 0) { - dev_err(chip->dev, "cannot write to fcc cfg rc = %d\n", rc); - return rc; - } - pr_smb(PR_STATUS, "fastcharge current requested %d, set to %d\n", - current_ma, chip->tables.usb_ilim_ma_table[cur_val]); - - chip->fastchg_current_ma = chip->tables.usb_ilim_ma_table[cur_val]; - return rc; -} - -#define ICL_STS_1_REG 0x7 -#define ICL_STS_2_REG 0x9 -#define ICL_STS_MASK 0x1F -#define AICL_SUSP_BIT BIT(6) -#define AICL_STS_BIT BIT(5) -#define USBIN_SUSPEND_STS_BIT BIT(3) -#define USBIN_ACTIVE_PWR_SRC_BIT BIT(1) -#define DCIN_ACTIVE_PWR_SRC_BIT BIT(0) -#define PARALLEL_REENABLE_TIMER_MS 1000 -#define PARALLEL_CHG_THRESHOLD_CURRENT 1800 -static bool smbchg_is_usbin_active_pwr_src(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + ICL_STS_2_REG, 1); - if (rc < 0) { - dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", rc); - return false; - } - - return !(reg & USBIN_SUSPEND_STS_BIT) - && (reg & USBIN_ACTIVE_PWR_SRC_BIT); -} - -static int smbchg_parallel_usb_charging_en(struct smbchg_chip *chip, bool en) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; - - if (!parallel_psy || !chip->parallel_charger_detected) - return 0; - - pval.intval = en; - return power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval); -} - -#define ESR_PULSE_CURRENT_DELTA_MA 200 -static int smbchg_sw_esr_pulse_en(struct smbchg_chip *chip, bool en) -{ - int rc, fg_current_now, icl_ma; - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW, - &fg_current_now); - if (rc) { - pr_smb(PR_STATUS, "bms psy does not support OCV\n"); - return 0; - } - - icl_ma = max(chip->iterm_ma + ESR_PULSE_CURRENT_DELTA_MA, - fg_current_now - ESR_PULSE_CURRENT_DELTA_MA); - rc = vote(chip->fcc_votable, ESR_PULSE_FCC_VOTER, en, icl_ma); - if (rc 
< 0) { - pr_err("Couldn't Vote FCC en = %d rc = %d\n", en, rc); - return rc; - } - rc = smbchg_parallel_usb_charging_en(chip, !en); - return rc; -} - -#define USB_AICL_CFG 0xF3 -#define AICL_EN_BIT BIT(2) -static void smbchg_rerun_aicl(struct smbchg_chip *chip) -{ - pr_smb(PR_STATUS, "Rerunning AICL...\n"); - smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, 0); - /* Add a delay so that AICL successfully clears */ - msleep(50); - smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, AICL_EN_BIT); -} - -static void taper_irq_en(struct smbchg_chip *chip, bool en) -{ - mutex_lock(&chip->taper_irq_lock); - if (en != chip->taper_irq_enabled) { - if (en) { - enable_irq(chip->taper_irq); - enable_irq_wake(chip->taper_irq); - } else { - disable_irq_wake(chip->taper_irq); - disable_irq_nosync(chip->taper_irq); - } - chip->taper_irq_enabled = en; - } - mutex_unlock(&chip->taper_irq_lock); -} - -static int smbchg_get_aicl_level_ma(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + ICL_STS_1_REG, 1); - if (rc < 0) { - dev_err(chip->dev, "Could not read usb icl sts 1: %d\n", rc); - return 0; - } - if (reg & AICL_SUSP_BIT) { - pr_warn("AICL suspended: %02x\n", reg); - return 0; - } - reg &= ICL_STS_MASK; - if (reg >= chip->tables.usb_ilim_ma_len) { - pr_warn("invalid AICL value: %02x\n", reg); - return 0; - } - return chip->tables.usb_ilim_ma_table[reg]; -} - -static void smbchg_parallel_usb_disable(struct smbchg_chip *chip) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; - int fcc_ma, usb_icl_ma; - - if (!parallel_psy || !chip->parallel_charger_detected) - return; - pr_smb(PR_STATUS, "disabling parallel charger\n"); - chip->parallel.last_disabled = ktime_get_boottime(); - taper_irq_en(chip, false); - chip->parallel.initial_aicl_ma = 0; - chip->parallel.current_max_ma = 0; - pval.intval = 
SUSPEND_CURRENT_MA * 1000; - power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX, - &pval); - - pval.intval = false; - power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT, - &pval); - - fcc_ma = get_effective_result_locked(chip->fcc_votable); - usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable); - if (fcc_ma < 0) - pr_err("no voters for fcc, skip it\n"); - else - smbchg_set_fastchg_current_raw(chip, fcc_ma); - - if (usb_icl_ma < 0) - pr_err("no voters for usb_icl, skip it\n"); - else - smbchg_set_usb_current_max(chip, usb_icl_ma); - - smbchg_rerun_aicl(chip); -} - -#define PARALLEL_TAPER_MAX_TRIES 3 -#define PARALLEL_FCC_PERCENT_REDUCTION 75 -#define MINIMUM_PARALLEL_FCC_MA 500 -#define CHG_ERROR_BIT BIT(0) -#define BAT_TAPER_MODE_BIT BIT(6) -static void smbchg_parallel_usb_taper(struct smbchg_chip *chip) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; - int parallel_fcc_ma, tries = 0; - u8 reg = 0; - - if (!parallel_psy || !chip->parallel_charger_detected) - return; - - smbchg_stay_awake(chip, PM_PARALLEL_TAPER); -try_again: - mutex_lock(&chip->parallel.lock); - if (chip->parallel.current_max_ma == 0) { - pr_smb(PR_STATUS, "Not parallel charging, skipping\n"); - goto done; - } - power_supply_get_property(parallel_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - tries += 1; - parallel_fcc_ma = pval.intval / 1000; - pr_smb(PR_STATUS, "try #%d parallel charger fcc = %d\n", - tries, parallel_fcc_ma); - if (parallel_fcc_ma < MINIMUM_PARALLEL_FCC_MA - || tries > PARALLEL_TAPER_MAX_TRIES) { - smbchg_parallel_usb_disable(chip); - goto done; - } - pval.intval = ((parallel_fcc_ma - * PARALLEL_FCC_PERCENT_REDUCTION) / 100); - pr_smb(PR_STATUS, "reducing FCC of parallel charger to %d\n", - pval.intval); - /* Change it to uA */ - pval.intval *= 1000; - power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - /* - * 
sleep here for 100 ms in order to make sure the charger has a chance - * to go back into constant current charging - */ - mutex_unlock(&chip->parallel.lock); - msleep(100); - - mutex_lock(&chip->parallel.lock); - if (chip->parallel.current_max_ma == 0) { - pr_smb(PR_STATUS, "Not parallel charging, skipping\n"); - goto done; - } - smbchg_read(chip, ®, chip->chgr_base + RT_STS, 1); - if (reg & BAT_TAPER_MODE_BIT) { - mutex_unlock(&chip->parallel.lock); - goto try_again; - } - taper_irq_en(chip, true); -done: - mutex_unlock(&chip->parallel.lock); - smbchg_relax(chip, PM_PARALLEL_TAPER); -} - -static void smbchg_parallel_usb_enable(struct smbchg_chip *chip, - int total_current_ma) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; - int new_parallel_cl_ma, set_parallel_cl_ma, new_pmi_cl_ma, rc; - int current_table_index, target_icl_ma; - int fcc_ma, main_fastchg_current_ma; - int target_parallel_fcc_ma, supplied_parallel_fcc_ma; - int parallel_chg_fcc_percent; - - if (!parallel_psy || !chip->parallel_charger_detected) - return; - - pr_smb(PR_STATUS, "Attempting to enable parallel charger\n"); - pval.intval = chip->vfloat_mv + 50; - rc = power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set Vflt on parallel psy rc: %d\n", rc); - return; - } - /* Set USB ICL */ - target_icl_ma = get_effective_result_locked(chip->usb_icl_votable); - if (target_icl_ma < 0) { - pr_err("no voters for usb_icl, skip it\n"); - return; - } - new_parallel_cl_ma = total_current_ma - * (100 - smbchg_main_chg_icl_percent) / 100; - taper_irq_en(chip, true); - - pval.intval = true; - power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT, - &pval); - - pval.intval = new_parallel_cl_ma * 1000; - power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX, - &pval); - - /* read back the real amount of current we are getting */ - 
power_supply_get_property(parallel_psy, - POWER_SUPPLY_PROP_CURRENT_MAX, &pval); - set_parallel_cl_ma = pval.intval / 1000; - chip->parallel.current_max_ma = new_parallel_cl_ma; - pr_smb(PR_MISC, "Requested ICL = %d from parallel, got %d\n", - new_parallel_cl_ma, set_parallel_cl_ma); - new_pmi_cl_ma = max(0, target_icl_ma - set_parallel_cl_ma); - pr_smb(PR_STATUS, "New Total USB current = %d[%d, %d]\n", - total_current_ma, new_pmi_cl_ma, - set_parallel_cl_ma); - smbchg_set_usb_current_max(chip, new_pmi_cl_ma); - - /* begin splitting the fast charge current */ - fcc_ma = get_effective_result_locked(chip->fcc_votable); - if (fcc_ma < 0) { - pr_err("no voters for fcc, skip it\n"); - return; - } - parallel_chg_fcc_percent = 100 - smbchg_main_chg_fcc_percent; - target_parallel_fcc_ma = (fcc_ma * parallel_chg_fcc_percent) / 100; - pval.intval = target_parallel_fcc_ma * 1000; - power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - /* check how much actual current is supplied by the parallel charger */ - power_supply_get_property(parallel_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - supplied_parallel_fcc_ma = pval.intval / 1000; - pr_smb(PR_MISC, "Requested FCC = %d from parallel, got %d\n", - target_parallel_fcc_ma, supplied_parallel_fcc_ma); - - /* then for the main charger, use the left over FCC */ - current_table_index = find_smaller_in_array( - chip->tables.usb_ilim_ma_table, - fcc_ma - supplied_parallel_fcc_ma, - chip->tables.usb_ilim_ma_len); - main_fastchg_current_ma = - chip->tables.usb_ilim_ma_table[current_table_index]; - smbchg_set_fastchg_current_raw(chip, main_fastchg_current_ma); - pr_smb(PR_STATUS, "FCC = %d[%d, %d]\n", fcc_ma, main_fastchg_current_ma, - supplied_parallel_fcc_ma); - - chip->parallel.enabled_once = true; - - return; -} - -static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip, - int *ret_total_current_ma) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - 
union power_supply_propval pval = {0, }; - int min_current_thr_ma, rc, type; - int total_current_ma, current_limit_ma, parallel_cl_ma; - ktime_t kt_since_last_disable; - u8 reg; - int fcc_ma = get_effective_result_locked(chip->fcc_votable); - const char *fcc_voter - = get_effective_client_locked(chip->fcc_votable); - int usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable); - - if (!parallel_psy || !smbchg_parallel_en - || !chip->parallel_charger_detected) { - pr_smb(PR_STATUS, "Parallel charging not enabled\n"); - return false; - } - - if (fcc_ma < 0) { - pr_err("no voters for fcc! Can't enable parallel\n"); - return false; - } - if (usb_icl_ma < 0) { - pr_err("no voters for usb_icl, Can't enable parallel\n"); - return false; - } - - kt_since_last_disable = ktime_sub(ktime_get_boottime(), - chip->parallel.last_disabled); - if (chip->parallel.current_max_ma == 0 - && chip->parallel.enabled_once - && ktime_to_ms(kt_since_last_disable) - < PARALLEL_REENABLE_TIMER_MS) { - pr_smb(PR_STATUS, "Only been %lld since disable, skipping\n", - ktime_to_ms(kt_since_last_disable)); - return false; - } - - /* - * If the battery is not present, try not to change parallel charging - * from OFF to ON or from ON to OFF, as it could cause the device to - * brown out in the instant that the USB settings are changed. - * - * Only allow parallel charging check to report false (thereby turnin - * off parallel charging) if the battery is still there, or if parallel - * charging is disabled in the first place. 
- */ - if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST - && (get_prop_batt_present(chip) - || chip->parallel.current_max_ma == 0)) { - pr_smb(PR_STATUS, "Not in fast charge, skipping\n"); - return false; - } - - if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) { - pr_smb(PR_STATUS, "JEITA active, skipping\n"); - return false; - } - - rc = smbchg_read(chip, ®, chip->misc_base + IDEV_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc); - return false; - } - - type = get_type(reg); - if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB_CDP) { - pr_smb(PR_STATUS, "CDP adapter, skipping\n"); - return false; - } - - if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB) { - pr_smb(PR_STATUS, "SDP adapter, skipping\n"); - return false; - } - - /* - * If USBIN is suspended or not the active power source, do not enable - * parallel charging. The device may be charging off of DCIN. - */ - if (!smbchg_is_usbin_active_pwr_src(chip)) { - pr_smb(PR_STATUS, "USB not active power source: %02x\n", reg); - return false; - } - - min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip); - if (min_current_thr_ma <= 0) { - pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n", - min_current_thr_ma); - return false; - } - - if (usb_icl_ma < min_current_thr_ma) { - pr_smb(PR_STATUS, "Weak USB chg skip enable: %d < %d\n", - usb_icl_ma, min_current_thr_ma); - return false; - } - - if (!fcc_voter) - return false; - /* - * Suspend the parallel charger if the charging current is < 1800 mA - * and is not because of an ESR pulse. 
- */ - if ((strcmp(fcc_voter, ESR_PULSE_FCC_VOTER) == 0) - && fcc_ma < PARALLEL_CHG_THRESHOLD_CURRENT) { - pr_smb(PR_STATUS, "FCC %d lower than %d\n", - fcc_ma, - PARALLEL_CHG_THRESHOLD_CURRENT); - return false; - } - - current_limit_ma = smbchg_get_aicl_level_ma(chip); - if (current_limit_ma <= 0) - return false; - - if (chip->parallel.initial_aicl_ma == 0) { - if (current_limit_ma < min_current_thr_ma) { - pr_smb(PR_STATUS, "Initial AICL very low: %d < %d\n", - current_limit_ma, min_current_thr_ma); - return false; - } - chip->parallel.initial_aicl_ma = current_limit_ma; - } - - power_supply_get_property(parallel_psy, - POWER_SUPPLY_PROP_CURRENT_MAX, &pval); - parallel_cl_ma = pval.intval / 1000; - /* - * Read back the real amount of current we are getting - * Treat 2mA as 0 because that is the suspend current setting - */ - if (parallel_cl_ma <= SUSPEND_CURRENT_MA) - parallel_cl_ma = 0; - - /* - * Set the parallel charge path's input current limit (ICL) - * to the total current / 2 - */ - total_current_ma = min(current_limit_ma + parallel_cl_ma, usb_icl_ma); - - if (total_current_ma < chip->parallel.initial_aicl_ma - - chip->parallel.allowed_lowering_ma) { - pr_smb(PR_STATUS, - "Total current reduced a lot: %d (%d + %d) < %d - %d\n", - total_current_ma, - current_limit_ma, parallel_cl_ma, - chip->parallel.initial_aicl_ma, - chip->parallel.allowed_lowering_ma); - return false; - } - - *ret_total_current_ma = total_current_ma; - return true; -} - -#define PARALLEL_CHARGER_EN_DELAY_MS 500 -static void smbchg_parallel_usb_en_work(struct work_struct *work) -{ - struct smbchg_chip *chip = container_of(work, - struct smbchg_chip, - parallel_en_work.work); - int previous_aicl_ma, total_current_ma, aicl_ma; - bool in_progress; - - /* do a check to see if the aicl is stable */ - previous_aicl_ma = smbchg_get_aicl_level_ma(chip); - msleep(PARALLEL_CHARGER_EN_DELAY_MS); - aicl_ma = smbchg_get_aicl_level_ma(chip); - if (previous_aicl_ma == aicl_ma) { - pr_smb(PR_STATUS, 
"AICL at %d\n", aicl_ma); - } else { - pr_smb(PR_STATUS, - "AICL changed [%d -> %d], recheck %d ms\n", - previous_aicl_ma, aicl_ma, - PARALLEL_CHARGER_EN_DELAY_MS); - goto recheck; - } - - mutex_lock(&chip->parallel.lock); - in_progress = (chip->parallel.current_max_ma != 0); - if (smbchg_is_parallel_usb_ok(chip, &total_current_ma)) { - smbchg_parallel_usb_enable(chip, total_current_ma); - } else { - if (in_progress) { - pr_smb(PR_STATUS, "parallel charging unavailable\n"); - smbchg_parallel_usb_disable(chip); - } - } - mutex_unlock(&chip->parallel.lock); - smbchg_relax(chip, PM_PARALLEL_CHECK); - return; - -recheck: - schedule_delayed_work(&chip->parallel_en_work, 0); -} - -static void smbchg_parallel_usb_check_ok(struct smbchg_chip *chip) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - - if (!parallel_psy || !chip->parallel_charger_detected) - return; - - smbchg_stay_awake(chip, PM_PARALLEL_CHECK); - schedule_delayed_work(&chip->parallel_en_work, 0); -} - -static int charging_suspend_vote_cb(struct votable *votable, void *data, - int suspend, - const char *client) -{ - int rc; - struct smbchg_chip *chip = data; - - if (suspend < 0) { - pr_err("No voters\n"); - suspend = false; - } - - rc = smbchg_charging_en(chip, !suspend); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't configure batt chg: 0x%x rc = %d\n", - !suspend, rc); - } - - return rc; -} - -static int usb_suspend_vote_cb(struct votable *votable, - void *data, - int suspend, - const char *client) -{ - int rc; - struct smbchg_chip *chip = data; - - if (suspend < 0) { - pr_err("No voters\n"); - suspend = false; - } - - rc = smbchg_usb_suspend(chip, suspend); - if (rc < 0) - return rc; - - if ((strcmp(client, THERMAL_EN_VOTER) == 0) - || (strcmp(client, POWER_SUPPLY_EN_VOTER) == 0) - || (strcmp(client, USER_EN_VOTER) == 0) - || (strcmp(client, FAKE_BATTERY_EN_VOTER) == 0)) - smbchg_parallel_usb_check_ok(chip); - - return rc; -} - -static int dc_suspend_vote_cb(struct votable *votable, - 
void *data, - int suspend, - const char *client) -{ - int rc; - struct smbchg_chip *chip = data; - - if (suspend < 0) { - pr_err("No voters\n"); - suspend = false; - } - - rc = smbchg_dc_suspend(chip, suspend); - if (rc < 0) - return rc; - - if (chip->dc_psy_type != -EINVAL && chip->dc_psy) - power_supply_changed(chip->dc_psy); - - return rc; -} - -static int set_fastchg_current_vote_cb(struct votable *votable, - void *data, - int fcc_ma, - const char *client) -{ - struct smbchg_chip *chip = data; - int rc; - - if (fcc_ma < 0) { - pr_err("No voters\n"); - return 0; - } - - if (chip->parallel.current_max_ma == 0) { - rc = smbchg_set_fastchg_current_raw(chip, fcc_ma); - if (rc < 0) { - pr_err("Can't set FCC fcc_ma=%d rc=%d\n", fcc_ma, rc); - return rc; - } - } - /* - * check if parallel charging can be enabled, and if enabled, - * distribute the fcc - */ - smbchg_parallel_usb_check_ok(chip); - return 0; -} - -static int smbchg_set_fastchg_current_user(struct smbchg_chip *chip, - int current_ma) -{ - int rc = 0; - - pr_smb(PR_STATUS, "User setting FCC to %d\n", current_ma); - - rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, current_ma); - if (rc < 0) - pr_err("Couldn't vote en rc %d\n", rc); - return rc; -} - -static struct ilim_entry *smbchg_wipower_find_entry(struct smbchg_chip *chip, - struct ilim_map *map, int uv) -{ - int i; - struct ilim_entry *ret = &(chip->wipower_default.entries[0]); - - for (i = 0; i < map->num; i++) { - if (is_between(map->entries[i].vmin_uv, map->entries[i].vmax_uv, - uv)) - ret = &map->entries[i]; - } - return ret; -} - -#define ZIN_ICL_PT 0xFC -#define ZIN_ICL_LV 0xFD -#define ZIN_ICL_HV 0xFE -#define ZIN_ICL_MASK SMB_MASK(4, 0) -static int smbchg_dcin_ilim_config(struct smbchg_chip *chip, int offset, int ma) -{ - int i, rc; - - i = find_smaller_in_array(chip->tables.dc_ilim_ma_table, - ma, chip->tables.dc_ilim_ma_len); - - if (i < 0) - i = 0; - - rc = smbchg_sec_masked_write(chip, chip->bat_if_base + offset, - ZIN_ICL_MASK, i); 
- if (rc) - dev_err(chip->dev, "Couldn't write bat if offset %d value = %d rc = %d\n", - offset, i, rc); - return rc; -} - -static int smbchg_wipower_ilim_config(struct smbchg_chip *chip, - struct ilim_entry *ilim) -{ - int rc = 0; - - if (chip->current_ilim.icl_pt_ma != ilim->icl_pt_ma) { - rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_PT, ilim->icl_pt_ma); - if (rc) - dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n", - ZIN_ICL_PT, ilim->icl_pt_ma, rc); - else - chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma; - } - - if (chip->current_ilim.icl_lv_ma != ilim->icl_lv_ma) { - rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_LV, ilim->icl_lv_ma); - if (rc) - dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n", - ZIN_ICL_LV, ilim->icl_lv_ma, rc); - else - chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma; - } - - if (chip->current_ilim.icl_hv_ma != ilim->icl_hv_ma) { - rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_HV, ilim->icl_hv_ma); - if (rc) - dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n", - ZIN_ICL_HV, ilim->icl_hv_ma, rc); - else - chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma; - } - return rc; -} - -static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx); -static int smbchg_wipower_dcin_btm_configure(struct smbchg_chip *chip, - struct ilim_entry *ilim) -{ - int rc; - - if (ilim->vmin_uv == chip->current_ilim.vmin_uv - && ilim->vmax_uv == chip->current_ilim.vmax_uv) - return 0; - - chip->param.channel = DCIN; - chip->param.btm_ctx = chip; - if (wipower_dcin_interval < ADC_MEAS1_INTERVAL_0MS) - wipower_dcin_interval = ADC_MEAS1_INTERVAL_0MS; - - if (wipower_dcin_interval > ADC_MEAS1_INTERVAL_16S) - wipower_dcin_interval = ADC_MEAS1_INTERVAL_16S; - - chip->param.timer_interval = wipower_dcin_interval; - chip->param.threshold_notification = &btm_notify_dcin; - chip->param.high_thr = ilim->vmax_uv + wipower_dcin_hyst_uv; - chip->param.low_thr = ilim->vmin_uv - wipower_dcin_hyst_uv; - chip->param.state_request = 
ADC_TM_HIGH_LOW_THR_ENABLE; - rc = qpnp_vadc_channel_monitor(chip->vadc_dev, &chip->param); - if (rc) { - dev_err(chip->dev, "Couldn't configure btm for dcin rc = %d\n", - rc); - } else { - chip->current_ilim.vmin_uv = ilim->vmin_uv; - chip->current_ilim.vmax_uv = ilim->vmax_uv; - pr_smb(PR_STATUS, "btm ilim = (%duV %duV %dmA %dmA %dmA)\n", - ilim->vmin_uv, ilim->vmax_uv, - ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma); - } - return rc; -} - -static int smbchg_wipower_icl_configure(struct smbchg_chip *chip, - int dcin_uv, bool div2) -{ - int rc = 0; - struct ilim_map *map = div2 ? &chip->wipower_div2 : &chip->wipower_pt; - struct ilim_entry *ilim = smbchg_wipower_find_entry(chip, map, dcin_uv); - - rc = smbchg_wipower_ilim_config(chip, ilim); - if (rc) { - dev_err(chip->dev, "failed to config ilim rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n", - rc, dcin_uv, div2, - ilim->vmin_uv, ilim->vmax_uv, - ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma); - return rc; - } - - rc = smbchg_wipower_dcin_btm_configure(chip, ilim); - if (rc) { - dev_err(chip->dev, "failed to config btm rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n", - rc, dcin_uv, div2, - ilim->vmin_uv, ilim->vmax_uv, - ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma); - return rc; - } - chip->wipower_configured = true; - return 0; -} - -static void smbchg_wipower_icl_deconfigure(struct smbchg_chip *chip) -{ - int rc; - struct ilim_entry *ilim = &(chip->wipower_default.entries[0]); - - if (!chip->wipower_configured) - return; - - rc = smbchg_wipower_ilim_config(chip, ilim); - if (rc) - dev_err(chip->dev, "Couldn't config default ilim rc = %d\n", - rc); - - rc = qpnp_vadc_end_channel_monitor(chip->vadc_dev); - if (rc) - dev_err(chip->dev, "Couldn't de configure btm for dcin rc = %d\n", - rc); - - chip->wipower_configured = false; - chip->current_ilim.vmin_uv = 0; - chip->current_ilim.vmax_uv = 0; - chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma; - 
chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma; - chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma; - pr_smb(PR_WIPOWER, "De config btm\n"); -} - -#define FV_STS 0x0C -#define DIV2_ACTIVE BIT(7) -static void __smbchg_wipower_check(struct smbchg_chip *chip) -{ - int chg_type; - bool usb_present, dc_present; - int rc; - int dcin_uv; - bool div2; - struct qpnp_vadc_result adc_result; - u8 reg; - - if (!wipower_dyn_icl_en) { - smbchg_wipower_icl_deconfigure(chip); - return; - } - - chg_type = get_prop_charge_type(chip); - usb_present = is_usb_present(chip); - dc_present = is_dc_present(chip); - if (chg_type != POWER_SUPPLY_CHARGE_TYPE_NONE - && !usb_present - && dc_present - && chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) { - rc = qpnp_vadc_read(chip->vadc_dev, DCIN, &adc_result); - if (rc) { - pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc); - return; - } - dcin_uv = adc_result.physical; - - /* check div_by_2 */ - rc = smbchg_read(chip, ®, chip->chgr_base + FV_STS, 1); - if (rc) { - pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc); - return; - } - div2 = !!(reg & DIV2_ACTIVE); - - pr_smb(PR_WIPOWER, - "config ICL chg_type = %d usb = %d dc = %d dcin_uv(adc_code) = %d (0x%x) div2 = %d\n", - chg_type, usb_present, dc_present, dcin_uv, - adc_result.adc_code, div2); - smbchg_wipower_icl_configure(chip, dcin_uv, div2); - } else { - pr_smb(PR_WIPOWER, - "deconfig ICL chg_type = %d usb = %d dc = %d\n", - chg_type, usb_present, dc_present); - smbchg_wipower_icl_deconfigure(chip); - } -} - -static void smbchg_wipower_check(struct smbchg_chip *chip) -{ - if (!chip->wipower_dyn_icl_avail) - return; - - mutex_lock(&chip->wipower_config); - __smbchg_wipower_check(chip); - mutex_unlock(&chip->wipower_config); -} - -static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx) -{ - struct smbchg_chip *chip = ctx; - - mutex_lock(&chip->wipower_config); - pr_smb(PR_WIPOWER, "%s state\n", - state == ADC_TM_LOW_STATE ? 
"low" : "high"); - chip->current_ilim.vmin_uv = 0; - chip->current_ilim.vmax_uv = 0; - __smbchg_wipower_check(chip); - mutex_unlock(&chip->wipower_config); -} - -static int force_dcin_icl_write(void *data, u64 val) -{ - struct smbchg_chip *chip = data; - - smbchg_wipower_check(chip); - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(force_dcin_icl_ops, NULL, - force_dcin_icl_write, "0x%02llx\n"); - -/* - * set the dc charge path's maximum allowed current draw - * that may be limited by the system's thermal level - */ -static int set_dc_current_limit_vote_cb(struct votable *votable, - void *data, - int icl_ma, - const char *client) -{ - struct smbchg_chip *chip = data; - - if (icl_ma < 0) { - pr_err("No voters\n"); - return 0; - } - - return smbchg_set_dc_current_max(chip, icl_ma); -} - -/* - * set the usb charge path's maximum allowed current draw - * that may be limited by the system's thermal level - */ -static int set_usb_current_limit_vote_cb(struct votable *votable, - void *data, - int icl_ma, - const char *client) -{ - struct smbchg_chip *chip = data; - int rc, aicl_ma; - const char *effective_id; - - if (icl_ma < 0) { - pr_err("No voters\n"); - return 0; - } - effective_id = get_effective_client_locked(chip->usb_icl_votable); - - if (!effective_id) - return 0; - - /* disable parallel charging if HVDCP is voting for 300mA */ - if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0) - smbchg_parallel_usb_disable(chip); - - if (chip->parallel.current_max_ma == 0) { - rc = smbchg_set_usb_current_max(chip, icl_ma); - if (rc) { - pr_err("Failed to set usb current max: %d\n", rc); - return rc; - } - } - - /* skip the aicl rerun if hvdcp icl voter is active */ - if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0) - return 0; - - aicl_ma = smbchg_get_aicl_level_ma(chip); - if (icl_ma > aicl_ma) - smbchg_rerun_aicl(chip); - smbchg_parallel_usb_check_ok(chip); - return 0; -} - -static int smbchg_system_temp_level_set(struct smbchg_chip *chip, - int lvl_sel) -{ - int rc = 0; - int 
prev_therm_lvl; - int thermal_icl_ma; - - if (!chip->thermal_mitigation) { - dev_err(chip->dev, "Thermal mitigation not supported\n"); - return -EINVAL; - } - - if (lvl_sel < 0) { - dev_err(chip->dev, "Unsupported level selected %d\n", lvl_sel); - return -EINVAL; - } - - if (lvl_sel >= chip->thermal_levels) { - dev_err(chip->dev, "Unsupported level selected %d forcing %d\n", - lvl_sel, chip->thermal_levels - 1); - lvl_sel = chip->thermal_levels - 1; - } - - if (lvl_sel == chip->therm_lvl_sel) - return 0; - - mutex_lock(&chip->therm_lvl_lock); - prev_therm_lvl = chip->therm_lvl_sel; - chip->therm_lvl_sel = lvl_sel; - if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) { - /* - * Disable charging if highest value selected by - * setting the DC and USB path in suspend - */ - rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, true, 0); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set dc suspend rc %d\n", rc); - goto out; - } - rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER, true, 0); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set usb suspend rc %d\n", rc); - goto out; - } - goto out; - } - - if (chip->therm_lvl_sel == 0) { - rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't disable USB thermal ICL vote rc=%d\n", - rc); - - rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't disable DC thermal ICL vote rc=%d\n", - rc); - } else { - thermal_icl_ma = - (int)chip->thermal_mitigation[chip->therm_lvl_sel]; - rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, true, - thermal_icl_ma); - if (rc < 0) - pr_err("Couldn't vote for USB thermal ICL rc=%d\n", rc); - - rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, true, - thermal_icl_ma); - if (rc < 0) - pr_err("Couldn't vote for DC thermal ICL rc=%d\n", rc); - } - - if (prev_therm_lvl == chip->thermal_levels - 1) { - /* - * If previously highest value was selected charging must have - * been disabed. 
Enable charging by taking the DC and USB path - * out of suspend. - */ - rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, false, 0); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set dc suspend rc %d\n", rc); - goto out; - } - rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER, - false, 0); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set usb suspend rc %d\n", rc); - goto out; - } - } -out: - mutex_unlock(&chip->therm_lvl_lock); - return rc; -} - -static int smbchg_ibat_ocp_threshold_ua = 4500000; -module_param(smbchg_ibat_ocp_threshold_ua, int, 0644); - -#define UCONV 1000000LL -#define MCONV 1000LL -#define FLASH_V_THRESHOLD 3000000 -#define FLASH_VDIP_MARGIN 100000 -#define VPH_FLASH_VDIP (FLASH_V_THRESHOLD + FLASH_VDIP_MARGIN) -#define BUCK_EFFICIENCY 800LL -static int smbchg_calc_max_flash_current(struct smbchg_chip *chip) -{ - int ocv_uv, esr_uohm, rbatt_uohm, ibat_now, rc; - int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw; - int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv; - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv); - if (rc) { - pr_smb(PR_STATUS, "bms psy does not support OCV\n"); - return 0; - } - - rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_RESISTANCE, - &esr_uohm); - if (rc) { - pr_smb(PR_STATUS, "bms psy does not support resistance\n"); - return 0; - } - - rc = msm_bcl_read(BCL_PARAM_CURRENT, &ibat_now); - if (rc) { - pr_smb(PR_STATUS, "BCL current read failed: %d\n", rc); - return 0; - } - - rbatt_uohm = esr_uohm + chip->rpara_uohm + chip->rslow_uohm; - /* - * Calculate the maximum current that can pulled out of the battery - * before the battery voltage dips below a safe threshold. - */ - ibat_safe_ua = div_s64((ocv_uv - VPH_FLASH_VDIP) * UCONV, - rbatt_uohm); - - if (ibat_safe_ua <= smbchg_ibat_ocp_threshold_ua) { - /* - * If the calculated current is below the OCP threshold, then - * use it as the possible flash current. 
- */ - ibat_flash_ua = ibat_safe_ua - ibat_now; - vph_flash_uv = VPH_FLASH_VDIP; - } else { - /* - * If the calculated current is above the OCP threshold, then - * use the ocp threshold instead. - * - * Any higher current will be tripping the battery OCP. - */ - ibat_flash_ua = smbchg_ibat_ocp_threshold_ua - ibat_now; - vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm - * smbchg_ibat_ocp_threshold_ua, UCONV); - } - /* Calculate the input voltage of the flash module. */ - vin_flash_uv = max((chip->vled_max_uv + 500000LL), - div64_s64((vph_flash_uv * 1200), 1000)); - /* Calculate the available power for the flash module. */ - avail_flash_power_fw = BUCK_EFFICIENCY * vph_flash_uv * ibat_flash_ua; - /* - * Calculate the available amount of current the flash module can draw - * before collapsing the battery. (available power/ flash input voltage) - */ - avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV); - pr_smb(PR_MISC, - "avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d\n", - avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm); - return (int)avail_flash_ua; -} - -#define FCC_CMP_CFG 0xF3 -#define FCC_COMP_MASK SMB_MASK(1, 0) -static int smbchg_fastchg_current_comp_set(struct smbchg_chip *chip, - int comp_current) -{ - int rc; - u8 i; - - for (i = 0; i < chip->tables.fcc_comp_len; i++) - if (comp_current == chip->tables.fcc_comp_table[i]) - break; - - if (i >= chip->tables.fcc_comp_len) - return -EINVAL; - - rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CMP_CFG, - FCC_COMP_MASK, i); - - if (rc) - dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n", - rc); - - return rc; -} - -#define CFG_TCC_REG 0xF9 -#define CHG_ITERM_MASK SMB_MASK(2, 0) -static int smbchg_iterm_set(struct smbchg_chip *chip, int iterm_ma) -{ - int rc; - u8 reg; - - reg = find_closest_in_array( - chip->tables.iterm_ma_table, - chip->tables.iterm_ma_len, - iterm_ma); - - rc = smbchg_sec_masked_write(chip, - chip->chgr_base + CFG_TCC_REG, - CHG_ITERM_MASK, reg); - 
if (rc) { - dev_err(chip->dev, - "Couldn't set iterm rc = %d\n", rc); - return rc; - } - pr_smb(PR_STATUS, "set tcc (%d) to 0x%02x\n", - iterm_ma, reg); - chip->iterm_ma = iterm_ma; - - return 0; -} - -#define FV_CMP_CFG 0xF5 -#define FV_COMP_MASK SMB_MASK(5, 0) -static int smbchg_float_voltage_comp_set(struct smbchg_chip *chip, int code) -{ - int rc; - u8 val; - - val = code & FV_COMP_MASK; - rc = smbchg_sec_masked_write(chip, chip->chgr_base + FV_CMP_CFG, - FV_COMP_MASK, val); - - if (rc) - dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n", - rc); - - return rc; -} - -#define VFLOAT_CFG_REG 0xF4 -#define MIN_FLOAT_MV 3600 -#define MAX_FLOAT_MV 4500 -#define VFLOAT_MASK SMB_MASK(5, 0) - -#define MID_RANGE_FLOAT_MV_MIN 3600 -#define MID_RANGE_FLOAT_MIN_VAL 0x05 -#define MID_RANGE_FLOAT_STEP_MV 20 - -#define HIGH_RANGE_FLOAT_MIN_MV 4340 -#define HIGH_RANGE_FLOAT_MIN_VAL 0x2A -#define HIGH_RANGE_FLOAT_STEP_MV 10 - -#define VHIGH_RANGE_FLOAT_MIN_MV 4360 -#define VHIGH_RANGE_FLOAT_MIN_VAL 0x2C -#define VHIGH_RANGE_FLOAT_STEP_MV 20 -static int smbchg_float_voltage_set(struct smbchg_chip *chip, int vfloat_mv) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval prop; - int rc, delta; - u8 temp; - - if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) { - dev_err(chip->dev, "bad float voltage mv =%d asked to set\n", - vfloat_mv); - return -EINVAL; - } - - if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) { - /* mid range */ - delta = vfloat_mv - MID_RANGE_FLOAT_MV_MIN; - temp = MID_RANGE_FLOAT_MIN_VAL + delta - / MID_RANGE_FLOAT_STEP_MV; - vfloat_mv -= delta % MID_RANGE_FLOAT_STEP_MV; - } else if (vfloat_mv <= VHIGH_RANGE_FLOAT_MIN_MV) { - /* high range */ - delta = vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV; - temp = HIGH_RANGE_FLOAT_MIN_VAL + delta - / HIGH_RANGE_FLOAT_STEP_MV; - vfloat_mv -= delta % HIGH_RANGE_FLOAT_STEP_MV; - } else { - /* very high range */ - delta = vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV; - temp = 
VHIGH_RANGE_FLOAT_MIN_VAL + delta - / VHIGH_RANGE_FLOAT_STEP_MV; - vfloat_mv -= delta % VHIGH_RANGE_FLOAT_STEP_MV; - } - - if (parallel_psy) { - prop.intval = vfloat_mv + 50; - rc = power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop); - if (rc) - dev_err(chip->dev, "Couldn't set float voltage on parallel psy rc: %d\n", - rc); - } - - rc = smbchg_sec_masked_write(chip, chip->chgr_base + VFLOAT_CFG_REG, - VFLOAT_MASK, temp); - - if (rc) - dev_err(chip->dev, "Couldn't set float voltage rc = %d\n", rc); - else - chip->vfloat_mv = vfloat_mv; - - return rc; -} - -static int smbchg_float_voltage_get(struct smbchg_chip *chip) -{ - return chip->vfloat_mv; -} - -#define SFT_CFG 0xFD -#define SFT_EN_MASK SMB_MASK(5, 4) -#define SFT_TO_MASK SMB_MASK(3, 2) -#define PRECHG_SFT_TO_MASK SMB_MASK(1, 0) -#define SFT_TIMER_DISABLE_BIT BIT(5) -#define PRECHG_SFT_TIMER_DISABLE_BIT BIT(4) -#define SAFETY_TIME_MINUTES_SHIFT 2 -static int smbchg_safety_timer_enable(struct smbchg_chip *chip, bool enable) -{ - int rc; - u8 reg; - - if (enable == chip->safety_timer_en) - return 0; - - if (enable) - reg = 0; - else - reg = SFT_TIMER_DISABLE_BIT | PRECHG_SFT_TIMER_DISABLE_BIT; - - rc = smbchg_sec_masked_write(chip, chip->chgr_base + SFT_CFG, - SFT_EN_MASK, reg); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't %s safety timer rc = %d\n", - enable ? 
"enable" : "disable", rc); - return rc; - } - chip->safety_timer_en = enable; - return 0; -} - -enum skip_reason { - REASON_OTG_ENABLED = BIT(0), - REASON_FLASH_ENABLED = BIT(1) -}; - -#define BAT_IF_TRIM7_REG 0xF7 -#define CFG_750KHZ_BIT BIT(1) -#define MISC_CFG_NTC_VOUT_REG 0xF3 -#define CFG_NTC_VOUT_FSW_BIT BIT(0) -static int smbchg_switch_buck_frequency(struct smbchg_chip *chip, - bool flash_active) -{ - int rc; - - if (!(chip->wa_flags & SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA)) - return 0; - - if (chip->flash_active == flash_active) { - pr_smb(PR_STATUS, "Fsw not changed, flash_active: %d\n", - flash_active); - return 0; - } - - /* - * As per the systems team recommendation, before the flash fires, - * buck switching frequency(Fsw) needs to be increased to 1MHz. Once the - * flash is disabled, Fsw needs to be set back to 750KHz. - */ - rc = smbchg_sec_masked_write(chip, chip->misc_base + - MISC_CFG_NTC_VOUT_REG, CFG_NTC_VOUT_FSW_BIT, - flash_active ? CFG_NTC_VOUT_FSW_BIT : 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set switching frequency multiplier rc=%d\n", - rc); - return rc; - } - - rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BAT_IF_TRIM7_REG, - CFG_750KHZ_BIT, flash_active ? 0 : CFG_750KHZ_BIT); - if (rc < 0) { - dev_err(chip->dev, "Cannot set switching freq: %d\n", rc); - return rc; - } - - pr_smb(PR_STATUS, "Fsw @ %sHz\n", flash_active ? "1M" : "750K"); - chip->flash_active = flash_active; - return 0; -} - -#define OTG_TRIM6 0xF6 -#define TR_ENB_SKIP_BIT BIT(2) -#define OTG_EN_BIT BIT(0) -static int smbchg_otg_pulse_skip_disable(struct smbchg_chip *chip, - enum skip_reason reason, bool disable) -{ - int rc; - bool disabled; - - disabled = !!chip->otg_pulse_skip_dis; - pr_smb(PR_STATUS, "%s pulse skip, reason %d\n", - disable ? 
"disabling" : "enabling", reason); - if (disable) - chip->otg_pulse_skip_dis |= reason; - else - chip->otg_pulse_skip_dis &= ~reason; - if (disabled == !!chip->otg_pulse_skip_dis) - return 0; - disabled = !!chip->otg_pulse_skip_dis; - - rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_TRIM6, - TR_ENB_SKIP_BIT, disabled ? TR_ENB_SKIP_BIT : 0); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't %s otg pulse skip rc = %d\n", - disabled ? "disable" : "enable", rc); - return rc; - } - pr_smb(PR_STATUS, "%s pulse skip\n", disabled ? "disabled" : "enabled"); - return 0; -} - -#define LOW_PWR_OPTIONS_REG 0xFF -#define FORCE_TLIM_BIT BIT(4) -static int smbchg_force_tlim_en(struct smbchg_chip *chip, bool enable) -{ - int rc; - - rc = smbchg_sec_masked_write(chip, chip->otg_base + LOW_PWR_OPTIONS_REG, - FORCE_TLIM_BIT, enable ? FORCE_TLIM_BIT : 0); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't %s otg force tlim rc = %d\n", - enable ? "enable" : "disable", rc); - return rc; - } - return rc; -} - -static void smbchg_vfloat_adjust_check(struct smbchg_chip *chip) -{ - if (!chip->use_vfloat_adjustments) - return; - - smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST); - pr_smb(PR_STATUS, "Starting vfloat adjustments\n"); - schedule_delayed_work(&chip->vfloat_adjust_work, 0); -} - -#define FV_STS_REG 0xC -#define AICL_INPUT_STS_BIT BIT(6) -static bool smbchg_is_input_current_limited(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->chgr_base + FV_STS_REG, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read FV_STS rc=%d\n", rc); - return false; - } - - return !!(reg & AICL_INPUT_STS_BIT); -} - -#define SW_ESR_PULSE_MS 1500 -static void smbchg_cc_esr_wa_check(struct smbchg_chip *chip) -{ - int rc, esr_count; - - if (!(chip->wa_flags & SMBCHG_CC_ESR_WA)) - return; - - if (!is_usb_present(chip) && !is_dc_present(chip)) { - pr_smb(PR_STATUS, "No inputs present, skipping\n"); - return; - } - - if (get_prop_charge_type(chip) != 
POWER_SUPPLY_CHARGE_TYPE_FAST) { - pr_smb(PR_STATUS, "Not in fast charge, skipping\n"); - return; - } - - if (!smbchg_is_input_current_limited(chip)) { - pr_smb(PR_STATUS, "Not input current limited, skipping\n"); - return; - } - - set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1); - rc = get_property_from_fg(chip, - POWER_SUPPLY_PROP_ESR_COUNT, &esr_count); - if (rc) { - pr_smb(PR_STATUS, - "could not read ESR counter rc = %d\n", rc); - return; - } - - /* - * The esr_count is counting down the number of fuel gauge cycles - * before a ESR pulse is needed. - * - * After a successful ESR pulse, this count is reset to some - * high number like 28. If this reaches 0, then the fuel gauge - * hardware should force a ESR pulse. - * - * However, if the device is in constant current charge mode while - * being input current limited, the ESR pulse will not affect the - * battery current, so the measurement will fail. - * - * As a failsafe, force a manual ESR pulse if this value is read as - * 0. 
- */ - if (esr_count != 0) { - pr_smb(PR_STATUS, "ESR count is not zero, skipping\n"); - return; - } - - pr_smb(PR_STATUS, "Lowering charge current for ESR pulse\n"); - smbchg_stay_awake(chip, PM_ESR_PULSE); - smbchg_sw_esr_pulse_en(chip, true); - msleep(SW_ESR_PULSE_MS); - pr_smb(PR_STATUS, "Raising charge current for ESR pulse\n"); - smbchg_relax(chip, PM_ESR_PULSE); - smbchg_sw_esr_pulse_en(chip, false); -} - -static void smbchg_soc_changed(struct smbchg_chip *chip) -{ - smbchg_cc_esr_wa_check(chip); -} - -#define DC_AICL_CFG 0xF3 -#define MISC_TRIM_OPT_15_8 0xF5 -#define USB_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3)) -#define USB_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3)) -#define USB_AICL_DEGLITCH_LONG 0 -#define DC_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3)) -#define DC_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3)) -#define DC_AICL_DEGLITCH_LONG 0 -#define AICL_RERUN_MASK (BIT(5) | BIT(4)) -#define AICL_RERUN_ON (BIT(5) | BIT(4)) -#define AICL_RERUN_OFF 0 - -static int smbchg_hw_aicl_rerun_enable_indirect_cb(struct votable *votable, - void *data, - int enable, - const char *client) -{ - int rc = 0; - struct smbchg_chip *chip = data; - - if (enable < 0) { - pr_err("No voters\n"); - enable = 0; - } - /* - * If the indirect voting result of all the clients is to enable hw aicl - * rerun, then remove our vote to disable hw aicl rerun - */ - rc = vote(chip->hw_aicl_rerun_disable_votable, - HW_AICL_RERUN_ENABLE_INDIRECT_VOTER, !enable, 0); - if (rc < 0) { - pr_err("Couldn't vote for hw rerun rc= %d\n", rc); - return rc; - } - - return rc; -} - -static int smbchg_hw_aicl_rerun_disable_cb(struct votable *votable, void *data, - int disable, - const char *client) -{ - int rc = 0; - struct smbchg_chip *chip = data; - - if (disable < 0) { - pr_err("No voters\n"); - disable = 0; - } - - rc = smbchg_sec_masked_write(chip, - chip->misc_base + MISC_TRIM_OPT_15_8, - AICL_RERUN_MASK, disable ? 
AICL_RERUN_OFF : AICL_RERUN_ON); - if (rc < 0) - pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n", rc); - - return rc; -} - -static int smbchg_aicl_deglitch_config_cb(struct votable *votable, void *data, - int shorter, - const char *client) -{ - int rc = 0; - struct smbchg_chip *chip = data; - - if (shorter < 0) { - pr_err("No voters\n"); - shorter = 0; - } - - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + USB_AICL_CFG, - USB_AICL_DEGLITCH_MASK, - shorter ? USB_AICL_DEGLITCH_SHORT : USB_AICL_DEGLITCH_LONG); - if (rc < 0) { - pr_err("Couldn't write to USB_AICL_CFG rc=%d\n", rc); - return rc; - } - rc = smbchg_sec_masked_write(chip, - chip->dc_chgpth_base + DC_AICL_CFG, - DC_AICL_DEGLITCH_MASK, - shorter ? DC_AICL_DEGLITCH_SHORT : DC_AICL_DEGLITCH_LONG); - if (rc < 0) { - pr_err("Couldn't write to DC_AICL_CFG rc=%d\n", rc); - return rc; - } - return rc; -} - -static void smbchg_aicl_deglitch_wa_en(struct smbchg_chip *chip, bool en) -{ - int rc; - - rc = vote(chip->aicl_deglitch_short_votable, - VARB_WORKAROUND_VOTER, en, 0); - if (rc < 0) { - pr_err("Couldn't vote %s deglitch rc=%d\n", - en ? "short" : "long", rc); - return; - } - pr_smb(PR_STATUS, "AICL deglitch set to %s\n", en ? 
"short" : "long"); - - rc = vote(chip->hw_aicl_rerun_enable_indirect_votable, - VARB_WORKAROUND_VOTER, en, 0); - if (rc < 0) { - pr_err("Couldn't vote hw aicl rerun rc= %d\n", rc); - return; - } - chip->aicl_deglitch_short = en; -} - -static void smbchg_aicl_deglitch_wa_check(struct smbchg_chip *chip) -{ - union power_supply_propval prop = {0,}; - int rc; - bool low_volt_chgr = true; - - if (!(chip->wa_flags & SMBCHG_AICL_DEGLITCH_WA)) - return; - - if (!is_usb_present(chip) && !is_dc_present(chip)) { - pr_smb(PR_STATUS, "Charger removed\n"); - smbchg_aicl_deglitch_wa_en(chip, false); - return; - } - - if (!chip->bms_psy) - return; - - if (is_usb_present(chip)) { - if (is_hvdcp_present(chip)) - low_volt_chgr = false; - } else if (is_dc_present(chip)) { - if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) - low_volt_chgr = false; - else - low_volt_chgr = chip->low_volt_dcin; - } - - if (!low_volt_chgr) { - pr_smb(PR_STATUS, "High volt charger! Don't set deglitch\n"); - smbchg_aicl_deglitch_wa_en(chip, false); - return; - } - - /* It is possible that battery voltage went high above threshold - * when the charger is inserted and can go low because of system - * load. We shouldn't be reconfiguring AICL deglitch when this - * happens as it will lead to oscillation again which is being - * fixed here. Do it once when the battery voltage crosses the - * threshold (e.g. 4.2 V) and clear it only when the charger - * is removed. 
- */ - if (!chip->vbat_above_headroom) { - rc = power_supply_get_property(chip->bms_psy, - POWER_SUPPLY_PROP_VOLTAGE_MIN, &prop); - if (rc < 0) { - pr_err("could not read voltage_min, rc=%d\n", rc); - return; - } - chip->vbat_above_headroom = !prop.intval; - } - smbchg_aicl_deglitch_wa_en(chip, chip->vbat_above_headroom); -} - -#define MISC_TEST_REG 0xE2 -#define BB_LOOP_DISABLE_ICL BIT(2) -static int smbchg_icl_loop_disable_check(struct smbchg_chip *chip) -{ - bool icl_disabled = !chip->chg_otg_enabled && chip->flash_triggered; - int rc = 0; - - if ((chip->wa_flags & SMBCHG_FLASH_ICL_DISABLE_WA) - && icl_disabled != chip->icl_disabled) { - rc = smbchg_sec_masked_write(chip, - chip->misc_base + MISC_TEST_REG, - BB_LOOP_DISABLE_ICL, - icl_disabled ? BB_LOOP_DISABLE_ICL : 0); - chip->icl_disabled = icl_disabled; - } - - return rc; -} - -#define UNKNOWN_BATT_TYPE "Unknown Battery" -#define LOADING_BATT_TYPE "Loading Battery Data" -static int smbchg_config_chg_battery_type(struct smbchg_chip *chip) -{ - int rc = 0, max_voltage_uv = 0, fastchg_ma = 0, ret = 0, iterm_ua = 0; - struct device_node *batt_node, *profile_node; - struct device_node *node = chip->pdev->dev.of_node; - union power_supply_propval prop = {0,}; - - rc = power_supply_get_property(chip->bms_psy, - POWER_SUPPLY_PROP_BATTERY_TYPE, &prop); - if (rc) { - pr_smb(PR_STATUS, "Unable to read battery-type rc=%d\n", rc); - return 0; - } - if (!strcmp(prop.strval, UNKNOWN_BATT_TYPE) || - !strcmp(prop.strval, LOADING_BATT_TYPE)) { - pr_smb(PR_MISC, "Battery-type not identified\n"); - return 0; - } - /* quit if there is no change in the battery-type from previous */ - if (chip->battery_type && !strcmp(prop.strval, chip->battery_type)) - return 0; - - chip->battery_type = prop.strval; - batt_node = of_parse_phandle(node, "qcom,battery-data", 0); - if (!batt_node) { - pr_smb(PR_MISC, "No batterydata available\n"); - return 0; - } - - rc = power_supply_get_property(chip->bms_psy, - POWER_SUPPLY_PROP_RESISTANCE_ID, 
&prop); - if (rc < 0) { - pr_smb(PR_STATUS, "Unable to read battery-id rc=%d\n", rc); - return 0; - } - - profile_node = of_batterydata_get_best_profile(batt_node, - prop.intval / 1000, NULL); - if (IS_ERR_OR_NULL(profile_node)) { - rc = PTR_ERR(profile_node); - pr_err("couldn't find profile handle %d\n", rc); - return rc; - } - - /* change vfloat */ - rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv", - &max_voltage_uv); - if (rc) { - pr_warn("couldn't find battery max voltage rc=%d\n", rc); - ret = rc; - } else { - if (chip->vfloat_mv != (max_voltage_uv / 1000)) { - pr_info("Vfloat changed from %dmV to %dmV for battery-type %s\n", - chip->vfloat_mv, (max_voltage_uv / 1000), - chip->battery_type); - rc = smbchg_float_voltage_set(chip, - (max_voltage_uv / 1000)); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set float voltage rc = %d\n", rc); - return rc; - } - } - } - - /* change chg term */ - rc = of_property_read_u32(profile_node, "qcom,chg-term-ua", - &iterm_ua); - if (rc && rc != -EINVAL) { - pr_warn("couldn't read battery term current=%d\n", rc); - ret = rc; - } else if (!rc) { - if (chip->iterm_ma != (iterm_ua / 1000) - && !chip->iterm_disabled) { - pr_info("Term current changed from %dmA to %dmA for battery-type %s\n", - chip->iterm_ma, (iterm_ua / 1000), - chip->battery_type); - rc = smbchg_iterm_set(chip, - (iterm_ua / 1000)); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set iterm rc = %d\n", rc); - return rc; - } - } - chip->iterm_ma = iterm_ua / 1000; - } - - /* - * Only configure from profile if fastchg-ma is not defined in the - * charger device node. 
- */ - if (!of_find_property(chip->pdev->dev.of_node, - "qcom,fastchg-current-ma", NULL)) { - rc = of_property_read_u32(profile_node, - "qcom,fastchg-current-ma", &fastchg_ma); - if (rc) { - ret = rc; - } else { - pr_smb(PR_MISC, - "fastchg-ma changed from to %dma for battery-type %s\n", - fastchg_ma, chip->battery_type); - rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, - fastchg_ma); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't vote for fastchg current rc=%d\n", - rc); - return rc; - } - } - } - - return ret; -} - -#define MAX_INV_BATT_ID 7700 -#define MIN_INV_BATT_ID 7300 -static void check_battery_type(struct smbchg_chip *chip) -{ - union power_supply_propval prop = {0,}; - bool en; - - if (!chip->bms_psy && chip->bms_psy_name) - chip->bms_psy = - power_supply_get_by_name((char *)chip->bms_psy_name); - if (chip->bms_psy) { - power_supply_get_property(chip->bms_psy, - POWER_SUPPLY_PROP_BATTERY_TYPE, &prop); - en = (strcmp(prop.strval, UNKNOWN_BATT_TYPE) != 0 - || chip->charge_unknown_battery) - && (strcmp(prop.strval, LOADING_BATT_TYPE) != 0); - vote(chip->battchg_suspend_votable, - BATTCHG_UNKNOWN_BATTERY_EN_VOTER, !en, 0); - - if (!chip->skip_usb_suspend_for_fake_battery) { - power_supply_get_property(chip->bms_psy, - POWER_SUPPLY_PROP_RESISTANCE_ID, &prop); - /* suspend USB path for invalid battery-id */ - en = (prop.intval <= MAX_INV_BATT_ID && - prop.intval >= MIN_INV_BATT_ID) ? 
1 : 0; - vote(chip->usb_suspend_votable, FAKE_BATTERY_EN_VOTER, - en, 0); - } - } -} - -static void smbchg_external_power_changed(struct power_supply *psy) -{ - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - union power_supply_propval prop = {0,}; - int rc, current_limit = 0, soc; - enum power_supply_type usb_supply_type; - char *usb_type_name = "null"; - - if (chip->bms_psy_name) - chip->bms_psy = - power_supply_get_by_name((char *)chip->bms_psy_name); - - smbchg_aicl_deglitch_wa_check(chip); - if (chip->bms_psy) { - check_battery_type(chip); - soc = get_prop_batt_capacity(chip); - if (chip->previous_soc != soc) { - chip->previous_soc = soc; - smbchg_soc_changed(chip); - } - - rc = smbchg_config_chg_battery_type(chip); - if (rc) - pr_smb(PR_MISC, - "Couldn't update charger configuration rc=%d\n", - rc); - } - - rc = power_supply_get_property(chip->usb_psy, - POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop); - if (rc == 0) - vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER, - !prop.intval, 0); - - current_limit = chip->usb_current_max / 1000; - - /* Override if type-c charger used */ - if (chip->typec_current_ma > 500 && - current_limit < chip->typec_current_ma) - current_limit = chip->typec_current_ma; - - read_usb_type(chip, &usb_type_name, &usb_supply_type); - - if (usb_supply_type != POWER_SUPPLY_TYPE_USB) - goto skip_current_for_non_sdp; - - pr_smb(PR_MISC, "usb type = %s current_limit = %d\n", - usb_type_name, current_limit); - - rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true, - current_limit); - if (rc < 0) - pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc); - -skip_current_for_non_sdp: - smbchg_vfloat_adjust_check(chip); - - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); -} - -static int smbchg_otg_regulator_enable(struct regulator_dev *rdev) -{ - int rc = 0; - struct smbchg_chip *chip = rdev_get_drvdata(rdev); - - chip->otg_retries = 0; - chip->chg_otg_enabled = true; - smbchg_icl_loop_disable_check(chip); - 
smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, true); - - /* If pin control mode then return from here */ - if (chip->otg_pinctrl) - return rc; - - /* sleep to make sure the pulse skip is actually disabled */ - msleep(20); - rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG, - OTG_EN_BIT, OTG_EN_BIT); - if (rc < 0) - dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n", rc); - else - chip->otg_enable_time = ktime_get(); - pr_smb(PR_STATUS, "Enabling OTG Boost\n"); - return rc; -} - -static int smbchg_otg_regulator_disable(struct regulator_dev *rdev) -{ - int rc = 0; - struct smbchg_chip *chip = rdev_get_drvdata(rdev); - - if (!chip->otg_pinctrl) { - rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG, - OTG_EN_BIT, 0); - if (rc < 0) - dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n", - rc); - } - - chip->chg_otg_enabled = false; - smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, false); - smbchg_icl_loop_disable_check(chip); - pr_smb(PR_STATUS, "Disabling OTG Boost\n"); - return rc; -} - -static int smbchg_otg_regulator_is_enable(struct regulator_dev *rdev) -{ - int rc = 0; - u8 reg = 0; - struct smbchg_chip *chip = rdev_get_drvdata(rdev); - - rc = smbchg_read(chip, ®, chip->bat_if_base + CMD_CHG_REG, 1); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't read OTG enable bit rc=%d\n", rc); - return rc; - } - - return (reg & OTG_EN_BIT) ? 
1 : 0; -} - -struct regulator_ops smbchg_otg_reg_ops = { - .enable = smbchg_otg_regulator_enable, - .disable = smbchg_otg_regulator_disable, - .is_enabled = smbchg_otg_regulator_is_enable, -}; - -#define USBIN_CHGR_CFG 0xF1 -#define ADAPTER_ALLOWANCE_MASK 0x7 -#define USBIN_ADAPTER_9V 0x3 -#define USBIN_ADAPTER_5V_9V_CONT 0x2 -#define USBIN_ADAPTER_5V_UNREGULATED_9V 0x5 -#define HVDCP_EN_BIT BIT(3) -static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev) -{ - int rc = 0; - struct smbchg_chip *chip = rdev_get_drvdata(rdev); - - rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, true, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't suspend charger rc=%d\n", rc); - return rc; - } - - rc = smbchg_read(chip, &chip->original_usbin_allowance, - chip->usb_chgpth_base + USBIN_CHGR_CFG, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc); - return rc; - } - - /* - * To disallow source detect and usbin_uv interrupts, set the adapter - * allowance to 9V, so that the audio boost operating in reverse never - * gets detected as a valid input - */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc); - return rc; - } - - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + USBIN_CHGR_CFG, - 0xFF, USBIN_ADAPTER_9V); - if (rc < 0) { - dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc); - return rc; - } - - pr_smb(PR_STATUS, "Enabling OTG Boost\n"); - return rc; -} - -static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev) -{ - int rc = 0; - struct smbchg_chip *chip = rdev_get_drvdata(rdev); - - rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, false, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't unsuspend charger rc=%d\n", rc); - return rc; - } - - /* - * Reenable HVDCP and set the adapter allowance back to the original - * value in order to allow normal USBs to be 
recognized as a valid - * input. - */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, HVDCP_EN_BIT); - if (rc < 0) { - dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc); - return rc; - } - - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + USBIN_CHGR_CFG, - 0xFF, chip->original_usbin_allowance); - if (rc < 0) { - dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc); - return rc; - } - - pr_smb(PR_STATUS, "Disabling OTG Boost\n"); - return rc; -} - -static int smbchg_external_otg_regulator_is_enable(struct regulator_dev *rdev) -{ - struct smbchg_chip *chip = rdev_get_drvdata(rdev); - - return get_client_vote(chip->usb_suspend_votable, OTG_EN_VOTER); -} - -struct regulator_ops smbchg_external_otg_reg_ops = { - .enable = smbchg_external_otg_regulator_enable, - .disable = smbchg_external_otg_regulator_disable, - .is_enabled = smbchg_external_otg_regulator_is_enable, -}; - -static int smbchg_regulator_init(struct smbchg_chip *chip) -{ - int rc = 0; - struct regulator_config cfg = {}; - struct device_node *regulator_node; - - cfg.dev = chip->dev; - cfg.driver_data = chip; - - chip->otg_vreg.rdesc.owner = THIS_MODULE; - chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE; - chip->otg_vreg.rdesc.ops = &smbchg_otg_reg_ops; - chip->otg_vreg.rdesc.of_match = "qcom,smbcharger-boost-otg"; - chip->otg_vreg.rdesc.name = "qcom,smbcharger-boost-otg"; - - chip->otg_vreg.rdev = devm_regulator_register(chip->dev, - &chip->otg_vreg.rdesc, &cfg); - if (IS_ERR(chip->otg_vreg.rdev)) { - rc = PTR_ERR(chip->otg_vreg.rdev); - chip->otg_vreg.rdev = NULL; - if (rc != -EPROBE_DEFER) - dev_err(chip->dev, - "OTG reg failed, rc=%d\n", rc); - } - if (rc) - return rc; - - regulator_node = of_get_child_by_name(chip->dev->of_node, - "qcom,smbcharger-external-otg"); - if (!regulator_node) { - dev_dbg(chip->dev, "external-otg node absent\n"); - return 0; - } - - chip->ext_otg_vreg.rdesc.owner = THIS_MODULE; - chip->ext_otg_vreg.rdesc.type 
= REGULATOR_VOLTAGE; - chip->ext_otg_vreg.rdesc.ops = &smbchg_external_otg_reg_ops; - chip->ext_otg_vreg.rdesc.of_match = "qcom,smbcharger-external-otg"; - chip->ext_otg_vreg.rdesc.name = "qcom,smbcharger-external-otg"; - if (of_get_property(chip->dev->of_node, "otg-parent-supply", NULL)) - chip->ext_otg_vreg.rdesc.supply_name = "otg-parent"; - cfg.dev = chip->dev; - cfg.driver_data = chip; - - chip->ext_otg_vreg.rdev = devm_regulator_register(chip->dev, - &chip->ext_otg_vreg.rdesc, - &cfg); - if (IS_ERR(chip->ext_otg_vreg.rdev)) { - rc = PTR_ERR(chip->ext_otg_vreg.rdev); - chip->ext_otg_vreg.rdev = NULL; - if (rc != -EPROBE_DEFER) - dev_err(chip->dev, - "external OTG reg failed, rc=%d\n", rc); - } - - return rc; -} - -#define CMD_CHG_LED_REG 0x43 -#define CHG_LED_CTRL_BIT BIT(0) -#define LED_SW_CTRL_BIT 0x1 -#define LED_CHG_CTRL_BIT 0x0 -#define CHG_LED_ON 0x03 -#define CHG_LED_OFF 0x00 -#define LED_BLINKING_PATTERN1 0x01 -#define LED_BLINKING_PATTERN2 0x02 -#define LED_BLINKING_CFG_MASK SMB_MASK(2, 1) -#define CHG_LED_SHIFT 1 -static int smbchg_chg_led_controls(struct smbchg_chip *chip) -{ - u8 reg, mask; - int rc; - - if (chip->cfg_chg_led_sw_ctrl) { - /* turn-off LED by default for software control */ - mask = CHG_LED_CTRL_BIT | LED_BLINKING_CFG_MASK; - reg = LED_SW_CTRL_BIT; - } else { - mask = CHG_LED_CTRL_BIT; - reg = LED_CHG_CTRL_BIT; - } - - rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_LED_REG, - mask, reg); - if (rc < 0) - dev_err(chip->dev, - "Couldn't write LED_CTRL_BIT rc=%d\n", rc); - return rc; -} - -static void smbchg_chg_led_brightness_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct smbchg_chip *chip = container_of(cdev, - struct smbchg_chip, led_cdev); - union power_supply_propval pval = {0, }; - u8 reg; - int rc; - - reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT : - CHG_LED_OFF << CHG_LED_SHIFT; - pval.intval = value > LED_OFF ? 
1 : 0; - power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER, - &pval); - pr_smb(PR_STATUS, - "set the charger led brightness to value=%d\n", - value); - rc = smbchg_sec_masked_write(chip, - chip->bat_if_base + CMD_CHG_LED_REG, - LED_BLINKING_CFG_MASK, reg); - if (rc) - dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n", - rc); -} - -static enum -led_brightness smbchg_chg_led_brightness_get(struct led_classdev *cdev) -{ - struct smbchg_chip *chip = container_of(cdev, - struct smbchg_chip, led_cdev); - u8 reg_val, chg_led_sts; - int rc; - - rc = smbchg_read(chip, ®_val, chip->bat_if_base + CMD_CHG_LED_REG, - 1); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't read CHG_LED_REG sts rc=%d\n", - rc); - return rc; - } - - chg_led_sts = (reg_val & LED_BLINKING_CFG_MASK) >> CHG_LED_SHIFT; - - pr_smb(PR_STATUS, "chg_led_sts = %02x\n", chg_led_sts); - - return (chg_led_sts == CHG_LED_OFF) ? LED_OFF : LED_FULL; -} - -static void smbchg_chg_led_blink_set(struct smbchg_chip *chip, - unsigned long blinking) -{ - union power_supply_propval pval = {0, }; - u8 reg; - int rc; - - pval.intval = (blinking == 0) ? 
0 : 1; - power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER, - &pval); - - if (blinking == 0) { - reg = CHG_LED_OFF << CHG_LED_SHIFT; - } else { - if (blinking == 1) - reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT; - else if (blinking == 2) - reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT; - else - reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT; - } - - rc = smbchg_sec_masked_write(chip, - chip->bat_if_base + CMD_CHG_LED_REG, - LED_BLINKING_CFG_MASK, reg); - if (rc) - dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n", - rc); -} - -static ssize_t smbchg_chg_led_blink_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct led_classdev *cdev = dev_get_drvdata(dev); - struct smbchg_chip *chip = container_of(cdev, struct smbchg_chip, - led_cdev); - unsigned long blinking; - ssize_t rc = -EINVAL; - - rc = kstrtoul(buf, 10, &blinking); - if (rc) - return rc; - - smbchg_chg_led_blink_set(chip, blinking); - - return len; -} - -static DEVICE_ATTR(blink, 0664, NULL, smbchg_chg_led_blink_store); - -static struct attribute *led_blink_attributes[] = { - &dev_attr_blink.attr, - NULL, -}; - -static struct attribute_group smbchg_led_attr_group = { - .attrs = led_blink_attributes -}; - -static int smbchg_register_chg_led(struct smbchg_chip *chip) -{ - int rc; - - chip->led_cdev.name = "red"; - chip->led_cdev.brightness_set = smbchg_chg_led_brightness_set; - chip->led_cdev.brightness_get = smbchg_chg_led_brightness_get; - - rc = led_classdev_register(chip->dev, &chip->led_cdev); - if (rc) { - dev_err(chip->dev, "unable to register charger led, rc=%d\n", - rc); - return rc; - } - - rc = sysfs_create_group(&chip->led_cdev.dev->kobj, - &smbchg_led_attr_group); - if (rc) { - dev_err(chip->dev, "led sysfs rc: %d\n", rc); - return rc; - } - - return rc; -} - -static int vf_adjust_low_threshold = 5; -module_param(vf_adjust_low_threshold, int, 0644); - -static int vf_adjust_high_threshold = 7; -module_param(vf_adjust_high_threshold, 
int, 0644); - -static int vf_adjust_n_samples = 10; -module_param(vf_adjust_n_samples, int, 0644); - -static int vf_adjust_max_delta_mv = 40; -module_param(vf_adjust_max_delta_mv, int, 0644); - -static int vf_adjust_trim_steps_per_adjust = 1; -module_param(vf_adjust_trim_steps_per_adjust, int, 0644); - -#define CENTER_TRIM_CODE 7 -#define MAX_LIN_CODE 14 -#define MAX_TRIM_CODE 15 -#define SCALE_SHIFT 4 -#define VF_TRIM_OFFSET_MASK SMB_MASK(3, 0) -#define VF_STEP_SIZE_MV 10 -#define SCALE_LSB_MV 17 -static int smbchg_trim_add_steps(int prev_trim, int delta_steps) -{ - int scale_steps; - int linear_offset, linear_scale; - int offset_code = prev_trim & VF_TRIM_OFFSET_MASK; - int scale_code = (prev_trim & ~VF_TRIM_OFFSET_MASK) >> SCALE_SHIFT; - - if (abs(delta_steps) > 1) { - pr_smb(PR_STATUS, - "Cant trim multiple steps delta_steps = %d\n", - delta_steps); - return prev_trim; - } - if (offset_code <= CENTER_TRIM_CODE) - linear_offset = offset_code + CENTER_TRIM_CODE; - else if (offset_code > CENTER_TRIM_CODE) - linear_offset = MAX_TRIM_CODE - offset_code; - - if (scale_code <= CENTER_TRIM_CODE) - linear_scale = scale_code + CENTER_TRIM_CODE; - else if (scale_code > CENTER_TRIM_CODE) - linear_scale = scale_code - (CENTER_TRIM_CODE + 1); - - /* check if we can accomodate delta steps with just the offset */ - if (linear_offset + delta_steps >= 0 - && linear_offset + delta_steps <= MAX_LIN_CODE) { - linear_offset += delta_steps; - - if (linear_offset > CENTER_TRIM_CODE) - offset_code = linear_offset - CENTER_TRIM_CODE; - else - offset_code = MAX_TRIM_CODE - linear_offset; - - return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code; - } - - /* changing offset cannot satisfy delta steps, change the scale bits */ - scale_steps = delta_steps > 0 ? 
1 : -1; - - if (linear_scale + scale_steps < 0 - || linear_scale + scale_steps > MAX_LIN_CODE) { - pr_smb(PR_STATUS, - "Cant trim scale_steps = %d delta_steps = %d\n", - scale_steps, delta_steps); - return prev_trim; - } - - linear_scale += scale_steps; - - if (linear_scale > CENTER_TRIM_CODE) - scale_code = linear_scale - CENTER_TRIM_CODE; - else - scale_code = linear_scale + (CENTER_TRIM_CODE + 1); - prev_trim = (prev_trim & VF_TRIM_OFFSET_MASK) - | scale_code << SCALE_SHIFT; - - /* - * now that we have changed scale which is a 17mV jump, change the - * offset bits (10mV) too so the effective change is just 7mV - */ - delta_steps = -1 * delta_steps; - - linear_offset = clamp(linear_offset + delta_steps, 0, MAX_LIN_CODE); - if (linear_offset > CENTER_TRIM_CODE) - offset_code = linear_offset - CENTER_TRIM_CODE; - else - offset_code = MAX_TRIM_CODE - linear_offset; - - return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code; -} - -#define TRIM_14 0xFE -#define VF_TRIM_MASK 0xFF -static int smbchg_adjust_vfloat_mv_trim(struct smbchg_chip *chip, - int delta_mv) -{ - int sign, delta_steps, rc = 0; - u8 prev_trim, new_trim; - int i; - - sign = delta_mv > 0 ? 1 : -1; - delta_steps = (delta_mv + sign * VF_STEP_SIZE_MV / 2) - / VF_STEP_SIZE_MV; - - rc = smbchg_read(chip, &prev_trim, chip->misc_base + TRIM_14, 1); - if (rc) { - dev_err(chip->dev, "Unable to read trim 14: %d\n", rc); - return rc; - } - - for (i = 1; i <= abs(delta_steps) - && i <= vf_adjust_trim_steps_per_adjust; i++) { - new_trim = (u8)smbchg_trim_add_steps(prev_trim, - delta_steps > 0 ? 
1 : -1); - if (new_trim == prev_trim) { - pr_smb(PR_STATUS, - "VFloat trim unchanged from %02x\n", prev_trim); - /* treat no trim change as an error */ - return -EINVAL; - } - - rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_14, - VF_TRIM_MASK, new_trim); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't change vfloat trim rc=%d\n", rc); - } - pr_smb(PR_STATUS, - "VFlt trim %02x to %02x, delta steps: %d\n", - prev_trim, new_trim, delta_steps); - prev_trim = new_trim; - } - - return rc; -} - -#define VFLOAT_RESAMPLE_DELAY_MS 10000 -static void smbchg_vfloat_adjust_work(struct work_struct *work) -{ - struct smbchg_chip *chip = container_of(work, - struct smbchg_chip, - vfloat_adjust_work.work); - int vbat_uv, vbat_mv, ibat_ua, rc, delta_vfloat_mv; - bool taper, enable; - - smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST); - taper = (get_prop_charge_type(chip) - == POWER_SUPPLY_CHARGE_TYPE_TAPER); - enable = taper && (chip->parallel.current_max_ma == 0); - - if (!enable) { - pr_smb(PR_MISC, - "Stopping vfloat adj taper=%d parallel_ma = %d\n", - taper, chip->parallel.current_max_ma); - goto stop; - } - - if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) { - pr_smb(PR_STATUS, "JEITA active, skipping\n"); - goto stop; - } - - set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1); - rc = get_property_from_fg(chip, - POWER_SUPPLY_PROP_VOLTAGE_NOW, &vbat_uv); - if (rc) { - pr_smb(PR_STATUS, - "bms psy does not support voltage rc = %d\n", rc); - goto stop; - } - vbat_mv = vbat_uv / 1000; - - if ((vbat_mv - chip->vfloat_mv) < -1 * vf_adjust_max_delta_mv) { - pr_smb(PR_STATUS, "Skip vbat out of range: %d\n", vbat_mv); - goto reschedule; - } - - rc = get_property_from_fg(chip, - POWER_SUPPLY_PROP_CURRENT_NOW, &ibat_ua); - if (rc) { - pr_smb(PR_STATUS, - "bms psy does not support current_now rc = %d\n", rc); - goto stop; - } - - if (ibat_ua / 1000 > -chip->iterm_ma) { - pr_smb(PR_STATUS, "Skip ibat too high: %d\n", ibat_ua); - goto reschedule; - } - - 
pr_smb(PR_STATUS, "sample number = %d vbat_mv = %d ibat_ua = %d\n", - chip->n_vbat_samples, - vbat_mv, - ibat_ua); - - chip->max_vbat_sample = max(chip->max_vbat_sample, vbat_mv); - chip->n_vbat_samples += 1; - if (chip->n_vbat_samples < vf_adjust_n_samples) { - pr_smb(PR_STATUS, "Skip %d samples; max = %d\n", - chip->n_vbat_samples, chip->max_vbat_sample); - goto reschedule; - } - /* if max vbat > target vfloat, delta_vfloat_mv could be negative */ - delta_vfloat_mv = chip->vfloat_mv - chip->max_vbat_sample; - pr_smb(PR_STATUS, "delta_vfloat_mv = %d, samples = %d, mvbat = %d\n", - delta_vfloat_mv, chip->n_vbat_samples, chip->max_vbat_sample); - /* - * enough valid samples has been collected, adjust trim codes - * based on maximum of collected vbat samples if necessary - */ - if (delta_vfloat_mv > vf_adjust_high_threshold - || delta_vfloat_mv < -1 * vf_adjust_low_threshold) { - rc = smbchg_adjust_vfloat_mv_trim(chip, delta_vfloat_mv); - if (rc) { - pr_smb(PR_STATUS, - "Stopping vfloat adj after trim adj rc = %d\n", - rc); - goto stop; - } - chip->max_vbat_sample = 0; - chip->n_vbat_samples = 0; - goto reschedule; - } - -stop: - chip->max_vbat_sample = 0; - chip->n_vbat_samples = 0; - smbchg_relax(chip, PM_REASON_VFLOAT_ADJUST); - return; - -reschedule: - schedule_delayed_work(&chip->vfloat_adjust_work, - msecs_to_jiffies(VFLOAT_RESAMPLE_DELAY_MS)); - return; -} - -static int smbchg_charging_status_change(struct smbchg_chip *chip) -{ - smbchg_vfloat_adjust_check(chip); - set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS, - get_prop_batt_status(chip)); - return 0; -} - -#define BB_CLMP_SEL 0xF8 -#define BB_CLMP_MASK SMB_MASK(1, 0) -#define BB_CLMP_VFIX_3338MV 0x1 -#define BB_CLMP_VFIX_3512MV 0x2 -static int smbchg_set_optimal_charging_mode(struct smbchg_chip *chip, int type) -{ - int rc; - bool hvdcp2 = (type == POWER_SUPPLY_TYPE_USB_HVDCP - && smbchg_is_usbin_active_pwr_src(chip)); - - /* - * Set the charger switching freq to 1MHZ if HVDCP 2.0, - * or 750KHZ 
otherwise - */ - rc = smbchg_sec_masked_write(chip, - chip->bat_if_base + BAT_IF_TRIM7_REG, - CFG_750KHZ_BIT, hvdcp2 ? 0 : CFG_750KHZ_BIT); - if (rc) { - dev_err(chip->dev, "Cannot set switching freq: %d\n", rc); - return rc; - } - - /* - * Set the charger switch frequency clamp voltage threshold to 3.338V - * if HVDCP 2.0, or 3.512V otherwise. - */ - rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BB_CLMP_SEL, - BB_CLMP_MASK, - hvdcp2 ? BB_CLMP_VFIX_3338MV : BB_CLMP_VFIX_3512MV); - if (rc) { - dev_err(chip->dev, "Cannot set switching freq: %d\n", rc); - return rc; - } - - return 0; -} - -#define DEFAULT_SDP_MA 100 -#define DEFAULT_CDP_MA 1500 -static int smbchg_change_usb_supply_type(struct smbchg_chip *chip, - enum power_supply_type type) -{ - int rc, current_limit_ma; - - /* - * if the type is not unknown, set the type before changing ICL vote - * in order to ensure that the correct current limit registers are - * used - */ - if (type != POWER_SUPPLY_TYPE_UNKNOWN) - chip->usb_supply_type = type; - - /* - * Type-C only supports STD(900), MEDIUM(1500) and HIGH(3000) current - * modes, skip all BC 1.2 current if external typec is supported. - * Note: for SDP supporting current based on USB notifications. 
- */ - if (chip->typec_psy && (type != POWER_SUPPLY_TYPE_USB)) - current_limit_ma = chip->typec_current_ma; - else if (type == POWER_SUPPLY_TYPE_USB) - current_limit_ma = DEFAULT_SDP_MA; - else if (type == POWER_SUPPLY_TYPE_USB_CDP) - current_limit_ma = DEFAULT_CDP_MA; - else if (type == POWER_SUPPLY_TYPE_USB_HVDCP) - current_limit_ma = smbchg_default_hvdcp_icl_ma; - else if (type == POWER_SUPPLY_TYPE_USB_HVDCP_3) - current_limit_ma = smbchg_default_hvdcp3_icl_ma; - else - current_limit_ma = smbchg_default_dcp_icl_ma; - - pr_smb(PR_STATUS, "Type %d: setting mA = %d\n", - type, current_limit_ma); - rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true, - current_limit_ma); - if (rc < 0) { - pr_err("Couldn't vote for new USB ICL rc=%d\n", rc); - goto out; - } - - /* otherwise if it is unknown, set type after the vote */ - if (type == POWER_SUPPLY_TYPE_UNKNOWN) - chip->usb_supply_type = type; - - if (!chip->skip_usb_notification) - power_supply_changed(chip->usb_psy); - - /* set the correct buck switching frequency */ - rc = smbchg_set_optimal_charging_mode(chip, type); - if (rc < 0) - pr_err("Couldn't set charger optimal mode rc=%d\n", rc); - -out: - return rc; -} - -#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4) -#define HVDCP_5V 0x00 -#define HVDCP_9V 0x10 -#define USB_CMD_HVDCP_1 0x42 -#define FORCE_HVDCP_2p0 BIT(3) - -static int force_9v_hvdcp(struct smbchg_chip *chip) -{ - int rc; - - /* Force 5V HVDCP */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_5V); - if (rc) { - pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc); - return rc; - } - - /* Force QC2.0 */ - rc = smbchg_masked_write(chip, - chip->usb_chgpth_base + USB_CMD_HVDCP_1, - FORCE_HVDCP_2p0, FORCE_HVDCP_2p0); - rc |= smbchg_masked_write(chip, - chip->usb_chgpth_base + USB_CMD_HVDCP_1, - FORCE_HVDCP_2p0, 0); - if (rc < 0) { - pr_err("Couldn't force QC2.0 rc=%d\n", rc); - return rc; - } - - /* Delay to switch into HVDCP 2.0 and avoid 
UV */ - msleep(500); - - /* Force 9V HVDCP */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_9V); - if (rc) - pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc); - - return rc; -} - -static void smbchg_hvdcp_det_work(struct work_struct *work) -{ - struct smbchg_chip *chip = container_of(work, - struct smbchg_chip, - hvdcp_det_work.work); - int rc; - - if (is_hvdcp_present(chip)) { - if (!chip->hvdcp3_supported && - (chip->wa_flags & SMBCHG_HVDCP_9V_EN_WA)) { - /* force HVDCP 2.0 */ - rc = force_9v_hvdcp(chip); - if (rc) - pr_err("could not force 9V HVDCP continuing rc=%d\n", - rc); - } - smbchg_change_usb_supply_type(chip, - POWER_SUPPLY_TYPE_USB_HVDCP); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_aicl_deglitch_wa_check(chip); - } - smbchg_relax(chip, PM_DETECT_HVDCP); -} - -static int set_usb_psy_dp_dm(struct smbchg_chip *chip, int state) -{ - int rc; - u8 reg; - union power_supply_propval pval = {0, }; - - /* - * ensure that we are not in the middle of an insertion where usbin_uv - * is low and src_detect hasnt gone high. 
If so force dp=F dm=F - * which guarantees proper type detection - */ - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RT_STS, 1); - if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) { - pr_smb(PR_MISC, "overwriting state = %d with %d\n", - state, POWER_SUPPLY_DP_DM_DPF_DMF); - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - return regulator_enable(chip->dpdm_reg); - } - pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state); - pval.intval = state; - return power_supply_set_property(chip->usb_psy, - POWER_SUPPLY_PROP_DP_DM, &pval); -} - -#define APSD_CFG 0xF5 -#define AUTO_SRC_DETECT_EN_BIT BIT(0) -#define APSD_TIMEOUT_MS 1500 -static void restore_from_hvdcp_detection(struct smbchg_chip *chip) -{ - int rc; - - pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); - - /* switch to 9V HVDCP */ - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_9V); - if (rc < 0) - pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc); - - /* enable HVDCP */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, HVDCP_EN_BIT); - if (rc < 0) - pr_err("Couldn't enable HVDCP rc=%d\n", rc); - - /* enable APSD */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + APSD_CFG, - AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT); - if (rc < 0) - pr_err("Couldn't enable APSD rc=%d\n", rc); - - /* Reset back to 5V unregulated */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + USBIN_CHGR_CFG, - ADAPTER_ALLOWANCE_MASK, USBIN_ADAPTER_5V_UNREGULATED_9V); - if (rc < 0) - pr_err("Couldn't write usb allowance rc=%d\n", rc); - - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, AICL_EN_BIT); - if (rc < 0) - pr_err("Couldn't enable AICL rc=%d\n", rc); - - chip->hvdcp_3_det_ignore_uv = false; - 
chip->pulse_cnt = 0; -} - -#define RESTRICTED_CHG_FCC_PERCENT 50 -static int smbchg_restricted_charging(struct smbchg_chip *chip, bool enable) -{ - int current_table_index, fastchg_current; - int rc = 0; - - /* If enable, set the fcc to the set point closest - * to 50% of the configured fcc while remaining below it - */ - current_table_index = find_smaller_in_array( - chip->tables.usb_ilim_ma_table, - chip->cfg_fastchg_current_ma - * RESTRICTED_CHG_FCC_PERCENT / 100, - chip->tables.usb_ilim_ma_len); - fastchg_current = - chip->tables.usb_ilim_ma_table[current_table_index]; - rc = vote(chip->fcc_votable, RESTRICTED_CHG_FCC_VOTER, enable, - fastchg_current); - - pr_smb(PR_STATUS, "restricted_charging set to %d\n", enable); - chip->restricted_charging = enable; - - return rc; -} - -static void handle_usb_removal(struct smbchg_chip *chip) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; - int rc; - - pr_smb(PR_STATUS, "triggered\n"); - smbchg_aicl_deglitch_wa_check(chip); - /* Clear the OV detected status set before */ - if (chip->usb_ov_det) - chip->usb_ov_det = false; - /* Clear typec current status */ - if (chip->typec_psy) - chip->typec_current_ma = 0; - smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN); - extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present); - if (chip->dpdm_reg) - regulator_disable(chip->dpdm_reg); - schedule_work(&chip->usb_set_online_work); - - pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n"); - chip->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN; - power_supply_changed(chip->usb_psy); - - if (parallel_psy && chip->parallel_charger_detected) { - pval.intval = false; - power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_PRESENT, &pval); - } - if (chip->parallel.avail && chip->aicl_done_irq - && chip->enable_aicl_wake) { - disable_irq_wake(chip->aicl_done_irq); - chip->enable_aicl_wake = false; - } - chip->parallel.enabled_once = false; - 
chip->vbat_above_headroom = false; - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - ICL_OVERRIDE_BIT, 0); - if (rc < 0) - pr_err("Couldn't set override rc = %d\n", rc); - - vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER, false, 0); - chip->usb_icl_delta = 0; - vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, false, 0); - vote(chip->aicl_deglitch_short_votable, - HVDCP_SHORT_DEGLITCH_VOTER, false, 0); - if (!chip->hvdcp_not_supported) - restore_from_hvdcp_detection(chip); -} - -static bool is_usbin_uv_high(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc); - return false; - } - return reg &= USBIN_UV_BIT; -} - -#define HVDCP_NOTIFY_MS 2500 -static void handle_usb_insertion(struct smbchg_chip *chip) -{ - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; - enum power_supply_type usb_supply_type; - int rc; - char *usb_type_name = "null"; - - pr_smb(PR_STATUS, "triggered\n"); - /* usb inserted */ - read_usb_type(chip, &usb_type_name, &usb_supply_type); - pr_smb(PR_STATUS, - "inserted type = %d (%s)", usb_supply_type, usb_type_name); - - smbchg_aicl_deglitch_wa_check(chip); - if (chip->typec_psy) - update_typec_status(chip); - smbchg_change_usb_supply_type(chip, usb_supply_type); - - /* Only notify USB if it's not a charger */ - if (usb_supply_type == POWER_SUPPLY_TYPE_USB || - usb_supply_type == POWER_SUPPLY_TYPE_USB_CDP) - extcon_set_cable_state_(chip->extcon, EXTCON_USB, - chip->usb_present); - - /* Notify the USB psy if OV condition is not present */ - if (!chip->usb_ov_det) { - /* - * Note that this could still be a very weak charger - * if the handle_usb_insertion was triggered from - * the falling edge of an USBIN_OV interrupt - */ - pr_smb(PR_MISC, "setting usb psy health %s\n", - chip->very_weak_charger - ? 
"UNSPEC_FAILURE" : "GOOD"); - chip->usb_health = chip->very_weak_charger - ? POWER_SUPPLY_HEALTH_UNSPEC_FAILURE - : POWER_SUPPLY_HEALTH_GOOD; - power_supply_changed(chip->usb_psy); - } - schedule_work(&chip->usb_set_online_work); - - if (!chip->hvdcp_not_supported && - (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP)) { - cancel_delayed_work_sync(&chip->hvdcp_det_work); - smbchg_stay_awake(chip, PM_DETECT_HVDCP); - schedule_delayed_work(&chip->hvdcp_det_work, - msecs_to_jiffies(HVDCP_NOTIFY_MS)); - } - - if (parallel_psy) { - pval.intval = true; - rc = power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_PRESENT, &pval); - chip->parallel_charger_detected = rc ? false : true; - if (rc) - pr_debug("parallel-charger absent rc=%d\n", rc); - } - - if (chip->parallel.avail && chip->aicl_done_irq - && !chip->enable_aicl_wake) { - rc = enable_irq_wake(chip->aicl_done_irq); - chip->enable_aicl_wake = true; - } -} - -void update_usb_status(struct smbchg_chip *chip, bool usb_present, bool force) -{ - mutex_lock(&chip->usb_status_lock); - if (force) { - chip->usb_present = usb_present; - chip->usb_present ? handle_usb_insertion(chip) - : handle_usb_removal(chip); - goto unlock; - } - if (!chip->usb_present && usb_present) { - chip->usb_present = usb_present; - handle_usb_insertion(chip); - } else if (chip->usb_present && !usb_present) { - chip->usb_present = usb_present; - handle_usb_removal(chip); - } - - /* update FG */ - set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS, - get_prop_batt_status(chip)); -unlock: - mutex_unlock(&chip->usb_status_lock); -} - -static int otg_oc_reset(struct smbchg_chip *chip) -{ - int rc; - - rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG, - OTG_EN_BIT, 0); - if (rc) - pr_err("Failed to disable OTG rc=%d\n", rc); - - msleep(20); - - /* - * There is a possibility that an USBID interrupt might have - * occurred notifying USB power supply to disable OTG. We - * should not enable OTG in such cases. 
- */ - if (!is_otg_present(chip)) { - pr_smb(PR_STATUS, - "OTG is not present, not enabling OTG_EN_BIT\n"); - goto out; - } - - rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG, - OTG_EN_BIT, OTG_EN_BIT); - if (rc) - pr_err("Failed to re-enable OTG rc=%d\n", rc); - -out: - return rc; -} - -static int get_current_time(unsigned long *now_tm_sec) -{ - struct rtc_time tm; - struct rtc_device *rtc; - int rc; - - rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); - if (rtc == NULL) { - pr_err("%s: unable to open rtc device (%s)\n", - __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); - return -EINVAL; - } - - rc = rtc_read_time(rtc, &tm); - if (rc) { - pr_err("Error reading rtc device (%s) : %d\n", - CONFIG_RTC_HCTOSYS_DEVICE, rc); - goto close_time; - } - - rc = rtc_valid_tm(&tm); - if (rc) { - pr_err("Invalid RTC time (%s): %d\n", - CONFIG_RTC_HCTOSYS_DEVICE, rc); - goto close_time; - } - rtc_tm_to_time(&tm, now_tm_sec); - -close_time: - rtc_class_close(rtc); - return rc; -} - -#define AICL_IRQ_LIMIT_SECONDS 60 -#define AICL_IRQ_LIMIT_COUNT 25 -static void increment_aicl_count(struct smbchg_chip *chip) -{ - bool bad_charger = false; - int max_aicl_count, rc; - u8 reg; - long elapsed_seconds; - unsigned long now_seconds; - - pr_smb(PR_INTERRUPT, "aicl count c:%d dgltch:%d first:%ld\n", - chip->aicl_irq_count, chip->aicl_deglitch_short, - chip->first_aicl_seconds); - - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + ICL_STS_1_REG, 1); - if (!rc) - chip->aicl_complete = reg & AICL_STS_BIT; - else - chip->aicl_complete = false; - - if (chip->aicl_deglitch_short || chip->force_aicl_rerun) { - if (!chip->aicl_irq_count) - get_current_time(&chip->first_aicl_seconds); - get_current_time(&now_seconds); - elapsed_seconds = now_seconds - - chip->first_aicl_seconds; - - if (elapsed_seconds > AICL_IRQ_LIMIT_SECONDS) { - pr_smb(PR_INTERRUPT, - "resetting: elp:%ld first:%ld now:%ld c=%d\n", - elapsed_seconds, chip->first_aicl_seconds, - now_seconds, chip->aicl_irq_count); - 
chip->aicl_irq_count = 1; - get_current_time(&chip->first_aicl_seconds); - return; - } - /* - * Double the amount of AICLs allowed if parallel charging is - * enabled. - */ - max_aicl_count = AICL_IRQ_LIMIT_COUNT - * (chip->parallel.avail ? 2 : 1); - chip->aicl_irq_count++; - - if (chip->aicl_irq_count > max_aicl_count) { - pr_smb(PR_INTERRUPT, "elp:%ld first:%ld now:%ld c=%d\n", - elapsed_seconds, chip->first_aicl_seconds, - now_seconds, chip->aicl_irq_count); - pr_smb(PR_INTERRUPT, "Disable AICL rerun\n"); - chip->very_weak_charger = true; - bad_charger = true; - - /* - * Disable AICL rerun since many interrupts were - * triggered in a short time - */ - /* disable hw aicl */ - rc = vote(chip->hw_aicl_rerun_disable_votable, - WEAK_CHARGER_HW_AICL_VOTER, true, 0); - if (rc < 0) { - pr_err("Couldn't disable hw aicl rerun rc=%d\n", - rc); - return; - } - - /* Vote 100mA current limit */ - rc = vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER, - true, CURRENT_100_MA); - if (rc < 0) { - pr_err("Can't vote %d current limit rc=%d\n", - CURRENT_100_MA, rc); - } - - chip->aicl_irq_count = 0; - } else if ((get_prop_charge_type(chip) == - POWER_SUPPLY_CHARGE_TYPE_FAST) && - (reg & AICL_SUSP_BIT)) { - /* - * If the AICL_SUSP_BIT is on, then AICL reruns have - * already been disabled. Set the very weak charger - * flag so that the driver reports a bad charger - * and does not reenable AICL reruns. 
- */ - chip->very_weak_charger = true; - bad_charger = true; - } - if (bad_charger) { - pr_smb(PR_MISC, - "setting usb psy health UNSPEC_FAILURE\n"); - chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; - power_supply_changed(chip->usb_psy); - schedule_work(&chip->usb_set_online_work); - } - } -} - -static int wait_for_usbin_uv(struct smbchg_chip *chip, bool high) -{ - int rc; - int tries = 3; - struct completion *completion = &chip->usbin_uv_lowered; - bool usbin_uv; - - if (high) - completion = &chip->usbin_uv_raised; - - while (tries--) { - rc = wait_for_completion_interruptible_timeout( - completion, - msecs_to_jiffies(APSD_TIMEOUT_MS)); - if (rc >= 0) - break; - } - - usbin_uv = is_usbin_uv_high(chip); - - if (high == usbin_uv) - return 0; - - pr_err("usbin uv didnt go to a %s state, still at %s, tries = %d, rc = %d\n", - high ? "risen" : "lowered", - usbin_uv ? "high" : "low", - tries, rc); - return -EINVAL; -} - -static int wait_for_src_detect(struct smbchg_chip *chip, bool high) -{ - int rc; - int tries = 3; - struct completion *completion = &chip->src_det_lowered; - bool src_detect; - - if (high) - completion = &chip->src_det_raised; - - while (tries--) { - rc = wait_for_completion_interruptible_timeout( - completion, - msecs_to_jiffies(APSD_TIMEOUT_MS)); - if (rc >= 0) - break; - } - - src_detect = is_src_detect_high(chip); - - if (high == src_detect) - return 0; - - pr_err("src detect didnt go to a %s state, still at %s, tries = %d, rc = %d\n", - high ? "risen" : "lowered", - src_detect ? 
"high" : "low", - tries, rc); - return -EINVAL; -} - -static int fake_insertion_removal(struct smbchg_chip *chip, bool insertion) -{ - int rc; - bool src_detect; - bool usbin_uv; - - if (insertion) { - reinit_completion(&chip->src_det_raised); - reinit_completion(&chip->usbin_uv_lowered); - } else { - reinit_completion(&chip->src_det_lowered); - reinit_completion(&chip->usbin_uv_raised); - } - - /* ensure that usbin uv real time status is in the right state */ - usbin_uv = is_usbin_uv_high(chip); - if (usbin_uv != insertion) { - pr_err("Skip faking, usbin uv is already %d\n", usbin_uv); - return -EINVAL; - } - - /* ensure that src_detect real time status is in the right state */ - src_detect = is_src_detect_high(chip); - if (src_detect == insertion) { - pr_err("Skip faking, src detect is already %d\n", src_detect); - return -EINVAL; - } - - pr_smb(PR_MISC, "Allow only %s charger\n", - insertion ? "5-9V" : "9V only"); - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + USBIN_CHGR_CFG, - ADAPTER_ALLOWANCE_MASK, - insertion ? - USBIN_ADAPTER_5V_9V_CONT : USBIN_ADAPTER_9V); - if (rc < 0) { - pr_err("Couldn't write usb allowance rc=%d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Waiting on %s usbin uv\n", - insertion ? "falling" : "rising"); - rc = wait_for_usbin_uv(chip, !insertion); - if (rc < 0) { - pr_err("wait for usbin uv failed rc = %d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Waiting on %s src det\n", - insertion ? 
"rising" : "falling"); - rc = wait_for_src_detect(chip, insertion); - if (rc < 0) { - pr_err("wait for src detect failed rc = %d\n", rc); - return rc; - } - - return 0; -} - -static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip) -{ - int rc = 0; - u8 reg; - - /* switch to 5V HVDCP */ - pr_smb(PR_MISC, "Switch to 5V HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_5V); - if (rc < 0) { - pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc); - goto out; - } - - /* wait for HVDCP to lower to 5V */ - msleep(500); - /* - * Check if the same hvdcp session is in progress. src_det should be - * high and that we are still in 5V hvdcp - */ - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "src det low after 500mS sleep\n"); - goto out; - } - - /* disable HVDCP */ - pr_smb(PR_MISC, "Disable HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, 0); - if (rc < 0) { - pr_err("Couldn't disable HVDCP rc=%d\n", rc); - goto out; - } - - pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300); - if (rc < 0) { - pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc); - goto out; - } - - pr_smb(PR_MISC, "Disable AICL\n"); - smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, 0); - - chip->hvdcp_3_det_ignore_uv = true; - /* fake a removal */ - pr_smb(PR_MISC, "Faking Removal\n"); - rc = fake_insertion_removal(chip, false); - if (rc < 0) { - pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc); - goto handle_removal; - } - - /* disable APSD */ - pr_smb(PR_MISC, "Disabling APSD\n"); - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + APSD_CFG, - AUTO_SRC_DETECT_EN_BIT, 0); - if (rc < 0) { - pr_err("Couldn't disable APSD rc=%d\n", rc); - goto out; - } - - /* fake an insertion */ - pr_smb(PR_MISC, "Faking Insertion\n"); - rc = fake_insertion_removal(chip, true); - if 
(rc < 0) { - pr_err("Couldn't fake insertion rc=%d\n", rc); - goto handle_removal; - } - chip->hvdcp_3_det_ignore_uv = false; - - pr_smb(PR_MISC, "Enable AICL\n"); - smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, AICL_EN_BIT); - - set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DMF); - /* - * DCP will switch to HVDCP in this time by removing the short - * between DP DM - */ - msleep(HVDCP_NOTIFY_MS); - /* - * Check if the same hvdcp session is in progress. src_det should be - * high and the usb type should be none since APSD was disabled - */ - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "src det low after 2s sleep\n"); - rc = -EINVAL; - goto out; - } - - smbchg_read(chip, ®, chip->misc_base + IDEV_STS, 1); - if ((reg >> TYPE_BITS_OFFSET) != 0) { - pr_smb(PR_MISC, "type bits set after 2s sleep - abort\n"); - rc = -EINVAL; - goto out; - } - - set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DM3P3); - /* Wait 60mS after entering continuous mode */ - msleep(60); - - return 0; -out: - chip->hvdcp_3_det_ignore_uv = false; - restore_from_hvdcp_detection(chip); - return rc; -handle_removal: - chip->hvdcp_3_det_ignore_uv = false; - update_usb_status(chip, 0, 0); - return rc; -} - -static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip) -{ - int rc = 0; - - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - rc = regulator_enable(chip->dpdm_reg); - if (rc < 0) { - pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); - return rc; - } - - /* switch to 9V HVDCP */ - pr_smb(PR_MISC, "Switch to 9V HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_9V); - if (rc < 0) { - pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc); - return rc; - } - - /* enable HVDCP */ - pr_smb(PR_MISC, "Enable HVDCP\n"); - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, HVDCP_EN_BIT); - if (rc < 0) { - pr_err("Couldn't enable HVDCP 
rc=%d\n", rc); - return rc; - } - - /* enable APSD */ - pr_smb(PR_MISC, "Enabling APSD\n"); - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + APSD_CFG, - AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT); - if (rc < 0) { - pr_err("Couldn't enable APSD rc=%d\n", rc); - return rc; - } - - /* Disable AICL */ - pr_smb(PR_MISC, "Disable AICL\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, 0); - if (rc < 0) { - pr_err("Couldn't disable AICL rc=%d\n", rc); - return rc; - } - - /* fake a removal */ - chip->hvdcp_3_det_ignore_uv = true; - pr_smb(PR_MISC, "Faking Removal\n"); - rc = fake_insertion_removal(chip, false); - if (rc < 0) { - pr_err("Couldn't fake removal rc=%d\n", rc); - goto out; - } - - /* - * reset the enabled once flag for parallel charging so - * parallel charging can immediately restart after the HVDCP pulsing - * is complete - */ - chip->parallel.enabled_once = false; - - /* fake an insertion */ - pr_smb(PR_MISC, "Faking Insertion\n"); - rc = fake_insertion_removal(chip, true); - if (rc < 0) { - pr_err("Couldn't fake insertion rc=%d\n", rc); - goto out; - } - chip->hvdcp_3_det_ignore_uv = false; - - /* Enable AICL */ - pr_smb(PR_MISC, "Enable AICL\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, 0); - if (rc < 0) { - pr_err("Couldn't enable AICL rc=%d\n", rc); - return rc; - } - -out: - /* - * There are many QC 2.0 chargers that collapse before the aicl deglitch - * timer can mitigate. Hence set the aicl deglitch time to a shorter - * period. 
- */ - - rc = vote(chip->aicl_deglitch_short_votable, - HVDCP_SHORT_DEGLITCH_VOTER, true, 0); - if (rc < 0) - pr_err("Couldn't reduce aicl deglitch rc=%d\n", rc); - - pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); - - chip->hvdcp_3_det_ignore_uv = false; - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "HVDCP removed\n"); - update_usb_status(chip, 0, 0); - } - return rc; -} - -#define USB_CMD_APSD 0x41 -#define APSD_RERUN BIT(0) -static int rerun_apsd(struct smbchg_chip *chip) -{ - int rc; - - reinit_completion(&chip->src_det_raised); - reinit_completion(&chip->usbin_uv_lowered); - reinit_completion(&chip->src_det_lowered); - reinit_completion(&chip->usbin_uv_raised); - - /* re-run APSD */ - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + USB_CMD_APSD, - APSD_RERUN, APSD_RERUN); - if (rc) { - pr_err("Couldn't re-run APSD rc=%d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Waiting on rising usbin uv\n"); - rc = wait_for_usbin_uv(chip, true); - if (rc < 0) { - pr_err("wait for usbin uv failed rc = %d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Waiting on falling src det\n"); - rc = wait_for_src_detect(chip, false); - if (rc < 0) { - pr_err("wait for src detect failed rc = %d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Waiting on falling usbin uv\n"); - rc = wait_for_usbin_uv(chip, false); - if (rc < 0) { - pr_err("wait for usbin uv failed rc = %d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Waiting on rising src det\n"); - rc = wait_for_src_detect(chip, true); - if (rc < 0) { - pr_err("wait for src detect failed rc = %d\n", rc); - return rc; - } - - return rc; -} - -#define SCHG_LITE_USBIN_HVDCP_5_9V 0x8 -#define SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK 0x38 -#define SCHG_LITE_USBIN_HVDCP_SEL_IDLE BIT(3) -static bool is_hvdcp_5v_cont_mode(struct smbchg_chip *chip) -{ - int rc; - u8 reg = 0; - - rc = 
smbchg_read(chip, ®, - chip->usb_chgpth_base + USBIN_HVDCP_STS, 1); - if (rc) { - pr_err("Unable to read HVDCP status rc=%d\n", rc); - return false; - } - - pr_smb(PR_STATUS, "HVDCP status = %x\n", reg); - - if (reg & SCHG_LITE_USBIN_HVDCP_SEL_IDLE) { - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + INPUT_STS, 1); - if (rc) { - pr_err("Unable to read INPUT status rc=%d\n", rc); - return false; - } - pr_smb(PR_STATUS, "INPUT status = %x\n", reg); - if ((reg & SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK) == - SCHG_LITE_USBIN_HVDCP_5_9V) - return true; - } - return false; -} - -static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip) -{ - int rc = 0; - - /* check if HVDCP is already in 5V continuous mode */ - if (is_hvdcp_5v_cont_mode(chip)) { - pr_smb(PR_MISC, "HVDCP by default is in 5V continuous mode\n"); - return 0; - } - - /* switch to 5V HVDCP */ - pr_smb(PR_MISC, "Switch to 5V HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_5V); - if (rc < 0) { - pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc); - goto out; - } - - /* wait for HVDCP to lower to 5V */ - msleep(500); - /* - * Check if the same hvdcp session is in progress. 
src_det should be - * high and that we are still in 5V hvdcp - */ - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "src det low after 500mS sleep\n"); - goto out; - } - - pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300); - if (rc < 0) { - pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc); - goto out; - } - - pr_smb(PR_MISC, "Disable AICL\n"); - smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, 0); - - chip->hvdcp_3_det_ignore_uv = true; - - /* re-run APSD */ - rc = rerun_apsd(chip); - if (rc) { - pr_err("APSD rerun failed\n"); - goto out; - } - - chip->hvdcp_3_det_ignore_uv = false; - - pr_smb(PR_MISC, "Enable AICL\n"); - smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, AICL_EN_BIT); - /* - * DCP will switch to HVDCP in this time by removing the short - * between DP DM - */ - msleep(HVDCP_NOTIFY_MS); - /* - * Check if the same hvdcp session is in progress. 
src_det should be - * high and the usb type should be none since APSD was disabled - */ - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "src det low after 2s sleep\n"); - rc = -EINVAL; - goto out; - } - - /* We are set if HVDCP in 5V continuous mode */ - if (!is_hvdcp_5v_cont_mode(chip)) { - pr_err("HVDCP could not be set in 5V continuous mode\n"); - goto out; - } - - return 0; -out: - chip->hvdcp_3_det_ignore_uv = false; - restore_from_hvdcp_detection(chip); - return rc; -} - -static int smbchg_unprepare_for_pulsing_lite(struct smbchg_chip *chip) -{ - int rc = 0; - - pr_smb(PR_MISC, "Forcing 9V HVDCP 2.0\n"); - rc = force_9v_hvdcp(chip); - if (rc) { - pr_err("Failed to force 9V HVDCP=%d\n", rc); - return rc; - } - - pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); - - return rc; -} - -#define CMD_HVDCP_2 0x43 -#define SINGLE_INCREMENT BIT(0) -#define SINGLE_DECREMENT BIT(1) -static int smbchg_dp_pulse_lite(struct smbchg_chip *chip) -{ - int rc = 0; - - pr_smb(PR_MISC, "Increment DP\n"); - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2, - SINGLE_INCREMENT, SINGLE_INCREMENT); - if (rc) - pr_err("Single-increment failed rc=%d\n", rc); - - return rc; -} - -static int smbchg_dm_pulse_lite(struct smbchg_chip *chip) -{ - int rc = 0; - - pr_smb(PR_MISC, "Decrement DM\n"); - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2, - SINGLE_DECREMENT, SINGLE_DECREMENT); - if (rc) - pr_err("Single-decrement failed rc=%d\n", rc); - - return rc; -} - -static int smbchg_hvdcp3_confirmed(struct smbchg_chip *chip) -{ - int rc = 0; - - /* - * reset the enabled once flag for parallel charging because this is - * effectively a new insertion. 
- */ - chip->parallel.enabled_once = false; - - pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); - - smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_USB_HVDCP_3); - - return rc; -} - -static int smbchg_dp_dm(struct smbchg_chip *chip, int val) -{ - int rc = 0; - int target_icl_vote_ma; - - switch (val) { - case POWER_SUPPLY_DP_DM_PREPARE: - if (!is_hvdcp_present(chip)) { - pr_err("No pulsing unless HVDCP\n"); - return -ENODEV; - } - if (chip->schg_version == QPNP_SCHG_LITE) - rc = smbchg_prepare_for_pulsing_lite(chip); - else - rc = smbchg_prepare_for_pulsing(chip); - break; - case POWER_SUPPLY_DP_DM_UNPREPARE: - if (chip->schg_version == QPNP_SCHG_LITE) - rc = smbchg_unprepare_for_pulsing_lite(chip); - else - rc = smbchg_unprepare_for_pulsing(chip); - break; - case POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3: - rc = smbchg_hvdcp3_confirmed(chip); - break; - case POWER_SUPPLY_DP_DM_DP_PULSE: - if (chip->schg_version == QPNP_SCHG) - rc = set_usb_psy_dp_dm(chip, - POWER_SUPPLY_DP_DM_DP_PULSE); - else - rc = smbchg_dp_pulse_lite(chip); - if (!rc) - chip->pulse_cnt++; - pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt); - break; - case POWER_SUPPLY_DP_DM_DM_PULSE: - if (chip->schg_version == QPNP_SCHG) - rc = set_usb_psy_dp_dm(chip, - POWER_SUPPLY_DP_DM_DM_PULSE); - else - rc = smbchg_dm_pulse_lite(chip); - if (!rc && chip->pulse_cnt) - chip->pulse_cnt--; - pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt); - break; - case POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED: - chip->hvdcp3_supported = true; - pr_smb(PR_MISC, "HVDCP3 supported\n"); - break; - case POWER_SUPPLY_DP_DM_ICL_DOWN: - chip->usb_icl_delta -= 100; - target_icl_vote_ma = get_client_vote(chip->usb_icl_votable, - PSY_ICL_VOTER); - vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true, - target_icl_vote_ma + chip->usb_icl_delta); - break; - case POWER_SUPPLY_DP_DM_ICL_UP: - 
chip->usb_icl_delta += 100; - target_icl_vote_ma = get_client_vote(chip->usb_icl_votable, - PSY_ICL_VOTER); - vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true, - target_icl_vote_ma + chip->usb_icl_delta); - break; - default: - break; - } - - return rc; -} - -static void update_typec_capability_status(struct smbchg_chip *chip, - const union power_supply_propval *val) -{ - pr_smb(PR_TYPEC, "typec capability = %dma\n", val->intval); - - pr_debug("changing ICL from %dma to %dma\n", chip->typec_current_ma, - val->intval); - chip->typec_current_ma = val->intval; - smbchg_change_usb_supply_type(chip, chip->usb_supply_type); -} - -static void update_typec_otg_status(struct smbchg_chip *chip, int mode, - bool force) -{ - union power_supply_propval pval = {0, }; - pr_smb(PR_TYPEC, "typec mode = %d\n", mode); - - if (mode == POWER_SUPPLY_TYPE_DFP) { - chip->typec_dfp = true; - pval.intval = 1; - extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST, - chip->typec_dfp); - /* update FG */ - set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS, - get_prop_batt_status(chip)); - } else if (force || chip->typec_dfp) { - chip->typec_dfp = false; - pval.intval = 0; - extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST, - chip->typec_dfp); - /* update FG */ - set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS, - get_prop_batt_status(chip)); - } -} - -static int smbchg_usb_get_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) -{ - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - - switch (psp) { - case POWER_SUPPLY_PROP_CURRENT_MAX: - val->intval = chip->usb_current_max; - break; - case POWER_SUPPLY_PROP_PRESENT: - val->intval = chip->usb_present; - break; - case POWER_SUPPLY_PROP_ONLINE: - val->intval = chip->usb_online; - break; - case POWER_SUPPLY_PROP_TYPE: - val->intval = chip->usb_supply_type; - break; - case POWER_SUPPLY_PROP_HEALTH: - val->intval = chip->usb_health; - break; - default: - return -EINVAL; - } - 
return 0; -} - -static int smbchg_usb_set_property(struct power_supply *psy, - enum power_supply_property psp, - const union power_supply_propval *val) -{ - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - - switch (psp) { - case POWER_SUPPLY_PROP_CURRENT_MAX: - chip->usb_current_max = val->intval; - break; - case POWER_SUPPLY_PROP_ONLINE: - chip->usb_online = val->intval; - break; - default: - return -EINVAL; - } - - power_supply_changed(psy); - return 0; -} - -static int -smbchg_usb_is_writeable(struct power_supply *psy, enum power_supply_property psp) -{ - switch (psp) { - case POWER_SUPPLY_PROP_CURRENT_MAX: - return 1; - default: - break; - } - - return 0; -} - - -static char *smbchg_usb_supplicants[] = { - "battery", - "bms", -}; - -static enum power_supply_property smbchg_usb_properties[] = { - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_CURRENT_MAX, - POWER_SUPPLY_PROP_TYPE, - POWER_SUPPLY_PROP_HEALTH, -}; - -#define CHARGE_OUTPUT_VTG_RATIO 840 -static int smbchg_get_iusb(struct smbchg_chip *chip) -{ - int rc, iusb_ua = -EINVAL; - struct qpnp_vadc_result adc_result; - - if (!is_usb_present(chip) && !is_dc_present(chip)) - return 0; - - if (chip->vchg_vadc_dev && chip->vchg_adc_channel != -EINVAL) { - rc = qpnp_vadc_read(chip->vchg_vadc_dev, - chip->vchg_adc_channel, &adc_result); - if (rc) { - pr_smb(PR_STATUS, - "error in VCHG (channel-%d) read rc = %d\n", - chip->vchg_adc_channel, rc); - return 0; - } - iusb_ua = div_s64(adc_result.physical * 1000, - CHARGE_OUTPUT_VTG_RATIO); - } - - return iusb_ua; -} - -static enum power_supply_property smbchg_battery_properties[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED, - POWER_SUPPLY_PROP_CHARGING_ENABLED, - POWER_SUPPLY_PROP_CHARGE_TYPE, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_TECHNOLOGY, - POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL, - POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, - 
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, - POWER_SUPPLY_PROP_VOLTAGE_MAX, - POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE, - POWER_SUPPLY_PROP_INPUT_CURRENT_MAX, - POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, - POWER_SUPPLY_PROP_INPUT_CURRENT_NOW, - POWER_SUPPLY_PROP_FLASH_ACTIVE, - POWER_SUPPLY_PROP_FLASH_TRIGGER, - POWER_SUPPLY_PROP_DP_DM, - POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED, - POWER_SUPPLY_PROP_RERUN_AICL, - POWER_SUPPLY_PROP_RESTRICTED_CHARGING, -}; - -static int smbchg_battery_set_property(struct power_supply *psy, - enum power_supply_property prop, - const union power_supply_propval *val) -{ - int rc = 0; - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - - switch (prop) { - case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED: - vote(chip->battchg_suspend_votable, BATTCHG_USER_EN_VOTER, - !val->intval, 0); - break; - case POWER_SUPPLY_PROP_CHARGING_ENABLED: - rc = vote(chip->usb_suspend_votable, USER_EN_VOTER, - !val->intval, 0); - rc = vote(chip->dc_suspend_votable, USER_EN_VOTER, - !val->intval, 0); - chip->chg_enabled = val->intval; - schedule_work(&chip->usb_set_online_work); - break; - case POWER_SUPPLY_PROP_CAPACITY: - chip->fake_battery_soc = val->intval; - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - break; - case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: - smbchg_system_temp_level_set(chip, val->intval); - break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: - rc = smbchg_set_fastchg_current_user(chip, val->intval / 1000); - break; - case POWER_SUPPLY_PROP_VOLTAGE_MAX: - rc = smbchg_float_voltage_set(chip, val->intval); - break; - case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE: - rc = smbchg_safety_timer_enable(chip, val->intval); - break; - case POWER_SUPPLY_PROP_FLASH_ACTIVE: - rc = smbchg_switch_buck_frequency(chip, val->intval); - if (rc) { - pr_err("Couldn't switch buck frequency, rc=%d\n", 
rc); - /* - * Trigger a panic if there is an error while switching - * buck frequency. This will prevent LS FET damage. - */ - BUG_ON(1); - } - - rc = smbchg_otg_pulse_skip_disable(chip, - REASON_FLASH_ENABLED, val->intval); - break; - case POWER_SUPPLY_PROP_FLASH_TRIGGER: - chip->flash_triggered = !!val->intval; - smbchg_icl_loop_disable_check(chip); - break; - case POWER_SUPPLY_PROP_FORCE_TLIM: - rc = smbchg_force_tlim_en(chip, val->intval); - break; - case POWER_SUPPLY_PROP_DP_DM: - rc = smbchg_dp_dm(chip, val->intval); - break; - case POWER_SUPPLY_PROP_RERUN_AICL: - smbchg_rerun_aicl(chip); - break; - case POWER_SUPPLY_PROP_RESTRICTED_CHARGING: - rc = smbchg_restricted_charging(chip, val->intval); - break; - case POWER_SUPPLY_PROP_CURRENT_CAPABILITY: - if (chip->typec_psy) - update_typec_capability_status(chip, val); - break; - case POWER_SUPPLY_PROP_TYPEC_MODE: - if (chip->typec_psy) - update_typec_otg_status(chip, val->intval, false); - break; - default: - return -EINVAL; - } - - return rc; -} - -static int smbchg_battery_is_writeable(struct power_supply *psy, - enum power_supply_property prop) -{ - int rc; - - switch (prop) { - case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED: - case POWER_SUPPLY_PROP_CHARGING_ENABLED: - case POWER_SUPPLY_PROP_CAPACITY: - case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: - case POWER_SUPPLY_PROP_VOLTAGE_MAX: - case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE: - case POWER_SUPPLY_PROP_DP_DM: - case POWER_SUPPLY_PROP_RERUN_AICL: - case POWER_SUPPLY_PROP_RESTRICTED_CHARGING: - rc = 1; - break; - default: - rc = 0; - break; - } - return rc; -} - -static int smbchg_battery_get_property(struct power_supply *psy, - enum power_supply_property prop, - union power_supply_propval *val) -{ - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - - switch (prop) { - case POWER_SUPPLY_PROP_STATUS: - val->intval = get_prop_batt_status(chip); - break; - case POWER_SUPPLY_PROP_PRESENT: - 
val->intval = get_prop_batt_present(chip); - break; - case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED: - val->intval - = get_effective_result(chip->battchg_suspend_votable); - if (val->intval < 0) /* no votes */ - val->intval = 1; - else - val->intval = !val->intval; - break; - case POWER_SUPPLY_PROP_CHARGING_ENABLED: - val->intval = chip->chg_enabled; - break; - case POWER_SUPPLY_PROP_CHARGE_TYPE: - val->intval = get_prop_charge_type(chip); - break; - case POWER_SUPPLY_PROP_VOLTAGE_MAX: - val->intval = smbchg_float_voltage_get(chip); - break; - case POWER_SUPPLY_PROP_HEALTH: - val->intval = get_prop_batt_health(chip); - break; - case POWER_SUPPLY_PROP_TECHNOLOGY: - val->intval = POWER_SUPPLY_TECHNOLOGY_LION; - break; - case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX: - val->intval = smbchg_calc_max_flash_current(chip); - break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: - val->intval = chip->fastchg_current_ma * 1000; - break; - case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: - val->intval = chip->therm_lvl_sel; - break; - case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX: - val->intval = smbchg_get_aicl_level_ma(chip) * 1000; - break; - case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED: - val->intval = (int)chip->aicl_complete; - break; - case POWER_SUPPLY_PROP_RESTRICTED_CHARGING: - val->intval = (int)chip->restricted_charging; - break; - /* properties from fg */ - case POWER_SUPPLY_PROP_CAPACITY: - val->intval = get_prop_batt_capacity(chip); - break; - case POWER_SUPPLY_PROP_CURRENT_NOW: - val->intval = get_prop_batt_current_now(chip); - break; - case POWER_SUPPLY_PROP_VOLTAGE_NOW: - val->intval = get_prop_batt_voltage_now(chip); - break; - case POWER_SUPPLY_PROP_TEMP: - val->intval = get_prop_batt_temp(chip); - break; - case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: - val->intval = get_prop_batt_voltage_max_design(chip); - break; - case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE: - val->intval = chip->safety_timer_en; - break; - case POWER_SUPPLY_PROP_FLASH_ACTIVE: - val->intval = 
chip->otg_pulse_skip_dis; - break; - case POWER_SUPPLY_PROP_FLASH_TRIGGER: - val->intval = chip->flash_triggered; - break; - case POWER_SUPPLY_PROP_DP_DM: - val->intval = chip->pulse_cnt; - break; - case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED: - val->intval = smbchg_is_input_current_limited(chip); - break; - case POWER_SUPPLY_PROP_RERUN_AICL: - val->intval = 0; - break; - case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW: - val->intval = smbchg_get_iusb(chip); - break; - default: - return -EINVAL; - } - return 0; -} - -static char *smbchg_dc_supplicants[] = { - "bms", -}; - -static enum power_supply_property smbchg_dc_properties[] = { - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_CHARGING_ENABLED, - POWER_SUPPLY_PROP_CURRENT_MAX, -}; - -static int smbchg_dc_set_property(struct power_supply *psy, - enum power_supply_property prop, - const union power_supply_propval *val) -{ - int rc = 0; - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - - switch (prop) { - case POWER_SUPPLY_PROP_CHARGING_ENABLED: - rc = vote(chip->dc_suspend_votable, POWER_SUPPLY_EN_VOTER, - !val->intval, 0); - break; - case POWER_SUPPLY_PROP_CURRENT_MAX: - rc = vote(chip->dc_icl_votable, USER_ICL_VOTER, true, - val->intval / 1000); - break; - default: - return -EINVAL; - } - - return rc; -} - -static int smbchg_dc_get_property(struct power_supply *psy, - enum power_supply_property prop, - union power_supply_propval *val) -{ - struct smbchg_chip *chip = power_supply_get_drvdata(psy); - - switch (prop) { - case POWER_SUPPLY_PROP_PRESENT: - val->intval = is_dc_present(chip); - break; - case POWER_SUPPLY_PROP_CHARGING_ENABLED: - val->intval = get_effective_result(chip->dc_suspend_votable); - if (val->intval < 0) /* no votes */ - val->intval = 1; - else - val->intval = !val->intval; - break; - case POWER_SUPPLY_PROP_ONLINE: - /* return if dc is charging the battery */ - val->intval = (smbchg_get_pwr_path(chip) == PWR_PATH_DC) - && (get_prop_batt_status(chip) - == 
POWER_SUPPLY_STATUS_CHARGING); - break; - case POWER_SUPPLY_PROP_CURRENT_MAX: - val->intval = chip->dc_max_current_ma * 1000; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int smbchg_dc_is_writeable(struct power_supply *psy, - enum power_supply_property prop) -{ - int rc; - - switch (prop) { - case POWER_SUPPLY_PROP_CHARGING_ENABLED: - case POWER_SUPPLY_PROP_CURRENT_MAX: - rc = 1; - break; - default: - rc = 0; - break; - } - return rc; -} - -#define HOT_BAT_HARD_BIT BIT(0) -#define HOT_BAT_SOFT_BIT BIT(1) -#define COLD_BAT_HARD_BIT BIT(2) -#define COLD_BAT_SOFT_BIT BIT(3) -#define BAT_OV_BIT BIT(4) -#define BAT_LOW_BIT BIT(5) -#define BAT_MISSING_BIT BIT(6) -#define BAT_TERM_MISSING_BIT BIT(7) -static irqreturn_t batt_hot_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->bat_if_base + RT_STS, 1); - chip->batt_hot = !!(reg & HOT_BAT_HARD_BIT); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - smbchg_wipower_check(chip); - set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH, - get_prop_batt_health(chip)); - return IRQ_HANDLED; -} - -static irqreturn_t batt_cold_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->bat_if_base + RT_STS, 1); - chip->batt_cold = !!(reg & COLD_BAT_HARD_BIT); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - smbchg_wipower_check(chip); - set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH, - get_prop_batt_health(chip)); - return IRQ_HANDLED; -} - -static irqreturn_t batt_warm_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->bat_if_base + RT_STS, 1); - 
chip->batt_warm = !!(reg & HOT_BAT_SOFT_BIT); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH, - get_prop_batt_health(chip)); - return IRQ_HANDLED; -} - -static irqreturn_t batt_cool_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->bat_if_base + RT_STS, 1); - chip->batt_cool = !!(reg & COLD_BAT_SOFT_BIT); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH, - get_prop_batt_health(chip)); - return IRQ_HANDLED; -} - -static irqreturn_t batt_pres_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->bat_if_base + RT_STS, 1); - chip->batt_present = !(reg & BAT_MISSING_BIT); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH, - get_prop_batt_health(chip)); - return IRQ_HANDLED; -} - -static irqreturn_t vbat_low_handler(int irq, void *_chip) -{ - pr_warn_ratelimited("vbat low\n"); - return IRQ_HANDLED; -} - -#define CHG_COMP_SFT_BIT BIT(3) -static irqreturn_t chg_error_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - int rc = 0; - u8 reg; - - pr_smb(PR_INTERRUPT, "chg-error triggered\n"); - - rc = smbchg_read(chip, ®, chip->chgr_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc); - } else { - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - if (reg & CHG_COMP_SFT_BIT) - set_property_on_fg(chip, - POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED, - 1); - } - - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - 
power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - smbchg_wipower_check(chip); - return IRQ_HANDLED; -} - -static irqreturn_t fastchg_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - - pr_smb(PR_INTERRUPT, "p2f triggered\n"); - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - smbchg_wipower_check(chip); - return IRQ_HANDLED; -} - -static irqreturn_t chg_hot_handler(int irq, void *_chip) -{ - pr_warn_ratelimited("chg hot\n"); - smbchg_wipower_check(_chip); - return IRQ_HANDLED; -} - -static irqreturn_t chg_term_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - - pr_smb(PR_INTERRUPT, "tcc triggered\n"); - /* - * Charge termination is a pulse and not level triggered. That means, - * TCC bit in RT_STS can get cleared by the time this interrupt is - * handled. Instead of relying on that to determine whether the - * charge termination had happened, we've to simply notify the FG - * about this as long as the interrupt is handled. 
- */ - set_property_on_fg(chip, POWER_SUPPLY_PROP_CHARGE_DONE, 1); - - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - - return IRQ_HANDLED; -} - -static irqreturn_t taper_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - taper_irq_en(chip, false); - smbchg_read(chip, ®, chip->chgr_base + RT_STS, 1); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - smbchg_parallel_usb_taper(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - smbchg_wipower_check(chip); - return IRQ_HANDLED; -} - -static irqreturn_t recharge_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->chgr_base + RT_STS, 1); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - smbchg_parallel_usb_check_ok(chip); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - return IRQ_HANDLED; -} - -static irqreturn_t wdog_timeout_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->misc_base + RT_STS, 1); - pr_warn_ratelimited("wdog timeout rt_stat = 0x%02x\n", reg); - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); - smbchg_charging_status_change(chip); - return IRQ_HANDLED; -} - -/** - * power_ok_handler() - called when the switcher turns on or turns off - * @chip: pointer to smbchg_chip - * @rt_stat: the status bit indicating switcher turning on or off - */ -static irqreturn_t power_ok_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - u8 reg = 0; - - smbchg_read(chip, ®, chip->misc_base + RT_STS, 1); - pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg); - return IRQ_HANDLED; -} - -/** - * dcin_uv_handler() - called when the dc voltage crosses the uv threshold - * @chip: pointer to smbchg_chip - * @rt_stat: the status bit 
indicating whether dc voltage is uv - */ -#define DCIN_UNSUSPEND_DELAY_MS 1000 -static irqreturn_t dcin_uv_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - bool dc_present = is_dc_present(chip); - - pr_smb(PR_STATUS, "chip->dc_present = %d dc_present = %d\n", - chip->dc_present, dc_present); - - if (chip->dc_present != dc_present) { - /* dc changed */ - chip->dc_present = dc_present; - if (chip->dc_psy_type != -EINVAL && chip->batt_psy) - power_supply_changed(chip->dc_psy); - smbchg_charging_status_change(chip); - smbchg_aicl_deglitch_wa_check(chip); - chip->vbat_above_headroom = false; - } - - smbchg_wipower_check(chip); - return IRQ_HANDLED; -} - -/** - * usbin_ov_handler() - this is called when an overvoltage condition occurs - * @chip: pointer to smbchg_chip chip - */ -static irqreturn_t usbin_ov_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - int rc; - u8 reg; - bool usb_present; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc); - goto out; - } - - /* OV condition is detected. 
Notify it to USB psy */ - if (reg & USBIN_OV_BIT) { - chip->usb_ov_det = true; - pr_smb(PR_MISC, "setting usb psy health OV\n"); - chip->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; - power_supply_changed(chip->usb_psy); - } else { - chip->usb_ov_det = false; - /* If USB is present, then handle the USB insertion */ - usb_present = is_usb_present(chip); - if (usb_present) - update_usb_status(chip, usb_present, false); - } -out: - return IRQ_HANDLED; -} - -/** - * usbin_uv_handler() - this is called when USB charger is removed - * @chip: pointer to smbchg_chip chip - * @rt_stat: the status bit indicating chg insertion/removal - */ -#define ICL_MODE_MASK SMB_MASK(5, 4) -#define ICL_MODE_HIGH_CURRENT 0 -static irqreturn_t usbin_uv_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - int aicl_level = smbchg_get_aicl_level_ma(chip); - int rc; - u8 reg; - - rc = smbchg_read(chip, ®, chip->usb_chgpth_base + RT_STS, 1); - if (rc) { - pr_err("could not read rt sts: %d", rc); - goto out; - } - - pr_smb(PR_STATUS, - "%s chip->usb_present = %d rt_sts = 0x%02x hvdcp_3_det_ignore_uv = %d aicl = %d\n", - chip->hvdcp_3_det_ignore_uv ? "Ignoring":"", - chip->usb_present, reg, chip->hvdcp_3_det_ignore_uv, - aicl_level); - - /* - * set usb_psy's dp=f dm=f if this is a new insertion, i.e. 
it is - * not already src_detected and usbin_uv is seen falling - */ - if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) { - pr_smb(PR_MISC, "setting usb dp=f dm=f\n"); - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - rc = regulator_enable(chip->dpdm_reg); - if (rc < 0) { - pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); - return rc; - } - } - - if (reg & USBIN_UV_BIT) - complete_all(&chip->usbin_uv_raised); - else - complete_all(&chip->usbin_uv_lowered); - - if (chip->hvdcp_3_det_ignore_uv) - goto out; - - if ((reg & USBIN_UV_BIT) && (reg & USBIN_SRC_DET_BIT)) { - pr_smb(PR_STATUS, "Very weak charger detected\n"); - chip->very_weak_charger = true; - rc = smbchg_read(chip, ®, - chip->usb_chgpth_base + ICL_STS_2_REG, 1); - if (rc) { - dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", - rc); - goto out; - } - if ((reg & ICL_MODE_MASK) != ICL_MODE_HIGH_CURRENT) { - /* - * If AICL is not even enabled, this is either an - * SDP or a grossly out of spec charger. Do not - * draw any current from it. - */ - rc = vote(chip->usb_suspend_votable, - WEAK_CHARGER_EN_VOTER, true, 0); - if (rc < 0) - pr_err("could not disable charger: %d", rc); - } else if (aicl_level == chip->tables.usb_ilim_ma_table[0]) { - /* - * we are in a situation where the adapter is not able - * to supply even 300mA. 
Disable hw aicl reruns else it - * is only a matter of time when we get back here again - */ - rc = vote(chip->hw_aicl_rerun_disable_votable, - WEAK_CHARGER_HW_AICL_VOTER, true, 0); - if (rc < 0) - pr_err("Couldn't disable hw aicl rerun rc=%d\n", - rc); - } - pr_smb(PR_MISC, "setting usb psy health UNSPEC_FAILURE\n"); - chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; - power_supply_changed(chip->usb_psy); - schedule_work(&chip->usb_set_online_work); - } - - smbchg_wipower_check(chip); -out: - return IRQ_HANDLED; -} - -/** - * src_detect_handler() - this is called on rising edge when USB charger type - * is detected and on falling edge when USB voltage falls - * below the coarse detect voltage(1V), use it for - * handling USB charger insertion and removal. - * @chip: pointer to smbchg_chip - * @rt_stat: the status bit indicating chg insertion/removal - */ -static irqreturn_t src_detect_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - bool usb_present = is_usb_present(chip); - bool src_detect = is_src_detect_high(chip); - int rc; - - pr_smb(PR_STATUS, - "%s chip->usb_present = %d usb_present = %d src_detect = %d hvdcp_3_det_ignore_uv=%d\n", - chip->hvdcp_3_det_ignore_uv ? "Ignoring":"", - chip->usb_present, usb_present, src_detect, - chip->hvdcp_3_det_ignore_uv); - - if (src_detect) - complete_all(&chip->src_det_raised); - else - complete_all(&chip->src_det_lowered); - - if (chip->hvdcp_3_det_ignore_uv) - goto out; - - /* - * When VBAT is above the AICL threshold (4.25V) - 180mV (4.07V), - * an input collapse due to AICL will actually cause an USBIN_UV - * interrupt to fire as well. - * - * Handle USB insertions and removals in the source detect handler - * instead of the USBIN_UV handler since the latter is untrustworthy - * when the battery voltage is high. 
- */ - chip->very_weak_charger = false; - /* - * a src detect marks a new insertion or a real removal, - * vote for enable aicl hw reruns - */ - rc = vote(chip->hw_aicl_rerun_disable_votable, - WEAK_CHARGER_HW_AICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't enable hw aicl rerun rc=%d\n", rc); - - rc = vote(chip->usb_suspend_votable, WEAK_CHARGER_EN_VOTER, false, 0); - if (rc < 0) - pr_err("could not enable charger: %d\n", rc); - - if (src_detect) { - update_usb_status(chip, usb_present, 0); - } else { - update_usb_status(chip, 0, false); - chip->aicl_irq_count = 0; - } -out: - return IRQ_HANDLED; -} - -/** - * otg_oc_handler() - called when the usb otg goes over current - */ -#define NUM_OTG_RETRIES 5 -#define OTG_OC_RETRY_DELAY_US 50000 -static irqreturn_t otg_oc_handler(int irq, void *_chip) -{ - int rc; - struct smbchg_chip *chip = _chip; - s64 elapsed_us = ktime_us_delta(ktime_get(), chip->otg_enable_time); - - pr_smb(PR_INTERRUPT, "triggered\n"); - - if (chip->schg_version == QPNP_SCHG_LITE) { - pr_warn("OTG OC triggered - OTG disabled\n"); - return IRQ_HANDLED; - } - - if (elapsed_us > OTG_OC_RETRY_DELAY_US) - chip->otg_retries = 0; - - /* - * Due to a HW bug in the PMI8994 charger, the current inrush that - * occurs when connecting certain OTG devices can cause the OTG - * overcurrent protection to trip. - * - * The work around is to try reenabling the OTG when getting an - * overcurrent interrupt once. - */ - if (chip->otg_retries < NUM_OTG_RETRIES) { - chip->otg_retries += 1; - pr_smb(PR_STATUS, - "Retrying OTG enable. 
Try #%d, elapsed_us %lld\n", - chip->otg_retries, elapsed_us); - rc = otg_oc_reset(chip); - if (rc) - pr_err("Failed to reset OTG OC state rc=%d\n", rc); - chip->otg_enable_time = ktime_get(); - } - return IRQ_HANDLED; -} - -/** - * otg_fail_handler() - called when the usb otg fails - * (when vbat < OTG UVLO threshold) - */ -static irqreturn_t otg_fail_handler(int irq, void *_chip) -{ - pr_smb(PR_INTERRUPT, "triggered\n"); - return IRQ_HANDLED; -} - -/** - * aicl_done_handler() - called when the usb AICL algorithm is finished - * and a current is set. - */ -static irqreturn_t aicl_done_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - bool usb_present = is_usb_present(chip); - int aicl_level = smbchg_get_aicl_level_ma(chip); - - pr_smb(PR_INTERRUPT, "triggered, aicl: %d\n", aicl_level); - - increment_aicl_count(chip); - - if (usb_present) - smbchg_parallel_usb_check_ok(chip); - - if (chip->aicl_complete && chip->batt_psy) - power_supply_changed(chip->batt_psy); - - return IRQ_HANDLED; -} - -/** - * usbid_change_handler() - called when the usb RID changes. - * This is used mostly for detecting OTG - */ -static irqreturn_t usbid_change_handler(int irq, void *_chip) -{ - struct smbchg_chip *chip = _chip; - bool otg_present; - - pr_smb(PR_INTERRUPT, "triggered\n"); - - otg_present = is_otg_present(chip); - pr_smb(PR_MISC, "setting usb psy OTG = %d\n", - otg_present ? 1 : 0); - - extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST, otg_present); - - if (otg_present) - pr_smb(PR_STATUS, "OTG detected\n"); - - /* update FG */ - set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS, - get_prop_batt_status(chip)); - - return IRQ_HANDLED; -} - -static int determine_initial_status(struct smbchg_chip *chip) -{ - union power_supply_propval type = {0, }; - - /* - * It is okay to read the interrupt status here since - * interrupts aren't requested. 
reading interrupt status - * clears the interrupt so be careful to read interrupt - * status only in interrupt handling code - */ - - batt_pres_handler(0, chip); - batt_hot_handler(0, chip); - batt_warm_handler(0, chip); - batt_cool_handler(0, chip); - batt_cold_handler(0, chip); - if (chip->typec_psy) { - get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type); - update_typec_otg_status(chip, type.intval, true); - } else { - usbid_change_handler(0, chip); - } - src_detect_handler(0, chip); - - chip->usb_present = is_usb_present(chip); - chip->dc_present = is_dc_present(chip); - - if (chip->usb_present) { - int rc = 0; - pr_smb(PR_MISC, "setting usb dp=f dm=f\n"); - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - rc = regulator_enable(chip->dpdm_reg); - if (rc < 0) { - pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); - return rc; - } - handle_usb_insertion(chip); - } else { - handle_usb_removal(chip); - } - - return 0; -} - -static int prechg_time[] = { - 24, - 48, - 96, - 192, -}; -static int chg_time[] = { - 192, - 384, - 768, - 1536, -}; - -enum bpd_type { - BPD_TYPE_BAT_NONE, - BPD_TYPE_BAT_ID, - BPD_TYPE_BAT_THM, - BPD_TYPE_BAT_THM_BAT_ID, - BPD_TYPE_DEFAULT, -}; - -static const char * const bpd_label[] = { - [BPD_TYPE_BAT_NONE] = "bpd_none", - [BPD_TYPE_BAT_ID] = "bpd_id", - [BPD_TYPE_BAT_THM] = "bpd_thm", - [BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id", -}; - -static inline int get_bpd(const char *name) -{ - int i = 0; - for (i = 0; i < ARRAY_SIZE(bpd_label); i++) { - if (strcmp(bpd_label[i], name) == 0) - return i; - } - return -EINVAL; -} - -#define REVISION1_REG 0x0 -#define DIG_MINOR 0 -#define DIG_MAJOR 1 -#define ANA_MINOR 2 -#define ANA_MAJOR 3 -#define CHGR_CFG1 0xFB -#define RECHG_THRESHOLD_SRC_BIT BIT(1) -#define TERM_I_SRC_BIT BIT(2) -#define TERM_SRC_FG BIT(2) -#define CHG_INHIB_CFG_REG 0xF7 -#define CHG_INHIBIT_50MV_VAL 0x00 -#define CHG_INHIBIT_100MV_VAL 0x01 -#define CHG_INHIBIT_200MV_VAL 0x02 -#define 
CHG_INHIBIT_300MV_VAL 0x03 -#define CHG_INHIBIT_MASK 0x03 -#define USE_REGISTER_FOR_CURRENT BIT(2) -#define CHGR_CFG2 0xFC -#define CHG_EN_SRC_BIT BIT(7) -#define CHG_EN_POLARITY_BIT BIT(6) -#define P2F_CHG_TRAN BIT(5) -#define CHG_BAT_OV_ECC BIT(4) -#define I_TERM_BIT BIT(3) -#define AUTO_RECHG_BIT BIT(2) -#define CHARGER_INHIBIT_BIT BIT(0) -#define USB51_COMMAND_POL BIT(2) -#define USB51AC_CTRL BIT(1) -#define TR_8OR32B 0xFE -#define BUCK_8_16_FREQ_BIT BIT(0) -#define BM_CFG 0xF3 -#define BATT_MISSING_ALGO_BIT BIT(2) -#define BMD_PIN_SRC_MASK SMB_MASK(1, 0) -#define PIN_SRC_SHIFT 0 -#define CHGR_CFG 0xFF -#define RCHG_LVL_BIT BIT(0) -#define VCHG_EN_BIT BIT(1) -#define VCHG_INPUT_CURRENT_BIT BIT(3) -#define CFG_AFVC 0xF6 -#define VFLOAT_COMP_ENABLE_MASK SMB_MASK(2, 0) -#define TR_RID_REG 0xFA -#define FG_INPUT_FET_DELAY_BIT BIT(3) -#define TRIM_OPTIONS_7_0 0xF6 -#define INPUT_MISSING_POLLER_EN_BIT BIT(3) -#define CHGR_CCMP_CFG 0xFA -#define JEITA_TEMP_HARD_LIMIT_BIT BIT(5) -#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4) -#define HVDCP_ADAPTER_SEL_9V_BIT BIT(4) -#define HVDCP_AUTH_ALG_EN_BIT BIT(6) -#define CMD_APSD 0x41 -#define APSD_RERUN_BIT BIT(0) -#define OTG_CFG 0xF1 -#define HICCUP_ENABLED_BIT BIT(6) -#define OTG_PIN_POLARITY_BIT BIT(4) -#define OTG_PIN_ACTIVE_LOW BIT(4) -#define OTG_EN_CTRL_MASK SMB_MASK(3, 2) -#define OTG_PIN_CTRL_RID_DIS 0x04 -#define OTG_CMD_CTRL_RID_EN 0x08 -#define AICL_ADC_BIT BIT(6) -static void batt_ov_wa_check(struct smbchg_chip *chip) -{ - int rc; - u8 reg; - - /* disable-'battery OV disables charging' feature */ - rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2, - CHG_BAT_OV_ECC, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc); - return; - } - - /* - * if battery OV is set: - * restart charging by disable/enable charging - */ - rc = smbchg_read(chip, ®, chip->bat_if_base + RT_STS, 1); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't read Battery RT status rc = %d\n", rc); - return; - 
} - - if (reg & BAT_OV_BIT) { - rc = smbchg_charging_en(chip, false); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't disable charging: rc = %d\n", rc); - return; - } - - /* delay for charging-disable to take affect */ - msleep(200); - - rc = smbchg_charging_en(chip, true); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't enable charging: rc = %d\n", rc); - return; - } - } -} - -static int smbchg_hw_init(struct smbchg_chip *chip) -{ - int rc, i; - u8 reg, mask; - - rc = smbchg_read(chip, chip->revision, - chip->misc_base + REVISION1_REG, 4); - if (rc < 0) { - dev_err(chip->dev, "Couldn't read revision rc=%d\n", - rc); - return rc; - } - pr_smb(PR_STATUS, "Charger Revision DIG: %d.%d; ANA: %d.%d\n", - chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR], - chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]); - - /* Setup 9V HVDCP */ - if (!chip->hvdcp_not_supported) { - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_9V); - if (rc < 0) { - pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", - rc); - return rc; - } - } - - if (chip->aicl_rerun_period_s > 0) { - rc = smbchg_set_aicl_rerun_period_s(chip, - chip->aicl_rerun_period_s); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set AICL rerun timer rc=%d\n", - rc); - return rc; - } - } - - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_RID_REG, - FG_INPUT_FET_DELAY_BIT, FG_INPUT_FET_DELAY_BIT); - if (rc < 0) { - dev_err(chip->dev, "Couldn't disable fg input fet delay rc=%d\n", - rc); - return rc; - } - - rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_OPTIONS_7_0, - INPUT_MISSING_POLLER_EN_BIT, - INPUT_MISSING_POLLER_EN_BIT); - if (rc < 0) { - dev_err(chip->dev, "Couldn't enable input missing poller rc=%d\n", - rc); - return rc; - } - - /* - * Do not force using current from the register i.e. use auto - * power source detect (APSD) mA ratings for the initial current values. 
- * - * If this is set, AICL will not rerun at 9V for HVDCPs - */ - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL, - USE_REGISTER_FOR_CURRENT, 0); - - if (rc < 0) { - dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc); - return rc; - } - - /* - * set chg en by cmd register, set chg en by writing bit 1, - * enable auto pre to fast, enable auto recharge by default. - * enable current termination and charge inhibition based on - * the device tree configuration. - */ - rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2, - CHG_EN_SRC_BIT | CHG_EN_POLARITY_BIT | P2F_CHG_TRAN - | I_TERM_BIT | AUTO_RECHG_BIT | CHARGER_INHIBIT_BIT, - CHG_EN_POLARITY_BIT - | (chip->chg_inhibit_en ? CHARGER_INHIBIT_BIT : 0) - | (chip->iterm_disabled ? I_TERM_BIT : 0)); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc); - return rc; - } - - /* - * enable battery charging to make sure it hasn't been changed earlier - * by the bootloader. - */ - rc = smbchg_charging_en(chip, true); - if (rc < 0) { - dev_err(chip->dev, "Couldn't enable battery charging=%d\n", rc); - return rc; - } - - /* - * Based on the configuration, use the analog sensors or the fuelgauge - * adc for recharge threshold source. 
- */ - - if (chip->chg_inhibit_source_fg) - rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1, - TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, - TERM_SRC_FG | RECHG_THRESHOLD_SRC_BIT); - else - rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1, - TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, 0); - - if (rc < 0) { - dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc); - return rc; - } - - /* - * control USB suspend via command bits and set correct 100/500mA - * polarity on the usb current - */ - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - USB51_COMMAND_POL | USB51AC_CTRL, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc); - return rc; - } - - check_battery_type(chip); - - /* set the float voltage */ - if (chip->vfloat_mv != -EINVAL) { - rc = smbchg_float_voltage_set(chip, chip->vfloat_mv); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set float voltage rc = %d\n", rc); - return rc; - } - pr_smb(PR_STATUS, "set vfloat to %d\n", chip->vfloat_mv); - } - - /* set the fast charge current compensation */ - if (chip->fastchg_current_comp != -EINVAL) { - rc = smbchg_fastchg_current_comp_set(chip, - chip->fastchg_current_comp); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n", - rc); - return rc; - } - pr_smb(PR_STATUS, "set fastchg current comp to %d\n", - chip->fastchg_current_comp); - } - - /* set the float voltage compensation */ - if (chip->float_voltage_comp != -EINVAL) { - rc = smbchg_float_voltage_comp_set(chip, - chip->float_voltage_comp); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n", - rc); - return rc; - } - pr_smb(PR_STATUS, "set float voltage comp to %d\n", - chip->float_voltage_comp); - } - - /* set iterm */ - if (chip->iterm_ma != -EINVAL) { - if (chip->iterm_disabled) { - dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n"); - return -EINVAL; - } else { - smbchg_iterm_set(chip, 
chip->iterm_ma); - } - } - - /* set the safety time voltage */ - if (chip->safety_time != -EINVAL) { - reg = (chip->safety_time > 0 ? 0 : SFT_TIMER_DISABLE_BIT) | - (chip->prechg_safety_time > 0 - ? 0 : PRECHG_SFT_TIMER_DISABLE_BIT); - - for (i = 0; i < ARRAY_SIZE(chg_time); i++) { - if (chip->safety_time <= chg_time[i]) { - reg |= i << SAFETY_TIME_MINUTES_SHIFT; - break; - } - } - for (i = 0; i < ARRAY_SIZE(prechg_time); i++) { - if (chip->prechg_safety_time <= prechg_time[i]) { - reg |= i; - break; - } - } - - rc = smbchg_sec_masked_write(chip, - chip->chgr_base + SFT_CFG, - SFT_EN_MASK | SFT_TO_MASK | - (chip->prechg_safety_time > 0 - ? PRECHG_SFT_TO_MASK : 0), reg); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set safety timer rc = %d\n", - rc); - return rc; - } - chip->safety_timer_en = true; - } else { - rc = smbchg_read(chip, &reg, chip->chgr_base + SFT_CFG, 1); - if (rc < 0) - dev_err(chip->dev, "Unable to read SFT_CFG rc = %d\n", - rc); - else if (!(reg & SFT_EN_MASK)) - chip->safety_timer_en = true; - } - - /* configure jeita temperature hard limit */ - if (chip->jeita_temp_hard_limit >= 0) { - rc = smbchg_sec_masked_write(chip, - chip->chgr_base + CHGR_CCMP_CFG, - JEITA_TEMP_HARD_LIMIT_BIT, - chip->jeita_temp_hard_limit - ? 0 : JEITA_TEMP_HARD_LIMIT_BIT); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't set jeita temp hard limit rc = %d\n", - rc); - return rc; - } - } - - /* make the buck switch faster to prevent some vbus oscillation */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + TR_8OR32B, - BUCK_8_16_FREQ_BIT, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set buck frequency rc = %d\n", rc); - return rc; - } - - /* battery missing detection */ - mask = BATT_MISSING_ALGO_BIT; - reg = chip->bmd_algo_disabled ? 
0 : BATT_MISSING_ALGO_BIT; - if (chip->bmd_pin_src < BPD_TYPE_DEFAULT) { - mask |= BMD_PIN_SRC_MASK; - reg |= chip->bmd_pin_src << PIN_SRC_SHIFT; - } - rc = smbchg_sec_masked_write(chip, - chip->bat_if_base + BM_CFG, mask, reg); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set batt_missing config = %d\n", - rc); - return rc; - } - - if (chip->vchg_adc_channel != -EINVAL) { - /* configure and enable VCHG */ - rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG, - VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT, - VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set recharge rc = %d\n", - rc); - return rc; - } - } - - smbchg_charging_status_change(chip); - - vote(chip->usb_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0); - vote(chip->dc_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0); - /* resume threshold */ - if (chip->resume_delta_mv != -EINVAL) { - - /* - * Configure only if the recharge threshold source is not - * fuel gauge ADC. - */ - if (!chip->chg_inhibit_source_fg) { - if (chip->resume_delta_mv < 100) - reg = CHG_INHIBIT_50MV_VAL; - else if (chip->resume_delta_mv < 200) - reg = CHG_INHIBIT_100MV_VAL; - else if (chip->resume_delta_mv < 300) - reg = CHG_INHIBIT_200MV_VAL; - else - reg = CHG_INHIBIT_300MV_VAL; - - rc = smbchg_sec_masked_write(chip, - chip->chgr_base + CHG_INHIB_CFG_REG, - CHG_INHIBIT_MASK, reg); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n", - rc); - return rc; - } - } - - rc = smbchg_sec_masked_write(chip, - chip->chgr_base + CHGR_CFG, - RCHG_LVL_BIT, - (chip->resume_delta_mv - < chip->tables.rchg_thr_mv) - ? 
0 : RCHG_LVL_BIT); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set recharge rc = %d\n", - rc); - return rc; - } - } - - /* DC path current settings */ - if (chip->dc_psy_type != -EINVAL) { - rc = vote(chip->dc_icl_votable, PSY_ICL_VOTER, true, - chip->dc_target_current_ma); - if (rc < 0) { - dev_err(chip->dev, - "Couldn't vote for initial DC ICL rc=%d\n", rc); - return rc; - } - } - - - /* - * on some devices the battery is powered via external sources which - * could raise its voltage above the float voltage. smbchargers go - * in to reverse boost in such a situation and the workaround is to - * disable float voltage compensation (note that the battery will appear - * hot/cold when powered via external source). - */ - if (chip->soft_vfloat_comp_disabled) { - rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_AFVC, - VFLOAT_COMP_ENABLE_MASK, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n", - rc); - return rc; - } - } - - rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, - chip->cfg_fastchg_current_ma); - if (rc < 0) { - dev_err(chip->dev, "Couldn't vote fastchg ma rc = %d\n", rc); - return rc; - } - - rc = smbchg_read(chip, &chip->original_usbin_allowance, - chip->usb_chgpth_base + USBIN_CHGR_CFG, 1); - if (rc < 0) - dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc); - - if (chip->wipower_dyn_icl_avail) { - rc = smbchg_wipower_ilim_config(chip, - &(chip->wipower_default.entries[0])); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set default wipower ilim = %d\n", - rc); - return rc; - } - } - /* unsuspend dc path, it could be suspended by the bootloader */ - rc = smbchg_dc_suspend(chip, 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't unsuspend dc path= %d\n", rc); - return rc; - } - - if (chip->force_aicl_rerun) { - /* vote to enable hw aicl */ - rc = vote(chip->hw_aicl_rerun_enable_indirect_votable, - DEFAULT_CONFIG_HW_AICL_VOTER, true, 0); - if (rc < 0) { - pr_err("Couldn't vote enable hw aicl rerun 
rc=%d\n", - rc); - return rc; - } - } - - if (chip->schg_version == QPNP_SCHG_LITE) { - /* enable OTG hiccup mode */ - rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG, - HICCUP_ENABLED_BIT, HICCUP_ENABLED_BIT); - if (rc < 0) - dev_err(chip->dev, "Couldn't set OTG OC config rc = %d\n", - rc); - } - - if (chip->otg_pinctrl) { - /* configure OTG enable to pin control active low */ - rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG, - OTG_PIN_POLARITY_BIT | OTG_EN_CTRL_MASK, - OTG_PIN_ACTIVE_LOW | OTG_PIN_CTRL_RID_DIS); - if (rc < 0) { - dev_err(chip->dev, "Couldn't set OTG EN config rc = %d\n", - rc); - return rc; - } - } - - if (chip->wa_flags & SMBCHG_BATT_OV_WA) - batt_ov_wa_check(chip); - - /* turn off AICL adc for improved accuracy */ - rc = smbchg_sec_masked_write(chip, - chip->misc_base + MISC_TRIM_OPT_15_8, AICL_ADC_BIT, 0); - if (rc) - pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n", - rc); - - return rc; -} - -static struct of_device_id smbchg_match_table[] = { - { - .compatible = "qcom,qpnp-smbcharger", - }, - { }, -}; - -#define DC_MA_MIN 300 -#define DC_MA_MAX 2000 -#define OF_PROP_READ(chip, prop, dt_property, retval, optional) \ -do { \ - if (retval) \ - break; \ - if (optional) \ - prop = -EINVAL; \ - \ - retval = of_property_read_u32(chip->pdev->dev.of_node, \ - "qcom," dt_property , \ - &prop); \ - \ - if ((retval == -EINVAL) && optional) \ - retval = 0; \ - else if (retval) \ - dev_err(chip->dev, "Error reading " #dt_property \ - " property rc = %d\n", rc); \ -} while (0) - -#define ILIM_ENTRIES 3 -#define VOLTAGE_RANGE_ENTRIES 2 -#define RANGE_ENTRY (ILIM_ENTRIES + VOLTAGE_RANGE_ENTRIES) -static int smb_parse_wipower_map_dt(struct smbchg_chip *chip, - struct ilim_map *map, char *property) -{ - struct device_node *node = chip->dev->of_node; - int total_elements, size; - struct property *prop; - const __be32 *data; - int num, i; - - prop = of_find_property(node, property, &size); - if (!prop) { - dev_err(chip->dev, 
"%s missing\n", property); - return -EINVAL; - } - - total_elements = size / sizeof(int); - if (total_elements % RANGE_ENTRY) { - dev_err(chip->dev, "%s table not in multiple of %d, total elements = %d\n", - property, RANGE_ENTRY, total_elements); - return -EINVAL; - } - - data = prop->value; - num = total_elements / RANGE_ENTRY; - map->entries = devm_kzalloc(chip->dev, - num * sizeof(struct ilim_entry), GFP_KERNEL); - if (!map->entries) { - dev_err(chip->dev, "kzalloc failed for default ilim\n"); - return -ENOMEM; - } - for (i = 0; i < num; i++) { - map->entries[i].vmin_uv = be32_to_cpup(data++); - map->entries[i].vmax_uv = be32_to_cpup(data++); - map->entries[i].icl_pt_ma = be32_to_cpup(data++); - map->entries[i].icl_lv_ma = be32_to_cpup(data++); - map->entries[i].icl_hv_ma = be32_to_cpup(data++); - } - map->num = num; - return 0; -} - -static int smb_parse_wipower_dt(struct smbchg_chip *chip) -{ - int rc = 0; - - chip->wipower_dyn_icl_avail = false; - - if (!chip->vadc_dev) - goto err; - - rc = smb_parse_wipower_map_dt(chip, &chip->wipower_default, - "qcom,wipower-default-ilim-map"); - if (rc) { - dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n", - rc); - goto err; - } - - rc = smb_parse_wipower_map_dt(chip, &chip->wipower_pt, - "qcom,wipower-pt-ilim-map"); - if (rc) { - dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n", - rc); - goto err; - } - - rc = smb_parse_wipower_map_dt(chip, &chip->wipower_div2, - "qcom,wipower-div2-ilim-map"); - if (rc) { - dev_err(chip->dev, "failed to parse wipower-div2-ilim-map rc = %d\n", - rc); - goto err; - } - chip->wipower_dyn_icl_avail = true; - return 0; -err: - chip->wipower_default.num = 0; - chip->wipower_pt.num = 0; - chip->wipower_default.num = 0; - if (chip->wipower_default.entries) - devm_kfree(chip->dev, chip->wipower_default.entries); - if (chip->wipower_pt.entries) - devm_kfree(chip->dev, chip->wipower_pt.entries); - if (chip->wipower_div2.entries) - devm_kfree(chip->dev, 
chip->wipower_div2.entries); - chip->wipower_default.entries = NULL; - chip->wipower_pt.entries = NULL; - chip->wipower_div2.entries = NULL; - chip->vadc_dev = NULL; - return rc; -} - -#define DEFAULT_VLED_MAX_UV 3500000 -#define DEFAULT_FCC_MA 2000 -static int smb_parse_dt(struct smbchg_chip *chip) -{ - int rc = 0, ocp_thresh = -EINVAL; - struct device_node *node = chip->dev->of_node; - const char *dc_psy_type, *bpd; - - if (!node) { - dev_err(chip->dev, "device tree info. missing\n"); - return -EINVAL; - } - - /* read optional u32 properties */ - OF_PROP_READ(chip, ocp_thresh, - "ibat-ocp-threshold-ua", rc, 1); - if (ocp_thresh >= 0) - smbchg_ibat_ocp_threshold_ua = ocp_thresh; - OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1); - OF_PROP_READ(chip, chip->cfg_fastchg_current_ma, - "fastchg-current-ma", rc, 1); - if (chip->cfg_fastchg_current_ma == -EINVAL) - chip->cfg_fastchg_current_ma = DEFAULT_FCC_MA; - OF_PROP_READ(chip, chip->vfloat_mv, "float-voltage-mv", rc, 1); - OF_PROP_READ(chip, chip->safety_time, "charging-timeout-mins", rc, 1); - OF_PROP_READ(chip, chip->vled_max_uv, "vled-max-uv", rc, 1); - if (chip->vled_max_uv < 0) - chip->vled_max_uv = DEFAULT_VLED_MAX_UV; - OF_PROP_READ(chip, chip->rpara_uohm, "rparasitic-uohm", rc, 1); - if (chip->rpara_uohm < 0) - chip->rpara_uohm = 0; - OF_PROP_READ(chip, chip->prechg_safety_time, "precharging-timeout-mins", - rc, 1); - OF_PROP_READ(chip, chip->fastchg_current_comp, "fastchg-current-comp", - rc, 1); - OF_PROP_READ(chip, chip->float_voltage_comp, "float-voltage-comp", - rc, 1); - if (chip->safety_time != -EINVAL && - (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) { - dev_err(chip->dev, "Bad charging-timeout-mins %d\n", - chip->safety_time); - return -EINVAL; - } - if (chip->prechg_safety_time != -EINVAL && - (chip->prechg_safety_time > - prechg_time[ARRAY_SIZE(prechg_time) - 1])) { - dev_err(chip->dev, "Bad precharging-timeout-mins %d\n", - chip->prechg_safety_time); - return -EINVAL; - } - 
OF_PROP_READ(chip, chip->resume_delta_mv, "resume-delta-mv", rc, 1); - OF_PROP_READ(chip, chip->parallel.min_current_thr_ma, - "parallel-usb-min-current-ma", rc, 1); - OF_PROP_READ(chip, chip->parallel.min_9v_current_thr_ma, - "parallel-usb-9v-min-current-ma", rc, 1); - OF_PROP_READ(chip, chip->parallel.allowed_lowering_ma, - "parallel-allowed-lowering-ma", rc, 1); - if (chip->parallel.min_current_thr_ma != -EINVAL - && chip->parallel.min_9v_current_thr_ma != -EINVAL) - chip->parallel.avail = true; - /* - * use the dt values if they exist, otherwise do not touch the params - */ - of_property_read_u32(node, "qcom,parallel-main-chg-fcc-percent", - &smbchg_main_chg_fcc_percent); - of_property_read_u32(node, "qcom,parallel-main-chg-icl-percent", - &smbchg_main_chg_icl_percent); - pr_smb(PR_STATUS, "parallel usb thr: %d, 9v thr: %d\n", - chip->parallel.min_current_thr_ma, - chip->parallel.min_9v_current_thr_ma); - OF_PROP_READ(chip, chip->jeita_temp_hard_limit, - "jeita-temp-hard-limit", rc, 1); - OF_PROP_READ(chip, chip->aicl_rerun_period_s, - "aicl-rerun-period-s", rc, 1); - OF_PROP_READ(chip, chip->vchg_adc_channel, - "vchg-adc-channel-id", rc, 1); - - /* read boolean configuration properties */ - chip->use_vfloat_adjustments = of_property_read_bool(node, - "qcom,autoadjust-vfloat"); - chip->bmd_algo_disabled = of_property_read_bool(node, - "qcom,bmd-algo-disabled"); - chip->iterm_disabled = of_property_read_bool(node, - "qcom,iterm-disabled"); - chip->soft_vfloat_comp_disabled = of_property_read_bool(node, - "qcom,soft-vfloat-comp-disabled"); - chip->chg_enabled = !(of_property_read_bool(node, - "qcom,charging-disabled")); - chip->charge_unknown_battery = of_property_read_bool(node, - "qcom,charge-unknown-battery"); - chip->chg_inhibit_en = of_property_read_bool(node, - "qcom,chg-inhibit-en"); - chip->chg_inhibit_source_fg = of_property_read_bool(node, - "qcom,chg-inhibit-fg"); - chip->low_volt_dcin = of_property_read_bool(node, - "qcom,low-volt-dcin"); - 
chip->force_aicl_rerun = of_property_read_bool(node, - "qcom,force-aicl-rerun"); - chip->skip_usb_suspend_for_fake_battery = of_property_read_bool(node, - "qcom,skip-usb-suspend-for-fake-battery"); - - /* parse the battery missing detection pin source */ - rc = of_property_read_string(chip->pdev->dev.of_node, - "qcom,bmd-pin-src", &bpd); - if (rc) { - /* Select BAT_THM as default BPD scheme */ - chip->bmd_pin_src = BPD_TYPE_DEFAULT; - rc = 0; - } else { - chip->bmd_pin_src = get_bpd(bpd); - if (chip->bmd_pin_src < 0) { - dev_err(chip->dev, - "failed to determine bpd schema %d\n", rc); - return rc; - } - } - - /* parse the dc power supply configuration */ - rc = of_property_read_string(node, "qcom,dc-psy-type", &dc_psy_type); - if (rc) { - chip->dc_psy_type = -EINVAL; - rc = 0; - } else { - if (strcmp(dc_psy_type, "Mains") == 0) - chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS; - else if (strcmp(dc_psy_type, "Wireless") == 0) - chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS; - else if (strcmp(dc_psy_type, "Wipower") == 0) - chip->dc_psy_type = POWER_SUPPLY_TYPE_WIPOWER; - } - if (chip->dc_psy_type != -EINVAL) { - OF_PROP_READ(chip, chip->dc_target_current_ma, - "dc-psy-ma", rc, 0); - if (rc) - return rc; - if (chip->dc_target_current_ma < DC_MA_MIN - || chip->dc_target_current_ma > DC_MA_MAX) { - dev_err(chip->dev, "Bad dc mA %d\n", - chip->dc_target_current_ma); - return -EINVAL; - } - } - - if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) - smb_parse_wipower_dt(chip); - - /* read the bms power supply name */ - rc = of_property_read_string(node, "qcom,bms-psy-name", - &chip->bms_psy_name); - if (rc) - chip->bms_psy_name = NULL; - - /* read the battery power supply name */ - rc = of_property_read_string(node, "qcom,battery-psy-name", - &chip->battery_psy_name); - if (rc) - chip->battery_psy_name = "battery"; - - /* Get the charger led support property */ - chip->cfg_chg_led_sw_ctrl = - of_property_read_bool(node, "qcom,chg-led-sw-controls"); - 
chip->cfg_chg_led_support = - of_property_read_bool(node, "qcom,chg-led-support"); - - if (of_find_property(node, "qcom,thermal-mitigation", - &chip->thermal_levels)) { - chip->thermal_mitigation = devm_kzalloc(chip->dev, - chip->thermal_levels, - GFP_KERNEL); - - if (chip->thermal_mitigation == NULL) { - dev_err(chip->dev, "thermal mitigation kzalloc() failed.\n"); - return -ENOMEM; - } - - chip->thermal_levels /= sizeof(int); - rc = of_property_read_u32_array(node, - "qcom,thermal-mitigation", - chip->thermal_mitigation, chip->thermal_levels); - if (rc) { - dev_err(chip->dev, - "Couldn't read threm limits rc = %d\n", rc); - return rc; - } - } - - chip->skip_usb_notification - = of_property_read_bool(node, - "qcom,skip-usb-notification"); - - chip->otg_pinctrl = of_property_read_bool(node, "qcom,otg-pinctrl"); - - return 0; -} - -#define SUBTYPE_REG 0x5 -#define SMBCHG_CHGR_SUBTYPE 0x1 -#define SMBCHG_OTG_SUBTYPE 0x8 -#define SMBCHG_BAT_IF_SUBTYPE 0x3 -#define SMBCHG_USB_CHGPTH_SUBTYPE 0x4 -#define SMBCHG_DC_CHGPTH_SUBTYPE 0x5 -#define SMBCHG_MISC_SUBTYPE 0x7 -#define SMBCHG_LITE_CHGR_SUBTYPE 0x51 -#define SMBCHG_LITE_OTG_SUBTYPE 0x58 -#define SMBCHG_LITE_BAT_IF_SUBTYPE 0x53 -#define SMBCHG_LITE_USB_CHGPTH_SUBTYPE 0x54 -#define SMBCHG_LITE_DC_CHGPTH_SUBTYPE 0x55 -#define SMBCHG_LITE_MISC_SUBTYPE 0x57 -static int smbchg_request_irq(struct smbchg_chip *chip, - struct device_node *child, - int irq_num, char *irq_name, - irqreturn_t (irq_handler)(int irq, void *_chip), - int flags) -{ - int rc; - - irq_num = of_irq_get_byname(child, irq_name); - if (irq_num < 0) { - dev_err(chip->dev, "Unable to get %s irq\n", irq_name); - rc = -ENXIO; - } - rc = devm_request_threaded_irq(chip->dev, - irq_num, NULL, irq_handler, flags, irq_name, - chip); - if (rc < 0) { - dev_err(chip->dev, "Unable to request %s irq: %d\n", - irq_name, rc); - rc = -ENXIO; - } - return 0; -} - -static int smbchg_request_irqs(struct smbchg_chip *chip) -{ - int rc = 0; - unsigned int base; - struct 
device_node *child; - u8 subtype; - unsigned long flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING - | IRQF_ONESHOT; - - if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) { - pr_err("no child nodes\n"); - return -ENXIO; - } - - for_each_available_child_of_node(chip->pdev->dev.of_node, child) { - rc = of_property_read_u32(child, "reg", &base); - if (rc < 0) { - rc = 0; - continue; - } - - rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1); - if (rc) { - dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n", - rc); - return rc; - } - - switch (subtype) { - case SMBCHG_CHGR_SUBTYPE: - case SMBCHG_LITE_CHGR_SUBTYPE: - rc = smbchg_request_irq(chip, child, - chip->chg_error_irq, "chg-error", - chg_error_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, chip->taper_irq, - "chg-taper-thr", taper_handler, - (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - disable_irq_nosync(chip->taper_irq); - rc = smbchg_request_irq(chip, child, chip->chg_term_irq, - "chg-tcc-thr", chg_term_handler, - (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, chip->recharge_irq, - "chg-rechg-thr", recharge_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, chip->fastchg_irq, - "chg-p2f-thr", fastchg_handler, flags); - if (rc < 0) - return rc; - enable_irq_wake(chip->chg_term_irq); - enable_irq_wake(chip->chg_error_irq); - enable_irq_wake(chip->fastchg_irq); - break; - case SMBCHG_BAT_IF_SUBTYPE: - case SMBCHG_LITE_BAT_IF_SUBTYPE: - rc = smbchg_request_irq(chip, child, chip->batt_hot_irq, - "batt-hot", batt_hot_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->batt_warm_irq, - "batt-warm", batt_warm_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->batt_cool_irq, - "batt-cool", batt_cool_handler, flags); - if (rc < 0) - return rc; - rc = 
smbchg_request_irq(chip, child, - chip->batt_cold_irq, - "batt-cold", batt_cold_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->batt_missing_irq, - "batt-missing", batt_pres_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->vbat_low_irq, - "batt-low", vbat_low_handler, flags); - if (rc < 0) - return rc; - - enable_irq_wake(chip->batt_hot_irq); - enable_irq_wake(chip->batt_warm_irq); - enable_irq_wake(chip->batt_cool_irq); - enable_irq_wake(chip->batt_cold_irq); - enable_irq_wake(chip->batt_missing_irq); - enable_irq_wake(chip->vbat_low_irq); - break; - case SMBCHG_USB_CHGPTH_SUBTYPE: - case SMBCHG_LITE_USB_CHGPTH_SUBTYPE: - rc = smbchg_request_irq(chip, child, - chip->usbin_uv_irq, - "usbin-uv", usbin_uv_handler, - flags | IRQF_EARLY_RESUME); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->usbin_ov_irq, - "usbin-ov", usbin_ov_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->src_detect_irq, - "usbin-src-det", - src_detect_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->aicl_done_irq, - "aicl-done", - aicl_done_handler, - (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - - if (chip->schg_version != QPNP_SCHG_LITE) { - rc = smbchg_request_irq(chip, child, - chip->otg_fail_irq, "otg-fail", - otg_fail_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->otg_oc_irq, "otg-oc", - otg_oc_handler, - (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->usbid_change_irq, "usbid-change", - usbid_change_handler, - (IRQF_TRIGGER_FALLING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - enable_irq_wake(chip->otg_oc_irq); - enable_irq_wake(chip->usbid_change_irq); - enable_irq_wake(chip->otg_fail_irq); - } - enable_irq_wake(chip->usbin_uv_irq); - 
enable_irq_wake(chip->usbin_ov_irq); - enable_irq_wake(chip->src_detect_irq); - if (chip->parallel.avail && chip->usb_present) { - rc = enable_irq_wake(chip->aicl_done_irq); - chip->enable_aicl_wake = true; - } - break; - case SMBCHG_DC_CHGPTH_SUBTYPE: - case SMBCHG_LITE_DC_CHGPTH_SUBTYPE: - rc = smbchg_request_irq(chip, child, chip->dcin_uv_irq, - "dcin-uv", dcin_uv_handler, flags); - if (rc < 0) - return rc; - enable_irq_wake(chip->dcin_uv_irq); - break; - case SMBCHG_MISC_SUBTYPE: - case SMBCHG_LITE_MISC_SUBTYPE: - rc = smbchg_request_irq(chip, child, chip->power_ok_irq, - "power-ok", power_ok_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, chip->chg_hot_irq, - "temp-shutdown", chg_hot_handler, flags); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, chip->wdog_timeout_irq, - "wdog-timeout", - wdog_timeout_handler, flags); - if (rc < 0) - return rc; - enable_irq_wake(chip->chg_hot_irq); - enable_irq_wake(chip->wdog_timeout_irq); - break; - case SMBCHG_OTG_SUBTYPE: - break; - case SMBCHG_LITE_OTG_SUBTYPE: - rc = smbchg_request_irq(chip, child, - chip->usbid_change_irq, "usbid-change", - usbid_change_handler, - (IRQF_TRIGGER_FALLING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->otg_oc_irq, "otg-oc", - otg_oc_handler, - (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); - if (rc < 0) - return rc; - rc = smbchg_request_irq(chip, child, - chip->otg_fail_irq, "otg-fail", - otg_fail_handler, flags); - if (rc < 0) - return rc; - enable_irq_wake(chip->usbid_change_irq); - enable_irq_wake(chip->otg_oc_irq); - enable_irq_wake(chip->otg_fail_irq); - break; - } - } - - return rc; -} - -#define REQUIRE_BASE(chip, base, rc) \ -do { \ - if (!rc && !chip->base) { \ - dev_err(chip->dev, "Missing " #base "\n"); \ - rc = -EINVAL; \ - } \ -} while (0) - -static int smbchg_parse_peripherals(struct smbchg_chip *chip) -{ - int rc = 0; - unsigned int base; - struct device_node *child; - u8 subtype; - - 
if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) { - pr_err("no child nodes\n"); - return -ENXIO; - } - - for_each_available_child_of_node(chip->pdev->dev.of_node, child) { - rc = of_property_read_u32(child, "reg", &base); - if (rc < 0) { - rc = 0; - continue; - } - - rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1); - if (rc) { - dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n", - rc); - return rc; - } - - switch (subtype) { - case SMBCHG_CHGR_SUBTYPE: - case SMBCHG_LITE_CHGR_SUBTYPE: - chip->chgr_base = base; - break; - case SMBCHG_BAT_IF_SUBTYPE: - case SMBCHG_LITE_BAT_IF_SUBTYPE: - chip->bat_if_base = base; - break; - case SMBCHG_USB_CHGPTH_SUBTYPE: - case SMBCHG_LITE_USB_CHGPTH_SUBTYPE: - chip->usb_chgpth_base = base; - break; - case SMBCHG_DC_CHGPTH_SUBTYPE: - case SMBCHG_LITE_DC_CHGPTH_SUBTYPE: - chip->dc_chgpth_base = base; - break; - case SMBCHG_MISC_SUBTYPE: - case SMBCHG_LITE_MISC_SUBTYPE: - chip->misc_base = base; - break; - case SMBCHG_OTG_SUBTYPE: - case SMBCHG_LITE_OTG_SUBTYPE: - chip->otg_base = base; - break; - } - } - - REQUIRE_BASE(chip, chgr_base, rc); - REQUIRE_BASE(chip, bat_if_base, rc); - REQUIRE_BASE(chip, usb_chgpth_base, rc); - REQUIRE_BASE(chip, dc_chgpth_base, rc); - REQUIRE_BASE(chip, misc_base, rc); - - return rc; -} - -static inline void dump_reg(struct smbchg_chip *chip, u16 addr, - const char *name) -{ - u8 reg; - - smbchg_read(chip, &reg, addr, 1); - pr_smb(PR_DUMP, "%s - %04X = %02X\n", name, addr, reg); -} - -/* dumps useful registers for debug */ -static void dump_regs(struct smbchg_chip *chip) -{ - u16 addr; - - /* charger peripheral */ - for (addr = 0xB; addr <= 0x10; addr++) - dump_reg(chip, chip->chgr_base + addr, "CHGR Status"); - for (addr = 0xF0; addr <= 0xFF; addr++) - dump_reg(chip, chip->chgr_base + addr, "CHGR Config"); - /* battery interface peripheral */ - dump_reg(chip, chip->bat_if_base + RT_STS, "BAT_IF Status"); - dump_reg(chip, chip->bat_if_base + CMD_CHG_REG, "BAT_IF Command"); - 
for (addr = 0xF0; addr <= 0xFB; addr++) - dump_reg(chip, chip->bat_if_base + addr, "BAT_IF Config"); - /* usb charge path peripheral */ - for (addr = 0x7; addr <= 0x10; addr++) - dump_reg(chip, chip->usb_chgpth_base + addr, "USB Status"); - dump_reg(chip, chip->usb_chgpth_base + CMD_IL, "USB Command"); - for (addr = 0xF0; addr <= 0xF5; addr++) - dump_reg(chip, chip->usb_chgpth_base + addr, "USB Config"); - /* dc charge path peripheral */ - dump_reg(chip, chip->dc_chgpth_base + RT_STS, "DC Status"); - for (addr = 0xF0; addr <= 0xF6; addr++) - dump_reg(chip, chip->dc_chgpth_base + addr, "DC Config"); - /* misc peripheral */ - dump_reg(chip, chip->misc_base + IDEV_STS, "MISC Status"); - dump_reg(chip, chip->misc_base + RT_STS, "MISC Status"); - for (addr = 0xF0; addr <= 0xF3; addr++) - dump_reg(chip, chip->misc_base + addr, "MISC CFG"); -} - -static int create_debugfs_entries(struct smbchg_chip *chip) -{ - struct dentry *ent; - - chip->debug_root = debugfs_create_dir("qpnp-smbcharger", NULL); - if (!chip->debug_root) { - dev_err(chip->dev, "Couldn't create debug dir\n"); - return -EINVAL; - } - - ent = debugfs_create_file("force_dcin_icl_check", - S_IFREG | S_IWUSR | S_IRUGO, - chip->debug_root, chip, - &force_dcin_icl_ops); - if (!ent) { - dev_err(chip->dev, - "Couldn't create force dcin icl check file\n"); - return -EINVAL; - } - return 0; -} - -static int smbchg_check_chg_version(struct smbchg_chip *chip) -{ - struct pmic_revid_data *pmic_rev_id; - struct device_node *revid_dev_node; - int rc; - - revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node, - "qcom,pmic-revid", 0); - if (!revid_dev_node) { - pr_err("Missing qcom,pmic-revid property - driver failed\n"); - return -EINVAL; - } - - pmic_rev_id = get_revid_data(revid_dev_node); - if (IS_ERR(pmic_rev_id)) { - rc = PTR_ERR(revid_dev_node); - if (rc != -EPROBE_DEFER) - pr_err("Unable to get pmic_revid rc=%d\n", rc); - return rc; - } - - switch (pmic_rev_id->pmic_subtype) { - case PMI8994: - chip->wa_flags |= 
SMBCHG_AICL_DEGLITCH_WA - | SMBCHG_BATT_OV_WA - | SMBCHG_CC_ESR_WA - | SMBCHG_RESTART_WA; - use_pmi8994_tables(chip); - chip->schg_version = QPNP_SCHG; - break; - case PMI8950: - case PMI8937: - chip->wa_flags |= SMBCHG_BATT_OV_WA; - if (pmic_rev_id->rev4 < 2) /* PMI8950 1.0 */ { - chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA; - } else { /* rev > PMI8950 v1.0 */ - chip->wa_flags |= SMBCHG_HVDCP_9V_EN_WA - | SMBCHG_USB100_WA; - } - use_pmi8994_tables(chip); - chip->tables.aicl_rerun_period_table = - aicl_rerun_period_schg_lite; - chip->tables.aicl_rerun_period_len = - ARRAY_SIZE(aicl_rerun_period_schg_lite); - - chip->schg_version = QPNP_SCHG_LITE; - if (pmic_rev_id->pmic_subtype == PMI8937) - chip->hvdcp_not_supported = true; - break; - case PMI8996: - chip->wa_flags |= SMBCHG_CC_ESR_WA - | SMBCHG_FLASH_ICL_DISABLE_WA - | SMBCHG_RESTART_WA - | SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA; - use_pmi8996_tables(chip); - chip->schg_version = QPNP_SCHG; - break; - default: - pr_err("PMIC subtype %d not supported, WA flags not set\n", - pmic_rev_id->pmic_subtype); - } - - pr_smb(PR_STATUS, "pmic=%s, wa_flags=0x%x, hvdcp_supported=%s\n", - pmic_rev_id->pmic_name, chip->wa_flags, - chip->hvdcp_not_supported ? 
"false" : "true"); - - return 0; -} - -static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip) -{ - enum power_supply_type usb_supply_type; - char *usb_type_name; - int rc; - - if (!(chip->wa_flags & SMBCHG_RESTART_WA)) - return; - - read_usb_type(chip, &usb_type_name, &usb_supply_type); - if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP - && !is_hvdcp_present(chip)) { - pr_smb(PR_STATUS, "DCP found rerunning APSD\n"); - rc = vote(chip->usb_icl_votable, - CHG_SUSPEND_WORKAROUND_ICL_VOTER, true, 300); - if (rc < 0) - pr_err("Couldn't vote for 300mA for suspend wa, going ahead rc=%d\n", - rc); - - pr_smb(PR_STATUS, "Faking Removal\n"); - fake_insertion_removal(chip, false); - msleep(500); - pr_smb(PR_STATUS, "Faking Insertion\n"); - fake_insertion_removal(chip, true); - - read_usb_type(chip, &usb_type_name, &usb_supply_type); - if (usb_supply_type != POWER_SUPPLY_TYPE_USB_DCP) { - msleep(500); - pr_smb(PR_STATUS, "Fake Removal again as type!=DCP\n"); - fake_insertion_removal(chip, false); - msleep(500); - pr_smb(PR_STATUS, "Fake Insert again as type!=DCP\n"); - fake_insertion_removal(chip, true); - } - - rc = vote(chip->usb_icl_votable, - CHG_SUSPEND_WORKAROUND_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't vote for 0 for suspend wa, going ahead rc=%d\n", - rc); - } -} - -static int smbchg_probe(struct platform_device *pdev) -{ - int rc; - struct smbchg_chip *chip; - struct power_supply *typec_psy = NULL; - struct qpnp_vadc_chip *vadc_dev, *vchg_vadc_dev; - const char *typec_psy_name; - struct power_supply_config usb_psy_cfg = {}; - struct power_supply_config batt_psy_cfg = {}; - struct power_supply_config dc_psy_cfg = {}; - - if (of_property_read_bool(pdev->dev.of_node, "qcom,external-typec")) { - /* read the type power supply name */ - rc = of_property_read_string(pdev->dev.of_node, - "qcom,typec-psy-name", &typec_psy_name); - if (rc) { - pr_err("failed to get prop typec-psy-name rc=%d\n", - rc); - return rc; - } - - typec_psy = 
power_supply_get_by_name(typec_psy_name); - if (!typec_psy) { - pr_smb(PR_STATUS, - "Type-C supply not found, deferring probe\n"); - return -EPROBE_DEFER; - } - } - - vadc_dev = NULL; - if (of_find_property(pdev->dev.of_node, "qcom,dcin-vadc", NULL)) { - vadc_dev = qpnp_get_vadc(&pdev->dev, "dcin"); - if (IS_ERR(vadc_dev)) { - rc = PTR_ERR(vadc_dev); - if (rc != -EPROBE_DEFER) - dev_err(&pdev->dev, - "Couldn't get vadc rc=%d\n", - rc); - return rc; - } - } - - vchg_vadc_dev = NULL; - if (of_find_property(pdev->dev.of_node, "qcom,vchg_sns-vadc", NULL)) { - vchg_vadc_dev = qpnp_get_vadc(&pdev->dev, "vchg_sns"); - if (IS_ERR(vchg_vadc_dev)) { - rc = PTR_ERR(vchg_vadc_dev); - if (rc != -EPROBE_DEFER) - dev_err(&pdev->dev, "Couldn't get vadc 'vchg' rc=%d\n", - rc); - return rc; - } - } - - - chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); - if (!chip->regmap) { - dev_err(&pdev->dev, "Couldn't get parent's regmap\n"); - return -EINVAL; - } - - chip->fcc_votable = create_votable("BATT_FCC", - VOTE_MIN, - set_fastchg_current_vote_cb, chip); - if (IS_ERR(chip->fcc_votable)) { - rc = PTR_ERR(chip->fcc_votable); - goto votables_cleanup; - } - - chip->usb_icl_votable = create_votable("USB_ICL", - VOTE_MIN, - set_usb_current_limit_vote_cb, chip); - if (IS_ERR(chip->usb_icl_votable)) { - rc = PTR_ERR(chip->usb_icl_votable); - goto votables_cleanup; - } - - chip->dc_icl_votable = create_votable("DCIN_ICL", - VOTE_MIN, - set_dc_current_limit_vote_cb, chip); - if (IS_ERR(chip->dc_icl_votable)) { - rc = PTR_ERR(chip->dc_icl_votable); - goto votables_cleanup; - } - - chip->usb_suspend_votable = create_votable("USB_SUSPEND", - VOTE_SET_ANY, - usb_suspend_vote_cb, chip); - if (IS_ERR(chip->usb_suspend_votable)) { - rc = PTR_ERR(chip->usb_suspend_votable); - goto votables_cleanup; - } - - chip->dc_suspend_votable = create_votable("DC_SUSPEND", - VOTE_SET_ANY, - dc_suspend_vote_cb, 
chip); - if (IS_ERR(chip->dc_suspend_votable)) { - rc = PTR_ERR(chip->dc_suspend_votable); - goto votables_cleanup; - } - - chip->battchg_suspend_votable = create_votable("BATTCHG_SUSPEND", - VOTE_SET_ANY, - charging_suspend_vote_cb, chip); - if (IS_ERR(chip->battchg_suspend_votable)) { - rc = PTR_ERR(chip->battchg_suspend_votable); - goto votables_cleanup; - } - - chip->hw_aicl_rerun_disable_votable = create_votable("HWAICL_DISABLE", - VOTE_SET_ANY, - smbchg_hw_aicl_rerun_disable_cb, chip); - if (IS_ERR(chip->hw_aicl_rerun_disable_votable)) { - rc = PTR_ERR(chip->hw_aicl_rerun_disable_votable); - goto votables_cleanup; - } - - chip->hw_aicl_rerun_enable_indirect_votable = create_votable( - "HWAICL_ENABLE_INDIRECT", - VOTE_SET_ANY, - smbchg_hw_aicl_rerun_enable_indirect_cb, chip); - if (IS_ERR(chip->hw_aicl_rerun_enable_indirect_votable)) { - rc = PTR_ERR(chip->hw_aicl_rerun_enable_indirect_votable); - goto votables_cleanup; - } - - chip->aicl_deglitch_short_votable = create_votable( - "HWAICL_SHORT_DEGLITCH", - VOTE_SET_ANY, - smbchg_aicl_deglitch_config_cb, chip); - if (IS_ERR(chip->aicl_deglitch_short_votable)) { - rc = PTR_ERR(chip->aicl_deglitch_short_votable); - goto votables_cleanup; - } - - INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work); - INIT_DELAYED_WORK(&chip->parallel_en_work, - smbchg_parallel_usb_en_work); - INIT_DELAYED_WORK(&chip->vfloat_adjust_work, smbchg_vfloat_adjust_work); - INIT_DELAYED_WORK(&chip->hvdcp_det_work, smbchg_hvdcp_det_work); - init_completion(&chip->src_det_lowered); - init_completion(&chip->src_det_raised); - init_completion(&chip->usbin_uv_lowered); - init_completion(&chip->usbin_uv_raised); - chip->vadc_dev = vadc_dev; - chip->vchg_vadc_dev = vchg_vadc_dev; - chip->pdev = pdev; - chip->dev = &pdev->dev; - - chip->typec_psy = typec_psy; - chip->fake_battery_soc = -EINVAL; - chip->usb_online = -EINVAL; - dev_set_drvdata(&pdev->dev, chip); - - spin_lock_init(&chip->sec_access_lock); - 
mutex_init(&chip->therm_lvl_lock); - mutex_init(&chip->usb_set_online_lock); - mutex_init(&chip->parallel.lock); - mutex_init(&chip->taper_irq_lock); - mutex_init(&chip->pm_lock); - mutex_init(&chip->wipower_config); - mutex_init(&chip->usb_status_lock); - device_init_wakeup(chip->dev, true); - - rc = smbchg_parse_peripherals(chip); - if (rc) { - dev_err(chip->dev, "Error parsing DT peripherals: %d\n", rc); - goto votables_cleanup; - } - - rc = smbchg_check_chg_version(chip); - if (rc) { - pr_err("Unable to check schg version rc=%d\n", rc); - goto votables_cleanup; - } - - rc = smb_parse_dt(chip); - if (rc < 0) { - dev_err(&pdev->dev, "Unable to parse DT nodes: %d\n", rc); - goto votables_cleanup; - } - - rc = smbchg_regulator_init(chip); - if (rc) { - dev_err(&pdev->dev, - "Couldn't initialize regulator rc=%d\n", rc); - goto votables_cleanup; - } - - chip->extcon = devm_extcon_dev_allocate(chip->dev, smbchg_extcon_cable); - if (IS_ERR(chip->extcon)) { - dev_err(chip->dev, "failed to allocate extcon device\n"); - rc = PTR_ERR(chip->extcon); - goto votables_cleanup; - } - - rc = devm_extcon_dev_register(chip->dev, chip->extcon); - if (rc) { - dev_err(chip->dev, "failed to register extcon device\n"); - goto votables_cleanup; - } - - chip->usb_psy_d.name = "usb"; - chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB; - chip->usb_psy_d.get_property = smbchg_usb_get_property; - chip->usb_psy_d.set_property = smbchg_usb_set_property; - chip->usb_psy_d.properties = smbchg_usb_properties; - chip->usb_psy_d.num_properties = ARRAY_SIZE(smbchg_usb_properties); - chip->usb_psy_d.property_is_writeable = smbchg_usb_is_writeable; - - usb_psy_cfg.drv_data = chip; - usb_psy_cfg.supplied_to = smbchg_usb_supplicants; - usb_psy_cfg.num_supplicants = ARRAY_SIZE(smbchg_usb_supplicants); - - chip->usb_psy = devm_power_supply_register(chip->dev, - &chip->usb_psy_d, &usb_psy_cfg); - if (IS_ERR(chip->usb_psy)) { - dev_err(&pdev->dev, "Unable to register usb_psy rc = %ld\n", - 
PTR_ERR(chip->usb_psy)); - rc = PTR_ERR(chip->usb_psy); - goto votables_cleanup; - } - - if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) { - chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm"); - if (IS_ERR(chip->dpdm_reg)) { - rc = PTR_ERR(chip->dpdm_reg); - goto votables_cleanup; - } - } - - rc = smbchg_hw_init(chip); - if (rc < 0) { - dev_err(&pdev->dev, - "Unable to intialize hardware rc = %d\n", rc); - goto out; - } - - rc = determine_initial_status(chip); - if (rc < 0) { - dev_err(&pdev->dev, - "Unable to determine init status rc = %d\n", rc); - goto out; - } - - chip->previous_soc = -EINVAL; - chip->batt_psy_d.name = chip->battery_psy_name; - chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY; - chip->batt_psy_d.get_property = smbchg_battery_get_property; - chip->batt_psy_d.set_property = smbchg_battery_set_property; - chip->batt_psy_d.properties = smbchg_battery_properties; - chip->batt_psy_d.num_properties = ARRAY_SIZE(smbchg_battery_properties); - chip->batt_psy_d.external_power_changed = smbchg_external_power_changed; - chip->batt_psy_d.property_is_writeable = smbchg_battery_is_writeable; - - batt_psy_cfg.drv_data = chip; - batt_psy_cfg.num_supplicants = 0; - chip->batt_psy = devm_power_supply_register(chip->dev, - &chip->batt_psy_d, - &batt_psy_cfg); - if (IS_ERR(chip->batt_psy)) { - dev_err(&pdev->dev, - "Unable to register batt_psy rc = %ld\n", - PTR_ERR(chip->batt_psy)); - goto out; - } - - if (chip->dc_psy_type != -EINVAL) { - chip->dc_psy_d.name = "dc"; - chip->dc_psy_d.type = chip->dc_psy_type; - chip->dc_psy_d.get_property = smbchg_dc_get_property; - chip->dc_psy_d.set_property = smbchg_dc_set_property; - chip->dc_psy_d.property_is_writeable = smbchg_dc_is_writeable; - chip->dc_psy_d.properties = smbchg_dc_properties; - chip->dc_psy_d.num_properties - = ARRAY_SIZE(smbchg_dc_properties); - - dc_psy_cfg.drv_data = chip; - dc_psy_cfg.num_supplicants - = ARRAY_SIZE(smbchg_dc_supplicants); - dc_psy_cfg.supplied_to = 
smbchg_dc_supplicants; - - chip->dc_psy = devm_power_supply_register(chip->dev, - &chip->dc_psy_d, - &dc_psy_cfg); - if (IS_ERR(chip->dc_psy)) { - dev_err(&pdev->dev, - "Unable to register dc_psy rc = %ld\n", - PTR_ERR(chip->dc_psy)); - goto out; - } - } - - if (chip->cfg_chg_led_support && - chip->schg_version == QPNP_SCHG_LITE) { - rc = smbchg_register_chg_led(chip); - if (rc) { - dev_err(chip->dev, - "Unable to register charger led: %d\n", - rc); - goto out; - } - - rc = smbchg_chg_led_controls(chip); - if (rc) { - dev_err(chip->dev, - "Failed to set charger led controld bit: %d\n", - rc); - goto unregister_led_class; - } - } - - rc = smbchg_request_irqs(chip); - if (rc < 0) { - dev_err(&pdev->dev, "Unable to request irqs rc = %d\n", rc); - goto unregister_led_class; - } - - rerun_hvdcp_det_if_necessary(chip); - - dump_regs(chip); - create_debugfs_entries(chip); - dev_info(chip->dev, - "SMBCHG successfully probe Charger version=%s Revision DIG:%d.%d ANA:%d.%d batt=%d dc=%d usb=%d\n", - version_str[chip->schg_version], - chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR], - chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR], - get_prop_batt_present(chip), - chip->dc_present, chip->usb_present); - return 0; - -unregister_led_class: - if (chip->cfg_chg_led_support && chip->schg_version == QPNP_SCHG_LITE) - led_classdev_unregister(&chip->led_cdev); -out: - handle_usb_removal(chip); -votables_cleanup: - if (chip->aicl_deglitch_short_votable) - destroy_votable(chip->aicl_deglitch_short_votable); - if (chip->hw_aicl_rerun_enable_indirect_votable) - destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable); - if (chip->hw_aicl_rerun_disable_votable) - destroy_votable(chip->hw_aicl_rerun_disable_votable); - if (chip->battchg_suspend_votable) - destroy_votable(chip->battchg_suspend_votable); - if (chip->dc_suspend_votable) - destroy_votable(chip->dc_suspend_votable); - if (chip->usb_suspend_votable) - destroy_votable(chip->usb_suspend_votable); - if 
(chip->dc_icl_votable) - destroy_votable(chip->dc_icl_votable); - if (chip->usb_icl_votable) - destroy_votable(chip->usb_icl_votable); - if (chip->fcc_votable) - destroy_votable(chip->fcc_votable); - return rc; -} - -static int smbchg_remove(struct platform_device *pdev) -{ - struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev); - - debugfs_remove_recursive(chip->debug_root); - - destroy_votable(chip->aicl_deglitch_short_votable); - destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable); - destroy_votable(chip->hw_aicl_rerun_disable_votable); - destroy_votable(chip->battchg_suspend_votable); - destroy_votable(chip->dc_suspend_votable); - destroy_votable(chip->usb_suspend_votable); - destroy_votable(chip->dc_icl_votable); - destroy_votable(chip->usb_icl_votable); - destroy_votable(chip->fcc_votable); - - return 0; -} - -static void smbchg_shutdown(struct platform_device *pdev) -{ - struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev); - int rc; - - if (!(chip->wa_flags & SMBCHG_RESTART_WA)) - return; - - if (!is_hvdcp_present(chip)) - return; - - pr_smb(PR_MISC, "Disable Parallel\n"); - mutex_lock(&chip->parallel.lock); - smbchg_parallel_en = 0; - smbchg_parallel_usb_disable(chip); - mutex_unlock(&chip->parallel.lock); - - pr_smb(PR_MISC, "Disable all interrupts\n"); - disable_irq(chip->aicl_done_irq); - disable_irq(chip->batt_cold_irq); - disable_irq(chip->batt_cool_irq); - disable_irq(chip->batt_hot_irq); - disable_irq(chip->batt_missing_irq); - disable_irq(chip->batt_warm_irq); - disable_irq(chip->chg_error_irq); - disable_irq(chip->chg_hot_irq); - disable_irq(chip->chg_term_irq); - disable_irq(chip->dcin_uv_irq); - disable_irq(chip->fastchg_irq); - disable_irq(chip->otg_fail_irq); - disable_irq(chip->otg_oc_irq); - disable_irq(chip->power_ok_irq); - disable_irq(chip->recharge_irq); - disable_irq(chip->src_detect_irq); - disable_irq(chip->taper_irq); - disable_irq(chip->usbid_change_irq); - disable_irq(chip->usbin_ov_irq); - 
disable_irq(chip->usbin_uv_irq); - disable_irq(chip->vbat_low_irq); - disable_irq(chip->wdog_timeout_irq); - - /* remove all votes for short deglitch */ - vote(chip->aicl_deglitch_short_votable, - VARB_WORKAROUND_SHORT_DEGLITCH_VOTER, false, 0); - vote(chip->aicl_deglitch_short_votable, - HVDCP_SHORT_DEGLITCH_VOTER, false, 0); - - /* vote to ensure AICL rerun is enabled */ - rc = vote(chip->hw_aicl_rerun_enable_indirect_votable, - SHUTDOWN_WORKAROUND_VOTER, true, 0); - if (rc < 0) - pr_err("Couldn't vote to enable indirect AICL rerun\n"); - rc = vote(chip->hw_aicl_rerun_disable_votable, - WEAK_CHARGER_HW_AICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't vote to enable AICL rerun\n"); - - /* switch to 5V HVDCP */ - pr_smb(PR_MISC, "Switch to 5V HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_ADAPTER_SEL_MASK, HVDCP_5V); - if (rc < 0) { - pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc); - return; - } - - pr_smb(PR_MISC, "Wait 500mS to lower to 5V\n"); - /* wait for HVDCP to lower to 5V */ - msleep(500); - /* - * Check if the same hvdcp session is in progress. 
src_det should be - * high and that we are still in 5V hvdcp - */ - if (!is_src_detect_high(chip)) { - pr_smb(PR_MISC, "src det low after 500mS sleep\n"); - return; - } - - /* disable HVDCP */ - pr_smb(PR_MISC, "Disable HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, 0); - if (rc < 0) - pr_err("Couldn't disable HVDCP rc=%d\n", rc); - - chip->hvdcp_3_det_ignore_uv = true; - /* fake a removal */ - pr_smb(PR_MISC, "Faking Removal\n"); - rc = fake_insertion_removal(chip, false); - if (rc < 0) - pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc); - - /* fake an insertion */ - pr_smb(PR_MISC, "Faking Insertion\n"); - rc = fake_insertion_removal(chip, true); - if (rc < 0) - pr_err("Couldn't fake insertion rc=%d\n", rc); - - pr_smb(PR_MISC, "Wait 1S to settle\n"); - msleep(1000); - chip->hvdcp_3_det_ignore_uv = false; - - pr_smb(PR_STATUS, "wrote power off configurations\n"); -} - -static const struct dev_pm_ops smbchg_pm_ops = { -}; - -MODULE_DEVICE_TABLE(spmi, smbchg_id); - -static struct platform_driver smbchg_driver = { - .driver = { - .name = "qpnp-smbcharger", - .owner = THIS_MODULE, - .of_match_table = smbchg_match_table, - .pm = &smbchg_pm_ops, - }, - .probe = smbchg_probe, - .remove = smbchg_remove, - .shutdown = smbchg_shutdown, -}; - -static int __init smbchg_init(void) -{ - return platform_driver_register(&smbchg_driver); -} - -static void __exit smbchg_exit(void) -{ - return platform_driver_unregister(&smbchg_driver); -} - -module_init(smbchg_init); -module_exit(smbchg_exit); - -MODULE_DESCRIPTION("QPNP SMB Charger"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:qpnp-smbcharger"); diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index 5d4b46469e9c53a081975a292496cfee0f69fb45..a8427c5e31071b7673b2d9831e2ff78b1cd80195 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -474,6 +474,36 @@ int smblib_set_dc_suspend(struct 
smb_charger *chg, bool suspend) return rc; } +static int smblib_set_adapter_allowance(struct smb_charger *chg, + u8 allowed_voltage) +{ + int rc = 0; + + switch (allowed_voltage) { + case USBIN_ADAPTER_ALLOW_12V: + case USBIN_ADAPTER_ALLOW_5V_OR_12V: + case USBIN_ADAPTER_ALLOW_9V_TO_12V: + case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V: + case USBIN_ADAPTER_ALLOW_5V_TO_12V: + /* PM660 only support max. 9V */ + if (chg->smb_version == PM660_SUBTYPE) { + smblib_dbg(chg, PR_MISC, "voltage not supported=%d\n", + allowed_voltage); + allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V; + } + break; + } + + rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage); + if (rc < 0) { + smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n", + allowed_voltage, rc); + return rc; + } + + return rc; +} + #define MICRO_5V 5000000 #define MICRO_9V 9000000 #define MICRO_12V 12000000 @@ -504,10 +534,10 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg, return -EINVAL; } - rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage); + rc = smblib_set_adapter_allowance(chg, allowed_voltage); if (rc < 0) { - smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n", - allowed_voltage, rc); + smblib_err(chg, "Couldn't configure adapter allowance rc=%d\n", + rc); return rc; } @@ -650,8 +680,8 @@ static void smblib_uusb_removal(struct smb_charger *chg) } /* reconfigure allowed voltage for HVDCP */ - rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, - USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); + rc = smblib_set_adapter_allowance(chg, + USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); if (rc < 0) smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n", rc); @@ -3405,8 +3435,8 @@ static void typec_source_removal(struct smb_charger *chg) } /* reconfigure allowed voltage for HVDCP */ - rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, - USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); + rc = smblib_set_adapter_allowance(chg, + 
USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); if (rc < 0) smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n", rc); diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c index d9f56730c735819010e9efff4f8499814c7fbe58..040a40b4b173b214fcc89d1760434ed2a2f3f853 100644 --- a/drivers/power/tps65217_charger.c +++ b/drivers/power/tps65217_charger.c @@ -205,6 +205,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) if (!charger) return -ENOMEM; + platform_set_drvdata(pdev, charger); charger->tps = tps; charger->dev = &pdev->dev; diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index d24ca5f281b4bbd98e5ce86e8f454c270d86fe10..ec84ff8ad1b4a59ebbd9c93ea62edb2f714460f6 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -321,6 +321,8 @@ int pwmchip_remove(struct pwm_chip *chip) unsigned int i; int ret = 0; + pwmchip_sysfs_unexport_children(chip); + mutex_lock(&pwm_lock); for (i = 0; i < chip->npwm; i++) { @@ -889,7 +891,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put); */ bool pwm_can_sleep(struct pwm_device *pwm) { - return pwm->chip->can_sleep; + return true; } EXPORT_SYMBOL_GPL(pwm_can_sleep); diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 9c90886f41234153d044d24f94a9ae1e82efbd83..375008e2be20a5ae057f72fef7898988314cbdc4 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -350,6 +350,26 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip) } } +void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) +{ + struct device *parent; + unsigned int i; + + parent = class_find_device(&pwm_class, NULL, chip, + pwmchip_sysfs_match); + if (!parent) + return; + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + + if (test_bit(PWMF_EXPORTED, &pwm->flags)) + pwm_unexport_child(parent, pwm); + } + + put_device(parent); +} + static int __init pwm_sysfs_init(void) { return class_register(&pwm_class); diff --git a/drivers/regulator/qcom_smd-regulator.c 
b/drivers/regulator/qcom_smd-regulator.c index 6fa0c7d13290981d254b38538d6bb62103df1e27..4bda998afdeff184e094aaa6d4e53c1dedf04c86 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -166,29 +166,30 @@ static const struct regulator_desc pm8x41_hfsmps = { static const struct regulator_desc pm8841_ftsmps = { .linear_ranges = (struct regulator_linear_range[]) { REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), - REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), + REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000), }, .n_linear_ranges = 2, - .n_voltages = 340, + .n_voltages = 262, .ops = &rpm_smps_ldo_ops, }; static const struct regulator_desc pm8941_boost = { .linear_ranges = (struct regulator_linear_range[]) { - REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000), + REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000), }, .n_linear_ranges = 1, - .n_voltages = 16, + .n_voltages = 31, .ops = &rpm_smps_ldo_ops, }; static const struct regulator_desc pm8941_pldo = { .linear_ranges = (struct regulator_linear_range[]) { - REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000), - REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), + REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500), + REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000), + REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000), }, - .n_linear_ranges = 2, - .n_voltages = 100, + .n_linear_ranges = 3, + .n_voltages = 164, .ops = &rpm_smps_ldo_ops, }; diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c index 88a5dc88badc7e0c72b1deb770dd1128a8cfa661..fee6457e3111f9fb4e0ede3c670aba43902fa6d9 100644 --- a/drivers/regulator/qcom_spmi-regulator.c +++ b/drivers/regulator/qcom_spmi-regulator.c @@ -1050,6 +1050,8 @@ static struct regulator_ops spmi_vs_ops = { .set_pull_down = spmi_regulator_common_set_pull_down, .set_soft_start = spmi_regulator_common_set_soft_start, .set_over_current_protection = spmi_regulator_vs_ocp, + .set_mode = spmi_regulator_common_set_mode, + .get_mode 
= spmi_regulator_common_get_mode, }; static struct regulator_ops spmi_boost_ops = { @@ -1440,6 +1442,7 @@ static const struct spmi_regulator_data pm8941_regulators[] = { { "s1", 0x1400, "vdd_s1", }, { "s2", 0x1700, "vdd_s2", }, { "s3", 0x1a00, "vdd_s3", }, + { "s4", 0xa000, }, { "l1", 0x4000, "vdd_l1_l3", }, { "l2", 0x4100, "vdd_l2_lvs_1_2_3", }, { "l3", 0x4200, "vdd_l1_l3", }, @@ -1467,8 +1470,8 @@ static const struct spmi_regulator_data pm8941_regulators[] = { { "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", }, { "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", }, { "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", }, - { "mvs1", 0x8300, "vin_5vs", }, - { "mvs2", 0x8400, "vin_5vs", }, + { "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", }, + { "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", }, { } }; diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c index 6893e52f6bddeb9b2c77291a76e89cb5fe4a89cc..c75beec59a79a18d48bbfc47a77c1933ea3240c7 100644 --- a/drivers/regulator/spm-regulator.c +++ b/drivers/regulator/spm-regulator.c @@ -29,9 +29,9 @@ #include #include #include +#include #if defined(CONFIG_ARM64) || (defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI)) - asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); #else #define __invoke_psci_fn_smc(a, b, c, d) 0 #endif diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index fb991ec764235d55443a9f99414efc3511cad7a2..696116ebdf50a5b853ba847b9ef55604ed3423ae 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev) pmic->num_regulators = ARRAY_SIZE(tps65910_regs); pmic->ext_sleep_control = tps65910_ext_sleep_control; info = tps65910_regs; + /* Work around silicon erratum SWCZ010: output programmed + * voltage level can go higher than expected or crash + * Workaround: use no synchronization of DCDC clocks + */ + tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL, + 
DCDCCTRL_DCDCCKSYNC_MASK); break; case TPS65911: pmic->get_ctrl_reg = &tps65911_get_ctrl_register; diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index ec2e9c5fb993c7023c9262af5c0b691905443bff..22394fe30579a30cef087d5c22b64bdeba48b1d6 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -109,6 +109,7 @@ /* OMAP_RTC_OSC_REG bit fields: */ #define OMAP_RTC_OSC_32KCLK_EN BIT(6) #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) +#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4) /* OMAP_RTC_IRQWAKEEN bit fields: */ #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) @@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev) */ if (rtc->has_ext_clk) { reg = rtc_read(rtc, OMAP_RTC_OSC_REG); - rtc_write(rtc, OMAP_RTC_OSC_REG, - reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); + reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE; + reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC; + rtc_writel(rtc, OMAP_RTC_OSC_REG, reg); } rtc->type->lock(rtc); diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 7c511add5aa7d884d8082398198b1aa92a61dc84..bae98521c808c5300a6683c0fe5f1563bfe7a59d 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp) static void con3270_update_string(struct con3270 *cp, struct string *s, int nr) { - if (s->len >= cp->view.cols - 5) + if (s->len < 4) { + /* This indicates a bug, but printing a warning would + * cause a deadlock. 
*/ + return; + } + if (s->string[s->len - 4] != TO_RA) return; raw3270_buffer_address(cp->view.dev, s->string + s->len - 3, cp->view.cols * (nr + 1)); @@ -461,11 +466,11 @@ con3270_cline_end(struct con3270 *cp) cp->cline->len + 4 : cp->view.cols; s = con3270_alloc_string(cp, size); memcpy(s->string, cp->cline->string, cp->cline->len); - if (s->len < cp->view.cols - 5) { + if (cp->cline->len < cp->view.cols - 5) { s->string[s->len - 4] = TO_RA; s->string[s->len - 1] = 0; } else { - while (--size > cp->cline->len) + while (--size >= cp->cline->len) s->string[size] = cp->view.ascebc[' ']; } /* Replace cline with allocated line s and reset cline. */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index c424c0c7367e360acd9ccd1ce899dc80bb575265..1e16331891a9426fdca1d99f5c4ae8a043f34742 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -95,12 +95,13 @@ struct chsc_ssd_area { int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) { struct chsc_ssd_area *ssd_area; + unsigned long flags; int ccode; int ret; int i; int mask; - spin_lock_irq(&chsc_page_lock); + spin_lock_irqsave(&chsc_page_lock, flags); memset(chsc_page, 0, PAGE_SIZE); ssd_area = chsc_page; ssd_area->request.length = 0x0010; @@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) ssd->fla[i] = ssd_area->fla[i]; } out: - spin_unlock_irq(&chsc_page_lock); + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } @@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) u32 fmt : 4; u32 : 16; } __attribute__ ((packed)) *secm_area; + unsigned long flags; int ret, ccode; - spin_lock_irq(&chsc_page_lock); + spin_lock_irqsave(&chsc_page_lock, flags); memset(chsc_page, 0, PAGE_SIZE); secm_area = chsc_page; secm_area->request.length = 0x0050; @@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", 
secm_area->response.code); out: - spin_unlock_irq(&chsc_page_lock); + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } @@ -993,6 +995,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, int chsc_get_channel_measurement_chars(struct channel_path *chp) { + unsigned long flags; int ccode, ret; struct { @@ -1022,7 +1025,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) return 0; - spin_lock_irq(&chsc_page_lock); + spin_lock_irqsave(&chsc_page_lock, flags); memset(chsc_page, 0, PAGE_SIZE); scmc_area = chsc_page; scmc_area->request.length = 0x0010; @@ -1054,7 +1057,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) chsc_initialize_cmg_chars(chp, scmc_area->cmcv, (struct cmg_chars *) &scmc_area->data); out: - spin_unlock_irq(&chsc_page_lock); + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } @@ -1135,6 +1138,7 @@ struct css_chsc_char css_chsc_characteristics; int __init chsc_determine_css_characteristics(void) { + unsigned long flags; int result; struct { struct chsc_header request; @@ -1147,7 +1151,7 @@ chsc_determine_css_characteristics(void) u32 chsc_char[508]; } __attribute__ ((packed)) *scsc_area; - spin_lock_irq(&chsc_page_lock); + spin_lock_irqsave(&chsc_page_lock, flags); memset(chsc_page, 0, PAGE_SIZE); scsc_area = chsc_page; scsc_area->request.length = 0x0010; @@ -1169,7 +1173,7 @@ chsc_determine_css_characteristics(void) CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", scsc_area->response.code); exit: - spin_unlock_irq(&chsc_page_lock); + spin_unlock_irqrestore(&chsc_page_lock, flags); return result; } diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 5d7fbe4e907e37e464c63e8f3278c78edcb838cf..581001989937ce1e0aaab11c26136d5e11b4fa4d 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -3,7 +3,7 @@ * * Debug traces for zfcp. * - * Copyright IBM Corp. 
2002, 2013 + * Copyright IBM Corp. 2002, 2016 */ #define KMSG_COMPONENT "zfcp" @@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, * @tag: tag indicating which kind of unsolicited status has been received * @req: request for which a response was received */ -void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) +void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) { struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; @@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) rec->u.res.req_issued = req->issued; rec->u.res.prot_status = q_pref->prot_status; rec->u.res.fsf_status = q_head->fsf_status; + rec->u.res.port_handle = q_head->port_handle; + rec->u.res.lun_handle = q_head->lun_handle; memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE); @@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) rec->pl_len, "fsf_res", req->req_id); } - debug_event(dbf->hba, 1, rec, sizeof(*rec)); + debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } @@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, if (sdev) { rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); rec->lun = zfcp_scsi_dev_lun(sdev); - } + } else + rec->lun = ZFCP_DBF_INVALID_LUN; } /** @@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) spin_unlock_irqrestore(&dbf->rec_lock, flags); } +/** + * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery + * @tag: identifier for event + * @wka_port: well known address port + * @req_id: request ID to correlate with potential HBA trace record + */ +void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port, + u64 req_id) +{ + struct zfcp_dbf *dbf = wka_port->adapter->dbf; + struct zfcp_dbf_rec *rec = &dbf->rec_buf; + unsigned 
long flags; + + spin_lock_irqsave(&dbf->rec_lock, flags); + memset(rec, 0, sizeof(*rec)); + + rec->id = ZFCP_DBF_REC_RUN; + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->port_status = wka_port->status; + rec->d_id = wka_port->d_id; + rec->lun = ZFCP_DBF_INVALID_LUN; + + rec->u.run.fsf_req_id = req_id; + rec->u.run.rec_status = ~0; + rec->u.run.rec_step = ~0; + rec->u.run.rec_action = ~0; + rec->u.run.rec_count = ~0; + + debug_event(dbf->rec, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); +} + static inline -void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, - u64 req_id, u32 d_id) +void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, + char *paytag, struct scatterlist *sg, u8 id, u16 len, + u64 req_id, u32 d_id, u16 cap_len) { struct zfcp_dbf_san *rec = &dbf->san_buf; u16 rec_len; unsigned long flags; + struct zfcp_dbf_pay *payload = &dbf->pay_buf; + u16 pay_sum = 0; spin_lock_irqsave(&dbf->san_lock, flags); memset(rec, 0, sizeof(*rec)); @@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, rec->id = id; rec->fsf_req_id = req_id; rec->d_id = d_id; - rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); - memcpy(rec->payload, data, rec_len); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->pl_len = len; /* full length even if we cap pay below */ + if (!sg) + goto out; + rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD); + memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */ + if (len <= rec_len) + goto out; /* skip pay record if full content in rec->payload */ + + /* if (len > rec_len): + * dump data up to cap_len ignoring small duplicate in rec->payload + */ + spin_lock(&dbf->pay_lock); + memset(payload, 0, sizeof(*payload)); + memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); + payload->fsf_req_id = req_id; + payload->counter = 0; + for (; sg && pay_sum < cap_len; sg = sg_next(sg)) { + u16 pay_len, offset = 0; + + while (offset < 
sg->length && pay_sum < cap_len) { + pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC, + (u16)(sg->length - offset)); + /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */ + memcpy(payload->data, sg_virt(sg) + offset, pay_len); + debug_event(dbf->pay, 1, payload, + zfcp_dbf_plen(pay_len)); + payload->counter++; + offset += pay_len; + pay_sum += pay_len; + } + } + spin_unlock(&dbf->pay_lock); +out: debug_event(dbf->san, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->san_lock, flags); } @@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) struct zfcp_fsf_ct_els *ct_els = fsf->data; u16 length; - length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); - zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, - fsf->req_id, d_id); + length = (u16)zfcp_qdio_real_bytes(ct_els->req); + zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ, + length, fsf->req_id, d_id, length); +} + +static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, + struct zfcp_fsf_req *fsf, + u16 len) +{ + struct zfcp_fsf_ct_els *ct_els = fsf->data; + struct fc_ct_hdr *reqh = sg_virt(ct_els->req); + struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1); + struct scatterlist *resp_entry = ct_els->resp; + struct fc_gpn_ft_resp *acc; + int max_entries, x, last = 0; + + if (!(memcmp(tag, "fsscth2", 7) == 0 + && ct_els->d_id == FC_FID_DIR_SERV + && reqh->ct_rev == FC_CT_REV + && reqh->ct_in_id[0] == 0 + && reqh->ct_in_id[1] == 0 + && reqh->ct_in_id[2] == 0 + && reqh->ct_fs_type == FC_FST_DIR + && reqh->ct_fs_subtype == FC_NS_SUBTYPE + && reqh->ct_options == 0 + && reqh->_ct_resvd1 == 0 + && reqh->ct_cmd == FC_NS_GPN_FT + /* reqh->ct_mr_size can vary so do not match but read below */ + && reqh->_ct_resvd2 == 0 + && reqh->ct_reason == 0 + && reqh->ct_explan == 0 + && reqh->ct_vendor == 0 + && reqn->fn_resvd == 0 + && reqn->fn_domain_id_scope == 0 + && reqn->fn_area_id_scope == 0 + && reqn->fn_fc4_type == FC_TYPE_FCP)) + return len; /* 
not GPN_FT response so do not cap */ + + acc = sg_virt(resp_entry); + max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp)) + + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one + * to account for header as 1st pseudo "entry" */; + + /* the basic CT_IU preamble is the same size as one entry in the GPN_FT + * response, allowing us to skip special handling for it - just skip it + */ + for (x = 1; x < max_entries && !last; x++) { + if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) + acc++; + else + acc = sg_virt(++resp_entry); + + last = acc->fp_flags & FC_NS_FID_LAST; + } + len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp))); + return len; /* cap after last entry */ } /** @@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) struct zfcp_fsf_ct_els *ct_els = fsf->data; u16 length; - length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); - zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, - fsf->req_id, 0); + length = (u16)zfcp_qdio_real_bytes(ct_els->resp); + zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES, + length, fsf->req_id, ct_els->d_id, + zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length)); } /** @@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) struct fsf_status_read_buffer *srb = (struct fsf_status_read_buffer *) fsf->data; u16 length; + struct scatterlist sg; length = (u16)(srb->length - offsetof(struct fsf_status_read_buffer, payload)); - zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, - fsf->req_id, ntoh24(srb->d_id)); + sg_init_one(&sg, srb->payload.data, length); + zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length, + fsf->req_id, ntoh24(srb->d_id), length); } /** @@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) * @sc: pointer to struct scsi_cmnd * @fsf: pointer to struct zfcp_fsf_req */ -void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) +void 
zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, + struct zfcp_fsf_req *fsf) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) sc->device->host->hostdata[0]; @@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) } } - debug_event(dbf->scsi, 1, rec, sizeof(*rec)); + debug_event(dbf->scsi, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->scsi_lock, flags); } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 0be3d48681aead94466a71ab7b34c6ae7ca098ff..36d07584271d569d27ec2eeb3706235d6459e026 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -2,7 +2,7 @@ * zfcp device driver * debug feature declarations * - * Copyright IBM Corp. 2008, 2010 + * Copyright IBM Corp. 2008, 2015 */ #ifndef ZFCP_DBF_H @@ -17,6 +17,11 @@ #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull +enum zfcp_dbf_pseudo_erp_act_type { + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe, +}; + /** * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action * @ready: number of ready recovery actions @@ -110,6 +115,7 @@ struct zfcp_dbf_san { u32 d_id; #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32) char payload[ZFCP_DBF_SAN_MAX_PAYLOAD]; + u16 pl_len; } __packed; /** @@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res { u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; u32 fsf_status; u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; + u32 port_handle; + u32 lun_handle; } __packed; /** @@ -279,7 +287,7 @@ static inline void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) { if (debug_level_enabled(req->adapter->dbf->hba, level)) - zfcp_dbf_hba_fsf_res(tag, req); + zfcp_dbf_hba_fsf_res(tag, level, req); } /** @@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd, scmd->device->host->hostdata[0]; if (debug_level_enabled(adapter->dbf->scsi, level)) - zfcp_dbf_scsi(tag, scmd, req); + 
zfcp_dbf_scsi(tag, level, scmd, req); } /** diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 3fb410977014f81821e5dd16f65eebcd7f43c4d7..a59d678125bd0e0ad0bd1ca74b0d42985abb25d8 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -3,7 +3,7 @@ * * Error Recovery Procedures (ERP). * - * Copyright IBM Corp. 2002, 2010 + * Copyright IBM Corp. 2002, 2015 */ #define KMSG_COMPONENT "zfcp" @@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) break; case ZFCP_ERP_ACTION_REOPEN_PORT: - if (result == ZFCP_ERP_SUCCEEDED) - zfcp_scsi_schedule_rport_register(port); + /* This switch case might also happen after a forced reopen + * was successfully done and thus overwritten with a new + * non-forced reopen at `ersfs_2'. In this case, we must not + * do the clean-up of the non-forced version. + */ + if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) + if (result == ZFCP_ERP_SUCCEEDED) + zfcp_scsi_schedule_rport_register(port); /* fall through */ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: put_device(&port->dev); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 5b500652572b7c1395cc801c146f41535d310f31..c8fed9fa1cca3680015913162ce3ddfee50c85b1 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -3,7 +3,7 @@ * * External function declarations. * - * Copyright IBM Corp. 2002, 2010 + * Copyright IBM Corp. 
2002, 2015 */ #ifndef ZFCP_EXT_H @@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, struct zfcp_port *, struct scsi_device *, u8, u8); extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); +extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); -extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); +extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); @@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); -extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *); +extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, + struct zfcp_fsf_req *); /* zfcp_erp.c */ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 522a633c866a8b1e464ec857f179365e68bb6530..75f820ca17b79b0574e3afd91df18998a3438c30 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -3,7 +3,7 @@ * * Implementation of FSF commands. * - * Copyright IBM Corp. 2002, 2013 + * Copyright IBM Corp. 
2002, 2015 */ #define KMSG_COMPONENT "zfcp" @@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; case FSF_TOPO_FABRIC: - fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + if (bottom->connection_features & FSF_FEATURE_NPIV_MODE) + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + else + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; break; case FSF_TOPO_AL: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; @@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { fc_host_permanent_port_name(shost) = bottom->wwpn; - fc_host_port_type(shost) = FC_PORTTYPE_NPIV; } else fc_host_permanent_port_name(shost) = fc_host_port_name(shost); fc_host_maxframe_size(shost) = bottom->maximum_frame_size; @@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, if (zfcp_adapter_multi_buffer_active(adapter)) { if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) return -EIO; + qtcb->bottom.support.req_buf_length = + zfcp_qdio_real_bytes(sg_req); if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) return -EIO; + qtcb->bottom.support.resp_buf_length = + zfcp_qdio_real_bytes(sg_resp); zfcp_qdio_set_data_div(qdio, &req->qdio_req, zfcp_qdio_sbale_count(sg_req)); @@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, req->handler = zfcp_fsf_send_ct_handler; req->qtcb->header.port_handle = wka_port->handle; + ct->d_id = wka_port->d_id; req->data = ct; zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); @@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, hton24(req->qtcb->bottom.support.d_id, d_id); req->handler = zfcp_fsf_send_els_handler; + els->d_id = d_id; req->data = els; zfcp_dbf_san_req("fssels1", req, d_id); @@ -1575,7 +1583,7 @@ out: int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = 
wka_port->adapter->qdio; - struct zfcp_fsf_req *req; + struct zfcp_fsf_req *req = NULL; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) zfcp_fsf_req_free(req); out: spin_unlock_irq(&qdio->req_q_lock); + if (req && !IS_ERR(req)) + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); return retval; } @@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; - struct zfcp_fsf_req *req; + struct zfcp_fsf_req *req = NULL; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) zfcp_fsf_req_free(req); out: spin_unlock_irq(&qdio->req_q_lock); + if (req && !IS_ERR(req)) + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); return retval; } diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 57ae3ae1046d126d6dfe6769e2885bc8f210a7a9..be1c04b334c51f678d643e4c488173f8fd6be0ee 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,7 +3,7 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corp. 2002, 2010 + * Copyright IBM Corp. 
2002, 2015 */ #ifndef FSF_H @@ -436,6 +436,7 @@ struct zfcp_blk_drv_data { * @handler_data: data passed to handler function * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC) * @status: used to pass error status to calling function + * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS */ struct zfcp_fsf_ct_els { struct scatterlist *req; @@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els { void *handler_data; struct zfcp_port *port; int status; + u32 d_id; }; #endif /* FSF_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index b3c6ff49103b851f6467da7ff1fcb361865e2698..9069f98a18172e754c943010654de65e91c2fb7c 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -3,7 +3,7 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corp. 2002, 2013 + * Copyright IBM Corp. 2002, 2015 */ #define KMSG_COMPONENT "zfcp" @@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) ids.port_id = port->d_id; ids.roles = FC_RPORT_ROLE_FCP_TARGET; + zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); if (!rport) { dev_err(&port->adapter->ccw_device->dev, @@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) struct fc_rport *rport = port->rport; if (rport) { + zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); fc_remote_port_delete(rport); port->rport = NULL; } diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 41f9a00e4f74ff16a3301617d10f2d62ec820573..7aa01c1960ea99f01b946c5fa2d714c99ea39247 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2297,15 +2297,23 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, } case 
ARCMSR_MESSAGE_WRITE_WQBUFFER: { unsigned char *ver_addr; - int32_t user_len, cnt2end; + uint32_t user_len; + int32_t cnt2end; uint8_t *pQbuffer, *ptmpuserbuffer; + + user_len = pcmdmessagefld->cmdmessage.Length; + if (user_len > ARCMSR_API_DATA_BUFLEN) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } ptmpuserbuffer = ver_addr; - user_len = pcmdmessagefld->cmdmessage.Length; + memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); spin_lock_irqsave(&acb->wqbuffer_lock, flags); @@ -2537,18 +2545,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; struct CommandControlBlock *ccb; int target = cmd->device->id; - int lun = cmd->device->lun; - uint8_t scsicmd = cmd->cmnd[0]; cmd->scsi_done = done; cmd->host_scribble = NULL; cmd->result = 0; - if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){ - if(acb->devstate[target][lun] == ARECA_RAID_GONE) { - cmd->result = (DID_NO_CONNECT << 16); - } - cmd->scsi_done(cmd); - return 0; - } if (target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, cmd); diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 67669a9e73c1c94c7c9deeff3b5c3bc0723cada2..f3a33312a9a682e26148e9e6cebe4a8094288ab6 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -954,8 +954,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq) skb_put(skb, len); pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); - r = pci_dma_mapping_error(fnic->pdev, pa); - if (r) { + if (pci_dma_mapping_error(fnic->pdev, pa)) { + r = -ENOMEM; printk(KERN_ERR "PCI mapping failed with error %d\n", r); goto free_skb; } @@ -1093,8 +1093,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) pa = 
pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); - ret = pci_dma_mapping_error(fnic->pdev, pa); - if (ret) { + if (pci_dma_mapping_error(fnic->pdev, pa)) { + ret = -ENOMEM; printk(KERN_ERR "DMA map failed with error %d\n", ret); goto free_skb_on_err; } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a3860367b568aa06adf4290bf84717458c241e77..e9ce74afd13f4672e941d620f16ab602c647d33e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3930,6 +3930,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h, return rc; } +static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) +{ + struct bmic_identify_physical_device *id_phys; + bool is_spare = false; + int rc; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + return false; + + rc = hpsa_bmic_id_physical_device(h, + lunaddrbytes, + GET_BMIC_DRIVE_NUMBER(lunaddrbytes), + id_phys, sizeof(*id_phys)); + if (rc == 0) + is_spare = (id_phys->more_flags >> 6) & 0x01; + + kfree(id_phys); + return is_spare; +} + +#define RPL_DEV_FLAG_NON_DISK 0x1 +#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 +#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 + +#define BMIC_DEVICE_TYPE_ENCLOSURE 6 + +static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, + struct ext_report_lun_entry *rle) +{ + u8 device_flags; + u8 device_type; + + if (!MASKED_DEVICE(lunaddrbytes)) + return false; + + device_flags = rle->device_flags; + device_type = rle->device_type; + + if (device_flags & RPL_DEV_FLAG_NON_DISK) { + if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) + return false; + return true; + } + + if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) + return false; + + if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) + return false; + + /* + * Spares may be spun down, we do not want to + * do an Inquiry to a RAID set spare drive as + * that would have them spun up, that is a + * performance hit because I/O to the RAID device + * stops while the 
spin up occurs which can take + * over 50 seconds. + */ + if (hpsa_is_disk_spare(h, lunaddrbytes)) + return true; + + return false; +} static void hpsa_update_scsi_devices(struct ctlr_info *h) { @@ -4023,6 +4087,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) u8 *lunaddrbytes, is_OBDR = 0; int rc = 0; int phys_dev_index = i - (raid_ctlr_position == 0); + bool skip_device = false; physical_device = i < nphysicals + (raid_ctlr_position == 0); @@ -4030,10 +4095,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); - /* skip masked non-disk devices */ - if (MASKED_DEVICE(lunaddrbytes) && physical_device && - (physdev_list->LUN[phys_dev_index].device_flags & 0x01)) - continue; + /* + * Skip over some devices such as a spare. + */ + if (!tmpdevice->external && physical_device) { + skip_device = hpsa_skip_device(h, lunaddrbytes, + &physdev_list->LUN[phys_dev_index]); + if (skip_device) + continue; + } /* Get device type, vendor, model, device id */ rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 6aa317c303e2dced686c76cc1102e563b4f058b1..1f9f9e5af2072ec2e5467e4a0203cfe0c815916b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) spin_lock_irqsave(vhost->host->host_lock, flags); vhost->state = IBMVFC_NO_CRQ; vhost->logged_in = 0; - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); /* Clean out the queue */ memset(crq->msgs, 0, PAGE_SIZE); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index ef4ff03242ea13d825a3ba1993eb9489911d8af4..aaf7da07a358cb85a915943f1a5821572a53ef1c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1923,7 +1923,7 @@ struct 
megasas_instance_template { }; #define MEGASAS_IS_LOGICAL(scp) \ - (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 + ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 3f8d357b1bac9f93c2128254b11a677638c105e9..17c440b9d086afa341a386c69099c05fceea8b55 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1688,16 +1688,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) goto out_done; } - switch (scmd->cmnd[0]) { - case SYNCHRONIZE_CACHE: - /* - * FW takes care of flush cache on its own - * No need to send it down - */ + /* + * FW takes care of flush cache on its own for Virtual Disk. + * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW. + */ + if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) { scmd->result = DID_OK << 16; goto out_done; - default: - break; } if (instance->instancet->build_and_issue_cmd(instance, scmd)) { @@ -5941,11 +5938,11 @@ static void megasas_detach_one(struct pci_dev *pdev) if (fusion->ld_drv_map[i]) free_pages((ulong)fusion->ld_drv_map[i], fusion->drv_map_pages); - if (fusion->pd_seq_sync) - dma_free_coherent(&instance->pdev->dev, - pd_seq_map_sz, - fusion->pd_seq_sync[i], - fusion->pd_seq_phys[i]); + if (fusion->pd_seq_sync[i]) + dma_free_coherent(&instance->pdev->dev, + pd_seq_map_sz, + fusion->pd_seq_sync[i], + fusion->pd_seq_phys[i]); } free_pages((ulong)instance->ctrl_context, instance->ctrl_context_pages); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6180f7970bbf73c0d9dcfefd6e86ffe3bf4f27f4..8cead04f26d6bd204bebac82be2f443291f73ef6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1275,9 +1275,9 @@ 
scsih_target_alloc(struct scsi_target *starget) sas_target_priv_data->handle = raid_device->handle; sas_target_priv_data->sas_address = raid_device->wwid; sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; - sas_target_priv_data->raid_device = raid_device; if (ioc->is_warpdrive) - raid_device->starget = starget; + sas_target_priv_data->raid_device = raid_device; + raid_device->starget = starget; } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); return 0; @@ -3706,6 +3706,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, } } +static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) +{ + return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); +} + /** * _scsih_flush_running_cmds - completing outstanding commands. * @ioc: per adapter object @@ -3727,6 +3732,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) if (!scmd) continue; count++; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, + SDEV_RUNNING); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); if (ioc->pci_error_recovery) @@ -3831,8 +3839,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) SAM_STAT_CHECK_CONDITION; } - - /** * scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object @@ -3859,6 +3865,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); + /* + * Lock the device for any subsequent command until command is + * done. 
+ */ + if (ata_12_16_cmd(scmd)) + scsi_internal_device_block(scmd->device); + sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4431,6 +4444,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); if (mpi_reply == NULL) { @@ -4510,7 +4526,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) le16_to_cpu(mpi_reply->DevHandle)); mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); - if (!(ioc->logging_level & MPT_DEBUG_REPLY) && + if ((ioc->logging_level & MPT_DEBUG_REPLY) && ((scmd->sense_buffer[2] == UNIT_ATTENTION) || (scmd->sense_buffer[2] == MEDIUM_ERROR) || (scmd->sense_buffer[2] == HARDWARE_ERROR))) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index fc6674db4f2db3b5da454c0df566e638477d8114..c44cbf46221ce9bfa54a18c98f81e70ca9fdffee 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2257,6 +2257,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) { scsi_qla_host_t *vha = shost_priv(shost); + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 1; if (!vha->host) return 1; if (time > vha->hw->loop_reset_delay * HZ) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d09d60293c272663b5b9b84f27ce2fab3e61a6aa..e357a393d56e0908c74149d310f2244fadd23656 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -4981,6 +4981,7 @@ static void __exit scsi_debug_exit(void) bus_unregister(&pseudo_lld_bus); root_device_unregister(pseudo_primary); + vfree(map_storep); vfree(dif_storep); vfree(fake_storep); } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 
87b35b78d879d1a43ee703169b4a343f27defd8e..95fa74203b99f1781786d34a5c9ec237ff58d305 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1458,12 +1458,12 @@ retry: out_err: kfree(lun_data); out: - scsi_device_put(sdev); if (scsi_device_created(sdev)) /* * the sdev we used didn't appear in the report luns scan */ __scsi_remove_device(sdev); + scsi_device_put(sdev); return ret; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 5d81bcc1dc7516248e297f72cdf0615f7272c116..d5c00951cf93ae0677b40bd70e1dc9a7d5366ae4 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2806,10 +2806,10 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max && sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && - sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) - rw_max = q->limits.io_opt = - sdkp->opt_xfer_blocks * sdp->sector_size; - else + logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) { + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); + } else rw_max = BLK_DEF_MAX_SECTORS; /* Combine with controller limits */ diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 654630bb7d0edeb48438654e47d8a60151d2c87b..765a6f1ac1b7320edc860b1eb6baa30e7e1beac4 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo return blocks << (ilog2(sdev->sector_size) - 9); } +static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) +{ + return blocks * sdev->sector_size; +} + /* * A DIF-capable target device can be formatted with different * protection schemes. 
Currently 0 through 3 are defined: diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 8562ada73c1d93cea3978942d68ff32c905edd89..4e541773c805c0d76aa8370b398c90d43cd94994 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -3136,6 +3136,13 @@ int icnss_trigger_recovery(struct device *dev) goto out; } + if (test_bit(ICNSS_PDR_ENABLED, &priv->state)) { + icnss_pr_err("PD restart not enabled: state: 0x%lx\n", + priv->state); + ret = -EOPNOTSUPP; + goto out; + } + if (!priv->service_notifier[0].handle) { icnss_pr_err("Invalid handle during recovery, state: 0x%lx\n", priv->state); diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c index 85ff81ff475cad02dba7bd318f2b27b9ad3226db..c1c65cd25558b1b9c15a8af402e54359f0e302de 100644 --- a/drivers/soc/qcom/service-notifier.c +++ b/drivers/soc/qcom/service-notifier.c @@ -635,7 +635,13 @@ static int send_pd_restart_req(const char *service_path, return rc; } - /* Check the response */ + /* Check response if PDR is disabled */ + if (QMI_RESP_BIT_SHIFT(resp.resp.result) == QMI_ERR_DISABLED_V01) { + pr_err("PD restart is disabled 0x%x\n", + QMI_RESP_BIT_SHIFT(resp.resp.error)); + return -EOPNOTSUPP; + } + /* Check the response for other error case*/ if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) { pr_err("QMI request for PD restart failed 0x%x\n", QMI_RESP_BIT_SHIFT(resp.resp.error)); diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index b04b05a0904eec086c49250c3dc4e27cb84a3927..5548a31e1a39a100142b45841cbe38e1aa007e38 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -116,7 +116,7 @@ static const struct spm_reg_data spm_reg_8064_cpu = { static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv); -typedef int (*idle_fn)(int); +typedef int (*idle_fn)(void); static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops); static inline void spm_register_write(struct spm_driver_data *drv, @@ -179,10 +179,10 @@ static int 
qcom_pm_collapse(unsigned long int unused) return -1; } -static int qcom_cpu_spc(int cpu) +static int qcom_cpu_spc(void) { int ret; - struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu); + struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv); spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC); ret = cpu_suspend(0, qcom_pm_collapse); @@ -197,9 +197,9 @@ static int qcom_cpu_spc(int cpu) return ret; } -static int qcom_idle_enter(int cpu, unsigned long index) +static int qcom_idle_enter(unsigned long index) { - return per_cpu(qcom_idle_ops, cpu)[index](cpu); + return __this_cpu_read(qcom_idle_ops)[index](); } static const struct of_device_id qcom_idle_state_match[] __initconst = { @@ -288,7 +288,7 @@ static struct spm_driver_data *spm_get_drv(struct platform_device *pdev, struct spm_driver_data *drv = NULL; struct device_node *cpu_node, *saw_node; int cpu; - bool found; + bool found = 0; for_each_possible_cpu(cpu) { cpu_node = of_cpu_device_node_get(cpu); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 39412c9097c6a240466c51c941ec890a4612542e..a3965cac1b3447ce6cf6c715e46269bbc301508f 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -753,7 +753,6 @@ static int dspi_remove(struct platform_device *pdev) /* Disconnect from the SPI framework */ clk_disable_unprepare(dspi->clk); spi_unregister_master(dspi->master); - spi_master_put(dspi->master); return 0; } diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index a7934ab00b96505f3753fa38266238f3abcc6f64..d22de4c8c3995c87328f37fd140041833514a8bb 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -263,6 +263,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) { brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div); + /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ + if (sh_msiof_spi_div_table[k].div == 1 && brps > 2) + continue; if 
(brps <= 32) /* max of brdv is 32 */ break; } diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c index 99c6584fcfa58f6dfe3c62ca54f714f520e10053..97246bcbcd62548ebd60f6bc93a2ce66f83fd40b 100644 --- a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c @@ -197,6 +197,6 @@ void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, frame.sp = regs->sp; frame.pc = regs->pc; output->printf(output, "\n"); - walk_stackframe(&frame, report_trace, &sts); + walk_stackframe(current, &frame, report_trace, &sts); } } diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index b1e45161eefcf45cf04e65c1bdab3d2aa52dcf0c..18c2b6daf58857e1d1f34462d5e95a4e5e1d1178 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -392,11 +392,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned start_line, if (unlikely(timeit)) { ts_end = ktime_get(); - if (ktime_to_ns(par->update_time)) + if (!ktime_to_ns(par->update_time)) par->update_time = ts_start; - par->update_time = ts_start; fps = ktime_us_delta(ts_start, par->update_time); + par->update_time = ts_start; fps = fps ? 
1000000 / fps : 0; throughput = ktime_us_delta(ts_end, ts_start); diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig index 4e094602437c3dea1c36e7614f808d680a0b4aa1..c579141a7bed8a52059ca4a5331fa6c3d034ac2b 100644 --- a/drivers/staging/goldfish/Kconfig +++ b/drivers/staging/goldfish/Kconfig @@ -4,6 +4,12 @@ config GOLDFISH_AUDIO ---help--- Emulated audio channel for the Goldfish Android Virtual Device +config GOLDFISH_SYNC + tristate "Goldfish AVD Sync Driver" + depends on GOLDFISH + ---help--- + Emulated sync fences for the Goldfish Android Virtual Device + config MTD_GOLDFISH_NAND tristate "Goldfish NAND device" depends on GOLDFISH diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile index dec34ad58162fdda2dde0969b0e004bc3dbfb01d..0cf525588210792e85f0034d3f5c181d24a8dce6 100644 --- a/drivers/staging/goldfish/Makefile +++ b/drivers/staging/goldfish/Makefile @@ -4,3 +4,8 @@ obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o + +# and sync + +ccflags-y := -Idrivers/staging/android +obj-$(CONFIG_GOLDFISH_SYNC) += goldfish_sync.o diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c index b0927e49d0a81f31e63895fa2621ed61bfc30a57..63b79c09b41ba2231b7a4b4c3beb644659d3fad4 100644 --- a/drivers/staging/goldfish/goldfish_audio.c +++ b/drivers/staging/goldfish/goldfish_audio.c @@ -26,7 +26,9 @@ #include #include #include +#include #include +#include MODULE_AUTHOR("Google, Inc."); MODULE_DESCRIPTION("Android QEMU Audio Driver"); @@ -115,6 +117,7 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct goldfish_audio *data = fp->private_data; + unsigned long irq_flags; int length; int result = 0; @@ -128,6 +131,10 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf, wait_event_interruptible(data->wait, data->buffer_status & AUDIO_INT_READ_BUFFER_FULL); + 
spin_lock_irqsave(&data->lock, irq_flags); + data->buffer_status &= ~AUDIO_INT_READ_BUFFER_FULL; + spin_unlock_irqrestore(&data->lock, irq_flags); + length = AUDIO_READ(data, AUDIO_READ_BUFFER_AVAILABLE); /* copy data to user space */ @@ -344,11 +351,25 @@ static int goldfish_audio_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_audio_of_match[] = { + { .compatible = "google,goldfish-audio", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_audio_of_match); + +static const struct acpi_device_id goldfish_audio_acpi_match[] = { + { "GFSH0005", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_audio_acpi_match); + static struct platform_driver goldfish_audio_driver = { .probe = goldfish_audio_probe, .remove = goldfish_audio_remove, .driver = { - .name = "goldfish_audio" + .name = "goldfish_audio", + .of_match_table = goldfish_audio_of_match, + .acpi_match_table = ACPI_PTR(goldfish_audio_acpi_match), } }; diff --git a/drivers/staging/goldfish/goldfish_sync.c b/drivers/staging/goldfish/goldfish_sync.c new file mode 100644 index 0000000000000000000000000000000000000000..ba8def29901ea1a084aa01637c5269e50a80de90 --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync.c @@ -0,0 +1,987 @@ +/* + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "sw_sync.h" +#include "sync.h" + +#define ERR(...) 
printk(KERN_ERR __VA_ARGS__); + +#define INFO(...) printk(KERN_INFO __VA_ARGS__); + +#define DPRINT(...) pr_debug(__VA_ARGS__); + +#define DTRACE() DPRINT("%s: enter", __func__) + +/* The Goldfish sync driver is designed to provide a interface + * between the underlying host's sync device and the kernel's + * sw_sync. + * The purpose of the device/driver is to enable lightweight + * creation and signaling of timelines and fences + * in order to synchronize the guest with host-side graphics events. + * + * Each time the interrupt trips, the driver + * may perform a sw_sync operation. + */ + +/* The operations are: */ + +/* Ready signal - used to mark when irq should lower */ +#define CMD_SYNC_READY 0 + +/* Create a new timeline. writes timeline handle */ +#define CMD_CREATE_SYNC_TIMELINE 1 + +/* Create a fence object. reads timeline handle and time argument. + * Writes fence fd to the SYNC_REG_HANDLE register. */ +#define CMD_CREATE_SYNC_FENCE 2 + +/* Increments timeline. reads timeline handle and time argument */ +#define CMD_SYNC_TIMELINE_INC 3 + +/* Destroys a timeline. reads timeline handle */ +#define CMD_DESTROY_SYNC_TIMELINE 4 + +/* Starts a wait on the host with + * the given glsync object and sync thread handle. */ +#define CMD_TRIGGER_HOST_WAIT 5 + +/* The register layout is: */ + +#define SYNC_REG_BATCH_COMMAND 0x00 /* host->guest batch commands */ +#define SYNC_REG_BATCH_GUESTCOMMAND 0x04 /* guest->host batch commands */ +#define SYNC_REG_BATCH_COMMAND_ADDR 0x08 /* communicate physical address of host->guest batch commands */ +#define SYNC_REG_BATCH_COMMAND_ADDR_HIGH 0x0c /* 64-bit part */ +#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR 0x10 /* communicate physical address of guest->host commands */ +#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */ +#define SYNC_REG_INIT 0x18 /* signals that the device has been probed */ + +/* There is an ioctl associated with goldfish sync driver. 
+ * Make it conflict with ioctls that are not likely to be used + * in the emulator. + * + * '@' 00-0F linux/radeonfb.h conflict! + * '@' 00-0F drivers/video/aty/aty128fb.c conflict! + */ +#define GOLDFISH_SYNC_IOC_MAGIC '@' + +#define GOLDFISH_SYNC_IOC_QUEUE_WORK _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info) + +/* The above definitions (command codes, register layout, ioctl definitions) + * need to be in sync with the following files: + * + * Host-side (emulator): + * external/qemu/android/emulation/goldfish_sync.h + * external/qemu-android/hw/misc/goldfish_sync.c + * + * Guest-side (system image): + * device/generic/goldfish-opengl/system/egl/goldfish_sync.h + * device/generic/goldfish/ueventd.ranchu.rc + * platform/build/target/board/generic/sepolicy/file_contexts + */ +struct goldfish_sync_hostcmd { + /* sorted for alignment */ + uint64_t handle; + uint64_t hostcmd_handle; + uint32_t cmd; + uint32_t time_arg; +}; + +struct goldfish_sync_guestcmd { + uint64_t host_command; /* uint64_t for alignment */ + uint64_t glsync_handle; + uint64_t thread_handle; + uint64_t guest_timeline_handle; +}; + +#define GOLDFISH_SYNC_MAX_CMDS 64 + +struct goldfish_sync_state { + char __iomem *reg_base; + int irq; + + /* Spinlock protects |to_do| / |to_do_end|. */ + spinlock_t lock; + /* |mutex_lock| protects all concurrent access + * to timelines for both kernel and user space. */ + struct mutex mutex_lock; + + /* Buffer holding commands issued from host. */ + struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS]; + uint32_t to_do_end; + + /* Addresses for the reading or writing + * of individual commands. The host can directly write + * to |batch_hostcmd| (and then this driver immediately + * copies contents to |to_do|). This driver either replies + * through |batch_hostcmd| or simply issues a + * guest->host command through |batch_guestcmd|. 
+ */ + struct goldfish_sync_hostcmd *batch_hostcmd; + struct goldfish_sync_guestcmd *batch_guestcmd; + + /* Used to give this struct itself to a work queue + * function for executing actual sync commands. */ + struct work_struct work_item; +}; + +static struct goldfish_sync_state global_sync_state[1]; + +struct goldfish_sync_timeline_obj { + struct sw_sync_timeline *sw_sync_tl; + uint32_t current_time; + /* We need to be careful about when we deallocate + * this |goldfish_sync_timeline_obj| struct. + * In order to ensure proper cleanup, we need to + * consider the triggered host-side wait that may + * still be in flight when the guest close()'s a + * goldfish_sync device's sync context fd (and + * destroys the |sw_sync_tl| field above). + * The host-side wait may raise IRQ + * and tell the kernel to increment the timeline _after_ + * the |sw_sync_tl| has already been set to null. + * + * From observations on OpenGL apps and CTS tests, this + * happens at some very low probability upon context + * destruction or process close, but it does happen + * and it needs to be handled properly. Otherwise, + * if we clean up the surrounding |goldfish_sync_timeline_obj| + * too early, any |handle| field of any host->guest command + * might not even point to a null |sw_sync_tl| field, + * but to garbage memory or even a reclaimed |sw_sync_tl|. + * If we do not count such "pending waits" and kfree the object + * immediately upon |goldfish_sync_timeline_destroy|, + * we might get mysterous RCU stalls after running a long + * time because the garbage memory that is being read + * happens to be interpretable as a |spinlock_t| struct + * that is currently in the locked state. + * + * To track when to free the |goldfish_sync_timeline_obj| + * itself, we maintain a kref. + * The kref essentially counts the timeline itself plus + * the number of waits in flight. 
kref_init/kref_put + * are issued on + * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy| + * and kref_get/kref_put are issued on + * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|. + * + * The timeline is destroyed after reference count + * reaches zero, which would happen after + * |goldfish_sync_timeline_destroy| and all pending + * |goldfish_sync_timeline_inc|'s are fulfilled. + * + * NOTE (1): We assume that |fence_create| and + * |timeline_inc| calls are 1:1, otherwise the kref scheme + * will not work. This is a valid assumption as long + * as the host-side virtual device implementation + * does not insert any timeline increments + * that we did not trigger from here. + * + * NOTE (2): The use of kref by itself requires no locks, + * but this does not mean everything works without locks. + * Related timeline operations do require a lock of some sort, + * or at least are not proven to work without it. + * In particualr, we assume that all the operations + * done on the |kref| field above are done in contexts where + * |global_sync_state->mutex_lock| is held. Do not + * remove that lock until everything is proven to work + * without it!!! */ + struct kref kref; +}; + +/* We will call |delete_timeline_obj| when the last reference count + * of the kref is decremented. This deletes the sw_sync + * timeline object along with the wrapper itself. */ +static void delete_timeline_obj(struct kref* kref) { + struct goldfish_sync_timeline_obj* obj = + container_of(kref, struct goldfish_sync_timeline_obj, kref); + + sync_timeline_destroy(&obj->sw_sync_tl->obj); + obj->sw_sync_tl = NULL; + kfree(obj); +} + +static uint64_t gensym_ctr; +static void gensym(char *dst) +{ + sprintf(dst, "goldfish_sync:gensym:%llu", gensym_ctr); + gensym_ctr++; +} + +/* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock| + * is held. 
*/ +static struct goldfish_sync_timeline_obj* +goldfish_sync_timeline_create(void) +{ + + char timeline_name[256]; + struct sw_sync_timeline *res_sync_tl = NULL; + struct goldfish_sync_timeline_obj *res; + + DTRACE(); + + gensym(timeline_name); + + res_sync_tl = sw_sync_timeline_create(timeline_name); + if (!res_sync_tl) { + ERR("Failed to create sw_sync timeline."); + return NULL; + } + + res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL); + res->sw_sync_tl = res_sync_tl; + res->current_time = 0; + kref_init(&res->kref); + + DPRINT("new timeline_obj=0x%p", res); + return res; +} + +/* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock| + * is held. */ +static int +goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj, + uint32_t val) +{ + + int fd; + char fence_name[256]; + struct sync_pt *syncpt = NULL; + struct sync_fence *sync_obj = NULL; + struct sw_sync_timeline *tl; + + DTRACE(); + + if (!obj) return -1; + + tl = obj->sw_sync_tl; + + syncpt = sw_sync_pt_create(tl, val); + if (!syncpt) { + ERR("could not create sync point! " + "sync_timeline=0x%p val=%d", + tl, val); + return -1; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ERR("could not get unused fd for sync fence. " + "errno=%d", fd); + goto err_cleanup_pt; + } + + gensym(fence_name); + + sync_obj = sync_fence_create(fence_name, syncpt); + if (!sync_obj) { + ERR("could not create sync fence! " + "sync_timeline=0x%p val=%d sync_pt=0x%p", + tl, val, syncpt); + goto err_cleanup_fd_pt; + } + + DPRINT("installing sync fence into fd %d sync_obj=0x%p", fd, sync_obj); + sync_fence_install(sync_obj, fd); + kref_get(&obj->kref); + + return fd; + +err_cleanup_fd_pt: + put_unused_fd(fd); +err_cleanup_pt: + sync_pt_free(syncpt); + return -1; +} + +/* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock| + * is held. 
*/ +static void +goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc) +{ + DTRACE(); + /* Just give up if someone else nuked the timeline. + * Whoever it was won't care that it doesn't get signaled. */ + if (!obj) return; + + DPRINT("timeline_obj=0x%p", obj); + sw_sync_timeline_inc(obj->sw_sync_tl, inc); + DPRINT("incremented timeline. increment max_time"); + obj->current_time += inc; + + /* Here, we will end up deleting the timeline object if it + * turns out that this call was a pending increment after + * |goldfish_sync_timeline_destroy| was called. */ + kref_put(&obj->kref, delete_timeline_obj); + DPRINT("done"); +} + +/* |goldfish_sync_timeline_destroy| assumes + * that |global_sync_state->mutex_lock| is held. */ +static void +goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj) +{ + DTRACE(); + /* See description of |goldfish_sync_timeline_obj| for why we + * should not immediately destroy |obj| */ + kref_put(&obj->kref, delete_timeline_obj); +} + +static inline void +goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t handle, + uint32_t time_arg, + uint64_t hostcmd_handle) +{ + struct goldfish_sync_hostcmd *to_add; + + DTRACE(); + + BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS); + + to_add = &sync_state->to_do[sync_state->to_do_end]; + + to_add->cmd = cmd; + to_add->handle = handle; + to_add->time_arg = time_arg; + to_add->hostcmd_handle = hostcmd_handle; + + sync_state->to_do_end += 1; +} + +static inline void +goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t handle, + uint32_t time_arg, + uint64_t hostcmd_handle) +{ + unsigned long irq_flags; + struct goldfish_sync_hostcmd *batch_hostcmd = + sync_state->batch_hostcmd; + + DTRACE(); + + spin_lock_irqsave(&sync_state->lock, irq_flags); + + batch_hostcmd->cmd = cmd; + batch_hostcmd->handle = handle; + batch_hostcmd->time_arg = time_arg; + batch_hostcmd->hostcmd_handle = 
hostcmd_handle; + writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND); + + spin_unlock_irqrestore(&sync_state->lock, irq_flags); +} + +static inline void +goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t glsync_handle, + uint64_t thread_handle, + uint64_t timeline_handle) +{ + unsigned long irq_flags; + struct goldfish_sync_guestcmd *batch_guestcmd = + sync_state->batch_guestcmd; + + DTRACE(); + + spin_lock_irqsave(&sync_state->lock, irq_flags); + + batch_guestcmd->host_command = (uint64_t)cmd; + batch_guestcmd->glsync_handle = (uint64_t)glsync_handle; + batch_guestcmd->thread_handle = (uint64_t)thread_handle; + batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle; + writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND); + + spin_unlock_irqrestore(&sync_state->lock, irq_flags); +} + +/* |goldfish_sync_interrupt| handles IRQ raises from the virtual device. + * In the context of OpenGL, this interrupt will fire whenever we need + * to signal a fence fd in the guest, with the command + * |CMD_SYNC_TIMELINE_INC|. + * However, because this function will be called in an interrupt context, + * it is necessary to do the actual work of signaling off of interrupt context. + * The shared work queue is used for this purpose. At the end when + * all pending commands are intercepted by the interrupt handler, + * we call |schedule_work|, which will later run the actual + * desired sync command in |goldfish_sync_work_item_fn|. 
+ */ +static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id) +{ + + struct goldfish_sync_state *sync_state = dev_id; + + uint32_t nextcmd; + uint32_t command_r; + uint64_t handle_rw; + uint32_t time_r; + uint64_t hostcmd_handle_rw; + + int count = 0; + + DTRACE(); + + sync_state = dev_id; + + spin_lock(&sync_state->lock); + + for (;;) { + + readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND); + nextcmd = sync_state->batch_hostcmd->cmd; + + if (nextcmd == 0) + break; + + command_r = nextcmd; + handle_rw = sync_state->batch_hostcmd->handle; + time_r = sync_state->batch_hostcmd->time_arg; + hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle; + + goldfish_sync_cmd_queue( + sync_state, + command_r, + handle_rw, + time_r, + hostcmd_handle_rw); + + count++; + } + + spin_unlock(&sync_state->lock); + + schedule_work(&sync_state->work_item); + + return (count == 0) ? IRQ_NONE : IRQ_HANDLED; +} + +/* |goldfish_sync_work_item_fn| does the actual work of servicing + * host->guest sync commands. This function is triggered whenever + * the IRQ for the goldfish sync device is raised. Once it starts + * running, it grabs the contents of the buffer containing the + * commands it needs to execute (there may be multiple, because + * our IRQ is active high and not edge triggered), and then + * runs all of them one after the other. 
+ */ +static void goldfish_sync_work_item_fn(struct work_struct *input) +{ + + struct goldfish_sync_state *sync_state; + int sync_fence_fd; + + struct goldfish_sync_timeline_obj *timeline; + uint64_t timeline_ptr; + + uint64_t hostcmd_handle; + + uint32_t cmd; + uint64_t handle; + uint32_t time_arg; + + struct goldfish_sync_hostcmd *todo; + uint32_t todo_end; + + unsigned long irq_flags; + + struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS]; + uint32_t i = 0; + + sync_state = container_of(input, struct goldfish_sync_state, work_item); + + mutex_lock(&sync_state->mutex_lock); + + spin_lock_irqsave(&sync_state->lock, irq_flags); { + + todo_end = sync_state->to_do_end; + + DPRINT("num sync todos: %u", sync_state->to_do_end); + + for (i = 0; i < todo_end; i++) + to_run[i] = sync_state->to_do[i]; + + /* We expect that commands will come in at a slow enough rate + * so that incoming items will not be more than + * GOLDFISH_SYNC_MAX_CMDS. + * + * This is because the way the sync device is used, + * it's only for managing buffer data transfers per frame, + * with a sequential dependency between putting things in + * to_do and taking them out. Once a set of commands is + * queued up in to_do, the user of the device waits for + * them to be processed before queuing additional commands, + * which limits the rate at which commands come in + * to the rate at which we take them out here. + * + * We also don't expect more than MAX_CMDS to be issued + * at once; there is a correspondence between + * which buffers need swapping to the (display / buffer queue) + * to particular commands, and we don't expect there to be + * enough display or buffer queues in operation at once + * to overrun GOLDFISH_SYNC_MAX_CMDS. 
+ */ + sync_state->to_do_end = 0; + + } spin_unlock_irqrestore(&sync_state->lock, irq_flags); + + for (i = 0; i < todo_end; i++) { + DPRINT("todo index: %u", i); + + todo = &to_run[i]; + + cmd = todo->cmd; + + handle = (uint64_t)todo->handle; + time_arg = todo->time_arg; + hostcmd_handle = (uint64_t)todo->hostcmd_handle; + + DTRACE(); + + timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle; + + switch (cmd) { + case CMD_SYNC_READY: + break; + case CMD_CREATE_SYNC_TIMELINE: + DPRINT("exec CMD_CREATE_SYNC_TIMELINE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + timeline = goldfish_sync_timeline_create(); + timeline_ptr = (uintptr_t)timeline; + goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE, + timeline_ptr, + 0, + hostcmd_handle); + DPRINT("sync timeline created: %p", timeline); + break; + case CMD_CREATE_SYNC_FENCE: + DPRINT("exec CMD_CREATE_SYNC_FENCE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg); + goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE, + sync_fence_fd, + 0, + hostcmd_handle); + break; + case CMD_SYNC_TIMELINE_INC: + DPRINT("exec CMD_SYNC_TIMELINE_INC: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + goldfish_sync_timeline_inc(timeline, time_arg); + break; + case CMD_DESTROY_SYNC_TIMELINE: + DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + goldfish_sync_timeline_destroy(timeline); + break; + } + DPRINT("Done executing sync command"); + } + mutex_unlock(&sync_state->mutex_lock); +} + +/* Guest-side interface: file operations */ + +/* Goldfish sync context and ioctl info. + * + * When a sync context is created by open()-ing the goldfish sync device, we + * create a sync context (|goldfish_sync_context|). 
+ * + * Currently, the only data required to track is the sync timeline itself + * along with the current time, which are all packed up in the + * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context| + * as the filp->private_data. + * + * Next, when a sync context user requests that work be queued and a fence + * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds + * information about which host handles to touch for this particular + * queue-work operation. We need to know about the host-side sync thread + * and the particular host-side GLsync object. We also possibly write out + * a file descriptor. + */ +struct goldfish_sync_context { + struct goldfish_sync_timeline_obj *timeline; +}; + +struct goldfish_sync_ioctl_info { + uint64_t host_glsync_handle_in; + uint64_t host_syncthread_handle_in; + int fence_fd_out; +}; + +static int goldfish_sync_open(struct inode *inode, struct file *file) +{ + + struct goldfish_sync_context *sync_context; + + DTRACE(); + + mutex_lock(&global_sync_state->mutex_lock); + + sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL); + + if (sync_context == NULL) { + ERR("Creation of goldfish sync context failed!"); + mutex_unlock(&global_sync_state->mutex_lock); + return -ENOMEM; + } + + sync_context->timeline = NULL; + + file->private_data = sync_context; + + DPRINT("successfully create a sync context @0x%p", sync_context); + + mutex_unlock(&global_sync_state->mutex_lock); + + return 0; +} + +static int goldfish_sync_release(struct inode *inode, struct file *file) +{ + + struct goldfish_sync_context *sync_context; + + DTRACE(); + + mutex_lock(&global_sync_state->mutex_lock); + + sync_context = file->private_data; + + if (sync_context->timeline) + goldfish_sync_timeline_destroy(sync_context->timeline); + + sync_context->timeline = NULL; + + kfree(sync_context); + + mutex_unlock(&global_sync_state->mutex_lock); + + return 0; +} + +/* |goldfish_sync_ioctl| is the guest-facing interface of 
goldfish sync + * and is used in conjunction with eglCreateSyncKHR to queue up the + * actual work of waiting for the EGL sync command to complete, + * possibly returning a fence fd to the guest. + */ +static long goldfish_sync_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct goldfish_sync_context *sync_context_data; + struct goldfish_sync_timeline_obj *timeline; + int fd_out; + struct goldfish_sync_ioctl_info ioctl_data; + + DTRACE(); + + sync_context_data = file->private_data; + fd_out = -1; + + switch (cmd) { + case GOLDFISH_SYNC_IOC_QUEUE_WORK: + + DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK"); + + mutex_lock(&global_sync_state->mutex_lock); + + if (copy_from_user(&ioctl_data, + (void __user *)arg, + sizeof(ioctl_data))) { + ERR("Failed to copy memory for ioctl_data from user."); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + if (ioctl_data.host_syncthread_handle_in == 0) { + DPRINT("Error: zero host syncthread handle!!!"); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + if (!sync_context_data->timeline) { + DPRINT("no timeline yet, create one."); + sync_context_data->timeline = goldfish_sync_timeline_create(); + DPRINT("timeline: 0x%p", &sync_context_data->timeline); + } + + timeline = sync_context_data->timeline; + fd_out = goldfish_sync_fence_create(timeline, + timeline->current_time + 1); + DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)", + fd_out, + sync_context_data->timeline->current_time + 1, + sync_context_data->timeline); + + ioctl_data.fence_fd_out = fd_out; + + if (copy_to_user((void __user *)arg, + &ioctl_data, + sizeof(ioctl_data))) { + DPRINT("Error, could not copy to user!!!"); + + sys_close(fd_out); + /* We won't be doing an increment, kref_put immediately. 
*/ + kref_put(&timeline->kref, delete_timeline_obj); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + /* We are now about to trigger a host-side wait; + * accumulate on |pending_waits|. */ + goldfish_sync_send_guestcmd(global_sync_state, + CMD_TRIGGER_HOST_WAIT, + ioctl_data.host_glsync_handle_in, + ioctl_data.host_syncthread_handle_in, + (uint64_t)(uintptr_t)(sync_context_data->timeline)); + + mutex_unlock(&global_sync_state->mutex_lock); + return 0; + default: + return -ENOTTY; + } +} + +static const struct file_operations goldfish_sync_fops = { + .owner = THIS_MODULE, + .open = goldfish_sync_open, + .release = goldfish_sync_release, + .unlocked_ioctl = goldfish_sync_ioctl, + .compat_ioctl = goldfish_sync_ioctl, +}; + +static struct miscdevice goldfish_sync_device = { + .name = "goldfish_sync", + .fops = &goldfish_sync_fops, +}; + + +static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state, + void *batch_addr, + uint32_t addr_offset, + uint32_t addr_offset_high) +{ + uint64_t batch_addr_phys; + uint32_t batch_addr_phys_test_lo; + uint32_t batch_addr_phys_test_hi; + + if (!batch_addr) { + ERR("Could not use batch command address!"); + return false; + } + + batch_addr_phys = virt_to_phys(batch_addr); + writel((uint32_t)(batch_addr_phys), + sync_state->reg_base + addr_offset); + writel((uint32_t)(batch_addr_phys >> 32), + sync_state->reg_base + addr_offset_high); + + batch_addr_phys_test_lo = + readl(sync_state->reg_base + addr_offset); + batch_addr_phys_test_hi = + readl(sync_state->reg_base + addr_offset_high); + + if (virt_to_phys(batch_addr) != + (((uint64_t)batch_addr_phys_test_hi << 32) | + batch_addr_phys_test_lo)) { + ERR("Invalid batch command address!"); + return false; + } + + return true; +} + +int goldfish_sync_probe(struct platform_device *pdev) +{ + struct resource *ioresource; + struct goldfish_sync_state *sync_state = global_sync_state; + int status; + + DTRACE(); + + sync_state->to_do_end = 0; + + 
spin_lock_init(&sync_state->lock); + mutex_init(&sync_state->mutex_lock); + + platform_set_drvdata(pdev, sync_state); + + ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (ioresource == NULL) { + ERR("platform_get_resource failed"); + return -ENODEV; + } + + sync_state->reg_base = devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE); + if (sync_state->reg_base == NULL) { + ERR("Could not ioremap"); + return -ENOMEM; + } + + sync_state->irq = platform_get_irq(pdev, 0); + if (sync_state->irq < 0) { + ERR("Could not platform_get_irq"); + return -ENODEV; + } + + status = devm_request_irq(&pdev->dev, + sync_state->irq, + goldfish_sync_interrupt, + IRQF_SHARED, + pdev->name, + sync_state); + if (status) { + ERR("request_irq failed"); + return -ENODEV; + } + + INIT_WORK(&sync_state->work_item, + goldfish_sync_work_item_fn); + + misc_register(&goldfish_sync_device); + + /* Obtain addresses for batch send/recv of commands. */ + { + struct goldfish_sync_hostcmd *batch_addr_hostcmd; + struct goldfish_sync_guestcmd *batch_addr_guestcmd; + + batch_addr_hostcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd), + GFP_KERNEL); + batch_addr_guestcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd), + GFP_KERNEL); + + if (!setup_verify_batch_cmd_addr(sync_state, + batch_addr_hostcmd, + SYNC_REG_BATCH_COMMAND_ADDR, + SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) { + ERR("goldfish_sync: Could not setup batch command address"); + return -ENODEV; + } + + if (!setup_verify_batch_cmd_addr(sync_state, + batch_addr_guestcmd, + SYNC_REG_BATCH_GUESTCOMMAND_ADDR, + SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) { + ERR("goldfish_sync: Could not setup batch guest command address"); + return -ENODEV; + } + + sync_state->batch_hostcmd = batch_addr_hostcmd; + sync_state->batch_guestcmd = batch_addr_guestcmd; + } + + INFO("goldfish_sync: Initialized goldfish sync device"); + + writel(0, sync_state->reg_base + SYNC_REG_INIT); + + return 0; +} + +static int 
goldfish_sync_remove(struct platform_device *pdev) +{ + struct goldfish_sync_state *sync_state = global_sync_state; + + DTRACE(); + + misc_deregister(&goldfish_sync_device); + memset(sync_state, 0, sizeof(struct goldfish_sync_state)); + return 0; +} + +static const struct of_device_id goldfish_sync_of_match[] = { + { .compatible = "google,goldfish-sync", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_sync_of_match); + +static const struct acpi_device_id goldfish_sync_acpi_match[] = { + { "GFSH0006", 0 }, + { }, +}; + +MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match); + +static struct platform_driver goldfish_sync = { + .probe = goldfish_sync_probe, + .remove = goldfish_sync_remove, + .driver = { + .name = "goldfish_sync", + .of_match_table = goldfish_sync_of_match, + .acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match), + } +}; + +module_platform_driver(goldfish_sync); + +MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Android QEMU Sync Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); + +/* This function is only to run a basic test of sync framework. + * It creates a timeline and fence object whose signal point is at 1. + * The timeline is incremented, and we use the sync framework's + * sync_fence_wait on that fence object. If everything works out, + * we should not hang in the wait and return immediately. + * There is no way to explicitly run this test yet, but it + * can be used by inserting it at the end of goldfish_sync_probe. 
+ */ +void test_kernel_sync(void) +{ + struct goldfish_sync_timeline_obj *test_timeline; + int test_fence_fd; + + DTRACE(); + + DPRINT("test sw_sync"); + + test_timeline = goldfish_sync_timeline_create(); + DPRINT("sw_sync_timeline_create -> 0x%p", test_timeline); + + test_fence_fd = goldfish_sync_fence_create(test_timeline, 1); + DPRINT("sync_fence_create -> %d", test_fence_fd); + + DPRINT("incrementing test timeline"); + goldfish_sync_timeline_inc(test_timeline, 1); + + DPRINT("test waiting (should NOT hang)"); + sync_fence_wait( + sync_fence_fdget(test_fence_fd), -1); + + DPRINT("test waiting (afterward)"); +} diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index bb40f37287426c4855a70b0b19b124802a52c284..20314ff08be09b9cb4783d754d497bc36f5cda5b 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -236,7 +236,7 @@ static int ad7192_setup(struct ad7192_state *st, st->mclk = pdata->ext_clk_hz; else st->mclk = AD7192_INT_FREQ_MHZ; - break; + break; default: ret = -EINVAL; goto out; diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 10c43dda0f5a37009d6ba65bda523e2883fb8e17..196da09e20a1a229420c2b81294c1fb5fd7f9a6b 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -647,6 +647,7 @@ static void ad5933_work(struct work_struct *work) __be16 buf[2]; int val[2]; unsigned char status; + int ret; mutex_lock(&indio_dev->mlock); if (st->state == AD5933_CTRL_INIT_START_FREQ) { @@ -654,19 +655,22 @@ static void ad5933_work(struct work_struct *work) ad5933_cmd(st, AD5933_CTRL_START_SWEEP); st->state = AD5933_CTRL_START_SWEEP; schedule_delayed_work(&st->work, st->poll_time_jiffies); - mutex_unlock(&indio_dev->mlock); - return; + goto out; } - ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); + ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); + if (ret) + 
goto out; if (status & AD5933_STAT_DATA_VALID) { int scan_count = bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); - ad5933_i2c_read(st->client, + ret = ad5933_i2c_read(st->client, test_bit(1, indio_dev->active_scan_mask) ? AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, scan_count * 2, (u8 *)buf); + if (ret) + goto out; if (scan_count == 2) { val[0] = be16_to_cpu(buf[0]); @@ -678,8 +682,7 @@ static void ad5933_work(struct work_struct *work) } else { /* no data available - try again later */ schedule_delayed_work(&st->work, st->poll_time_jiffies); - mutex_unlock(&indio_dev->mlock); - return; + goto out; } if (status & AD5933_STAT_SWEEP_DONE) { @@ -691,7 +694,7 @@ static void ad5933_work(struct work_struct *work) ad5933_cmd(st, AD5933_CTRL_INC_FREQ); schedule_delayed_work(&st->work, st->poll_time_jiffies); } - +out: mutex_unlock(&indio_dev->mlock); } diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c index 0922dd3a08d3337c96165bf3f1b6b1c1ddb92307..196f6b0a288f1c0cab448ab07e9c33110266c4d1 100644 --- a/drivers/staging/nvec/nvec_ps2.c +++ b/drivers/staging/nvec/nvec_ps2.c @@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev) { struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); struct serio *ser_dev; - char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!ser_dev) return -ENOMEM; - ser_dev->id.type = SERIO_PS_PSTHRU; + ser_dev->id.type = SERIO_8042; ser_dev->write = ps2_sendcommand; ser_dev->start = ps2_startstreaming; ser_dev->stop = ps2_stopstreaming; @@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev) serio_register_port(ser_dev); - /* mouse reset */ - nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset)); - return 0; } diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 
9b7026e7d55b654cf3861a10d0420341eb7846f1..45d0a87f55d233cca40ed653c80fb04f2477e43b 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -718,13 +718,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr) u8 res = _SUCCESS; - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL); + paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC); if (paddbareq_parm == NULL) { kfree(ph2c); res = _FAIL; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7bc3778a1ac9c0d79fd4376f2a8323107de9a281..2a67af4e2e135bf7062c12cf26256e09bd203795 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1680,6 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2509,8 +2510,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. 
*/ - if (ack_kref) + if (ack_kref) { kref_get(&se_cmd->cmd_kref); + se_cmd->se_cmd_flags |= SCF_ACK_KREF; + } spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (se_sess->sess_tearing_down) { @@ -2833,6 +2836,12 @@ static const struct sense_info sense_info_table[] = { .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ .add_sector_info = true, }, + [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { + .key = COPY_ABORTED, + .asc = 0x0d, + .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ + + }, [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { /* * Returning ILLEGAL REQUEST would cause immediate IO errors on diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 47fe94ee10b82d876fedef726308738dc62f5805..153a6f255b6d98782c27f1932274d952092c455d 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op } mutex_unlock(&g_device_mutex); - pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); return -EINVAL; } @@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, struct xcopy_op *xop, unsigned char *p, - unsigned short tdll) + unsigned short tdll, sense_reason_t *sense_ret) { struct se_device *local_dev = se_cmd->se_dev; unsigned char *desc = p; @@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, unsigned short start = 0; bool src = true; + *sense_ret = TCM_INVALID_PARAMETER_LIST; + if (offset != 0) { pr_err("XCOPY target descriptor list length is not" " multiple of %d\n", XCOPY_TARGET_DESC_LEN); @@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); else rc = 
target_xcopy_locate_se_dev_e4(se_cmd, xop, false); - - if (rc < 0) + /* + * If a matching IEEE NAA 0x83 descriptor for the requested device + * is not located on this node, return COPY_ABORTED with ASQ/ASQC + * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the + * initiator to fall back to normal copy method. + */ + if (rc < 0) { + *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE; goto out; + } pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", xop->src_dev, &xop->src_tid_wwn[0]); @@ -653,6 +662,7 @@ static int target_xcopy_read_source( rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], remote_port, true); if (rc < 0) { + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; transport_generic_free_cmd(se_cmd, 0); return rc; } @@ -664,6 +674,7 @@ static int target_xcopy_read_source( rc = target_xcopy_issue_pt_cmd(xpt_cmd); if (rc < 0) { + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; transport_generic_free_cmd(se_cmd, 0); return rc; } @@ -714,6 +725,7 @@ static int target_xcopy_write_destination( remote_port, false); if (rc < 0) { struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; /* * If the failure happened before the t_mem_list hand-off in * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that @@ -729,6 +741,7 @@ static int target_xcopy_write_destination( rc = target_xcopy_issue_pt_cmd(xpt_cmd); if (rc < 0) { + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; transport_generic_free_cmd(se_cmd, 0); return rc; @@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work) out: xcopy_pt_undepend_remotedev(xop); kfree(xop); - - pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); - ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + /* + * Don't override an error scsi status if it has already been set + */ + if (ec_cmd->scsi_status == SAM_STAT_GOOD) { + 
pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY" + " CHECK_CONDITION -> sending response\n", rc); + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + } target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); } @@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, tdll, sdll, inline_dl); - rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); if (rc <= 0) goto out; diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index 0f82c0b146f6d8eb0bdea4a92e4c8a4d72dd3332..1e332855b93388ded9051ec74f644fc3bd8d8857 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -68,8 +68,7 @@ static void goldfish_tty_do_write(int line, const char *buf, unsigned count) static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) { - struct platform_device *pdev = dev_id; - struct goldfish_tty *qtty = &goldfish_ttys[pdev->id]; + struct goldfish_tty *qtty = dev_id; void __iomem *base = qtty->base; unsigned long irq_flags; unsigned char *buf; @@ -233,6 +232,7 @@ static int goldfish_tty_probe(struct platform_device *pdev) struct device *ttydev; void __iomem *base; u32 irq; + unsigned int line; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) @@ -248,10 +248,16 @@ static int goldfish_tty_probe(struct platform_device *pdev) irq = r->start; - if (pdev->id >= goldfish_tty_line_count) - goto err_unmap; - mutex_lock(&goldfish_tty_lock); + + if (pdev->id == PLATFORM_DEVID_NONE) + line = goldfish_tty_current_line_count; + else + line = pdev->id; + + if (line >= goldfish_tty_line_count) + goto err_create_driver_failed; + if (goldfish_tty_current_line_count == 0) { ret = goldfish_tty_create_driver(); if (ret) @@ -259,7 +265,7 @@ static int goldfish_tty_probe(struct platform_device *pdev) } goldfish_tty_current_line_count++; - qtty = &goldfish_ttys[pdev->id]; + qtty 
= &goldfish_ttys[line]; spin_lock_init(&qtty->lock); tty_port_init(&qtty->port); qtty->port.ops = &goldfish_port_ops; @@ -269,13 +275,13 @@ static int goldfish_tty_probe(struct platform_device *pdev) writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD); ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED, - "goldfish_tty", pdev); + "goldfish_tty", qtty); if (ret) goto err_request_irq_failed; ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver, - pdev->id, &pdev->dev); + line, &pdev->dev); if (IS_ERR(ttydev)) { ret = PTR_ERR(ttydev); goto err_tty_register_device_failed; @@ -286,8 +292,9 @@ static int goldfish_tty_probe(struct platform_device *pdev) qtty->console.device = goldfish_tty_console_device; qtty->console.setup = goldfish_tty_console_setup; qtty->console.flags = CON_PRINTBUFFER; - qtty->console.index = pdev->id; + qtty->console.index = line; register_console(&qtty->console); + platform_set_drvdata(pdev, qtty); mutex_unlock(&goldfish_tty_lock); return 0; @@ -307,13 +314,12 @@ err_unmap: static int goldfish_tty_remove(struct platform_device *pdev) { - struct goldfish_tty *qtty; + struct goldfish_tty *qtty = platform_get_drvdata(pdev); mutex_lock(&goldfish_tty_lock); - qtty = &goldfish_ttys[pdev->id]; unregister_console(&qtty->console); - tty_unregister_device(goldfish_tty_driver, pdev->id); + tty_unregister_device(goldfish_tty_driver, qtty->console.index); iounmap(qtty->base); qtty->base = NULL; free_irq(qtty->irq, pdev); @@ -324,11 +330,19 @@ static int goldfish_tty_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_tty_of_match[] = { + { .compatible = "google,goldfish-tty", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, goldfish_tty_of_match); + static struct platform_driver goldfish_tty_platform_driver = { .probe = goldfish_tty_probe, .remove = goldfish_tty_remove, .driver = { - .name = "goldfish_tty" + .name = "goldfish_tty", + .of_match_table = goldfish_tty_of_match, } }; diff --git 
a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index a5d319e4aae65dad90f64bafd33c2ad02bed7034..8435c3f204c1ed85ef501d0232632659a46d1ffc 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -440,7 +440,7 @@ static int dw8250_probe(struct platform_device *pdev) } data->pclk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) { + if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) { err = -EPROBE_DEFER; goto err_clk; } diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index ed489880e62b3ec4bd988dc833885afc68d7d367..83b3988eb6b20297b9a304fb97c2c2dffaec2848 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -149,6 +149,9 @@ static void mid8250_set_termios(struct uart_port *p, unsigned long w = BIT(24) - 1; unsigned long mul, div; + /* Gracefully handle the B0 case: fall back to B9600 */ + fuart = fuart ? 
fuart : 9600 * 16; + if (mid->board->freq < fuart) { /* Find prescaler value that satisfies Fuart < Fref */ if (mid->board->freq > baud) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index c1d4a8fa9be889fd839a02ed576511b3c28919cb..029de3f99752ef1a1ce36bbabd9e02e543a6fab9 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1952,6 +1952,43 @@ pci_wch_ch38x_setup(struct serial_private *priv, #define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 +#define PCI_VENDOR_ID_ACCESIO 0x494f +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B +#define 
PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8 + + + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 @@ -5119,6 +5156,108 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7958 }, + /* + * ACCES I/O Products quad + */ + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, + 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8, + PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 7b5462eb83882446685a268559b936c74cd783dd..e0b89b961e1b10fa7901e7c95c8f91c4220a5007 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2075,6 +2075,7 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state, static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned long flags; unsigned int old_mode, mode, imr, quot, baud; @@ -2178,11 +2179,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_RS485; } else if (termios->c_cflag & CRTSCTS) { /* RS232 with hardware handshake (RTS/CTS) */ - if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) { - dev_info(port->dev, "not enabling hardware flow control because DMA is used"); - termios->c_cflag &= ~CRTSCTS; - } else { + if (atmel_use_fifo(port) && + !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { + /* + * with ATMEL_US_USMODE_HWHS set, the controller will 
+ * be able to drive the RTS pin high/low when the RX + * FIFO is above RXFTHRES/below RXFTHRES2. + * It will also disable the transmitter when the CTS + * pin is high. + * This mode is not activated if CTS pin is a GPIO + * because in this case, the transmitter is always + * disabled (there must be an internal pull-up + * responsible for this behaviour). + * If the RTS pin is a GPIO, the controller won't be + * able to drive it according to the FIFO thresholds, + * but it will be handled by the driver. + */ mode |= ATMEL_US_USMODE_HWHS; + } else { + /* + * For platforms without FIFO, the flow control is + * handled by the driver. + */ + mode |= ATMEL_US_USMODE_NORMAL; } } else { /* RS232 without hadware handshake */ diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index ca0d3802f2af45fb8be9cad11d9763fb615e516a..4e603d060e80c91b36aabb3961dc22c3596d37ec 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -490,12 +490,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig locked = spin_trylock_irqsave(&port->lock, flags); else spin_lock_irqsave(&port->lock, flags); - if (port->sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&port->lock); - } else - spin_lock(&port->lock); for (i = 0; i < n; i++) { if (*s == '\n') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 136ebaaa9cc094b9faa275ae019d6e145d4001cd..5ab54ef4f30428f72fc22d694f176ef126416dfd 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -872,10 +872,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; + if (new_screen_size > (4 << 20)) + return -EINVAL; newscreen = kmalloc(new_screen_size, GFP_USER); if (!newscreen) return -ENOMEM; + if (vc == sel_cons) + clear_selection(); + old_rows = vc->vc_rows; old_row_size = vc->vc_size_row; @@ -1173,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int 
vpar) break; case 3: /* erase scroll-back buffer (and whole display) */ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, - vc->vc_screenbuf_size >> 1); + vc->vc_screenbuf_size); set_origin(vc); if (CON_IS_VISIBLE(vc)) update_screen(vc); diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index 915facbf552e207c68b4b7555b9520d70746853f..e1134a4d97f3fd0005e30dc29e9acf9da77e0e46 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) ++uiomem; } - priv->dmem_region_start = i; + priv->dmem_region_start = uiomem - &uioinfo->mem[0]; priv->num_dmem_regions = pdata->num_dynamic_regions; for (i = 0; i < pdata->num_dynamic_regions; ++i) { diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 965d0e240dcb27674151e2073b984c89c90eb7ed..ba4a2a1eb3fff95dcb60fd86f0d1fbf38c300f5c 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -926,6 +926,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) if (!ci) return -ENOMEM; + spin_lock_init(&ci->lock); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index ca367b05e4405afe7bd1f886ac95760bec84b80c..d8a045fc1fdba82e92f7f43410ffd3935f6acc9b 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -939,6 +939,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci) int retval; struct ci_hw_ep *hwep; + /* + * Unexpected USB controller behavior, caused by bad signal integrity + * or ground reference problems, can lead to isr_setup_status_phase + * being called with ci->status equal to NULL. + * If this situation occurs, you should review your USB hardware design. + */ + if (WARN_ON_ONCE(!ci->status)) + return -EPIPE; + hwep = (ci->ep0_dir == TX) ? 
ci->ep0out : ci->ep0in; ci->status->context = ci; ci->status->complete = isr_setup_status_complete; @@ -1875,8 +1884,6 @@ static int udc_start(struct ci_hdrc *ci) struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; int retval = 0; - spin_lock_init(&ci->lock); - ci->gadget.ops = &usb_gadget_ops; ci->gadget.speed = USB_SPEED_UNKNOWN; ci->gadget.max_speed = USB_SPEED_HIGH; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7f374369e53956e30dd93183590e47ac8d0ea598..4d77745f439fe5a95cc1994689e1d9d49b64c237 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -877,8 +877,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) DECLARE_WAITQUEUE(wait, current); struct async_icount old, new; - if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD )) - return -EINVAL; do { spin_lock_irq(&acm->read_lock); old = acm->oldcount; diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 7a11a8263171caad8ccba867bfa4e2e9734c2c3f..deaddb950c20d6cffbfff91754f30485e4a52c4f 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -121,6 +121,7 @@ static void usbtmc_delete(struct kref *kref) struct usbtmc_device_data *data = to_usbtmc_data(kref); usb_put_dev(data->usb_dev); + kfree(data); } static int usbtmc_open(struct inode *inode, struct file *filp) @@ -1104,7 +1105,7 @@ static int usbtmc_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "%s called\n", __func__); - data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 80c8d90d8b75842302b03264f784c967a92f76a8..ff44cfa26af81bf41d1efc6ab66dbec57998e77e 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -211,8 +211,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); - /* 
Fix up bInterval values outside the legal range. Use 32 ms if no - * proper value can be guessed. */ + /* + * Fix up bInterval values outside the legal range. + * Use 10 or 8 ms if no proper value can be guessed. + */ i = 0; /* i = min, j = max, n = default */ j = 255; if (usb_endpoint_xfer_int(d)) { @@ -221,13 +223,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: case USB_SPEED_HIGH: - /* Many device manufacturers are using full-speed + /* + * Many device manufacturers are using full-speed * bInterval values in high-speed interrupt endpoint - * descriptors. Try to fix those and fall back to a - * 32 ms default value otherwise. */ + * descriptors. Try to fix those and fall back to an + * 8-ms default value otherwise. + */ n = fls(d->bInterval*8); if (n == 0) - n = 9; /* 32 ms = 2^(9-1) uframes */ + n = 7; /* 8 ms = 2^(7-1) uframes */ j = 16; /* @@ -242,10 +246,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, } break; default: /* USB_SPEED_FULL or _LOW */ - /* For low-speed, 10 ms is the official minimum. + /* + * For low-speed, 10 ms is the official minimum. * But some "overclocked" devices might want faster - * polling so we'll allow it. */ - n = 32; + * polling so we'll allow it. 
+ */ + n = 10; break; } } else if (usb_endpoint_xfer_isoc(d)) { @@ -253,10 +259,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, j = 16; switch (to_usb_device(ddev)->speed) { case USB_SPEED_HIGH: - n = 9; /* 32 ms = 2^(9-1) uframes */ + n = 7; /* 8 ms = 2^(7-1) uframes */ break; default: /* USB_SPEED_FULL */ - n = 6; /* 32 ms = 2^(6-1) frames */ + n = 4; /* 8 ms = 2^(4-1) frames */ break; } } diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index 068b03a35bd52f864fafcdfcf7230fb244d6096e..20ac60d6b6a893eb7927945af5a5181228da71fa 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -978,22 +978,30 @@ void dbg_print_reg(const char *name, int reg) static ssize_t dwc3_store_events(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned tty; + int ret; + u8 tty; if (buf == NULL) { pr_err("[%s] EINVAL\n", __func__); - goto done; + ret = -EINVAL; + return ret; } - if (sscanf(buf, "%u", &tty) != 1 || tty > 1) { + ret = kstrtou8_from_user(buf, count, 0, &tty); + if (ret < 0) { + pr_err("can't get enter value.\n"); + return ret; + } + + if (tty > 1) { pr_err("<1|0>: enable|disable console log\n"); - goto done; + ret = -EINVAL; + return ret; } dbg_dwc3_data.tty = tty; pr_info("tty = %u", dbg_dwc3_data.tty); - done: return count; } @@ -1034,21 +1042,30 @@ const struct file_operations dwc3_gadget_dbg_data_fops = { static ssize_t dwc3_store_int_events(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - int clear_stats, i; + int i, ret; unsigned long flags; struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct timespec ts; + u8 clear_stats; if (ubuf == NULL) { pr_err("[%s] EINVAL\n", __func__); - goto done; + ret = -EINVAL; + return ret; + } + + ret = kstrtou8_from_user(ubuf, count, 0, &clear_stats); + if (ret < 0) { + pr_err("can't get enter value.\n"); + return ret; } - if (sscanf(ubuf, "%u", &clear_stats) != 1 
|| clear_stats != 0) { + if (clear_stats != 0) { pr_err("Wrong value. To clear stats, enter value as 0.\n"); - goto done; + ret = -EINVAL; + return ret; } spin_lock_irqsave(&dwc->lock, flags); @@ -1065,7 +1082,6 @@ static ssize_t dwc3_store_int_events(struct file *file, spin_unlock_irqrestore(&dwc->lock, flags); -done: return count; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 88350e61f3bd6b3a07e287db5b2176e7f5f0b908..7a279db521ca79cd01873242e38f898c27fb334e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3582,7 +3582,7 @@ err3: kfree(dwc->setup_buf); err2: - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); err1: @@ -3611,7 +3611,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc) kfree(dwc->setup_buf); - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 4964bb1a24b1983e83beef6489774bff88122793..ed0ff7b1fc15bda9fe0f35510b006a4296c519f4 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -426,11 +426,6 @@ static int config_usb_cfg_link( } f = usb_get_function(fi); - if (f == NULL) { - /* Are we trying to symlink PTP without MTP function? 
*/ - ret = -EINVAL; /* Invalid Configuration */ - goto out; - } if (IS_ERR(f)) { ret = PTR_ERR(f); goto out; diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 61057befc1368ac5d6f8de4a5b7c2e9d3e89b017..cd096fb9078fb31ac080f382e17a114a670c8303 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -255,6 +255,7 @@ static inline struct acc_dev *func_to_dev(struct usb_function *f) static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!req) return NULL; @@ -1079,6 +1080,7 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f) static void acc_start_work(struct work_struct *data) { char *envp[2] = { "ACCESSORY=START", NULL }; + kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp); } diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c index bcd817439dbf92efdc24885629a16a3de07bd261..db7903d19c435e4c5b08c09270baa44c5c48b6b2 100644 --- a/drivers/usb/gadget/function/f_audio_source.c +++ b/drivers/usb/gadget/function/f_audio_source.c @@ -310,6 +310,7 @@ static struct device_attribute *audio_source_function_attributes[] = { static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!req) return NULL; @@ -377,10 +378,9 @@ static void audio_send(struct audio_dev *audio) /* compute number of frames to send */ now = ktime_get(); - msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time); - do_div(msecs, 1000000); - frames = msecs * SAMPLE_RATE; - do_div(frames, 1000); + msecs = div_s64((ktime_to_ns(now) - ktime_to_ns(audio->start_time)), + 1000000); + frames = div_s64((msecs * SAMPLE_RATE), 1000); /* Readjust our frames_sent if we fall too far behind. 
* If we get too far behind it is better to drop some frames than diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 739cf9790cd495f6b9c76399004a2871cb30005f..ab44bd316217bcd929545d7625ed5cd21720b466 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1581,6 +1581,8 @@ static int functionfs_init(void) pr_err("failed registering file system (%d)\n", ret); ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0); + if (IS_ERR_OR_NULL(ffs_ipc_log)) + ffs_ipc_log = NULL; return ret; } @@ -1591,6 +1593,11 @@ static void functionfs_cleanup(void) pr_info("unloading\n"); unregister_filesystem(&ffs_fs_type); + + if (ffs_ipc_log) { + ipc_log_context_destroy(ffs_ipc_log); + ffs_ipc_log = NULL; + } } diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index 972ea68b16e4712f1427f32008cb617fa1e634bc..bf7460f25e611ea998a1b6d6d9d6cb7130a4a95e 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -137,6 +137,7 @@ struct mtp_dev { unsigned dbg_read_index; unsigned dbg_write_index; bool is_ptp; + struct mutex read_mutex; }; static struct usb_interface_descriptor mtp_interface_desc = { @@ -412,6 +413,7 @@ static inline struct mtp_dev *func_to_mtp(struct usb_function *f) static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!req) return NULL; @@ -640,11 +642,18 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); + mutex_lock(&dev->read_mutex); + if (dev->state == STATE_OFFLINE) { + r = -EIO; + mutex_unlock(&dev->read_mutex); + goto done; + } requeue_req: /* queue a request */ req = dev->rx_req[0]; req->length = len; dev->rx_done = 0; + mutex_unlock(&dev->read_mutex); ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); if (ret < 0) { r = -EIO; @@ -670,6 +679,7 @@ requeue_req: 
usb_ep_dequeue(dev->ep_out, req); goto done; } + mutex_lock(&dev->read_mutex); if (dev->state == STATE_BUSY) { /* If we got a 0-len packet, throw it back and try again. */ if (req->actual == 0) @@ -683,6 +693,7 @@ requeue_req: } else r = -EIO; + mutex_unlock(&dev->read_mutex); done: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) @@ -937,6 +948,12 @@ static void receive_file_work(struct work_struct *data) while (count > 0 || write_req) { if (count > 0) { + mutex_lock(&dev->read_mutex); + if (dev->state == STATE_OFFLINE) { + r = -EIO; + mutex_unlock(&dev->read_mutex); + break; + } /* queue a request */ read_req = dev->rx_req[cur_buf]; cur_buf = (cur_buf + 1) % RX_REQ_MAX; @@ -945,6 +962,7 @@ static void receive_file_work(struct work_struct *data) read_req->length = mtp_rx_req_len; dev->rx_done = 0; + mutex_unlock(&dev->read_mutex); ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; @@ -957,15 +975,23 @@ static void receive_file_work(struct work_struct *data) if (write_req) { DBG(cdev, "rx %pK %d\n", write_req, write_req->actual); start_time = ktime_get(); + mutex_lock(&dev->read_mutex); + if (dev->state == STATE_OFFLINE) { + r = -EIO; + mutex_unlock(&dev->read_mutex); + break; + } ret = vfs_write(filp, write_req->buf, write_req->actual, &offset); DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; + mutex_unlock(&dev->read_mutex); if (dev->state != STATE_OFFLINE) dev->state = STATE_ERROR; break; } + mutex_unlock(&dev->read_mutex); dev->perf[dev->dbg_write_index].vfs_wtime = ktime_to_us(ktime_sub(ktime_get(), start_time)); dev->perf[dev->dbg_write_index].vfs_wbytes = ret; @@ -989,6 +1015,12 @@ static void receive_file_work(struct work_struct *data) break; } + mutex_lock(&dev->read_mutex); + if (dev->state == STATE_OFFLINE) { + r = -EIO; + mutex_unlock(&dev->read_mutex); + break; + } /* Check if we aligned the size due to MTU constraint */ if (count < read_req->length) read_req->actual = 
(read_req->actual > count ? @@ -1009,6 +1041,7 @@ static void receive_file_work(struct work_struct *data) write_req = read_req; read_req = NULL; + mutex_unlock(&dev->read_mutex); } } @@ -1352,6 +1385,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev, } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS && w_index == 0 && w_value == 0) { struct mtp_device_status *status = cdev->req->buf; + status->wLength = __constant_cpu_to_le16(sizeof(*status)); @@ -1374,6 +1408,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev, /* respond with data transfer or status phase? */ if (value >= 0) { int rc; + cdev->req->zero = value < w_length; cdev->req->length = value; rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); @@ -1469,12 +1504,14 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) struct usb_request *req; int i; + mutex_lock(&dev->read_mutex); while ((req = mtp_req_get(dev, &dev->tx_idle))) mtp_request_free(req, dev->ep_in); for (i = 0; i < RX_REQ_MAX; i++) mtp_request_free(dev->rx_req[i], dev->ep_out); while ((req = mtp_req_get(dev, &dev->intr_idle))) mtp_request_free(req, dev->ep_intr); + mutex_unlock(&dev->read_mutex); dev->state = STATE_OFFLINE; dev->is_ptp = false; kfree(f->os_desc_table); @@ -1733,6 +1770,7 @@ static struct mtp_instance *to_mtp_instance(struct config_item *item) static void mtp_attr_release(struct config_item *item) { struct mtp_instance *fi_mtp = to_mtp_instance(item); + usb_put_function_instance(&fi_mtp->func_inst); } @@ -1853,7 +1891,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi, pr_err("\t2: Create MTP function\n"); pr_err("\t3: Create and symlink PTP function" " with a gadget configuration\n"); - return NULL; + return ERR_PTR(-EINVAL); /* Invalid Configuration */ } dev = fi_mtp->dev; @@ -1877,6 +1915,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi, dev->is_ptp = !mtp_config; fi->f = &dev->function; + 
mutex_init(&dev->read_mutex); return &dev->function; } EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp); diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 81ce22e918835bb2529d302879d9a77b094fb6d7..43e054666b68172cd4f5965455f937aaecd696f8 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -743,7 +743,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, /* throttle highspeed IRQ rate back slightly */ if (gadget_is_dualspeed(dev->gadget) && - (dev->gadget->speed == USB_SPEED_HIGH)) { + (dev->gadget->speed == USB_SPEED_HIGH) && + !list_empty(&dev->tx_reqs)) { dev->tx_qlen++; if (dev->tx_qlen == (dev->qmult/2)) { req->no_interrupt = 0; diff --git a/drivers/usb/gadget/functions.c b/drivers/usb/gadget/functions.c index 389c1f3d0feedd5664b3f5c733b3dce9c89627bc..b13f839e73682d5c26de4b1a984510eb9e92d6cb 100644 --- a/drivers/usb/gadget/functions.c +++ b/drivers/usb/gadget/functions.c @@ -58,7 +58,7 @@ struct usb_function *usb_get_function(struct usb_function_instance *fi) struct usb_function *f; f = fi->fd->alloc_func(fi); - if ((f == NULL) || IS_ERR(f)) + if (IS_ERR(f)) return f; f->fi = fi; return f; diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index c73689b72f95bf6592bd6fc889373ae6e0e4d040..b38a33584d4ac3af3a796e6eda756d6efd6b05ea 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget) tmp = in_be16(&udc->usb_param->frame_n); if (tmp & 0x8000) - tmp = tmp & 0x07ff; - else - tmp = -EINVAL; - - return (int)tmp; + return tmp & 0x07ff; + return -EINVAL; } static int fsl_qe_start(struct usb_gadget *gadget, diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 9094bca1bac6faf683be491e5d5287ebff558d1c..c0ce3db6a7c079d321009238cc50b61e122e25ac 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c 
@@ -72,7 +72,7 @@ static const char hcd_name [] = "ohci_hcd"; #define STATECHANGE_DELAY msecs_to_jiffies(300) -#define IO_WATCHDOG_DELAY msecs_to_jiffies(250) +#define IO_WATCHDOG_DELAY msecs_to_jiffies(275) #include "ohci.h" #include "pci-quirks.h" diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 963867c2c1d55b29745c742ea61863e183ef9987..cf147ccac7d365091e77443cabe1f7f2ddc1dd1a 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -45,6 +45,7 @@ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f @@ -154,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { + (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) { xhci->quirks |= XHCI_SPURIOUS_REBOOT; xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index fe529f9f7d28dc4d158efb48743562062a531c1a..98d01acc8b11aeb3d9bfcfb874545cddf7daeda1 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -846,6 +846,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); ep->stop_cmds_pending--; + if (xhci->xhc_state & XHCI_STATE_REMOVING) { + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } if (xhci->xhc_state & XHCI_STATE_DYING) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Stop EP timer ran, but another timer marked " @@ -899,7 +903,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) 
spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Calling usb_hc_died()"); - usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); + usb_hc_died(xhci_to_hcd(xhci)); xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "xHCI host controller is dead."); } diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 7771be3ac178ea0e95ca7da1f85ff55543ff4a00..4dd531ac5a7ffe0ecde019bc0d7024674f9f0ee9 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -898,24 +898,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; - /* we can register the device now, as it is ready */ - usb_set_intfdata (interface, dev); - - retval = usb_register_dev (interface, &tower_class); - - if (retval) { - /* something prevented us from registering this driver */ - dev_err(idev, "Not able to get a minor for this device.\n"); - usb_set_intfdata (interface, NULL); - goto error; - } - dev->minor = interface->minor; - - /* let the user know what node this device is now attached to */ - dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major " - "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), - USB_MAJOR, dev->minor); - /* get the firmware version and log it */ result = usb_control_msg (udev, usb_rcvctrlpipe(udev, 0), @@ -936,6 +918,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device get_version_reply.minor, le16_to_cpu(get_version_reply.build_no)); + /* we can register the device now, as it is ready */ + usb_set_intfdata (interface, dev); + + retval = usb_register_dev (interface, &tower_class); + + if (retval) { + /* something prevented us from registering this driver */ + dev_err(idev, "Not 
able to get a minor for this device.\n"); + usb_set_intfdata (interface, NULL); + goto error; + } + dev->minor = interface->minor; + + /* let the user know what node this device is now attached to */ + dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major " + "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), + USB_MAJOR, dev->minor); exit: return retval; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 59a63a0b79850c7c959f5ef5080de02fb19b1db8..e0a083f6ab68a5d9cd79c5296d58f47d21758a21 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -661,7 +661,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ } - channel->desired_mode = mode; + channel->desired_mode = *mode; musb_writew(epio, MUSB_TXCSR, csr); return 0; @@ -2008,10 +2008,8 @@ void musb_host_rx(struct musb *musb, u8 epnum) qh->offset, urb->transfer_buffer_length); - done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, - urb, xfer_len, - iso_err); - if (done) + if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb, + xfer_len, iso_err)) goto finish; else dev_err(musb->controller, "error: rx_dma failed\n"); diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c index d4be5d5948960cd32353d3007be4373d914be114..28965ef4f824a0117e94fc8c271b347858d0a5e6 100644 --- a/drivers/usb/renesas_usbhs/mod.c +++ b/drivers/usb/renesas_usbhs/mod.c @@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data) if (usbhs_mod_is_host(priv)) usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); - usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); + /* + * The driver should not clear the xxxSTS after the line of + * "call irq callback functions" because each "if" statement is + * possible to call the callback function for avoiding any side effects. 
+ */ + if (irq_state.intsts0 & BRDY) + usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts); - usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); + if (irq_state.intsts0 & BEMP) + usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); /* * call irq callback functions diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index a2b43a6e7fa76f78a8150aa9b69a65277d79da8e..fe7452f0f38a67db94034405602c85f0ea6f9379 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -117,6 +117,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ @@ -129,6 +130,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ @@ -784,7 +786,7 @@ static void cp210x_set_termios(struct tty_struct *tty, } else { modem_ctl[0] &= ~0x7B; modem_ctl[0] |= 0x01; - modem_ctl[1] |= 0x40; + modem_ctl[1] = 0x40; dev_dbg(dev, "%s - flow control = NONE\n", __func__); } @@ -844,7 +846,9 @@ static int cp210x_tiocmget(struct tty_struct *tty) unsigned int control; int result; - cp210x_get_config(port, CP210X_GET_MDMSTS, 
&control, 1); + result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1); + if (result) + return result; result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) |((control & CONTROL_RTS) ? TIOCM_RTS : 0) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8c48c9d83d48c8f3bfa3abc808badc2bedab16c0..d3d6ec4551514c20880a059729ce401e08b1de34 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = { /* ekey Devices */ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, /* Infineon Devices */ - { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) }, /* GE Healthcare devices */ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, /* Active Research (Actisense) devices */ @@ -1011,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, + { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index f87a938cf00571eb69edbd8d625f58041384d5fa..48ee04c94a7541ad6c5f9023be107246207c56fd 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -595,6 +595,12 @@ #define ATMEL_VID 0x03eb /* Vendor ID */ #define STK541_PID 0x2109 /* Zigbee Controller */ +/* + * Texas Instruments + */ +#define TI_VID 0x0451 +#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ + /* * Blackfin gnICE JTAG * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice @@ -626,8 +632,9 @@ /* * Infineon Technologies */ 
-#define INFINEON_VID 0x058b -#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ +#define INFINEON_VID 0x058b +#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ +#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */ /* * Acton Research Corp. diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index a204782ae530e1de8f7982f869a94f9b80ce9dad..e98b6e57b703dbd84c3b618fe56dc50eff3fd7ec 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS); /* Infineon Flashloader driver */ #define FLASHLOADER_IDS() \ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ - { USB_DEVICE(0x8087, 0x0716) } + { USB_DEVICE(0x8087, 0x0716) }, \ + { USB_DEVICE(0x8087, 0x0801) } DEVICE(flashloader, FLASHLOADER_IDS); /* Google Serial USB SubClass */ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index a0ca291bc07f7377c43deebbccc31f9268f4db35..e7e29c797824fc3b93c97cccb0643604ed31f651 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1077,7 +1077,8 @@ static int usb_serial_probe(struct usb_interface *interface, serial->disconnected = 0; - usb_serial_console_init(serial->port[0]->minor); + if (num_ports > 0) + usb_serial_console_init(serial->port[0]->minor); exit: module_put(type->driver.owner); return 0; diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 5e67f63b2e462ffc68b5acc1a9f8557828e44489..02f86dd1a3400b3a9cf4551e4a59e817608c2f8b 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) /* COMMAND STAGE */ /* let's send the command via the control pipe */ + /* + * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. + * Stack may be vmallocated. 
So no DMA for us. Make a copy. + */ + memcpy(us->iobuf, srb->cmnd, srb->cmd_len); result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, US_CBI_ADSC, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, - us->ifnum, srb->cmnd, srb->cmd_len); + us->ifnum, us->iobuf, srb->cmd_len); /* check the return code for the command */ usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index d059ad4d0dbdc76f8fbbdbfc47dfd990d2006fc5..97ee1b46db698f03dee8ea50beb00e864bff0579 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c @@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index) struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } + return rc; } @@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) if (dev) { rc = dev_get_drvdata(dev); __uwb_rc_get(rc); + put_device(dev); } + return rc; } EXPORT_SYMBOL_GPL(__uwb_rc_try_get); @@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, find_rc_grandpa); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } + return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); @@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } return rc; } diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index c1304b8d498530124c7ca30319c12cc823e84d6e..678e93741ae156bf5d2c41b7a52f7b215262210d 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c @@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc) dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); + put_device(dev); + return (dev != 
NULL); } diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c index 8726617f73ab5cccc63418f17cb51e048ecc33c0..705411bfaebba04868d3ea864f4725ac63298f73 100644 --- a/drivers/video/adf/adf_fops.c +++ b/drivers/video/adf/adf_fops.c @@ -132,7 +132,7 @@ static int adf_eng_get_data(struct adf_overlay_engine *eng, eng->ops->n_supported_formats)); mutex_lock(&dev->client_lock); - ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data, + ret = adf_obj_copy_custom_data_to_user(&eng->base, data.custom_data, &data.custom_data_size); mutex_unlock(&dev->client_lock); @@ -144,7 +144,7 @@ static int adf_eng_get_data(struct adf_overlay_engine *eng, goto done; } - if (supported_formats && copy_to_user(arg->supported_formats, + if (supported_formats && copy_to_user(data.supported_formats, supported_formats, n_supported_formats * sizeof(supported_formats[0]))) ret = -EFAULT; @@ -220,56 +220,45 @@ static int adf_device_post_config(struct adf_device *dev, int complete_fence_fd; struct adf_buffer *bufs = NULL; struct adf_interface **intfs = NULL; - size_t n_intfs, n_bufs, i; + struct adf_post_config data; + size_t i; void *custom_data = NULL; - size_t custom_data_size; int ret = 0; + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + complete_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (complete_fence_fd < 0) return complete_fence_fd; - if (get_user(n_intfs, &arg->n_interfaces)) { - ret = -EFAULT; - goto err_get_user; - } - - if (n_intfs > ADF_MAX_INTERFACES) { + if (data.n_interfaces > ADF_MAX_INTERFACES) { ret = -EINVAL; goto err_get_user; } - if (get_user(n_bufs, &arg->n_bufs)) { - ret = -EFAULT; - goto err_get_user; - } - - if (n_bufs > ADF_MAX_BUFFERS) { + if (data.n_bufs > ADF_MAX_BUFFERS) { ret = -EINVAL; goto err_get_user; } - if (get_user(custom_data_size, &arg->custom_data_size)) { - ret = -EFAULT; - goto err_get_user; - } - - if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) { + if (data.custom_data_size > 
ADF_MAX_CUSTOM_DATA_SIZE) { ret = -EINVAL; goto err_get_user; } - if (n_intfs) { - intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL); + if (data.n_interfaces) { + intfs = kmalloc(sizeof(intfs[0]) * data.n_interfaces, + GFP_KERNEL); if (!intfs) { ret = -ENOMEM; goto err_get_user; } } - for (i = 0; i < n_intfs; i++) { + for (i = 0; i < data.n_interfaces; i++) { u32 intf_id; - if (get_user(intf_id, &arg->interfaces[i])) { + if (get_user(intf_id, &data.interfaces[i])) { ret = -EFAULT; goto err_get_user; } @@ -281,31 +270,31 @@ static int adf_device_post_config(struct adf_device *dev, } } - if (n_bufs) { - bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL); + if (data.n_bufs) { + bufs = kzalloc(sizeof(bufs[0]) * data.n_bufs, GFP_KERNEL); if (!bufs) { ret = -ENOMEM; goto err_get_user; } } - for (i = 0; i < n_bufs; i++) { - ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]); + for (i = 0; i < data.n_bufs; i++) { + ret = adf_buffer_import(dev, &data.bufs[i], &bufs[i]); if (ret < 0) { memset(&bufs[i], 0, sizeof(bufs[i])); goto err_import; } } - if (custom_data_size) { - custom_data = kzalloc(custom_data_size, GFP_KERNEL); + if (data.custom_data_size) { + custom_data = kzalloc(data.custom_data_size, GFP_KERNEL); if (!custom_data) { ret = -ENOMEM; goto err_import; } - if (copy_from_user(custom_data, arg->custom_data, - custom_data_size)) { + if (copy_from_user(custom_data, data.custom_data, + data.custom_data_size)) { ret = -EFAULT; goto err_import; } @@ -316,8 +305,8 @@ static int adf_device_post_config(struct adf_device *dev, goto err_import; } - complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs, - n_bufs, custom_data, custom_data_size); + complete_fence = adf_device_post_nocopy(dev, intfs, data.n_interfaces, + bufs, data.n_bufs, custom_data, data.custom_data_size); if (IS_ERR(complete_fence)) { ret = PTR_ERR(complete_fence); goto err_import; @@ -327,7 +316,7 @@ static int adf_device_post_config(struct adf_device *dev, return 0; err_import: - for 
(i = 0; i < n_bufs; i++) + for (i = 0; i < data.n_bufs; i++) adf_buffer_cleanup(&bufs[i]); err_get_user: @@ -481,19 +470,19 @@ static int adf_device_get_data(struct adf_device *dev, data.n_allowed_attachments); mutex_lock(&dev->client_lock); - ret = adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data, + ret = adf_obj_copy_custom_data_to_user(&dev->base, data.custom_data, &data.custom_data_size); mutex_unlock(&dev->client_lock); if (ret < 0) goto done; - ret = adf_copy_attachment_list_to_user(arg->attachments, + ret = adf_copy_attachment_list_to_user(data.attachments, data.n_attachments, attach, n_attach); if (ret < 0) goto done; - ret = adf_copy_attachment_list_to_user(arg->allowed_attachments, + ret = adf_copy_attachment_list_to_user(data.allowed_attachments, data.n_allowed_attachments, allowed_attach, n_allowed_attach); if (ret < 0) @@ -592,7 +581,7 @@ static int adf_intf_get_data(struct adf_interface *intf, data.n_available_modes = intf->n_modes; read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags); - if (copy_to_user(arg->available_modes, modelist, modelist_size)) { + if (copy_to_user(data.available_modes, modelist, modelist_size)) { ret = -EFAULT; goto done; } @@ -601,7 +590,7 @@ static int adf_intf_get_data(struct adf_interface *intf, memcpy(&data.current_mode, &intf->current_mode, sizeof(intf->current_mode)); - ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data, + ret = adf_obj_copy_custom_data_to_user(&intf->base, data.custom_data, &data.custom_data_size); done: mutex_unlock(&dev->client_lock); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 95d293b7445a83473bc2131cf657f5de14ef52b3..dc2fcda54d53d767d57a6d6c02fd7d383380ebe5 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -52,9 +52,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, return 1; if (regno < 16) { - red >>= 8; - green >>= 8; - blue >>= 8; + red >>= 16 - info->var.red.length; + 
green >>= 16 - info->var.green.length; + blue >>= 16 - info->var.blue.length; ((u32 *)(info->pseudo_palette))[regno] = (red << info->var.red.offset) | (green << info->var.green.offset) | diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 7f6c9e6cfc6c99d8d9912db5d2f78242923f51c5..1e56b50e408234f826fd28fdaa1bee0807911dcf 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -26,6 +26,7 @@ #include #include #include +#include enum { FB_GET_WIDTH = 0x00, @@ -234,7 +235,7 @@ static int goldfish_fb_probe(struct platform_device *pdev) fb->fb.var.activate = FB_ACTIVATE_NOW; fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT); fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH); - fb->fb.var.pixclock = 10000; + fb->fb.var.pixclock = 0; fb->fb.var.red.offset = 11; fb->fb.var.red.length = 5; @@ -304,12 +305,25 @@ static int goldfish_fb_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_fb_of_match[] = { + { .compatible = "google,goldfish-fb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_fb_of_match); + +static const struct acpi_device_id goldfish_fb_acpi_match[] = { + { "GFSH0004", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match); static struct platform_driver goldfish_fb_driver = { .probe = goldfish_fb_probe, .remove = goldfish_fb_remove, .driver = { - .name = "goldfish_fb" + .name = "goldfish_fb", + .of_match_table = goldfish_fb_of_match, + .acpi_match_table = ACPI_PTR(goldfish_fb_acpi_match), } }; diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index c66d9f3b3a65ade1787b5979c1d4d9a351229ad4..4aa14422899f531bfc06de244be005fee6c64e00 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -629,6 +629,7 @@ struct buf_data { char *string_buf; /* cmd buf as string, 3 bytes per number */ int sblen; /* string buffer length */ int sync_flag; + struct mutex 
dbg_mutex; /* mutex to synchronize read/write/flush */ }; struct mdss_dsi_debugfs_info { @@ -718,6 +719,7 @@ static ssize_t mdss_dsi_cmd_read(struct file *file, char __user *buf, char *bp; ssize_t ret = 0; + mutex_lock(&pcmds->dbg_mutex); if (*ppos == 0) { kfree(pcmds->string_buf); pcmds->string_buf = NULL; @@ -736,6 +738,7 @@ static ssize_t mdss_dsi_cmd_read(struct file *file, char __user *buf, buffer = kmalloc(bsize, GFP_KERNEL); if (!buffer) { pr_err("%s: Failed to allocate memory\n", __func__); + mutex_unlock(&pcmds->dbg_mutex); return -ENOMEM; } @@ -771,10 +774,12 @@ static ssize_t mdss_dsi_cmd_read(struct file *file, char __user *buf, kfree(pcmds->string_buf); pcmds->string_buf = NULL; pcmds->sblen = 0; + mutex_unlock(&pcmds->dbg_mutex); return 0; /* the end */ } ret = simple_read_from_buffer(buf, count, ppos, pcmds->string_buf, pcmds->sblen); + mutex_unlock(&pcmds->dbg_mutex); return ret; } @@ -786,6 +791,7 @@ static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p, int blen = 0; char *string_buf; + mutex_lock(&pcmds->dbg_mutex); if (*ppos == 0) { kfree(pcmds->string_buf); pcmds->string_buf = NULL; @@ -797,6 +803,7 @@ static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p, string_buf = krealloc(pcmds->string_buf, blen + 1, GFP_KERNEL); if (!string_buf) { pr_err("%s: Failed to allocate memory\n", __func__); + mutex_unlock(&pcmds->dbg_mutex); return -ENOMEM; } @@ -806,6 +813,7 @@ static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p, string_buf[blen] = '\0'; pcmds->string_buf = string_buf; pcmds->sblen = blen; + mutex_unlock(&pcmds->dbg_mutex); return ret; } @@ -816,8 +824,12 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id) char *buf, *bufp, *bp; struct dsi_ctrl_hdr *dchdr; - if (!pcmds->string_buf) + mutex_lock(&pcmds->dbg_mutex); + + if (!pcmds->string_buf) { + mutex_unlock(&pcmds->dbg_mutex); return 0; + } /* * Allocate memory for command buffer @@ -830,6 +842,7 @@ static int 
mdss_dsi_cmd_flush(struct file *file, fl_owner_t id) kfree(pcmds->string_buf); pcmds->string_buf = NULL; pcmds->sblen = 0; + mutex_unlock(&pcmds->dbg_mutex); return -ENOMEM; } @@ -854,6 +867,7 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id) pr_err("%s: dtsi cmd=%x error, len=%d\n", __func__, dchdr->dtype, dchdr->dlen); kfree(buf); + mutex_unlock(&pcmds->dbg_mutex); return -EINVAL; } bp += sizeof(*dchdr); @@ -865,6 +879,7 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id) pr_err("%s: dcs_cmd=%x len=%d error!\n", __func__, bp[0], len); kfree(buf); + mutex_unlock(&pcmds->dbg_mutex); return -EINVAL; } @@ -877,6 +892,7 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id) pcmds->buf = buf; pcmds->blen = blen; } + mutex_unlock(&pcmds->dbg_mutex); return 0; } @@ -891,6 +907,7 @@ struct dentry *dsi_debugfs_create_dcs_cmd(const char *name, umode_t mode, struct dentry *parent, struct buf_data *cmd, struct dsi_panel_cmds ctrl_cmds) { + mutex_init(&cmd->dbg_mutex); cmd->buf = ctrl_cmds.buf; cmd->blen = ctrl_cmds.blen; cmd->string_buf = NULL; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index dc2b94142f53251560e0734b769f5bf5f7f1feb0..a01a41a4126934c4b9af04853e409011aec3bbce 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -548,7 +548,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq) if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; - vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); } } @@ -580,7 +581,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) * entry. Always do both to keep code simple. 
*/ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; - vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); } vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); END_USE(vq); @@ -648,10 +650,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next - * entry. Always do both to keep code simple. */ + * entry. Always update the event index to keep code simple. */ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; - vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); } /* TODO: tune this threshold */ bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; @@ -770,7 +773,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, /* No callback? Tell other side not to bother us. */ if (!callback) { vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; - vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); } /* Put everything in free lists. 
*/ diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 531e764749838afccd288a06c464a9db7e60423d..0e0eb10f82a028c737949a4041a944d9798bb6bf 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type, rc = -ENOMEM; goto out; } - } else { + } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; diff --git a/fs/9p/acl.c b/fs/9p/acl.c index a7e28890f5efb4ec8729fc9de4f09e16d4fb91bd..929b618da43bb345cc69699ee96fd00beb358e73 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -282,32 +282,26 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler, switch (handler->flags) { case ACL_TYPE_ACCESS: if (acl) { - umode_t mode = inode->i_mode; - retval = posix_acl_equiv_mode(acl, &mode); - if (retval < 0) + struct iattr iattr; + + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); + if (retval) goto err_out; - else { - struct iattr iattr; - if (retval == 0) { - /* - * ACL can be represented - * by the mode bits. So don't - * update ACL. - */ - acl = NULL; - value = NULL; - size = 0; - } - /* Updte the mode bits */ - iattr.ia_mode = ((mode & S_IALLUGO) | - (inode->i_mode & ~S_IALLUGO)); - iattr.ia_valid = ATTR_MODE; - /* FIXME should we update ctime ? - * What is the following setxattr update the - * mode ? + if (!acl) { + /* + * ACL can be represented + * by the mode bits. So don't + * update ACL. */ - v9fs_vfs_setattr_dotl(dentry, &iattr); + value = NULL; + size = 0; } + iattr.ia_valid = ATTR_MODE; + /* FIXME should we update ctime ? + * What is the following setxattr update the + * mode ? 
+ */ + v9fs_vfs_setattr_dotl(dentry, &iattr); } break; case ACL_TYPE_DEFAULT: diff --git a/fs/aio.c b/fs/aio.c index 24493f07fb40afacc81780ea681a4d88b5dbac95..2d41de30d31a3285ed30c722724af87474796c3e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct file_system_type *fs_type, static const struct dentry_operations ops = { .d_dname = simple_dname, }; - return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC); + struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops, + AIO_RING_MAGIC); + + if (!IS_ERR(root)) + root->d_sb->s_iflags |= SB_I_NOEXEC; + return root; } /* aio_setup diff --git a/fs/attr.c b/fs/attr.c index 6530ced19697d49a9189fa289c9112187448fee3..d62f674a605ff687972507ffce003bc08121b81d 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -202,6 +202,21 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de return -EPERM; } + /* + * If utimes(2) and friends are called with times == NULL (or both + * times are UTIME_NOW), then we need to check for write permission + */ + if (ia_valid & ATTR_TOUCH) { + if (IS_IMMUTABLE(inode)) + return -EPERM; + + if (!inode_owner_or_capable(inode)) { + error = inode_permission(inode, MAY_WRITE); + if (error) + return error; + } + } + if ((ia_valid & ATTR_MODE)) { umode_t amode = attr->ia_mode; /* Flag setting protected by i_mutex */ diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index c37149b929be50442ef40aacc36423eab9b264a2..502d3892d8a45d9268fbeb4bf1c3be848ac55b51 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -79,9 +79,13 @@ struct autofs_info { }; #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ -#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered +#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered * for expiry, so RCU_walk is - * not permitted + * not permitted. 
If it progresses to + * actual expiry attempt, the flag is + * not cleared when EXPIRING is set - + * in that case it gets cleared only + * when it comes to clearing EXPIRING. */ #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 1cebc3c52fa5792242144c939908be50040b9e0b..7a5a598a2d9456db5e1586f5187f663e03d1625d 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb, if (ino->flags & AUTOFS_INF_PENDING) goto out; if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { - ino->flags |= AUTOFS_INF_NO_RCU; + ino->flags |= AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); synchronize_rcu(); spin_lock(&sbi->fs_lock); if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { ino->flags |= AUTOFS_INF_EXPIRING; - smp_mb(); - ino->flags &= ~AUTOFS_INF_NO_RCU; init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); return root; } - ino->flags &= ~AUTOFS_INF_NO_RCU; + ino->flags &= ~AUTOFS_INF_WANT_EXPIRE; } out: spin_unlock(&sbi->fs_lock); @@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry, } return NULL; } + /* * Find an eligible tree to time-out * A tree is eligible if :- @@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, struct dentry *root = sb->s_root; struct dentry *dentry; struct dentry *expired; + struct dentry *found; struct autofs_info *ino; if (!root) @@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, dentry = NULL; while ((dentry = get_next_positive_subdir(dentry, root))) { + int flags = how; + spin_lock(&sbi->fs_lock); ino = autofs4_dentry_ino(dentry); - if (ino->flags & AUTOFS_INF_NO_RCU) - expired = NULL; - else - expired = should_expire(dentry, mnt, timeout, how); - if (!expired) { + if (ino->flags & AUTOFS_INF_WANT_EXPIRE) { spin_unlock(&sbi->fs_lock); continue; } + spin_unlock(&sbi->fs_lock); + + 
expired = should_expire(dentry, mnt, timeout, flags); + if (!expired) + continue; + + spin_lock(&sbi->fs_lock); ino = autofs4_dentry_ino(expired); - ino->flags |= AUTOFS_INF_NO_RCU; + ino->flags |= AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); synchronize_rcu(); - spin_lock(&sbi->fs_lock); - if (should_expire(expired, mnt, timeout, how)) { - if (expired != dentry) - dput(dentry); - goto found; - } - ino->flags &= ~AUTOFS_INF_NO_RCU; + /* Make sure a reference is not taken on found if + * things have changed. + */ + flags &= ~AUTOFS_EXP_LEAVES; + found = should_expire(expired, mnt, timeout, how); + if (!found || found != expired) + /* Something has changed, continue */ + goto next; + if (expired != dentry) - dput(expired); + dput(dentry); + + spin_lock(&sbi->fs_lock); + goto found; +next: + spin_lock(&sbi->fs_lock); + ino->flags &= ~AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); + if (expired != dentry) + dput(expired); } return NULL; found: DPRINTK("returning %p %pd", expired, expired); ino->flags |= AUTOFS_INF_EXPIRING; - smp_mb(); - ino->flags &= ~AUTOFS_INF_NO_RCU; init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); - spin_lock(&sbi->lookup_lock); - spin_lock(&expired->d_parent->d_lock); - spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED); - list_move(&expired->d_parent->d_subdirs, &expired->d_child); - spin_unlock(&expired->d_lock); - spin_unlock(&expired->d_parent->d_lock); - spin_unlock(&sbi->lookup_lock); return expired; } @@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk) struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); struct autofs_info *ino = autofs4_dentry_ino(dentry); int status; + int state; /* Block on any pending expire */ - if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))) + if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE)) return 0; if (rcu_walk) return -ECHILD; +retry: spin_lock(&sbi->fs_lock); - if (ino->flags & AUTOFS_INF_EXPIRING) { + state = ino->flags & 
(AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING); + if (state == AUTOFS_INF_WANT_EXPIRE) { + spin_unlock(&sbi->fs_lock); + /* + * Possibly being selected for expire, wait until + * it's selected or not. + */ + schedule_timeout_uninterruptible(HZ/10); + goto retry; + } + if (state & AUTOFS_INF_EXPIRING) { spin_unlock(&sbi->fs_lock); DPRINTK("waiting for expire %p name=%pd", dentry, dentry); @@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb, ino = autofs4_dentry_ino(dentry); /* avoid rapid-fire expire attempts if expiry fails */ ino->last_used = now; - ino->flags &= ~AUTOFS_INF_EXPIRING; + ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); @@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt, spin_lock(&sbi->fs_lock); /* avoid rapid-fire expire attempts if expiry fails */ ino->last_used = now; - ino->flags &= ~AUTOFS_INF_EXPIRING; + ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); dput(dentry); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index c6d7d3dbd52abfa9af11ea0fb7aab955e355bbc9..7a54c6a867c89cc819bb5c0768e75e7b84527259 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) * a mount-trap. 
*/ struct inode *inode; - if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)) + if (ino->flags & AUTOFS_INF_WANT_EXPIRE) return 0; if (d_mountpoint(dentry)) return 0; diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 9a0124a95851014c9000ed7822dc5fad0a2f6995..fb3e64d37cb4ca6d2faf4fadefd85b1477a95f2e 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -83,11 +83,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans, case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - ret = posix_acl_equiv_mode(acl, &inode->i_mode); - if (ret < 0) + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (ret) return ret; - if (ret == 0) - acl = NULL; } ret = 0; break; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index c473c42d7d6c4d559dbe1587491cd2fb93804145..bae05c5c75bad4c7f3f8ac74fb38e0b3dca460ac 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -694,7 +694,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); if (ret) { - bio->bi_error = ret; + comp_bio->bi_error = ret; bio_endio(comp_bio); } @@ -723,7 +723,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); if (ret) { - bio->bi_error = ret; + comp_bio->bi_error = ret; bio_endio(comp_bio); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4bc9dbf29a73fbde861d9a8cab06db3876c8bf20..3cff6523f27d57590a5ab4c29422614acb8e13aa 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8691,9 +8691,14 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, * So even we call qgroup_free_data(), it won't decrease reserved * space. * 2) Not written to disk - * This means the reserved space should be freed here. + * This means the reserved space should be freed here. 
However, + * if a truncate invalidates the page (by clearing PageDirty) + * and the page is accounted for while allocating extent + * in btrfs_check_data_free_space() we let delayed_ref to + * free the entire extent. */ - btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); + if (PageDirty(page)) + btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 65f30b3b04f91d54fc1bd9def4440d72fa077189..a7e18dbadf748ebaea90da93d982c2369a356833 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, int namelen; int ret = 0; + if (!S_ISDIR(file_inode(file)->i_mode)) + return -ENOTDIR; + ret = mnt_want_write_file(file); if (ret) goto out; @@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, struct btrfs_ioctl_vol_args *vol_args; int ret; + if (!S_ISDIR(file_inode(file)->i_mode)) + return -ENOTDIR; + vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); @@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, bool readonly = false; struct btrfs_qgroup_inherit *inherit = NULL; + if (!S_ISDIR(file_inode(file)->i_mode)) + return -ENOTDIR; + vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); @@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, int ret; int err = 0; + if (!S_ISDIR(dir->i_mode)) + return -ENOTDIR; + vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 0e044d7ee721ec1708def2298fa47f6a359764d6..f7441193bf35aee7565d73f4a9d4e618ab00ea6d 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2696,14 +2696,12 @@ static inline void 
btrfs_remove_all_log_ctxs(struct btrfs_root *root, int index, int error) { struct btrfs_log_ctx *ctx; + struct btrfs_log_ctx *safe; - if (!error) { - INIT_LIST_HEAD(&root->log_ctxs[index]); - return; - } - - list_for_each_entry(ctx, &root->log_ctxs[index], list) + list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { + list_del_init(&ctx->list); ctx->log_ret = error; + } INIT_LIST_HEAD(&root->log_ctxs[index]); } @@ -2850,6 +2848,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { blk_finish_plug(&plug); + list_del_init(&root_log_ctx.list); mutex_unlock(&log_root_tree->log_mutex); ret = root_log_ctx.log_ret; goto out; @@ -2943,13 +2942,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, mutex_unlock(&root->log_mutex); out_wake_log_root: - /* - * We needn't get log_mutex here because we are sure all - * the other tasks are blocked. - */ + mutex_lock(&log_root_tree->log_mutex); btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); - mutex_lock(&log_root_tree->log_mutex); log_root_tree->log_transid_committed++; atomic_set(&log_root_tree->log_commit[index2], 0); mutex_unlock(&log_root_tree->log_mutex); @@ -2960,10 +2955,8 @@ out_wake_log_root: if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) wake_up(&log_root_tree->log_commit_wait[index2]); out: - /* See above. 
*/ - btrfs_remove_all_log_ctxs(root, index1, ret); - mutex_lock(&root->log_mutex); + btrfs_remove_all_log_ctxs(root, index1, ret); root->log_transid_committed++; atomic_set(&root->log_commit[index1], 0); mutex_unlock(&root->log_mutex); diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 8f84646f10e9560ade100c1dafa180768f1d76de..4d8caeb94a118d339031b79a55c46ba59a537a05 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -94,11 +94,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - ret = posix_acl_equiv_mode(acl, &new_mode); - if (ret < 0) + ret = posix_acl_update_mode(inode, &new_mode, &acl); + if (ret) goto out; - if (ret == 0) - acl = NULL; } break; case ACL_TYPE_DEFAULT: diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 3c68e6aee2f0531bdd8afb0125bbae48419d0dfb..c8222bfe1e5669b00874274d2d4ee697d9ee2102 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -929,7 +929,8 @@ again: statret = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA, !!page); if (statret < 0) { - __free_page(page); + if (page) + __free_page(page); if (statret == -ENODATA) { BUG_ON(retry_op != READ_INLINE); goto again; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 498dcfa2dcdbedf393ae26fc9f7f68cf90bceb90..d98536c8abfc03adf01c744c33fe014cbda9fabc 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1358,15 +1358,20 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn, if (!ctl->page || pgoff != page_index(ctl->page)) { ceph_readdir_cache_release(ctl); - ctl->page = grab_cache_page(&dir->i_data, pgoff); + if (idx == 0) + ctl->page = grab_cache_page(&dir->i_data, pgoff); + else + ctl->page = find_lock_page(&dir->i_data, pgoff); if (!ctl->page) { ctl->index = -1; - return -ENOMEM; + return idx == 0 ? 
-ENOMEM : 0; } /* reading/filling the cache are serialized by * i_mutex, no need to use page lock */ unlock_page(ctl->page); ctl->dentries = kmap(ctl->page); + if (idx == 0) + memset(ctl->dentries, 0, PAGE_CACHE_SIZE); } if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 50b268483302922ee63d205a2fa560ec27676eed..0a3544fb50f920d64d1162dc0947e490251f0c4c 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) list_for_each(tmp1, &cifs_tcp_ses_list) { server = list_entry(tmp1, struct TCP_Server_Info, tcp_ses_list); + seq_printf(m, "\nNumber of credits: %d", server->credits); i++; list_for_each(tmp2, &server->smb_ses_list) { ses = list_entry(tmp2, struct cifs_ses, @@ -255,7 +256,6 @@ static const struct file_operations cifs_debug_data_proc_fops = { static ssize_t cifs_stats_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - char c; bool bv; int rc; struct list_head *tmp1, *tmp2, *tmp3; @@ -263,11 +263,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, struct cifs_ses *ses; struct cifs_tcon *tcon; - rc = get_user(c, buffer); - if (rc) - return rc; - - if (strtobool(&c, &bv) == 0) { + rc = kstrtobool_from_user(buffer, count, &bv); + if (rc == 0) { #ifdef CONFIG_CIFS_STATS2 atomic_set(&totBufAllocCount, 0); atomic_set(&totSmBufAllocCount, 0); @@ -290,6 +287,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, } } spin_unlock(&cifs_tcp_ses_lock); + } else { + return rc; } return count; @@ -433,17 +432,17 @@ static int cifsFYI_proc_open(struct inode *inode, struct file *file) static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - char c; + char c[2] = { '\0' }; bool bv; int rc; - rc = get_user(c, buffer); + rc = get_user(c[0], buffer); if (rc) return rc; - if (strtobool(&c, &bv) == 0) + if 
(strtobool(c, &bv) == 0) cifsFYI = bv; - else if ((c > '1') && (c <= '9')) - cifsFYI = (int) (c - '0'); /* see cifs_debug.h for meanings */ + else if ((c[0] > '1') && (c[0] <= '9')) + cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */ return count; } @@ -471,20 +470,12 @@ static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file) static ssize_t cifs_linux_ext_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - char c; - bool bv; int rc; - rc = get_user(c, buffer); + rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled); if (rc) return rc; - rc = strtobool(&c, &bv); - if (rc) - return rc; - - linuxExtEnabled = bv; - return count; } @@ -511,20 +502,12 @@ static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file) static ssize_t cifs_lookup_cache_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - char c; - bool bv; int rc; - rc = get_user(c, buffer); + rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled); if (rc) return rc; - rc = strtobool(&c, &bv); - if (rc) - return rc; - - lookupCacheEnabled = bv; - return count; } @@ -551,20 +534,12 @@ static int traceSMB_proc_open(struct inode *inode, struct file *file) static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - char c; - bool bv; int rc; - rc = get_user(c, buffer); + rc = kstrtobool_from_user(buffer, count, &traceSMB); if (rc) return rc; - rc = strtobool(&c, &bv); - if (rc) - return rc; - - traceSMB = bv; - return count; } @@ -622,7 +597,6 @@ static ssize_t cifs_security_flags_proc_write(struct file *file, int rc; unsigned int flags; char flags_string[12]; - char c; bool bv; if ((count < 1) || (count > 11)) @@ -635,11 +609,10 @@ static ssize_t cifs_security_flags_proc_write(struct file *file, if (count < 3) { /* single char or single char followed by null */ - c = flags_string[0]; - if (strtobool(&c, &bv) == 0) { + if 
(strtobool(flags_string, &bv) == 0) { global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF; return count; - } else if (!isdigit(c)) { + } else if (!isdigit(flags_string[0])) { cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string); return -EINVAL; diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index 66cf0f9fff8984cb12eed1c89d91bc9c6ccc6a9d..c611ca2339d71e982da848bf1a5c1432d691e53c 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h @@ -25,7 +25,7 @@ void cifs_dump_mem(char *label, void *data, int length); void cifs_dump_detail(void *); void cifs_dump_mids(struct TCP_Server_Info *); -extern int traceSMB; /* flag which enables the function below */ +extern bool traceSMB; /* flag which enables the function below */ void dump_smb(void *, int); #define CIFS_INFO 0x01 #define CIFS_RC 0x02 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 450578097fb7f093142ed4b1c10130a774a17c3f..4f4fc9ff36365faf5826049e3179a8c1d9eed37c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -54,10 +54,10 @@ #endif int cifsFYI = 0; -int traceSMB = 0; +bool traceSMB; bool enable_oplocks = true; -unsigned int linuxExtEnabled = 1; -unsigned int lookupCacheEnabled = 1; +bool linuxExtEnabled = true; +bool lookupCacheEnabled = true; unsigned int global_secflags = CIFSSEC_DEF; /* unsigned int ntlmv2_support = 0; */ unsigned int sign_CIFS_PDUs = 1; @@ -268,7 +268,7 @@ cifs_alloc_inode(struct super_block *sb) cifs_inode->createtime = 0; cifs_inode->epoch = 0; #ifdef CONFIG_CIFS_SMB2 - get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE); + generate_random_uuid(cifs_inode->lease_key); #endif /* * Can not set i_flags here - they get immediately overwritten to zero @@ -1210,7 +1210,6 @@ init_cifs(void) GlobalTotalActiveXid = 0; GlobalMaxActiveXid = 0; spin_lock_init(&cifs_tcp_ses_lock); - spin_lock_init(&cifs_file_list_lock); spin_lock_init(&GlobalMid_Lock); if (cifs_max_pending < 2) { diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 
2b510c537a0d588c532fe9200cdd4a9b03a4c9f4..c669a1471395e329aa7801d966597c9c0649ba93 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -827,6 +827,7 @@ struct cifs_tcon { struct list_head tcon_list; int tc_count; struct list_head openFileList; + spinlock_t open_file_lock; /* protects list above */ struct cifs_ses *ses; /* pointer to session associated with */ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ char *nativeFileSystem; @@ -883,7 +884,7 @@ struct cifs_tcon { #endif /* CONFIG_CIFS_STATS2 */ __u64 bytes_read; __u64 bytes_written; - spinlock_t stat_lock; + spinlock_t stat_lock; /* protects the two fields above */ #endif /* CONFIG_CIFS_STATS */ FILE_SYSTEM_DEVICE_INFO fsDevInfo; FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ @@ -1034,8 +1035,10 @@ struct cifs_fid_locks { }; struct cifsFileInfo { + /* following two lists are protected by tcon->open_file_lock */ struct list_head tlist; /* pointer to next fid owned by tcon */ struct list_head flist; /* next fid (file instance) for this inode */ + /* lock list below protected by cifsi->lock_sem */ struct cifs_fid_locks *llist; /* brlocks held by this fid */ kuid_t uid; /* allows finding which FileInfo structure */ __u32 pid; /* process id who opened file */ @@ -1043,11 +1046,12 @@ struct cifsFileInfo { /* BB add lock scope info here if needed */ ; /* lock scope id (0 if none) */ struct dentry *dentry; - unsigned int f_flags; struct tcon_link *tlink; + unsigned int f_flags; bool invalidHandle:1; /* file closed via session abend */ bool oplock_break_cancelled:1; - int count; /* refcount protected by cifs_file_list_lock */ + int count; + spinlock_t file_info_lock; /* protects four flag/count fields above */ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ struct cifs_search_info srch_inf; struct work_struct oplock_break; /* work for oplock breaks */ @@ -1114,7 +1118,7 @@ struct cifs_writedata { /* * Take a reference on the file private data. 
Must be called with - * cifs_file_list_lock held. + * cfile->file_info_lock held. */ static inline void cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file) @@ -1508,8 +1512,10 @@ require use of the stronger protocol */ * GlobalMid_Lock protects: * list operations on pending_mid_q and oplockQ * updates to XID counters, multiplex id and SMB sequence numbers - * cifs_file_list_lock protects: - * list operations on tcp and SMB session lists and tCon lists + * tcp_ses_lock protects: + * list operations on tcp and SMB session lists + * tcon->open_file_lock protects the list of open files hanging off the tcon + * cfile->file_info_lock protects counters and fields in cifs file struct * f_owner.lock protects certain per file struct operations * mapping->page_lock protects certain per page operations * @@ -1541,18 +1547,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; * tcp session, and the list of tcon's per smb session. It also protects * the reference counters for the server, smb session, and tcon. Finally, * changes to the tcon->tidStatus should be done while holding this lock. + * generally the locks should be taken in order tcp_ses_lock before + * tcon->open_file_lock and that before file->file_info_lock since the + * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file */ GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock; -/* - * This lock protects the cifs_file->llist and cifs_file->flist - * list operations, and updates to some flags (cifs_file->invalidHandle) - * It will be moved to either use the tcon->stat_lock or equivalent later. - * If cifs_tcp_ses_lock and the lock below are both needed to be held, then - * the cifs_tcp_ses_lock must be grabbed first and released last. 
- */ -GLOBAL_EXTERN spinlock_t cifs_file_list_lock; - #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */ /* Outstanding dir notify requests */ GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; @@ -1588,11 +1588,11 @@ GLOBAL_EXTERN atomic_t midCount; /* Misc globals */ GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */ -GLOBAL_EXTERN unsigned int lookupCacheEnabled; +GLOBAL_EXTERN bool lookupCacheEnabled; GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent with more secure ntlmssp2 challenge/resp */ GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ -GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ +GLOBAL_EXTERN bool linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 76fcb50295a38b63a58a0d3f656d023e02b988a8..b1104ed8f54c042177061166da7c3088307b1fb3 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon) struct list_head *tmp1; /* list all files open on tree connection and mark them invalid */ - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); list_for_each_safe(tmp, tmp1, &tcon->openFileList) { open_file = list_entry(tmp, struct cifsFileInfo, tlist); open_file->invalidHandle = true; open_file->oplock_break_cancelled = true; } - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); /* * BB Add call to invalidate_inodes(sb) for all superblocks mounted * to this tcon. 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 61c3a5ab863725cd3ea3f1ceefc54bcb35a622cf..812a8cb07c632b855c7e4deee62d3b1eaf626e3f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2200,7 +2200,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, sizeof(tcp_ses->dstaddr)); #ifdef CONFIG_CIFS_SMB2 - get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE); + generate_random_uuid(tcp_ses->client_guid); #endif /* * at this point we are the only ones with the pointer @@ -3693,14 +3693,16 @@ remote_path_check: goto mount_fail_check; } - rc = cifs_are_all_path_components_accessible(server, + if (rc != -EREMOTE) { + rc = cifs_are_all_path_components_accessible(server, xid, tcon, cifs_sb, full_path); - if (rc != 0) { - cifs_dbg(VFS, "cannot query dirs between root and final path, " - "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; - rc = 0; + if (rc != 0) { + cifs_dbg(VFS, "cannot query dirs between root and final path, " + "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; + rc = 0; + } } kfree(full_path); } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0068e82217c3f98b1fa5fbc4e95926ae52182a6e..72f270d4bd17946526f202b209f456305049b5df 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, cfile->tlink = cifs_get_tlink(tlink); INIT_WORK(&cfile->oplock_break, cifs_oplock_break); mutex_init(&cfile->fh_mutex); + spin_lock_init(&cfile->file_info_lock); cifs_sb_active(inode->i_sb); @@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, oplock = 0; } - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) oplock = fid->pending_open->oplock; list_del(&fid->pending_open->olist); @@ -326,12 +327,13 @@ cifs_new_fileinfo(struct 
cifs_fid *fid, struct file *file, server->ops->set_fid(cfile, fid, oplock); list_add(&cfile->tlist, &tcon->openFileList); + /* if readable file instance put first in list*/ if (file->f_mode & FMODE_READ) list_add(&cfile->flist, &cinode->openFileList); else list_add_tail(&cfile->flist, &cinode->openFileList); - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); if (fid->purge_cache) cifs_zap_mapping(inode); @@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct cifsFileInfo * cifsFileInfo_get(struct cifsFileInfo *cifs_file) { - spin_lock(&cifs_file_list_lock); + spin_lock(&cifs_file->file_info_lock); cifsFileInfo_get_locked(cifs_file); - spin_unlock(&cifs_file_list_lock); + spin_unlock(&cifs_file->file_info_lock); return cifs_file; } /* * Release a reference on the file private data. This may involve closing * the filehandle out on the server. Must be called without holding - * cifs_file_list_lock. + * tcon->open_file_lock and cifs_file->file_info_lock. 
*/ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) { @@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) struct cifs_pending_open open; bool oplock_break_cancelled; - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); + + spin_lock(&cifs_file->file_info_lock); if (--cifs_file->count > 0) { - spin_unlock(&cifs_file_list_lock); + spin_unlock(&cifs_file->file_info_lock); + spin_unlock(&tcon->open_file_lock); return; } + spin_unlock(&cifs_file->file_info_lock); if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); @@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags); cifs_set_oplock_level(cifsi, 0); } - spin_unlock(&cifs_file_list_lock); + + spin_unlock(&tcon->open_file_lock); oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); @@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, struct file *file) server = tcon->ses->server; cifs_dbg(FYI, "Freeing private data in close dir\n"); - spin_lock(&cifs_file_list_lock); + spin_lock(&cfile->file_info_lock); if (server->ops->dir_needs_close(cfile)) { cfile->invalidHandle = true; - spin_unlock(&cifs_file_list_lock); + spin_unlock(&cfile->file_info_lock); if (server->ops->close_dir) rc = server->ops->close_dir(xid, tcon, &cfile->fid); else @@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, struct file *file) /* not much we can do if it fails anyway, ignore rc */ rc = 0; } else - spin_unlock(&cifs_file_list_lock); + spin_unlock(&cfile->file_info_lock); buf = cfile->srch_inf.ntwrk_buf_start; if (buf) { @@ -1720,12 +1727,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, { struct cifsFileInfo *open_file = NULL; struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 
fsuid_only = false; - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); /* we could simply get the first_list_entry since write-only entries are always at the end of the list but since the first entry might have a close pending, we go through the whole list */ @@ -1736,8 +1744,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, if (!open_file->invalidHandle) { /* found a good file */ /* lock it so it will not be closed on us */ - cifsFileInfo_get_locked(open_file); - spin_unlock(&cifs_file_list_lock); + cifsFileInfo_get(open_file); + spin_unlock(&tcon->open_file_lock); return open_file; } /* else might as well continue, and look for another, or simply have the caller reopen it @@ -1745,7 +1753,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, } else /* write only file */ break; /* write only files are last so must be done */ } - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); return NULL; } @@ -1754,6 +1762,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, { struct cifsFileInfo *open_file, *inv_file = NULL; struct cifs_sb_info *cifs_sb; + struct cifs_tcon *tcon; bool any_available = false; int rc; unsigned int refind = 0; @@ -1769,15 +1778,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, } cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); + tcon = cifs_sb_master_tcon(cifs_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) fsuid_only = false; - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); refind_writable: if (refind > MAX_REOPEN_ATT) { - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); return NULL; } list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { @@ -1788,8 +1798,8 @@ refind_writable: if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { if (!open_file->invalidHandle) { /* found a good 
writable file */ - cifsFileInfo_get_locked(open_file); - spin_unlock(&cifs_file_list_lock); + cifsFileInfo_get(open_file); + spin_unlock(&tcon->open_file_lock); return open_file; } else { if (!inv_file) @@ -1805,24 +1815,24 @@ refind_writable: if (inv_file) { any_available = false; - cifsFileInfo_get_locked(inv_file); + cifsFileInfo_get(inv_file); } - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); if (inv_file) { rc = cifs_reopen_file(inv_file, false); if (!rc) return inv_file; else { - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); list_move_tail(&inv_file->flist, &cifs_inode->openFileList); - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); cifsFileInfo_put(inv_file); - spin_lock(&cifs_file_list_lock); ++refind; inv_file = NULL; + spin_lock(&tcon->open_file_lock); goto refind_writable; } } @@ -3632,15 +3642,17 @@ static int cifs_readpage(struct file *file, struct page *page) static int is_inode_writable(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *open_file; + struct cifs_tcon *tcon = + cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb)); - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); return 1; } } - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); return 0; } diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 8442b8b8e0be145e7f68055e0025f1909442d156..2396ab099849a132d884d79202a5d5bae0ea9a18 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -120,6 +120,7 @@ tconInfoAlloc(void) ++ret_buf->tc_count; INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); + spin_lock_init(&ret_buf->open_file_lock); #ifdef CONFIG_CIFS_STATS spin_lock_init(&ret_buf->stat_lock); #endif @@ -465,7 +466,7 @@ is_valid_oplock_break(char 
*buffer, struct TCP_Server_Info *srv) continue; cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); list_for_each(tmp2, &tcon->openFileList) { netfile = list_entry(tmp2, struct cifsFileInfo, tlist); @@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) &netfile->oplock_break); netfile->oplock_break_cancelled = false; - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "No matching file for oplock break\n"); return true; @@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb) void cifs_del_pending_open(struct cifs_pending_open *open) { - spin_lock(&cifs_file_list_lock); + spin_lock(&tlink_tcon(open->tlink)->open_file_lock); list_del(&open->olist); - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } void @@ -635,7 +636,7 @@ void cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open) { - spin_lock(&cifs_file_list_lock); + spin_lock(&tlink_tcon(tlink)->open_file_lock); cifs_add_pending_open_locked(fid, tlink, open); - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index b30a4a6d98a0f002c235bf3128012aff06ba2bc3..833e5844a2dbbc207b59d8a6727f5d255e16e5cb 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -594,14 +594,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) { /* close and restart search */ cifs_dbg(FYI, "search backing up - close and restart search\n"); - spin_lock(&cifs_file_list_lock); + spin_lock(&cfile->file_info_lock); if (server->ops->dir_needs_close(cfile)) { 
cfile->invalidHandle = true; - spin_unlock(&cifs_file_list_lock); + spin_unlock(&cfile->file_info_lock); if (server->ops->close_dir) server->ops->close_dir(xid, tcon, &cfile->fid); } else - spin_unlock(&cifs_file_list_lock); + spin_unlock(&cfile->file_info_lock); if (cfile->srch_inf.ntwrk_buf_start) { cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n"); if (cfile->srch_inf.smallBuf) diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index 0ffa180943357c5514b5ae025f61e4b19f574f38..238759c146badd9eddc372e31c558ec948647343 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -61,4 +61,14 @@ /* Maximum buffer size value we can send with 1 credit */ #define SMB2_MAX_BUFFER_SIZE 65536 +/* + * Maximum number of credits to keep available. + * This value is chosen somewhat arbitrarily. The Windows client + * defaults to 128 credits, the Windows server allows clients up to + * 512 credits, and the NetApp server does not limit clients at all. + * Choose a high enough value such that the client shouldn't limit + * performance. 
+ */ +#define SMB2_MAX_CREDITS_AVAILABLE 32000 + #endif /* _SMB2_GLOB_H */ diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 4f0231e685a922efcb97a6c5905bfedc7ca7ece6..1238cd3552f9cc8e8f8fc560117af37cd224aa13 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path, struct tcon_link *tlink; int rc; + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && + (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && + (buf->Attributes == 0)) + return 0; /* would be a no op, no sense sending this */ + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); + rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path, FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf, SMB2_OP_SET_INFO); diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 1c5907019045517ac302daf4c04e8caea21f1fda..e5bc85e49be713d9148341a0339da234fc6d588e 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -525,19 +525,19 @@ smb2_is_valid_lease_break(char *buffer) list_for_each(tmp1, &server->smb_ses_list) { ses = list_entry(tmp1, struct cifs_ses, smb_ses_list); - spin_lock(&cifs_file_list_lock); list_for_each(tmp2, &ses->tcon_list) { tcon = list_entry(tmp2, struct cifs_tcon, tcon_list); + spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); if (smb2_tcon_has_lease(tcon, rsp, lw)) { - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } + spin_unlock(&tcon->open_file_lock); } - spin_unlock(&cifs_file_list_lock); } } spin_unlock(&cifs_tcp_ses_lock); @@ -579,7 +579,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); - spin_lock(&cifs_file_list_lock); + spin_lock(&tcon->open_file_lock); list_for_each(tmp2, &tcon->openFileList) { cfile = 
list_entry(tmp2, struct cifsFileInfo, tlist); @@ -591,7 +591,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) cifs_dbg(FYI, "file id match, oplock break\n"); cinode = CIFS_I(d_inode(cfile->dentry)); - + spin_lock(&cfile->file_info_lock); if (!CIFS_CACHE_WRITE(cinode) && rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE) cfile->oplock_break_cancelled = true; @@ -613,14 +613,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) clear_bit( CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags); - + spin_unlock(&cfile->file_info_lock); queue_work(cifsiod_wq, &cfile->oplock_break); - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } - spin_unlock(&cifs_file_list_lock); + spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "No matching file for oplock break\n"); return true; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index dd8543caa56e93267bc39c8819db021167276b52..be34b4860675f046cf8a475393eb994fe52f9fda 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -282,7 +282,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) cifs_dbg(FYI, "Link Speed %lld\n", le64_to_cpu(out_buf->LinkSpeed)); } - + kfree(out_buf); return rc; } #endif /* STATS2 */ @@ -536,6 +536,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) server->ops->set_oplock_level(cinode, oplock, fid->epoch, &fid->purge_cache); cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode); + memcpy(cfile->fid.create_guid, fid->create_guid, 16); } static void @@ -694,6 +695,7 @@ smb2_clone_range(const unsigned int xid, cchunk_out: kfree(pcchunk); + kfree(retbuf); return rc; } @@ -818,7 +820,6 @@ smb2_duplicate_extents(const unsigned int xid, { int rc; unsigned int ret_data_len; - char *retbuf = NULL; struct duplicate_extents_to_file dup_ext_buf; struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink); @@ -844,7 +845,7 @@ 
smb2_duplicate_extents(const unsigned int xid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, true /* is_fsctl */, (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), - (char **)&retbuf, + NULL, &ret_data_len); if (ret_data_len > 0) @@ -867,7 +868,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile) { struct fsctl_set_integrity_information_req integr_info; - char *retbuf = NULL; unsigned int ret_data_len; integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED); @@ -879,7 +879,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, FSCTL_SET_INTEGRITY_INFORMATION, true /* is_fsctl */, (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), - (char **)&retbuf, + NULL, &ret_data_len); } @@ -1036,7 +1036,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid) static void smb2_new_lease_key(struct cifs_fid *fid) { - get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE); + generate_random_uuid(fid->lease_key); } #define SMB2_SYMLINK_STRUCT_SIZE \ diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 0b6dc1942bdcccae62b0bba199f9c166fac7e3d1..0dbbdf5e4aeeb2fc56e7a9d4df235c18c188430e 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -103,7 +103,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , hdr->ProtocolId[3] = 'B'; hdr->StructureSize = cpu_to_le16(64); hdr->Command = smb2_cmd; - hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */ + if (tcon && tcon->ses && tcon->ses->server) { + struct TCP_Server_Info *server = tcon->ses->server; + + spin_lock(&server->req_lock); + /* Request up to 2 credits but don't go over the limit. 
*/ + if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE) + hdr->CreditRequest = cpu_to_le16(0); + else + hdr->CreditRequest = cpu_to_le16( + min_t(int, SMB2_MAX_CREDITS_AVAILABLE - + server->credits, 2)); + spin_unlock(&server->req_lock); + } else { + hdr->CreditRequest = cpu_to_le16(2); + } hdr->ProcessId = cpu_to_le32((__u16)current->tgid); if (!tcon) @@ -593,6 +607,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, char *security_blob = NULL; unsigned char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ + u64 previous_session = ses->Suid; cifs_dbg(FYI, "Session Setup\n"); @@ -630,6 +645,10 @@ ssetup_ntlmssp_authenticate: return rc; req->hdr.SessionId = 0; /* First session, not a reauthenticate */ + + /* if reconnect, we need to send previous sess id, otherwise it is 0 */ + req->PreviousSessionId = previous_session; + req->Flags = 0; /* MBZ */ /* to enable echos and oplocks */ req->hdr.CreditRequest = cpu_to_le16(3); @@ -1167,7 +1186,7 @@ create_durable_v2_buf(struct cifs_fid *pfid) buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); - get_random_bytes(buf->dcontext.CreateGuid, 16); + generate_random_uuid(buf->dcontext.CreateGuid); memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ @@ -2059,6 +2078,7 @@ smb2_async_readv(struct cifs_readdata *rdata) if (rdata->credits) { buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); + buf->CreditRequest = buf->CreditCharge; spin_lock(&server->req_lock); server->credits += rdata->credits - le16_to_cpu(buf->CreditCharge); @@ -2245,6 +2265,7 @@ smb2_async_writev(struct cifs_writedata *wdata, if (wdata->credits) { req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); + req->hdr.CreditRequest = req->hdr.CreditCharge; spin_lock(&server->req_lock); server->credits += wdata->credits - 
le16_to_cpu(req->hdr.CreditCharge); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 4af52780ec350323cd41e993389e7214fb50f97d..b8f553b32ddab34eae501a49b04eebdcfd759632 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -276,7 +276,7 @@ struct smb2_sess_setup_req { __le32 Channel; __le16 SecurityBufferOffset; __le16 SecurityBufferLength; - __le64 PreviousSessionId; + __u64 PreviousSessionId; __u8 Buffer[1]; /* variable length GSS security buffer */ } __packed; diff --git a/fs/coredump.c b/fs/coredump.c index dfc87c5f5a54febea6f6e36bd3636965f9fa6396..5d15c4975ba14f31b32439259780ee8e42681e40 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -399,7 +400,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state) if (core_waiters > 0) { struct core_thread *ptr; + freezer_do_not_count(); wait_for_completion(&core_state->startup); + freezer_count(); /* * Wait for all the threads to become inactive, so that * all the thread context (extended register state, like diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 3a37bd3f9637811c3b86e5c05be5aa47f32819c3..9d7a4a71490738029fbc33e6d6a1ca1b00fe97f0 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1607,16 +1607,12 @@ void dlm_lowcomms_stop(void) mutex_lock(&connections_lock); dlm_allow_conn = 0; foreach_conn(stop_conn); + clean_writequeues(); + foreach_conn(free_conn); mutex_unlock(&connections_lock); work_stop(); - mutex_lock(&connections_lock); - clean_writequeues(); - - foreach_conn(free_conn); - - mutex_unlock(&connections_lock); kmem_cache_destroy(con_cache); } diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index 27695e6f4e46629a8a888f38bcaf6f0e4ceaf1ec..d6aeb84e90b61acb8dff22443b9a9694c7ba3dba 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -193,15 +193,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) case ACL_TYPE_ACCESS: name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; if 
(acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - if (error == 0) - acl = NULL; - } + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); } break; diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 69b1e73026a51f6b8ae0e591d33abb0fdc9169d8..c3fe1e323951f9da0d19aa3eeb333c119f59a142 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -196,15 +196,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type, case ACL_TYPE_ACCESS: name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - inode->i_ctime = ext4_current_time(inode); - ext4_mark_inode_dirty(handle, inode); - if (error == 0) - acl = NULL; - } + inode->i_ctime = ext4_current_time(inode); + ext4_mark_inode_dirty(handle, inode); } break; diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c index ad050698143fde483e9c5cddc5e32c88041a9b29..8a9feb341f314ea94f7f4e5984aea57f4b8e20a4 100644 --- a/fs/ext4/crypto_policy.c +++ b/fs/ext4/crypto_policy.c @@ -102,6 +102,9 @@ static int ext4_create_encryption_context_from_policy( int ext4_process_policy(const struct ext4_encryption_policy *policy, struct inode *inode) { + if (!inode_owner_or_capable(inode)) + return -EACCES; + if (policy->version != 0) return -EINVAL; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 5d9d0be2770c9dea487288ea965010067d40a215..14d7f72f8092169f5058f5edb32ca0a77079731b 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -221,6 +221,7 @@ struct ext4_io_submit { #define EXT4_MAX_BLOCK_SIZE 65536 #define EXT4_MIN_BLOCK_LOG_SIZE 10 #define EXT4_MAX_BLOCK_LOG_SIZE 16 +#define EXT4_MAX_CLUSTER_LOG_SIZE 30 #ifdef __KERNEL__ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) 
#else diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 8eac7d586997c23b0e777dd4451b838379b5499d..9da42ace762a3e1cff1bbb2d2c5a667195cf2ee6 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5738,6 +5738,9 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } + } else { + ext4_ext_drop_refs(path); + kfree(path); } ret = ext4_es_remove_extent(inode, offset_lblk, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 55a1b5c0072ab14b67bf5fa4c110070510448dfe..eaab159a9c89c30e8f5fbb99fb3b67938bb9ce0b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3652,7 +3652,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, } /* - * ext4_punch_hole: punches a hole in a file by releaseing the blocks + * ext4_punch_hole: punches a hole in a file by releasing the blocks * associated with the given offset and length * * @inode: File inode @@ -3682,7 +3682,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) * Write out all dirty pages to avoid race conditions * Then release them. */ - if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { + if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ret = filemap_write_and_wait_range(mapping, offset, offset + length - 1); if (ret) @@ -4563,14 +4563,14 @@ static int ext4_do_update_inode(handle_t *handle, * Fix up interoperability with old kernels. 
Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ - if (!ei->i_dtime) { + if (ei->i_dtime && list_empty(&ei->i_orphan)) { + raw_inode->i_uid_high = 0; + raw_inode->i_gid_high = 0; + } else { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(i_uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(i_gid)); - } else { - raw_inode->i_uid_high = 0; - raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 95315b1f4b71c58458f0a6fa2b4d5ae2059e06a5..7e974878d9a9ca1774acf59059bf9cd88ebf890e 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -633,7 +633,13 @@ resizefs_out: goto encryption_policy_out; } + err = mnt_want_write_file(filp); + if (err) + goto encryption_policy_out; + err = ext4_process_policy(&policy, inode); + + mnt_drop_write_file(filp); encryption_policy_out: return err; #else diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index a0daca4b127b6e4d356d8d536a8d44c1f7afa984..0b1c97875686bbe2732c0dbb9cb2a131c4d52c80 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -815,7 +815,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b) * for this page; do not hold this lock when calling this routine! */ -static int ext4_mb_init_cache(struct page *page, char *incore) +static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) { ext4_group_t ngroups; int blocksize; @@ -848,7 +848,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) /* allocate buffer_heads to read bitmaps */ if (groups_per_page > 1) { i = sizeof(struct buffer_head *) * groups_per_page; - bh = kzalloc(i, GFP_NOFS); + bh = kzalloc(i, gfp); if (bh == NULL) { err = -ENOMEM; goto out; @@ -983,7 +983,7 @@ out: * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 
*/ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, - ext4_group_t group, struct ext4_buddy *e4b) + ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) { struct inode *inode = EXT4_SB(sb)->s_buddy_cache; int block, pnum, poff; @@ -1002,7 +1002,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; - page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, pnum, gfp); if (!page) return -ENOMEM; BUG_ON(page->mapping != inode->i_mapping); @@ -1016,7 +1016,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, block++; pnum = block / blocks_per_page; - page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, pnum, gfp); if (!page) return -ENOMEM; BUG_ON(page->mapping != inode->i_mapping); @@ -1042,7 +1042,7 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) * calling this routine! */ static noinline_for_stack -int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) +int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) { struct ext4_group_info *this_grp; @@ -1062,7 +1062,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) * The call to ext4_mb_get_buddy_page_lock will mark the * page accessed. 
*/ - ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); + ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { /* * somebody initialized the group @@ -1072,7 +1072,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) } page = e4b.bd_bitmap_page; - ret = ext4_mb_init_cache(page, NULL); + ret = ext4_mb_init_cache(page, NULL, gfp); if (ret) goto err; if (!PageUptodate(page)) { @@ -1091,7 +1091,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) } /* init buddy cache */ page = e4b.bd_buddy_page; - ret = ext4_mb_init_cache(page, e4b.bd_bitmap); + ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); if (ret) goto err; if (!PageUptodate(page)) { @@ -1109,8 +1109,8 @@ err: * calling this routine! */ static noinline_for_stack int -ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, - struct ext4_buddy *e4b) +ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, + struct ext4_buddy *e4b, gfp_t gfp) { int blocks_per_page; int block; @@ -1140,7 +1140,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, * we need full data about the group * to make a good selection */ - ret = ext4_mb_init_group(sb, group); + ret = ext4_mb_init_group(sb, group, gfp); if (ret) return ret; } @@ -1168,11 +1168,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, * wait for it to initialize. 
*/ page_cache_release(page); - page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, pnum, gfp); if (page) { BUG_ON(page->mapping != inode->i_mapping); if (!PageUptodate(page)) { - ret = ext4_mb_init_cache(page, NULL); + ret = ext4_mb_init_cache(page, NULL, gfp); if (ret) { unlock_page(page); goto err; @@ -1204,11 +1204,12 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, if (page == NULL || !PageUptodate(page)) { if (page) page_cache_release(page); - page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, pnum, gfp); if (page) { BUG_ON(page->mapping != inode->i_mapping); if (!PageUptodate(page)) { - ret = ext4_mb_init_cache(page, e4b->bd_bitmap); + ret = ext4_mb_init_cache(page, e4b->bd_bitmap, + gfp); if (ret) { unlock_page(page); goto err; @@ -1247,6 +1248,12 @@ err: return ret; } +static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + struct ext4_buddy *e4b) +{ + return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); +} + static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) @@ -2047,7 +2054,7 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { - int ret = ext4_mb_init_group(ac->ac_sb, group); + int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); if (ret) return ret; } @@ -4809,7 +4816,9 @@ do_more: #endif trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); - err = ext4_mb_load_buddy(sb, block_group, &e4b); + /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. 
*/ + err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, + GFP_NOFS|__GFP_NOFAIL); if (err) goto error_return; @@ -5218,7 +5227,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range, grp = ext4_get_group_info(sb, group); /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { - ret = ext4_mb_init_group(sb, group); + ret = ext4_mb_init_group(sb, group, GFP_NOFS); if (ret) break; } diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 796ff0eafd3c352beccb2347edcc3df87fc91422..7861d801b048e5d47143c2a3d9d6148cf8a6c971 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, return -EOPNOTSUPP; } + if (ext4_encrypted_inode(orig_inode) || + ext4_encrypted_inode(donor_inode)) { + ext4_msg(orig_inode->i_sb, KERN_ERR, + "Online defrag not supported for encrypted files"); + return -EOPNOTSUPP; + } + /* Protect orig and donor inodes against a truncate */ lock_two_nondirectories(orig_inode, donor_inode); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 20892f27f7be91ec9f64ba021dea289ac54a0ffb..b936da9d3d0c03faf51fe1fa0fb951e6ad460e25 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2034,33 +2034,31 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, frame->entries = entries; frame->at = entries; frame->bh = bh; - bh = bh2; retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (retval) goto out_frames; - retval = ext4_handle_dirty_dirent_node(handle, dir, bh); + retval = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (retval) goto out_frames; - de = do_split(handle,dir, &bh, frame, &fname->hinfo); + de = do_split(handle,dir, &bh2, frame, &fname->hinfo); if (IS_ERR(de)) { retval = PTR_ERR(de); goto out_frames; } - dx_release(frames); - retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh); - brelse(bh); - return retval; + retval = 
add_dirent_to_buf(handle, fname, dir, inode, de, bh2); out_frames: /* * Even if the block split failed, we have to properly write * out all the changes we did so far. Otherwise we can end up * with corrupted filesystem. */ - ext4_mark_inode_dirty(handle, dir); + if (retval) + ext4_mark_inode_dirty(handle, dir); dx_release(frames); + brelse(bh2); return retval; } diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index dd98270d0b217d51582d5f59184666fd07bfcb9d..71a08a2945298da01e80d6e4920f15b4649f9d85 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -330,6 +330,5 @@ int ext4_mpage_readpages(struct address_space *mapping, BUG_ON(pages && !list_empty(pages)); if (bio) submit_bio(READ, bio); - return 0; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5bab28caa9d4a882f1da662f8efb145e88cf1a09..127155b82e6e1d67f6f21cd5d6c9eeeee0bffed3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3394,7 +3394,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, - "Unsupported filesystem blocksize %d", blocksize); + "Unsupported filesystem blocksize %d (%d log_block_size)", + blocksize, le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } + if (le32_to_cpu(es->s_log_block_size) > + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log block size: %u", + le32_to_cpu(es->s_log_block_size)); goto failed_mount; } @@ -3533,6 +3541,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "block size (%d)", clustersize, blocksize); goto failed_mount; } + if (le32_to_cpu(es->s_log_cluster_size) > + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log cluster size: %u", + le32_to_cpu(es->s_log_cluster_size)); + goto failed_mount; + } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); 
sbi->s_clusters_per_group = diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 1420a3c614afb1a4c06e87471163acf01b3b98d8..5d09ea585840a316257394b193f84e66d8fc5546 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = { EXT4_ATTR_FEATURE(lazy_itable_init); EXT4_ATTR_FEATURE(batched_discard); EXT4_ATTR_FEATURE(meta_bg_resize); +#ifdef CONFIG_EXT4_FS_ENCRYPTION EXT4_ATTR_FEATURE(encryption); +#endif EXT4_ATTR_FEATURE(metadata_csum_seed); static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(lazy_itable_init), ATTR_LIST(batched_discard), ATTR_LIST(meta_bg_resize), +#ifdef CONFIG_EXT4_FS_ENCRYPTION ATTR_LIST(encryption), +#endif ATTR_LIST(metadata_csum_seed), NULL, }; diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index c8f25f7241f06a96c3d99ebf5c82e53a6099ed60..e9a8d676c6bc53df1a238e9d07f3948f51ebe56e 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -214,12 +214,10 @@ static int __f2fs_set_acl(struct inode *inode, int type, case ACL_TYPE_ACCESS: name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; set_acl_inode(fi, inode->i_mode); - if (error == 0) - acl = NULL; } break; diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c index d4a96af513c22f28ea1242496286d4ff1a606567..e504f548b64e3045177ae884b3352ac34bd7466e 100644 --- a/fs/f2fs/crypto_policy.c +++ b/fs/f2fs/crypto_policy.c @@ -89,6 +89,9 @@ static int f2fs_create_encryption_context_from_policy( int f2fs_process_policy(const struct f2fs_encryption_policy *policy, struct inode *inode) { + if (!inode_owner_or_capable(inode)) + return -EACCES; + if (policy->version != 0) return -EINVAL; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 48fb86bc153d4267c2b6c62636f867b94b803358..b237862dc2740f36219562feebdfc01fe45e5419 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1745,14 +1745,46 @@ 
error: static int fuse_setattr(struct dentry *entry, struct iattr *attr) { struct inode *inode = d_inode(entry); + struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL; + int ret; if (!fuse_allow_current_process(get_fuse_conn(inode))) return -EACCES; - if (attr->ia_valid & ATTR_FILE) - return fuse_do_setattr(inode, attr, attr->ia_file); - else - return fuse_do_setattr(inode, attr, NULL); + if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) { + int kill; + + attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | + ATTR_MODE); + /* + * ia_mode calculation may have used stale i_mode. Refresh and + * recalculate. + */ + ret = fuse_do_getattr(inode, NULL, file); + if (ret) + return ret; + + attr->ia_mode = inode->i_mode; + kill = should_remove_suid(entry); + if (kill & ATTR_KILL_SUID) { + attr->ia_valid |= ATTR_MODE; + attr->ia_mode &= ~S_ISUID; + } + if (kill & ATTR_KILL_SGID) { + attr->ia_valid |= ATTR_MODE; + attr->ia_mode &= ~S_ISGID; + } + } + if (!attr->ia_valid) + return 0; + + ret = fuse_do_setattr(inode, attr, file); + if (!ret) { + /* Directory mode changed, may need to revalidate access */ + if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE)) + fuse_invalidate_entry_cache(entry); + } + return ret; } static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, @@ -1845,6 +1877,23 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name, return ret; } +static int fuse_verify_xattr_list(char *list, size_t size) +{ + size_t origsize = size; + + while (size) { + size_t thislen = strnlen(list, size); + + if (!thislen || thislen == size) + return -EIO; + + size -= thislen + 1; + list += thislen + 1; + } + + return origsize; +} + static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) { struct inode *inode = d_inode(entry); @@ -1880,6 +1929,8 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) ret = fuse_simple_request(fc, &args); if (!ret && !size) ret = outarg.size; + if 
(ret > 0 && size) + ret = fuse_verify_xattr_list(list, ret); if (ret == -ENOSYS) { fc->no_listxattr = 1; ret = -EOPNOTSUPP; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 025f11b9cffa10296a4a763d3a330be1697cdc64..64eb2c6ee450baf50a385a5a55a971c6670d2a83 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -558,13 +558,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, req->out.args[0].size = count; } -static void fuse_release_user_pages(struct fuse_req *req, int write) +static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty) { unsigned i; for (i = 0; i < req->num_pages; i++) { struct page *page = req->pages[i]; - if (write) + if (should_dirty) set_page_dirty_lock(page); put_page(page); } @@ -1400,6 +1400,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, loff_t *ppos, int flags) { int write = flags & FUSE_DIO_WRITE; + bool should_dirty = !write && iter_is_iovec(iter); int cuse = flags & FUSE_DIO_CUSE; struct file *file = io->file; struct inode *inode = file->f_mapping->host; @@ -1444,7 +1445,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, nres = fuse_send_read(req, io, pos, nbytes, owner); if (!io->async) - fuse_release_user_pages(req, !write); + fuse_release_user_pages(req, should_dirty); if (req->out.h.error) { if (!res) res = req->out.h.error; @@ -2065,6 +2066,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, { struct inode *inode = page->mapping->host; + /* Haven't copied anything? Skip zeroing, size extending, dirtying. 
*/ + if (!copied) + goto unlock; + if (!PageUptodate(page)) { /* Zero any unwritten bytes at the end of the page */ size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; @@ -2075,6 +2080,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, fuse_write_update_size(inode, pos + copied); set_page_dirty(page); + +unlock: unlock_page(page); page_cache_release(page); diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 1be3b061c05c921f60ddfebcce809875fe8de662..ff0ac96a8e7bd6eea557541e47ec90a9eb8555ff 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -79,17 +79,11 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; - error = posix_acl_equiv_mode(acl, &mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - - if (error == 0) - acl = NULL; - - if (mode != inode->i_mode) { - inode->i_mode = mode; + if (mode != inode->i_mode) mark_inode_dirty(inode); - } } if (acl) { diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c index df0c9af68d05ef0df5a127d53562677bc8a2d882..71b3087b7e327db93192252daa9b3cb15a2ef8c1 100644 --- a/fs/hfsplus/posix_acl.c +++ b/fs/hfsplus/posix_acl.c @@ -68,8 +68,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, case ACL_TYPE_ACCESS: xattr_name = POSIX_ACL_XATTR_ACCESS; if (acl) { - err = posix_acl_equiv_mode(acl, &inode->i_mode); - if (err < 0) + err = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (err) return err; } err = 0; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 5a7b3229b956ea99fe57b1e88f7d8234b5fee690..f34d6f5a5aca38d01043f9c94f67359f9cf84528 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) if (S_ISLNK(root_inode->i_mode)) { char *name = follow_link(host_root_path); - if (IS_ERR(name)) + if (IS_ERR(name)) 
{ err = PTR_ERR(name); - else - err = read_name(root_inode, name); + goto out_put; + } + err = read_name(root_inode, name); kfree(name); if (err) goto out_put; diff --git a/fs/inode.c b/fs/inode.c index b0edef500590c3e49cd5f632640add63426a39d5..2c16b758831dd08715c3c6e270c97f00b6bed10c 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_rdev = 0; inode->dirtied_when = 0; +#ifdef CONFIG_CGROUP_WRITEBACK + inode->i_wb_frn_winner = 0; + inode->i_wb_frn_avg_time = 0; + inode->i_wb_frn_history = 0; +#endif + if (security_inode_alloc(inode)) goto out; spin_lock_init(&inode->i_lock); diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index d67a16f2a45df8fcce56b9ff3ec56f334d951c9c..350f67fb5b9c0da64ebbe87e106e6ef7d236f4da 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -690,6 +690,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) pri_bh = NULL; root_found: + /* We don't support read-write mounts */ + if (!(s->s_flags & MS_RDONLY)) { + error = -EACCES; + goto out_freebh; + } if (joliet_level && (pri == NULL || !opt.rock)) { /* This is the case of Joliet with the norock mount flag. 
@@ -1503,9 +1508,6 @@ struct inode *__isofs_iget(struct super_block *sb, static struct dentry *isofs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - /* We don't support read-write mounts */ - if (!(flags & MS_RDONLY)) - return ERR_PTR(-EACCES); return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ca181e81c765518d4a599025c914adbac626e739..fa1b8e0dcacfc22ebe42c753767295eca5876e24 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1156,6 +1156,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "file as BJ_Reserved"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); + spin_unlock(&journal->j_list_lock); } else if (jh->b_transaction == journal->j_committing_transaction) { /* first access by this transaction */ jh->b_modified = 0; @@ -1163,8 +1164,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "set next transaction"); spin_lock(&journal->j_list_lock); jh->b_next_transaction = transaction; + spin_unlock(&journal->j_list_lock); } - spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); /* diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 2f7a3c09048999f4365c3e5612ffdf18e9c65d54..f9f86f87d32b82ddb7b78ba30c4270f3caac9a8a 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -235,9 +235,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) case ACL_TYPE_ACCESS: xprefix = JFFS2_XPREFIX_ACL_ACCESS; if (acl) { - umode_t mode = inode->i_mode; - rc = posix_acl_equiv_mode(acl, &mode); - if (rc < 0) + umode_t mode; + + rc = posix_acl_update_mode(inode, &mode, &acl); + if (rc) return rc; if (inode->i_mode != mode) { struct iattr attr; @@ -249,8 +250,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) if (rc < 0) return rc; } - if (rc == 0) - acl = 
NULL; } break; case ACL_TYPE_DEFAULT: diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 0c8ca830b113e62246d3f2629846958bdee32775..9fad9f4fe8830f02dcb9ebe02d8052ab307f97e0 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -84,13 +84,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type, case ACL_TYPE_ACCESS: ea_name = POSIX_ACL_XATTR_ACCESS; if (acl) { - rc = posix_acl_equiv_mode(acl, &inode->i_mode); - if (rc < 0) + rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (rc) return rc; inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); - if (rc == 0) - acl = NULL; } break; case ACL_TYPE_DEFAULT: diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 7247252ee9b1beffa3c624cb54db676a707fffe2..6e9a912d394c93c203d5ae19488ccbe598988cfe 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -833,21 +833,35 @@ repeat: mutex_lock(&kernfs_mutex); list_for_each_entry(info, &kernfs_root(kn)->supers, node) { + struct kernfs_node *parent; struct inode *inode; - struct dentry *dentry; + /* + * We want fsnotify_modify() on @kn but as the + * modifications aren't originating from userland don't + * have the matching @file available. Look up the inodes + * and generate the events manually. 
+ */ inode = ilookup(info->sb, kn->ino); if (!inode) continue; - dentry = d_find_any_alias(inode); - if (dentry) { - fsnotify_parent(NULL, dentry, FS_MODIFY); - fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, - NULL, 0); - dput(dentry); + parent = kernfs_get_parent(kn); + if (parent) { + struct inode *p_inode; + + p_inode = ilookup(info->sb, parent->ino); + if (p_inode) { + fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD, + inode, FSNOTIFY_EVENT_INODE, kn->name, 0); + iput(p_inode); + } + + kernfs_put(parent); } + fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, + kn->name, 0); iput(inode); } diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index a7f2e6e3330525c1069c524f8b56960e780f7f4b..48efe62e1302cc106cd661d8b62d199f347807c5 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, } ret = -EPROTONOSUPPORT; - if (minorversion == 0) + if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) ret = nfs4_callback_up_net(serv, net); else if (xprt->ops->bc_up) ret = xprt->ops->bc_up(serv, net); @@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, err_socks: svc_rpcb_cleanup(serv, net); err_bind: + nn->cb_users[minorversion]--; dprintk("NFS: Couldn't create callback socket: err = %d; " "net = %p\n", ret, net); return ret; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 646cdac73488e96041f2bcad33b4220b096da684..e2e857affbf2a6666be5e0818bb8b52f87e621c4 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -912,7 +912,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r if (hdr_arg.minorversion == 0) { cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident); if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) - return rpc_drop_reply; + goto out_invalidcred; } cps.minorversion = hdr_arg.minorversion; @@ -940,6 +940,10 @@ static __be32 
nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r nfs_put_client(cps.clp); dprintk("%s: done, status = %u\n", __func__, ntohl(status)); return rpc_success; + +out_invalidcred: + pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n"); + return rpc_autherr_badcred; } /* diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 5166adcfc0fb2379ed75c37b0aa09c8da7550607..7af5eeabc80e1cabb6d695a72cfb8076d82cb879 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -41,6 +41,17 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); } +static bool +nfs4_is_valid_delegation(const struct nfs_delegation *delegation, + fmode_t flags) +{ + if (delegation != NULL && (delegation->type & flags) == flags && + !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) && + !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) + return true; + return false; +} + static int nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) { @@ -50,8 +61,7 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) flags &= FMODE_READ|FMODE_WRITE; rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); - if (delegation != NULL && (delegation->type & flags) == flags && - !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { + if (nfs4_is_valid_delegation(delegation, flags)) { if (mark) nfs_mark_delegation_referenced(delegation); ret = 1; @@ -892,7 +902,7 @@ bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, flags &= FMODE_READ|FMODE_WRITE; rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); - ret = (delegation != NULL && (delegation->type & flags) == flags); + ret = nfs4_is_valid_delegation(delegation, flags); if (ret) { nfs4_stateid_copy(dst, &delegation->stateid); nfs_mark_delegation_referenced(delegation); diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 
02ec07973bc43003bb76b5aaeb8a950094c9ac60..fd8da630fd22edc442e6ad8c0974726759065bb7 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -374,8 +374,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task, return -EAGAIN; } - if (data->verf.committed == NFS_UNSTABLE) - pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb); + pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb); return 0; } diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 2a2e2d8ddee52e0e0473864e4e05cbd5215a37b5..54313322ee5bba934f2f1cb76cd9280c8e22ce8b 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1414,8 +1414,7 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, return -EAGAIN; } - if (data->verf.committed == NFS_UNSTABLE - && ff_layout_need_layoutcommit(data->lseg)) + if (ff_layout_need_layoutcommit(data->lseg)) pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb); return 0; diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 6b1ce9825430c7c9659b49ac9bff545b620bd7bb..7f1a0fb8c49351685d18aef2e22f5d630c892f35 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -269,6 +269,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server, task = rpc_run_task(&task_setup); if (IS_ERR(task)) return PTR_ERR(task); + rpc_put_task(task); return 0; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fc215ab4dcd51a6d879da5dbcabf3710b4dd281e..3c69299c01abe567eb2c1947ab6818c80897fa5c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7424,12 +7424,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp, status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); trace_nfs4_create_session(clp, status); + switch (status) { + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_DELAY: + case -ETIMEDOUT: + case -EACCES: + case -EAGAIN: + goto out; + }; + + clp->cl_seqid++; if (!status) { /* Verify 
the session's negotiated channel_attrs values */ status = nfs4_verify_channel_attrs(&args, &res); /* Increment the clientid slot sequence id */ - if (clp->cl_seqid == res.seqid) - clp->cl_seqid++; if (status) goto out; nfs4_update_session(session, &res); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index d854693a15b0e2443779986552d29d9db3f6cdc2..82dc3035ea45f088ec726eda6b18d3131957c0d0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1493,6 +1493,9 @@ restart: __func__, status); case -ENOENT: case -ENOMEM: + case -EACCES: + case -EROFS: + case -EIO: case -ESTALE: /* Open state on this file cannot be recovered */ nfs4_state_mark_recovery_failed(state, status); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index bec0384499f76dd1acc8fb13bbf34c2ee17e6a9b..5cd3568eea063a42812c03324acddfb5833975fd 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -365,6 +365,9 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, static bool pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo) { + /* Serialise LAYOUTGET/LAYOUTRETURN */ + if (atomic_read(&lo->plh_outstanding) != 0) + return false; if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) return false; lo->plh_return_iomode = 0; @@ -1530,6 +1533,7 @@ pnfs_update_layout(struct inode *ino, goto out; lookup_again: + nfs4_client_recover_expired_lease(clp); first = false; spin_lock(&ino->i_lock); lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f7ea624780a7cdc18d725e171dd3a1b47b371129..55638110cb0605f05a3fe034a8cc73918b9abec2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1200,27 +1200,6 @@ free_ol_stateid_reaplist(struct list_head *reaplist) } } -static void release_lockowner(struct nfs4_lockowner *lo) -{ - struct nfs4_client *clp = lo->lo_owner.so_client; - struct nfs4_ol_stateid *stp; - struct list_head reaplist; - - INIT_LIST_HEAD(&reaplist); - - spin_lock(&clp->cl_lock); - unhash_lockowner_locked(lo); - while 
(!list_empty(&lo->lo_owner.so_stateids)) { - stp = list_first_entry(&lo->lo_owner.so_stateids, - struct nfs4_ol_stateid, st_perstateowner); - WARN_ON(!unhash_lock_stateid(stp)); - put_ol_stateid_locked(stp, &reaplist); - } - spin_unlock(&clp->cl_lock); - free_ol_stateid_reaplist(&reaplist); - nfs4_put_stateowner(&lo->lo_owner); -} - static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, struct list_head *reaplist) { @@ -5952,6 +5931,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct nfs4_client *clp; + LIST_HEAD (reaplist); dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", clid->cl_boot, clid->cl_id); @@ -5982,9 +5962,23 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, nfs4_get_stateowner(sop); break; } + if (!lo) { + spin_unlock(&clp->cl_lock); + return status; + } + + unhash_lockowner_locked(lo); + while (!list_empty(&lo->lo_owner.so_stateids)) { + stp = list_first_entry(&lo->lo_owner.so_stateids, + struct nfs4_ol_stateid, + st_perstateowner); + WARN_ON(!unhash_lock_stateid(stp)); + put_ol_stateid_locked(stp, &reaplist); + } spin_unlock(&clp->cl_lock); - if (lo) - release_lockowner(lo); + free_ol_stateid_reaplist(&reaplist); + nfs4_put_stateowner(&lo->lo_owner); + return status; } diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index d2f97ecca6a5dfe6091d56da09871449574524bf..e0e5f7c3c99fe076d11dc5d907b543d5e5376671 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group, pr_debug("%s: group=%p event=%p\n", __func__, group, event); - wait_event(group->fanotify_data.access_waitq, event->response || - atomic_read(&group->fanotify_data.bypass_perm)); - - if (!event->response) { /* bypass_perm set */ - /* - * Event was canceled because group is being destroyed. 
Remove - * it from group's event list because we are responsible for - * freeing the permission event. - */ - fsnotify_remove_event(group, &event->fae.fse); - return 0; - } + wait_event(group->fanotify_data.access_waitq, event->response); /* userspace responded, convert to something usable */ switch (event->response) { diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 8e8e6bcd1d43d266346bac16dbb12ff8c893bae2..a64313868d3a15cefca72b5e228e798d98a43a1e 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file) #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS struct fanotify_perm_event_info *event, *next; + struct fsnotify_event *fsn_event; /* - * There may be still new events arriving in the notification queue - * but since userspace cannot use fanotify fd anymore, no event can - * enter or leave access_list by now. + * Stop new events from arriving in the notification queue. since + * userspace cannot use fanotify fd anymore, no event can enter or + * leave access_list by now either. */ - spin_lock(&group->fanotify_data.access_lock); - - atomic_inc(&group->fanotify_data.bypass_perm); + fsnotify_group_stop_queueing(group); + /* + * Process all permission events on access_list and notification queue + * and simulate reply from userspace. + */ + spin_lock(&group->fanotify_data.access_lock); list_for_each_entry_safe(event, next, &group->fanotify_data.access_list, fae.fse.list) { pr_debug("%s: found group=%p event=%p\n", __func__, group, @@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file) spin_unlock(&group->fanotify_data.access_lock); /* - * Since bypass_perm is set, newly queued events will not wait for - * access response. Wake up the already sleeping ones now. 
- * synchronize_srcu() in fsnotify_destroy_group() will wait for all - * processes sleeping in fanotify_handle_event() waiting for access - * response and thus also for all permission events to be freed. + * Destroy all non-permission events. For permission events just + * dequeue them and set the response. They will be freed once the + * response is consumed and fanotify_get_response() returns. */ + mutex_lock(&group->notification_mutex); + while (!fsnotify_notify_queue_is_empty(group)) { + fsn_event = fsnotify_remove_first_event(group); + if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) + fsnotify_destroy_event(group, fsn_event); + else + FANOTIFY_PE(fsn_event)->response = FAN_ALLOW; + } + mutex_unlock(&group->notification_mutex); + + /* Response for all permission events it set, wakeup waiters */ wake_up(&group->fanotify_data.access_waitq); #endif @@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) spin_lock_init(&group->fanotify_data.access_lock); init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); - atomic_set(&group->fanotify_data.bypass_perm, 0); #endif switch (flags & FAN_ALL_CLASS_BITS) { case FAN_CLASS_NOTIF: diff --git a/fs/notify/group.c b/fs/notify/group.c index d16b62cb28544a147183c3780e3a61ffa246c8bb..18eb30c6bd8fcc6dddc130941fa43182295e6907 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -39,6 +39,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group) kfree(group); } +/* + * Stop queueing new events for this group. Once this function returns + * fsnotify_add_event() will not add any new events to the group's queue. + */ +void fsnotify_group_stop_queueing(struct fsnotify_group *group) +{ + mutex_lock(&group->notification_mutex); + group->shutdown = true; + mutex_unlock(&group->notification_mutex); +} + /* * Trying to get rid of a group. Remove all marks, flush all events and release * the group reference. 
@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group) */ void fsnotify_destroy_group(struct fsnotify_group *group) { + /* + * Stop queueing new events. The code below is careful enough to not + * require this but fanotify needs to stop queuing events even before + * fsnotify_destroy_group() is called and this makes the other callers + * of fsnotify_destroy_group() to see the same behavior. + */ + fsnotify_group_stop_queueing(group); + /* clear all inode marks for this group */ fsnotify_clear_marks_by_group(group); diff --git a/fs/notify/notification.c b/fs/notify/notification.c index a95d8e037aebe24ba36421861d3abaad6f7dfc89..e455e83ceeebc9ea5cb0b3166e10bd505cec43f1 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group, * Add an event to the group notification queue. The group can later pull this * event off the queue to deal with. The function returns 0 if the event was * added to the queue, 1 if the event was merged with some other queued event, - * 2 if the queue of events has overflown. + * 2 if the event was not queued - either the queue of events has overflown + * or the group is shutting down. */ int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, @@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group, mutex_lock(&group->notification_mutex); + if (group->shutdown) { + mutex_unlock(&group->notification_mutex); + return 2; + } + if (group->q_len >= group->max_events) { ret = 2; /* Queue overflow event only if it isn't already queued */ @@ -125,21 +131,6 @@ queue: return ret; } -/* - * Remove @event from group's notification queue. It is the responsibility of - * the caller to destroy the event. 
- */ -void fsnotify_remove_event(struct fsnotify_group *group, - struct fsnotify_event *event) -{ - mutex_lock(&group->notification_mutex); - if (!list_empty(&event->list)) { - list_del_init(&event->list); - group->q_len--; - } - mutex_unlock(&group->notification_mutex); -} - /* * Remove and return the first event from the notification list. It is the * responsibility of the caller to destroy the obtained event diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 2162434728c022ab4651904b778c21d958ca802d..164307b994052cb658b08cb8c28da524dedfe644 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -241,13 +241,11 @@ int ocfs2_set_acl(handle_t *handle, case ACL_TYPE_ACCESS: name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - umode_t mode = inode->i_mode; - ret = posix_acl_equiv_mode(acl, &mode); - if (ret < 0) - return ret; + umode_t mode; - if (ret == 0) - acl = NULL; + ret = posix_acl_update_mode(inode, &mode, &acl); + if (ret) + return ret; ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index f90931335c6b28af89cc0537b8d3fec48a089300..2e11658676eb2145fbd7de33e25872c0778532cb 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; - u8 old_owner = res->owner; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); @@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; - lock->convert_pending = 0; /* if it failed, move it back to granted queue. 
* if master returns DLM_NORMAL and then down before sending ast, * it may have already been moved to granted queue, reset to @@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); - } else if ((res->state & DLM_LOCK_RES_RECOVERING) || - (old_owner != res->owner)) { - mlog(0, "res %.*s is in recovering or has been recovered.\n", - res->lockname.len, res->lockname.name); + } else if (!lock->convert_pending) { + mlog(0, "%s: res %.*s, owner died and lock has been moved back " + "to granted list, retry convert.\n", + dlm->name, res->lockname.len, res->lockname.name); status = DLM_RECOVERING; } + + lock->convert_pending = 0; bail: spin_unlock(&res->spinlock); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 77d30cbd944d7c7d834456469677032fdb3d55a1..56dd3957cc91c058e41dea9b9936f4dcf871d623 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, u64 start, u64 len) { int ret = 0; - u64 tmpend, end = start + len; + u64 tmpend = 0; + u64 end = start + len; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); unsigned int csize = osb->s_clustersize; handle_t *handle; @@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, } /* - * We want to get the byte offset of the end of the 1st cluster. + * If start is on a cluster boundary and end is somewhere in another + * cluster, we have not COWed the cluster starting at start, unless + * end is also within the same cluster. So, in this case, we skip this + * first call to ocfs2_zero_range_for_truncate() truncate and move on + * to the next one. */ - tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1)); - if (tmpend > end) - tmpend = end; + if ((start & (csize - 1)) != 0) { + /* + * We want to get the byte offset of the end of the 1st + * cluster. 
+ */ + tmpend = (u64)osb->s_clustersize + + (start & ~(osb->s_clustersize - 1)); + if (tmpend > end) + tmpend = end; - trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start, - (unsigned long long)tmpend); + trace_ocfs2_zero_partial_clusters_range1( + (unsigned long long)start, + (unsigned long long)tmpend); - ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); - if (ret) - mlog_errno(ret); + ret = ocfs2_zero_range_for_truncate(inode, handle, start, + tmpend); + if (ret) + mlog_errno(ret); + } if (tmpend < end) { /* diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 9e52609cd683d9ca5706aff277a158b7b8e03d18..63a0d0ba36de11ef0a606c2d098663b750b2e282 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -25,6 +25,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) ssize_t list_size, size, value_size = 0; char *buf, *name, *value = NULL; int uninitialized_var(error); + size_t slen; if (!old->d_inode->i_op->getxattr || !new->d_inode->i_op->getxattr) @@ -47,7 +48,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) goto out; } - for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { + for (name = buf; list_size; name += slen) { + slen = strnlen(name, list_size) + 1; + + /* underlying fs providing us with an broken xattr list? 
*/ + if (WARN_ON(slen > list_size)) { + error = -EIO; + break; + } + list_size -= slen; + if (ovl_is_private_xattr(name)) continue; retry: @@ -129,6 +139,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) len -= bytes; } + if (!error) + error = vfs_fsync(new_file, 0); fput(new_file); out_fput: fput(old_file); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index ba5ef733951fdc42ed55d7847d749da0b33e9de9..327177df03a5ca00b1f3839034622eca3eb1aacf 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "overlayfs.h" void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) @@ -35,8 +36,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) { struct dentry *temp; char name[20]; + static atomic_t temp_id = ATOMIC_INIT(0); - snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry); + /* counter is allowed to wrap, since temp dentries are ephemeral */ + snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id)); temp = lookup_one_len(name, workdir, strlen(name)); if (!IS_ERR(temp) && temp->d_inode) { diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 34bd1bd354e63c3812c1ecbbf84abdc49a06e63b..a60d3cc5b55d9190539d07e6869206c986d8a6de 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -592,6 +592,37 @@ no_mem: } EXPORT_SYMBOL_GPL(posix_acl_create); +/** + * posix_acl_update_mode - update mode in set_acl + * + * Update the file mode when setting an ACL: compute the new file permission + * bits based on the ACL. In addition, if the ACL is equivalent to the new + * file mode, set *acl to NULL to indicate that no ACL should be set. + * + * As with chmod, clear the setgit bit if the caller is not in the owning group + * or capable of CAP_FSETID (see inode_change_ok). + * + * Called from set_acl inode operations. 
+ */ +int posix_acl_update_mode(struct inode *inode, umode_t *mode_p, + struct posix_acl **acl) +{ + umode_t mode = inode->i_mode; + int error; + + error = posix_acl_equiv_mode(*acl, &mode); + if (error < 0) + return error; + if (error == 0) + *acl = NULL; + if (!in_group_p(inode->i_gid) && + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) + mode &= ~S_ISGID; + *mode_p = mode; + return 0; +} +EXPORT_SYMBOL(posix_acl_update_mode); + /* * Fix up the uids and gids in posix acl extended attributes in place. */ diff --git a/fs/proc/base.c b/fs/proc/base.c index 67c6dd28d59ecaf039d3255250c3ecb4c91015d6..1f5f654983ec1511bc8efd6d282ecbef700f4cd3 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1748,18 +1748,13 @@ static const struct file_operations proc_pid_set_comm_operations = { static int proc_exe_link(struct dentry *dentry, struct path *exe_path) { struct task_struct *task; - struct mm_struct *mm; struct file *exe_file; task = get_proc_task(d_inode(dentry)); if (!task) return -ENOENT; - mm = get_task_mm(task); + exe_file = get_task_exe_file(task); put_task_struct(task); - if (!mm) - return -ENOENT; - exe_file = get_mm_exe_file(mm); - mmput(mm); if (exe_file) { *exe_path = exe_file->f_path; path_get(&exe_file->f_path); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 92e6726f6e3732573bd9a64f3b313cc3508ce519..21f198aa0961967f724d8601efb139fe79f6c34b 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) static ssize_t read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) { + char *buf = file->private_data; ssize_t acc = 0; size_t size, tsz; size_t elf_buflen; @@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) if (clear_user(buffer, tsz)) return -EFAULT; } else if (is_vmalloc_or_module_addr((void *)start)) { - char * elf_buf; - - elf_buf = kzalloc(tsz, GFP_KERNEL); - if (!elf_buf) - return -ENOMEM; - 
vread(elf_buf, (char *)start, tsz); + vread(buf, (char *)start, tsz); /* we have to zero-fill user buffer even if no read */ - if (copy_to_user(buffer, elf_buf, tsz)) { - kfree(elf_buf); + if (copy_to_user(buffer, buf, tsz)) return -EFAULT; - } - kfree(elf_buf); } else { if (kern_addr_valid(start)) { unsigned long n; - n = copy_to_user(buffer, (char *)start, tsz); + /* + * Using bounce buffer to bypass the + * hardened user copy kernel text checks. + */ + memcpy(buf, (char *) start, tsz); + n = copy_to_user(buffer, buf, tsz); /* * We cannot distinguish between fault on source * and fault on destination. When this happens @@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp) { if (!capable(CAP_SYS_RAWIO)) return -EPERM; + + filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!filp->private_data) + return -ENOMEM; + if (kcore_need_update) kcore_update_ram(); if (i_size_read(inode) != proc_root_kcore->size) { @@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp) return 0; } +static int release_kcore(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} static const struct file_operations proc_kcore_operations = { .read = read_kcore, .open = open_kcore, + .release = release_kcore, .llseek = default_llseek, }; diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index d8c439d813ce1bd0a415fd09a8407c6c6192d49b..ac6c78fe19cfa85ed526eb23506444d3a519c29a 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -178,7 +178,6 @@ static loff_t pstore_file_llseek(struct file *file, loff_t off, int whence) } static const struct file_operations pstore_file_operations = { - .owner = THIS_MODULE, .open = pstore_file_open, .read = pstore_file_read, .llseek = pstore_file_llseek, diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 5b10c2b4146c4ec29137a769de12070bb5714c1a..8d1e5e2db6a1a772daeb6d923a25a189c5c3b6a5 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -396,13 +396,14 @@ 
static void ramoops_free_przs(struct ramoops_context *cxt) { int i; - cxt->max_dump_cnt = 0; if (!cxt->przs) return; - for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++) + for (i = 0; i < cxt->max_dump_cnt; i++) persistent_ram_free(cxt->przs[i]); + kfree(cxt->przs); + cxt->max_dump_cnt = 0; } static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, @@ -427,7 +428,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, GFP_KERNEL); if (!cxt->przs) { dev_err(dev, "failed to initialize a prz array for dumps\n"); - goto fail_prz; + goto fail_mem; } for (i = 0; i < cxt->max_dump_cnt; i++) { @@ -438,6 +439,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, err = PTR_ERR(cxt->przs[i]); dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", cxt->record_size, (unsigned long long)*paddr, err); + + while (i > 0) { + i--; + persistent_ram_free(cxt->przs[i]); + } goto fail_prz; } *paddr += cxt->record_size; @@ -445,7 +451,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, return 0; fail_prz: - ramoops_free_przs(cxt); + kfree(cxt->przs); +fail_mem: + cxt->max_dump_cnt = 0; return err; } @@ -707,7 +715,6 @@ static int ramoops_remove(struct platform_device *pdev) struct ramoops_context *cxt = &oops_cxt; pstore_unregister(&cxt->pstore); - cxt->max_dump_cnt = 0; kfree(cxt->pstore.buf); cxt->pstore.bufsize = 0; diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index aa9afe573155d2947051f6dec48c9452a37c30f7..3975deec02f8ca9202c42662cba69f9783605158 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -48,43 +48,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz) return atomic_read(&prz->buffer->start); } -/* increase and wrap the start pointer, returning the old value */ -static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a) -{ - int old; - int new; - - do { - old = atomic_read(&prz->buffer->start); - 
new = old + a; - while (unlikely(new >= prz->buffer_size)) - new -= prz->buffer_size; - } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old); - - return old; -} - -/* increase the size counter until it hits the max size */ -static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a) -{ - size_t old; - size_t new; - - if (atomic_read(&prz->buffer->size) == prz->buffer_size) - return; - - do { - old = atomic_read(&prz->buffer->size); - new = old + a; - if (new > prz->buffer_size) - new = prz->buffer_size; - } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); -} - static DEFINE_RAW_SPINLOCK(buffer_lock); /* increase and wrap the start pointer, returning the old value */ -static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) +static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) { int old; int new; @@ -104,7 +71,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) } /* increase the size counter until it hits the max size */ -static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) +static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) { size_t old; size_t new; @@ -125,9 +92,6 @@ exit: raw_spin_unlock_irqrestore(&buffer_lock, flags); } -static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic; -static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic; - static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, uint8_t *data, size_t len, uint8_t *ecc) { @@ -300,7 +264,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz, const void *s, unsigned int start, unsigned int count) { struct persistent_ram_buffer *buffer = prz->buffer; - memcpy(buffer->data + start, s, count); + memcpy_toio(buffer->data + start, s, count); persistent_ram_update_ecc(prz, start, count); } @@ -333,8 +297,8 @@ void 
persistent_ram_save_old(struct persistent_ram_zone *prz) } prz->old_log_size = size; - memcpy(prz->old_log, &buffer->data[start], size - start); - memcpy(prz->old_log + size - start, &buffer->data[0], start); + memcpy_fromio(prz->old_log, &buffer->data[start], size - start); + memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start); } int notrace persistent_ram_write(struct persistent_ram_zone *prz, @@ -469,9 +433,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, return NULL; } - buffer_start_add = buffer_start_add_locked; - buffer_size_add = buffer_size_add_locked; - if (memtype) va = ioremap(start, size); else diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 96a1bcf33db4435098baa153f807a71274543aaf..8f5ccdf81c252f9d9eddc24ed301f9bacac9901b 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -260,10 +260,10 @@ const struct file_operations reiserfs_file_operations = { const struct inode_operations reiserfs_file_inode_operations = { .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c index b751eea32e20733f496a16a52c37cd0f8c2d06a4..5db6f45b3fed64cf58034dea3efc3a09a0f4c497 100644 --- a/fs/reiserfs/ibalance.c +++ b/fs/reiserfs/ibalance.c @@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb, insert_ptr); } - memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); insert_ptr[0] = new_insert_ptr; + if (new_insert_ptr) + memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); return order; } diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 47f96988fdd478dbc6ce325232d38754258864cc..3ebc70167e416f649b4e9ff5110fb24c857719fd 100644 --- 
a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -1649,10 +1649,10 @@ const struct inode_operations reiserfs_dir_inode_operations = { .mknod = reiserfs_mknod, .rename = reiserfs_rename, .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, @@ -1667,10 +1667,10 @@ const struct inode_operations reiserfs_symlink_inode_operations = { .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, }; @@ -1679,10 +1679,10 @@ const struct inode_operations reiserfs_symlink_inode_operations = { */ const struct inode_operations reiserfs_special_inode_operations = { .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 4a62fe8cc3bff619516fbe15d62a8cfaa1aaa581..f9f3be50081a3c719a589f92479d274be82e9772 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -190,7 +190,15 @@ static int remove_save_link_only(struct super_block *s, static int reiserfs_quota_on_mount(struct super_block *, int); #endif -/* look for uncompleted unlinks and truncates and complete them */ +/* + * Look for uncompleted unlinks and 
truncates and complete them + * + * Called with superblock write locked. If quotas are enabled, we have to + * release/retake lest we call dquot_quota_on_mount(), proceed to + * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per + * cpu worklets to complete flush_async_commits() that in turn wait for the + * superblock write lock. + */ static int finish_unfinished(struct super_block *s) { INITIALIZE_PATH(path); @@ -237,7 +245,9 @@ static int finish_unfinished(struct super_block *s) quota_enabled[i] = 0; continue; } + reiserfs_write_unlock(s); ret = reiserfs_quota_on_mount(s, i); + reiserfs_write_lock(s); if (ret < 0) reiserfs_warning(s, "reiserfs-2500", "cannot turn on journaled " diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 66b26fdfff8d6de51082b48b0f9a61fba9567466..a8dbc93e45eb3757cbd3bc7d886802af37d41c22 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -763,60 +763,6 @@ find_xattr_handler_prefix(const struct xattr_handler **handlers, return xah; } - -/* - * Inode operation getxattr() - */ -ssize_t -reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer, - size_t size) -{ - const struct xattr_handler *handler; - - handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - - if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) - return -EOPNOTSUPP; - - return handler->get(handler, dentry, name, buffer, size); -} - -/* - * Inode operation setxattr() - * - * d_inode(dentry)->i_mutex down - */ -int -reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) -{ - const struct xattr_handler *handler; - - handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - - if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) - return -EOPNOTSUPP; - - return handler->set(handler, dentry, name, value, size, flags); -} - -/* - * Inode operation removexattr() - * - * d_inode(dentry)->i_mutex down - */ -int 
reiserfs_removexattr(struct dentry *dentry, const char *name) -{ - const struct xattr_handler *handler; - - handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - - if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) - return -EOPNOTSUPP; - - return handler->set(handler, dentry, name, NULL, 0, XATTR_REPLACE); -} - struct listxattr_buf { struct dir_context ctx; size_t size; diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index 15dde6262c00e3d93b765f79198d93bfe79c3646..613ff5aef94ea014caa87d52ae8c03d9c88e5bdd 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -2,6 +2,7 @@ #include #include #include +#include struct inode; struct dentry; @@ -18,12 +19,7 @@ int reiserfs_permission(struct inode *inode, int mask); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) -ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, - void *buffer, size_t size); -int reiserfs_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); -int reiserfs_removexattr(struct dentry *dentry, const char *name); int reiserfs_xattr_get(struct inode *, const char *, void *, size_t); int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); @@ -92,10 +88,7 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode) #else -#define reiserfs_getxattr NULL -#define reiserfs_setxattr NULL #define reiserfs_listxattr NULL -#define reiserfs_removexattr NULL static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 4b34b9dc03dda9fffd8da5d3ab7221bd9ab139b7..9b1824f355016a9ab070d716ff40d75f55417837 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -246,13 +246,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, case 
ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - if (error == 0) - acl = NULL; - } } break; case ACL_TYPE_DEFAULT: diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index ac659af431aec83d5ba7b85e7b69c1ac49e7b4c0..60de069225ba33a087f02de406eda2725a159e8e 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -12,26 +12,24 @@ static int security_get(const struct xattr_handler *handler, struct dentry *dentry, const char *name, void *buffer, size_t size) { - if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX)) - return -EINVAL; - if (IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); + return reiserfs_xattr_get(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size); } static int security_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags) { - if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX)) - return -EINVAL; - if (IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size, flags); } static size_t security_list(const struct xattr_handler *handler, diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c index a338adf1b8b4816c19ca84557a0142179f93b9a7..ebba1ebf28addb36a1ee3c3d30feab5c7199cc27 100644 --- a/fs/reiserfs/xattr_trusted.c +++ b/fs/reiserfs/xattr_trusted.c @@ -11,26 +11,24 @@ static int trusted_get(const struct xattr_handler *handler, struct dentry *dentry, const char *name, void *buffer, size_t size) { - if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX)) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) 
return -EPERM; - return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); + return reiserfs_xattr_get(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size); } static int trusted_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags) { - if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX)) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size, flags); } static size_t trusted_list(const struct xattr_handler *handler, diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c index 39c9667191c5db5b3c930e538d81799b96bea6fb..6ac8a8c8bd9cd1052055c1269006c4a62ccbc9cc 100644 --- a/fs/reiserfs/xattr_user.c +++ b/fs/reiserfs/xattr_user.c @@ -10,24 +10,22 @@ static int user_get(const struct xattr_handler *handler, struct dentry *dentry, const char *name, void *buffer, size_t size) { - - if (strlen(name) < sizeof(XATTR_USER_PREFIX)) - return -EINVAL; if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; - return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); + return reiserfs_xattr_get(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size); } static int user_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags) { - if (strlen(name) < sizeof(XATTR_USER_PREFIX)) - return -EINVAL; - if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; - return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size, flags); } static size_t user_list(const struct xattr_handler *handler, diff --git a/fs/super.c b/fs/super.c index 
8d99a7b948ff087380ca2b2d5988885c22360553..b938b14f6041b97a8f8b2681016dcb08a8438ce6 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1326,8 +1326,8 @@ int freeze_super(struct super_block *sb) } } /* - * This is just for debugging purposes so that fs can warn if it - * sees write activity when frozen is set to SB_FREEZE_COMPLETE. + * For debugging purposes so that fs can warn if it sees write activity + * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super(). */ sb->s_writers.frozen = SB_FREEZE_COMPLETE; up_write(&sb->s_umount); @@ -1346,7 +1346,7 @@ int thaw_super(struct super_block *sb) int error; down_write(&sb->s_umount); - if (sb->s_writers.frozen == SB_UNFROZEN) { + if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) { up_write(&sb->s_umount); return -EINVAL; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index e49bd2808bf3e397b9b499fe4d6036ad7600a71b..f5d5ee43ae6eb775a59f0d2deee5ba907531f45a 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type) */ static int ubifs_readdir(struct file *file, struct dir_context *ctx) { - int err; + int err = 0; struct qstr nm; union ubifs_key key; struct ubifs_dent_node *dent; @@ -452,14 +452,20 @@ out: kfree(file->private_data); file->private_data = NULL; - if (err != -ENOENT) { + if (err != -ENOENT) ubifs_err(c, "cannot find next direntry, error %d", err); - return err; - } + else + /* + * -ENOENT is a non-fatal error in this context, the TNC uses + * it to indicate that the cursor moved past the current directory + * and readdir() has to stop. 
+ */ + err = 0; + /* 2 is a special value indicating that there are no more direntries */ ctx->pos = 2; - return 0; + return err; } /* Free saved readdir() state when the directory is closed */ diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index e53292d0c21bcd9d57c3f219d3f7cc31d5eb836e..de6f82d4eda28a82e08b227fcd84facf13664c1e 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -173,6 +173,7 @@ out_cancel: host_ui->xattr_cnt -= 1; host_ui->xattr_size -= CALC_DENT_SIZE(nm->len); host_ui->xattr_size -= CALC_XATTR_BYTES(size); + host_ui->xattr_names -= nm->len; mutex_unlock(&host_ui->ui_mutex); out_free: make_bad_inode(inode); @@ -533,6 +534,7 @@ out_cancel: host_ui->xattr_cnt += 1; host_ui->xattr_size += CALC_DENT_SIZE(nm->len); host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); + host_ui->xattr_names += nm->len; mutex_unlock(&host_ui->ui_mutex); ubifs_release_budget(c, &req); make_bad_inode(inode); diff --git a/fs/utimes.c b/fs/utimes.c index aa138d64560a6a3c2133bc70d57e367cc8c1476d..cb771c30d10231186d405b63155d02abadb0ce7e 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -87,20 +87,7 @@ static int utimes_common(struct path *path, struct timespec *times) */ newattrs.ia_valid |= ATTR_TIMES_SET; } else { - /* - * If times is NULL (or both times are UTIME_NOW), - * then we need to check permissions, because - * inode_change_ok() won't do it. 
- */ - error = -EACCES; - if (IS_IMMUTABLE(inode)) - goto mnt_drop_write_and_out; - - if (!inode_owner_or_capable(inode)) { - error = inode_permission(inode, MAY_WRITE); - if (error) - goto mnt_drop_write_and_out; - } + newattrs.ia_valid |= ATTR_TOUCH; } retry_deleg: mutex_lock(&inode->i_mutex); @@ -112,7 +99,6 @@ retry_deleg: goto retry_deleg; } -mnt_drop_write_and_out: mnt_drop_write(path->mnt); out: return error; diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c index 3cc3cf7674746f279fcb0acf4eae27d49d089814..ac9a003dd29acf80d7c766788cb40d1f98d9132c 100644 --- a/fs/xfs/libxfs/xfs_dquot_buf.c +++ b/fs/xfs/libxfs/xfs_dquot_buf.c @@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc( if (mp->m_quotainfo) ndquots = mp->m_quotainfo->qi_dqperchunk; else - ndquots = xfs_calc_dquots_per_chunk( - XFS_BB_TO_FSB(mp, bp->b_length)); + ndquots = xfs_calc_dquots_per_chunk(bp->b_length); for (i = 0; i < ndquots; i++, d++) { if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk), diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 6bb470fbb8e8ff3567abfb23b774607c7eab2dda..c5101a3295d83253599861370736535b44310963 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -288,16 +288,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) return error; if (type == ACL_TYPE_ACCESS) { - umode_t mode = inode->i_mode; - error = posix_acl_equiv_mode(acl, &mode); - - if (error <= 0) { - acl = NULL; - - if (error < 0) - return error; - } + umode_t mode; + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + return error; error = xfs_set_mode(inode, mode); if (error) return error; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 39090fc56f0943dddffc4faa12c810ceb7d25c58..eb1b8c8acfcbe25c8f11ee7b36f2d0ed5f9f0558 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1535,7 +1535,7 @@ xfs_wait_buftarg( * ensure here that all reference counts have been dropped before we * start walking the LRU list. 
*/ - drain_workqueue(btp->bt_mount->m_buf_workqueue); + flush_workqueue(btp->bt_mount->m_buf_workqueue); /* loop until there is nothing left on the lru list. */ while (list_lru_count(&btp->bt_lru)) { diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 1bfa602958f2a2f7beb16fab8f98263d198c652b..32901d11f8c46fd3746e80a1c485d3c131e90fac 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -230,14 +230,18 @@ extern int __put_user_bad(void) __attribute__((noreturn)); might_fault(); \ access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ __get_user((x), (__typeof__(*(ptr)) *)__p) : \ - -EFAULT; \ + ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ }) #ifndef __get_user_fn static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) { - size = __copy_from_user(x, ptr, size); - return size ? -EFAULT : size; + size_t n = __copy_from_user(x, ptr, size); + if (unlikely(n)) { + memset(x + (size - n), 0, n); + return -EFAULT; + } + return 0; } #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) @@ -257,11 +261,13 @@ extern int __get_user_bad(void) __attribute__((noreturn)); static inline long copy_from_user(void *to, const void __user * from, unsigned long n) { + unsigned long res = n; might_fault(); - if (access_ok(VERIFY_READ, from, n)) - return __copy_from_user(to, from, n); - else - return n; + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } static inline long copy_to_user(void __user *to, diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f608dd5e23744a5054c57fd84a7135458c7eca62..36c7bacaf7f1101788d5aa40e546fb79682f2d31 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -247,6 +247,14 @@ . 
= ALIGN(align); \ *(.data..init_task) +/* + * Allow architectures to handle ro_after_init data on their + * own by defining an empty RO_AFTER_INIT_DATA. + */ +#ifndef RO_AFTER_INIT_DATA +#define RO_AFTER_INIT_DATA *(.data..ro_after_init) +#endif + /* * Read only Data */ @@ -255,7 +263,7 @@ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_rodata) = .; \ *(.rodata) *(.rodata.*) \ - *(.data..ro_after_init) /* Read only after init */ \ + RO_AFTER_INIT_DATA /* Read only after init */ \ *(__vermagic) /* Kernel version magic */ \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 9916d0e4eff505f18cf5e30ad52dbe2e37b3fa3b..25d0914481a26d04a001a91216df6fd68c52e87c 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -23,6 +23,12 @@ #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) +#define CNTHCTL_EL1PCTEN (1 << 0) +#define CNTHCTL_EL1PCEN (1 << 1) +#define CNTHCTL_EVNTEN (1 << 2) +#define CNTHCTL_EVNTDIR (1 << 3) +#define CNTHCTL_EVNTI (0xF << 4) + enum arch_timer_reg { ARCH_TIMER_REG_CTRL, ARCH_TIMER_REG_TVAL, diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 0000000000000000000000000000000000000000..2a61c9bbab8faf411e0696765e0511c6500821a8 --- /dev/null +++ b/include/crypto/ghash.h @@ -0,0 +1,23 @@ +/* + * Common values for GHASH algorithms + */ + +#ifndef __CRYPTO_GHASH_H__ +#define __CRYPTO_GHASH_H__ + +#include +#include + +#define GHASH_BLOCK_SIZE 16 +#define GHASH_DIGEST_SIZE 16 + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[GHASH_BLOCK_SIZE]; + u32 bytes; +}; + +#endif diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0a271ca1f7c7ec12199b4c9781bb5aa5ded75dae..a31976c860f62ebf14b700663091e1b5a7b476a4 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1029,7 
+1029,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files, #endif extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags); + struct drm_gem_object *obj, + int flags); extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d2f41477f8ae77600a8683890b3615766b9a3701..13a3d537811b9f7d12b3fa892b8312330f89d70f 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -279,6 +279,12 @@ struct vgic_v2_cpu_if { u32 vgic_lr[VGIC_V2_MAX_LRS]; }; +/* + * LRs are stored in reverse order in memory. make sure we index them + * correctly. + */ +#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) + struct vgic_v3_cpu_if { #ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr; diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 0000000000000000000000000000000000000000..0989e6b5c64c1641e0f0eb932dedf85163c964c4 --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include +#include + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make SMC calls following SMC Calling Convention. 
+ * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. + */ +asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +/** + * arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. + */ +asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + + +static inline unsigned long __invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static inline unsigned long __invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 5261751f6bd4dec116516691cbc40a6763a6d12b..5f5270941ba02ae78a81129dfff17d647ef3099c 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -32,6 +32,7 @@ enum can_mode { * CAN common private data */ struct can_priv { + struct net_device *dev; struct can_device_stats can_stats; struct 
can_bittiming bittiming, data_bittiming; @@ -47,7 +48,7 @@ struct can_priv { u32 ctrlmode_static; /* static enabled options for driver/hardware */ int restart_ms; - struct timer_list restart_timer; + struct delayed_work restart_work; int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 788c7c49a673cab6beab1f19fac0cc60fed795de..4cd5c95d1ca0ccbf59312d4ef3d92dae752b1c38 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -66,7 +66,6 @@ enum { /* cgroup_root->flags */ enum { - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ }; @@ -431,7 +430,6 @@ struct cgroup_subsys { void (*css_reset)(struct cgroup_subsys_state *css); void (*css_e_css_changed)(struct cgroup_subsys_state *css); - int (*allow_attach)(struct cgroup_taskset *tset); int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 70358b9f5a7ab809be0dd1e6d5310c7588166ba7..cb91b44f5f7877d5899403b26ca1d9231c1ac9b6 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -528,16 +528,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } -/* - * Default Android check for whether the current process is allowed to move a - * task across cgroups, either because CAP_SYS_NICE is set or because the uid - * of the calling process is the same as the moved task or because we are - * running as root. - * Returns 0 if this is allowed, or -EACCES otherwise. 
- */ -int subsys_cgroup_allow_attach(struct cgroup_taskset *tset); - - #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -562,10 +552,6 @@ static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } -static inline int subsys_cgroup_allow_attach(void *tset) -{ - return -EINVAL; -} #endif /* !CONFIG_CGROUPS */ #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index eeae401a2412e5e8d32381805be1c13798b7a8fb..287e698c28deec27e8b9245de540b1b69914b9b7 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -251,7 +251,9 @@ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ -#if GCC_VERSION >= 50000 +#if GCC_VERSION >= 70000 +#define KASAN_ABI_VERSION 5 +#elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h index 0a83a1e648b088c1acc8a7304eb6be71be146026..4db00b02ca3f1021ab64466c1000cf187a3de5f9 100644 --- a/include/linux/devfreq-event.h +++ b/include/linux/devfreq-event.h @@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) return -EINVAL; } -static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) -{ - return ERR_PTR(-EINVAL); -} - static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( struct device *dev, int index) { diff --git a/include/linux/fs.h b/include/linux/fs.h index db16b3fd48c0996703da2431f044497bdc86f5dd..e5761fb6341db2a2da3fda2200ec5ff18a1edcf9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -233,6 +233,7 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); #define ATTR_KILL_PRIV (1 << 14) #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ #define ATTR_TIMES_SET (1 << 16) +#define ATTR_TOUCH (1 << 17) /* * Whiteout is 
represented by a char device. The following constants define the diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 533c4408529a19819570c5dc40d917aa10ac2d26..850d8822e8ff8be3fb53b57801c87c90f73b4589 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -148,6 +148,7 @@ struct fsnotify_group { #define FS_PRIO_1 1 /* fanotify content based access control */ #define FS_PRIO_2 2 /* fanotify pre-content access */ unsigned int priority; + bool shutdown; /* group is being shut down, don't queue more events */ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ @@ -179,7 +180,6 @@ struct fsnotify_group { spinlock_t access_lock; struct list_head access_list; wait_queue_head_t access_waitq; - atomic_t bypass_perm; #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ int f_flags; unsigned int max_marks; @@ -308,6 +308,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); +/* group destruction begins, stop queuing new events */ +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); /* fasync handler function */ @@ -320,8 +322,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); -/* Remove passed event from groups notification queue */ -extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event); /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the 
notification queue */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 7c27fa1030e873d1841f3e291e9fc806992967e3..795852dc343407520b0a273ac7cde8891d5b310b 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct sk_buff *skb, const struct inet_diag_req_v2 *req, struct user_namespace *user_ns, u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh); + const struct nlmsghdr *unlh, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2d9b650047a5b96582f66dff2b2581cce97c2b65..d49e26c6cdc7b5e48e41591f7c73e74d200441a8 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -429,6 +429,7 @@ struct intel_iommu { struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ struct idr pasid_idr; + u32 pasid_max; #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h index ab5a1dc6753e281c71274cac7ca0a094bb440fe8..029e11f9919bbbbd0958a353b16258e796516830 100644 --- a/include/linux/io-pgtable-fast.h +++ b/include/linux/io-pgtable-fast.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,7 +17,7 @@ typedef u64 av8l_fast_iopte; -#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12)) +#define iopte_pmd_offset(pmds, base, iova) (pmds + ((iova - base) >> 12)) int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size, int prot); @@ -36,7 +36,8 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size); */ #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa -void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync); +void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, u64 base, + u64 end, bool skip_sync); void av8l_register_notify(struct notifier_block *nb); #else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */ @@ -44,6 +45,8 @@ void av8l_register_notify(struct notifier_block *nb); #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0 static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, + u64 base, + u64 end, bool skip_sync) { } diff --git a/include/linux/irq.h b/include/linux/irq.h index b0bcc1561d3d7b58c5843f8be32476c5b71a29a0..f653225a896d613ed772ec307d8e4ea15e250f1f 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -925,6 +925,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } #endif +/* + * The irqsave variants are for usage in non interrupt code. Do not use + * them in irq_chip callbacks. Use irq_gc_lock() instead. 
+ */ +#define irq_gc_lock_irqsave(gc, flags) \ + raw_spin_lock_irqsave(&(gc)->lock, flags) + +#define irq_gc_unlock_irqrestore(gc, flags) \ + raw_spin_unlock_irqrestore(&(gc)->lock, flags) + static inline void irq_reg_writel(struct irq_chip_generic *gc, u32 val, int reg_offset) { diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index e98425058f20dc6483a04ca7327409c6743d5d23..54048f336a1fa06c838c9493408efec6e35c4136 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -218,7 +218,7 @@ #define GITS_BASER_TYPE_SHIFT (56) #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) #define GITS_BASER_ENTRY_SIZE_SHIFT (48) -#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) #define GITS_BASER_NonShareable (0UL << 10) #define GITS_BASER_InnerShareable (1UL << 10) #define GITS_BASER_OuterShareable (2UL << 10) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 924853d33a13e0a312f70ac939bfe20a9814c993..50220cab738c1d816f7f4117c2ae19baeb2561da 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -202,26 +202,26 @@ extern int _cond_resched(void); /** * abs - return absolute value of an argument - * @x: the value. If it is unsigned type, it is converted to signed type first - * (s64, long or int depending on its size). + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. * - * Return: an absolute value of x. If x is 64-bit, macro's return type is s64, - * otherwise it is signed long. + * Return: an absolute value of x. */ -#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ - s64 __x = (x); \ - (__x < 0) ? 
-__x : __x; \ - }), ({ \ - long ret; \ - if (sizeof(x) == sizeof(long)) { \ - long __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } else { \ - int __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } \ - ret; \ - })) +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) /** * reciprocal_scale - "scale" a value into range [0, ep_ro) @@ -356,6 +356,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); +int __must_check kstrtobool(const char *s, bool *res); int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); @@ -367,6 +368,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); static inline int __must_check kstrtou64_from_user(const char 
__user *s, size_t count, unsigned int base, u64 *res) { diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index f09648d14694334b21371120e0f70f9daef50a73..782d4e814e2132e4f4673255b22bec8fdcc27bba 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -1,6 +1,8 @@ #ifndef NVM_H #define NVM_H +#include + enum { NVM_IO_OK = 0, NVM_IO_REQUEUE = 1, @@ -11,10 +13,71 @@ enum { NVM_IOTYPE_GC = 1, }; +#define NVM_BLK_BITS (16) +#define NVM_PG_BITS (16) +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (8) +#define NVM_LUN_BITS (8) +#define NVM_CH_BITS (8) + +struct ppa_addr { + /* Generic structure for all addresses */ + union { + struct { + u64 blk : NVM_BLK_BITS; + u64 pg : NVM_PG_BITS; + u64 sec : NVM_SEC_BITS; + u64 pl : NVM_PL_BITS; + u64 lun : NVM_LUN_BITS; + u64 ch : NVM_CH_BITS; + } g; + + u64 ppa; + }; +}; + +struct nvm_rq; +struct nvm_id; +struct nvm_dev; + +typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); +typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); +typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); +typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, + nvm_l2p_update_fn *, void *); +typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, + nvm_bb_update_fn *, void *); +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); +typedef void (nvm_destroy_dma_pool_fn)(void *); +typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, + dma_addr_t *); +typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_get_l2p_tbl_fn *get_l2p_tbl; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb_tbl; + + nvm_submit_io_fn *submit_io; + nvm_erase_blk_fn *erase_block; + + 
nvm_create_dma_pool_fn *create_dma_pool; + nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; + + unsigned int max_phys_sect; +}; + + + #ifdef CONFIG_NVM #include -#include #include #include @@ -126,29 +189,6 @@ struct nvm_tgt_instance { #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 -#define NVM_BLK_BITS (16) -#define NVM_PG_BITS (16) -#define NVM_SEC_BITS (8) -#define NVM_PL_BITS (8) -#define NVM_LUN_BITS (8) -#define NVM_CH_BITS (8) - -struct ppa_addr { - /* Generic structure for all addresses */ - union { - struct { - u64 blk : NVM_BLK_BITS; - u64 pg : NVM_PG_BITS; - u64 sec : NVM_SEC_BITS; - u64 pl : NVM_PL_BITS; - u64 lun : NVM_LUN_BITS; - u64 ch : NVM_CH_BITS; - } g; - - u64 ppa; - }; -}; - struct nvm_rq { struct nvm_tgt_instance *ins; struct nvm_dev *dev; @@ -182,39 +222,6 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) struct nvm_block; -typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); -typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); -typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); -typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, - nvm_l2p_update_fn *, void *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, - nvm_bb_update_fn *, void *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); -typedef void (nvm_destroy_dma_pool_fn)(void *); -typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, - dma_addr_t *); -typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); - -struct nvm_dev_ops { - nvm_id_fn *identity; - nvm_get_l2p_tbl_fn *get_l2p_tbl; - nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb_tbl; - - nvm_submit_io_fn *submit_io; - 
nvm_erase_blk_fn *erase_block; - - nvm_create_dma_pool_fn *create_dma_pool; - nvm_destroy_dma_pool_fn *destroy_dma_pool; - nvm_dev_dma_alloc_fn *dev_dma_alloc; - nvm_dev_dma_free_fn *dev_dma_free; - - unsigned int max_phys_sect; -}; - struct nvm_lun { int id; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 51ee5ddfa4fbfdb89eba18875c07e05b72d906e8..ab93174fa639d463407044a554a6819aab1fdb63 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -25,6 +25,7 @@ enum { MEMBLOCK_NONE = 0x0, /* No special request */ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ + MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ }; struct memblock_region { @@ -82,6 +83,7 @@ bool memblock_overlaps_region(struct memblock_type *type, int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); +int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); ulong choose_memblock_flags(void); unsigned long memblock_region_resize_late_begin(void); void memblock_region_resize_late_end(unsigned long); @@ -186,6 +188,11 @@ static inline bool memblock_is_mirror(struct memblock_region *m) return m->flags & MEMBLOCK_MIRROR; } +static inline bool memblock_is_nomap(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_NOMAP; +} + #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); @@ -321,6 +328,7 @@ phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); int memblock_is_memory(phys_addr_t addr); +int memblock_is_map_memory(phys_addr_t addr); int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size); int 
memblock_is_reserved(phys_addr_t addr); diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index d409ceb2231ec1908842416a9de1ddb62bef2709..c118a7ec94d6f15b25f558dbd171336da400a9f6 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -350,7 +350,7 @@ static inline int pm80x_dev_suspend(struct device *dev) int irq = platform_get_irq(pdev, 0); if (device_may_wakeup(dev)) - set_bit((1 << irq), &chip->wu_flag); + set_bit(irq, &chip->wu_flag); return 0; } @@ -362,7 +362,7 @@ static inline int pm80x_dev_resume(struct device *dev) int irq = platform_get_irq(pdev, 0); if (device_may_wakeup(dev)) - clear_bit((1 << irq), &chip->wu_flag); + clear_bit(irq, &chip->wu_flag); return 0; } diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 1fd50dcfe47c629b75e9d8cfe855cad3aba8a1fa..175c82699e9d68a84e034295c6ef14aaf6e6e309 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -138,16 +138,16 @@ /* * time in us for processing a single channel, calculated as follows: * - * num cycles = open delay + (sample delay + conv time) * averaging + * max num cycles = open delay + (sample delay + conv time) * averaging * - * num cycles: 152 + (1 + 13) * 16 = 376 + * max num cycles: 262143 + (255 + 13) * 16 = 266431 * * clock frequency: 26MHz / 8 = 3.25MHz * clock period: 1 / 3.25MHz = 308ns * - * processing time: 376 * 308ns = 116us + * max processing time: 266431 * 308ns = 83ms(approx) */ -#define IDLE_TIMEOUT 116 /* microsec */ +#define IDLE_TIMEOUT 83 /* milliseconds */ #define TSCADC_CELLS 2 diff --git a/include/linux/mm.h b/include/linux/mm.h index 87a4bb8ed512ec8151dc9c650138f1e9a1d6969f..186d2ab9cd139f84a9f4aad0dc20d84ac41d3a13 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1912,6 +1912,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm); extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file 
*get_mm_exe_file(struct mm_struct *mm); +extern struct file *get_task_exe_file(struct task_struct *task); extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index a5a3bb28636126f8fe369b338e111536d54d782f..55a30d6852265cb015c07cd9a1990c0a2f1480c7 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -111,7 +111,9 @@ struct mmc_request { struct mmc_cmdq_req *cmdq_req; struct request *req; ktime_t io_start; - int lat_hist_enabled; +#ifdef CONFIG_BLOCK + int lat_hist_enabled; +#endif }; struct mmc_bus_ops { diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index f67b2ec18e6d87c8df27b314f7320e49163850f5..7776afb0ffa5833bc784d32317cece83f1ea2cbd 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -172,7 +172,7 @@ struct dw_mci { /* For edmac */ struct dw_mci_dma_slave *dms; /* Registers's physical base address */ - void *phy_regs; + resource_size_t phy_regs; u32 cmd_status; u32 data_status; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 45037a3a8e8e01a0cb5df7bb203b15670031cc39..ad7f915ddf76278ab7bfce0e45285397406eb1a5 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -376,10 +376,10 @@ struct zone { struct per_cpu_pageset __percpu *pageset; /* - * This is a per-zone reserve of pages that should not be - * considered dirtyable memory. + * This is a per-zone reserve of pages that are not available + * to userspace allocations. 
*/ - unsigned long dirty_balance_reserve; + unsigned long totalreserve_pages; #ifdef CONFIG_CMA bool cma_alloc; #endif diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 79aaa9fc1a15d6ed7caac2b147ae030b18da060a..d5277fc3ce2eea2470891f360031f694d92b3996 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -103,5 +103,5 @@ struct mfc_cache { struct rtmsg; extern int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 66982e7640514389098c140a39665f95261071d8..f831155dc7d1201f5b2ef899a51a1e3a45f768e8 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -115,7 +115,7 @@ struct mfc6_cache { struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #ifdef CONFIG_IPV6_MROUTE extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index acf302ef2f019aaf2302023b66a5c8ef94bd9123..c909ba0ba99781becc26f5f73af7b5f9e14cd58d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -511,7 +511,6 @@ static inline void napi_enable(struct napi_struct *n) clear_bit(NAPI_STATE_NPSVC, &n->state); } -#ifdef CONFIG_SMP /** * napi_synchronize - wait until NAPI is not running * @n: napi context @@ -522,12 +521,12 @@ static inline void napi_enable(struct napi_struct *n) */ static inline void napi_synchronize(const struct napi_struct *n) { - while (test_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); + if (IS_ENABLED(CONFIG_SMP)) + while (test_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + else + barrier(); } -#else -# define napi_synchronize(n) barrier() -#endif enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF, @@ -1987,8 +1986,8 @@ struct napi_gro_cb { /* This 
is non-zero if the packet may be of the same flow. */ u8 same_flow:1; - /* Used in udp_gro_receive */ - u8 udp_mark:1; + /* Used in tunnel GRO receive */ + u8 encap_mark:1; /* GRO checksum is valid */ u8 csum_valid:1; @@ -2004,7 +2003,10 @@ struct napi_gro_cb { /* Used in foo-over-udp, set in udp[46]_gro_receive */ u8 is_ipv6:1; - /* 7 bit hole */ + /* Number of gro_receive callbacks this packet already went through */ + u8 recursion_counter:4; + + /* 3 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2015,6 +2017,25 @@ struct napi_gro_cb { #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) +#define GRO_RECURSION_LIMIT 15 +static inline int gro_recursion_inc_test(struct sk_buff *skb) +{ + return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; +} + +typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); +static inline struct sk_buff **call_gro_receive(gro_receive_t cb, + struct sk_buff **head, + struct sk_buff *skb) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(head, skb); +} + struct packet_type { __be16 type; /* This is really htons(ether_type). 
*/ struct net_device *dev; /* NULL is wildcarded here */ @@ -2060,6 +2081,22 @@ struct udp_offload { struct udp_offload_callbacks callbacks; }; +typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **, + struct sk_buff *, + struct udp_offload *); +static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb, + struct sk_buff **head, + struct sk_buff *skb, + struct udp_offload *uoff) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(head, skb, uoff); +} + /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_sw_netstats { u64 rx_packets; @@ -3041,6 +3078,7 @@ static inline void napi_free_frags(struct napi_struct *napi) napi->skb = NULL; } +bool netdev_is_rx_handler_busy(struct net_device *dev); int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data); diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 9bb77d3ed6e0c5a35f8c77c0fb20821a237984ea..c2256d74654333b9103fab444c4e318e2c7d422a 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell) { } -static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 26eabf5ec718a457eb9cef5635f5c475cca0c48d..fbfadba81c5ac949628799c81c50f300ab907fa9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -601,56 +601,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) */ static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { - int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; + if (unlikely(uaddr > end)) + return -EFAULT; /* * 
Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ - while (uaddr <= end) { - ret = __put_user(0, uaddr); - if (ret != 0) - return ret; + do { + if (unlikely(__put_user(0, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); + return __put_user(0, end); - return ret; + return 0; } static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { volatile char c; - int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; - while (uaddr <= end) { - ret = __get_user(c, uaddr); - if (ret != 0) - return ret; + if (unlikely(uaddr > end)) + return -EFAULT; + + do { + if (unlikely(__get_user(c, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. 
*/ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) { - ret = __get_user(c, end); - (void)c; + return __get_user(c, end); } - return ret; + return 0; } int add_to_page_cache_locked(struct page *page, struct address_space *mapping, diff --git a/include/linux/pci.h b/include/linux/pci.h index e89c7ee7e8036b55abd63a687fbd91e858006554..5f37614f2451faa1fdcfdf91031963746cea2c83 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1802,6 +1802,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } +static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) +{ + while (1) { + if (!pci_is_pcie(dev)) + break; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return dev; + if (!dev->bus->self) + break; + dev = dev->bus->self; + } + return NULL; +} + void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 3e96a6a7610338ff8ddd825a21ddc9bc5fe52e8b..d1a8ad7e5ae450b38c8d83ed2e74327424cfa79f 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -95,6 +95,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *); extern int posix_acl_chmod(struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); +extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); extern int simple_set_acl(struct inode *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); diff --git a/include/linux/psci.h b/include/linux/psci.h index 12c4865457adc3d0412c573b710feec7d3d6fd81..393efe2edf9afb9d8a38d1a16201e78c64881026 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu); bool psci_power_state_loses_context(u32 state); bool 
psci_power_state_is_valid(u32 state); +int psci_cpu_init_idle(unsigned int cpu); +int psci_cpu_suspend_enter(unsigned long index); + struct psci_operations { int (*cpu_suspend)(u32 state, unsigned long entry_point); int (*cpu_off)(u32 state); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index cfc3ed46cad20a26ffa131b51e25927ac6045f4c..aa8736d5b2f38ba1319a9f030da47ef18f537761 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -331,6 +331,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) #ifdef CONFIG_PWM_SYSFS void pwmchip_sysfs_export(struct pwm_chip *chip); void pwmchip_sysfs_unexport(struct pwm_chip *chip); +void pwmchip_sysfs_unexport_children(struct pwm_chip *chip); #else static inline void pwmchip_sysfs_export(struct pwm_chip *chip) { @@ -339,6 +340,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip) static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) { } + +static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) +{ +} #endif /* CONFIG_PWM_SYSFS */ #endif /* __LINUX_PWM_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 708c4284b8d9a93ffb82b2d89b40be95b17f1ef4..3a7521064fe3954ef47ebb3d8b607d5441bf73ea 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1436,6 +1436,8 @@ struct sched_rt_entity { unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED diff --git a/include/linux/sem.h b/include/linux/sem.h index 976ce3a19f1b23646c4494029929e538f5e0204b..d0efd6e6c20a6a6a39273639dbd33f3d77c2e156 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -21,6 +21,7 @@ struct sem_array { struct list_head list_id; /* undo requests on this array */ int sem_nsems; /* no. 
of semaphores in array */ int complex_count; /* pending complex operations */ + bool complex_mode; /* no parallel simple ops */ }; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 76199b75d5845d21cbcfb67786dfec757e30a446..e302c447e057a98b0ba47b5bc73acd816b9dc7f7 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -1,6 +1,16 @@ #ifndef __SMC91X_H__ #define __SMC91X_H__ +/* + * These bits define which access sizes a platform can support, rather + * than the maximal access size. So, if your platform can do 16-bit + * and 32-bit accesses to the SMC91x device, but not 8-bit, set both + * SMC91X_USE_16BIT and SMC91X_USE_32BIT. + * + * The SMC91x driver requires at least one of SMC91X_USE_8BIT or + * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is + * an invalid configuration. + */ #define SMC91X_USE_8BIT (1 << 0) #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) diff --git a/include/linux/string.h b/include/linux/string.h index 9ef7795e65e40c5dfbee53726909fbcb2ce341b0..aa30789b0f65eea407dfc56c68f9817c756f281d 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -127,7 +127,11 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); -extern int strtobool(const char *s, bool *res); +extern int kstrtobool(const char *s, bool *res); +static inline int strtobool(const char *s, bool *res) +{ + return kstrtobool(s, res); +} #ifdef CONFIG_BINARY_PRINTF int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); diff --git a/include/linux/swap.h b/include/linux/swap.h index a93eb9262eb6713a591a7e8281888a6c57a4c8b2..d5ec583b47f5aeed4382d087a188b712417e2e25 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -270,6 +270,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node) static inline void 
workingset_node_pages_dec(struct radix_tree_node *node) { + VM_WARN_ON_ONCE(!workingset_node_pages(node)); node->count--; } @@ -285,13 +286,13 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node) static inline void workingset_node_shadows_dec(struct radix_tree_node *node) { + VM_WARN_ON_ONCE(!workingset_node_shadows(node)); node->count -= 1U << RADIX_TREE_COUNT_SHIFT; } /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; extern unsigned long totalreserve_pages; -extern unsigned long dirty_balance_reserve; extern unsigned long nr_free_buffer_pages(void); extern unsigned long nr_free_pagecache_pages(void); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index fa7bc29925c929a36e0f1e53a3a7efc1a18cc9e6..ef17db6caaed837cc4ae7aed8909de54692dcf7c 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -41,6 +41,8 @@ extern int proc_dostring(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec(struct ctl_table *, int, void __user *, size_t *, loff_t *); +extern int proc_douintvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); extern int proc_dointvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_jiffies(struct ctl_table *, int, diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index ec89d846324cac714807f7c002339636c497e4d8..b7246d2ed7c9772cf4bc8757ae9e64664d47516c 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -233,6 +233,7 @@ static inline u64 ktime_get_raw_ns(void) extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); +extern u64 ktime_get_boot_fast_ns(void); /* * Timespec interfaces utilizing the ktime based ones diff --git a/include/linux/uio.h b/include/linux/uio.h index 8b01e1c3c6146623759f122b72ff6b3b869a0617..e2225109b816a2ed45a0847830b202098bc211ea 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -76,7 +76,7 @@ 
size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes); +#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); @@ -101,12 +101,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); -static inline size_t iov_iter_count(struct iov_iter *i) +static inline size_t iov_iter_count(const struct iov_iter *i) { return i->count; } -static inline bool iter_is_iovec(struct iov_iter *i) +static inline bool iter_is_iovec(const struct iov_iter *i) { return !(i->type & (ITER_BVEC | ITER_KVEC)); } diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h index 003adc38eb143f5cb6091531c325ab98b1a25625..8d13ca3cdd50f73a97716bebd140a81ab1ab8a4f 100644 --- a/include/media/msm_vidc.h +++ b/include/media/msm_vidc.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -110,6 +110,7 @@ int msm_vidc_release_buffers(void *instance, int buffer_type); int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b); int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b); int msm_vidc_streamon(void *instance, enum v4l2_buf_type i); +int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl); int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i); int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd); int msm_vidc_poll(void *instance, struct file *filp, diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 9b4c418bebd84ae0a7debfbcc697ee92b136ec99..fd60eccb59a67969eba53416a376a3c912d62d81 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -52,7 +52,7 @@ struct unix_sock { struct sock sk; struct unix_address *addr; struct path path; - struct mutex readlock; + struct mutex iolock, bindlock; struct sock *peer; struct list_head link; atomic_long_t inflight; diff --git a/include/net/ip.h b/include/net/ip.h index e5a1d35b9905f2d565b8ca1e609bc22c88802ec5..8a8376239eaca2630be376ba17da44b5c6171e5e 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -556,7 +556,7 @@ int ip_options_rcv_srr(struct sk_buff *skb); */ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); -void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset); +void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset); int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, @@ -578,7 +578,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) { - ip_cmsg_recv_offset(msg, skb, 0); + ip_cmsg_recv_offset(msg, skb, 
0, 0); } bool icmp_global_allow(void); diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index ff788b665277d59d8039fd4588f3e0256639833c..9c2c044153f62a4df5e7c31ceb090d7695ecd244 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -86,6 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device_stats *stats = &dev->stats; int pkt_len, err; + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); pkt_len = skb->len - skb_inner_network_offset(skb); err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index af40bc586a1b6ae03ef751ce61276e58934e7406..86a7bdd61d1a90234dc84be0f48cf9fc680a4230 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -283,6 +283,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, int gso_type_mask); +static inline int iptunnel_pull_offloads(struct sk_buff *skb) +{ + if (skb_is_gso(skb)) { + int err; + + err = skb_unclone(skb, GFP_ATOMIC); + if (unlikely(err)) + return err; + skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >> + NETIF_F_GSO_SHIFT); + } + + skb->encapsulation = 0; + return 0; +} + static inline void iptunnel_xmit_stats(int err, struct net_device_stats *err_stats, struct pcpu_sw_netstats __percpu *stats) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 86df0835f6b58517c671e200779d9045200cb5d3..e5bba897d20649223bf393b94a66677177e0f554 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -408,6 +408,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); int skb_do_redirect(struct sk_buff *); +static inline bool skb_at_tc_ingress(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + return G_TC_AT(skb->tc_verd) & AT_INGRESS; +#else + return false; +#endif +} + /* Reset 
all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) { diff --git a/include/net/sock.h b/include/net/sock.h index 0e49452f5c48b6e71114d0d5baf427664e85e4bd..628c2b588468c7ef0f5ca52017feeec888d4d306 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1428,6 +1428,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; + + /* Avoid a possible overflow. + * TCP send queues can make this happen, if sk_mem_reclaim() + * is not called and more than 2 GBytes are released at once. + * + * If we reach 2 MBytes, reclaim 1 MBytes right now, there is + * no need to hold that much forward allocation anyway. + */ + if (unlikely(sk->sk_forward_alloc >= 1 << 21)) + __sk_mem_reclaim(sk, 1 << 20); } static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) diff --git a/include/net/udp.h b/include/net/udp.h index 6d4ed18e14278a6091b7e1c3fa40f1b2d4796d06..e57f50258cdae779b920815216f240fa284be713 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -238,6 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); void udp_err(struct sk_buff *, u32); +int udp_abort(struct sock *sk, int err); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udp_push_pending_frames(struct sock *sk); void udp_flush_pending_frames(struct sock *sk); diff --git a/include/soc/qcom/msm_qmi_interface.h b/include/soc/qcom/msm_qmi_interface.h index 135265b74991eb4b8e720cad0903dfe887432304..5ca808dd1fc2c72a7ae0abe0d8388f1a0ea1bf37 100644 --- a/include/soc/qcom/msm_qmi_interface.h +++ b/include/soc/qcom/msm_qmi_interface.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -92,6 +92,7 @@ enum qmi_result_type_v01 { QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = INT_MIN, QMI_RESULT_SUCCESS_V01 = 0, QMI_RESULT_FAILURE_V01 = 1, + QMI_ERR_DISABLED_V01 = 0x45, QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = INT_MAX, }; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 59081c73b2968a9435b33e45837197e36d95781c..6afc6f388edf9bc1062dae96aea5abb7584c62e8 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -180,6 +180,7 @@ enum tcm_sense_reason_table { TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), + TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), #undef R }; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 68a1f71fde9f7bcc90c21f586b57f1eab61de941..c7f189bd597997214f9c5f027562abac8ab154b1 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -72,6 +72,8 @@ enum { INET_DIAG_BC_AUTO, INET_DIAG_BC_S_COND, INET_DIAG_BC_D_COND, + INET_DIAG_BC_DEV_COND, /* u32 ifindex */ + INET_DIAG_BC_MARK_COND, }; struct inet_diag_hostcond { @@ -81,6 +83,11 @@ struct inet_diag_hostcond { __be32 addr[0]; }; +struct inet_diag_markcond { + __u32 mark; + __u32 mask; +}; + /* Base info structure. It contains socket identity (addrs/ports/cookie) * and, alas, the information shown by netstat. 
*/ struct inet_diag_msg { @@ -113,9 +120,13 @@ enum { INET_DIAG_DCTCPINFO, INET_DIAG_PROTOCOL, /* response attribute only */ INET_DIAG_SKV6ONLY, + INET_DIAG_LOCALS, + INET_DIAG_PEERS, + INET_DIAG_PAD, + INET_DIAG_MARK, }; -#define INET_DIAG_MAX INET_DIAG_SKV6ONLY +#define INET_DIAG_MAX INET_DIAG_MARK /* INET_DIAG_MEM */ diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 0bdfe727d6e06ce6a46c49497c56e63fdde420a8..c6cad0ff1bbf8450bc6bb31fe879c472b032904e 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -56,6 +56,7 @@ #define SMB_SUPER_MAGIC 0x517B #define CGROUP_SUPER_MAGIC 0x27e0eb +#define CGROUP2_SUPER_MAGIC 0x63677270 #define STACK_END_MAGIC 0x57AC6E9D diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 3eb02a1d6d8cca26f00e5d5c6399436100b172b8..a2fad11894ffd16e5640ad3501eed3aca4b60438 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -344,7 +344,7 @@ struct rtnexthop { #define RTNH_F_OFFLOAD 8 /* offloaded route */ #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */ -#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN) +#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD) /* Macros to handle hexthops */ diff --git a/init/Kconfig b/init/Kconfig index ac3f4c210aed03b6f01d0485bdcff49c39c67e70..9461bbc0e5fc90127b8f585d53940a8790f14361 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1334,6 +1334,16 @@ config SCHED_TUNE If unsure, say N. +config DEFAULT_USE_ENERGY_AWARE + bool "Default to enabling the Energy Aware Scheduler feature" + default n + help + This option defaults the ENERGY_AWARE scheduling feature to true, + as without SCHED_DEBUG set this feature can't be enabled or disabled + via sysctl. + + Say N if unsure. 
+ config SYSFS_DEPRECATED bool "Enable deprecated sysfs features to support old userspace tools" depends on SYSFS diff --git a/ipc/sem.c b/ipc/sem.c index 20d07008ad5e0c226ed3f5bc197086af3a30f442..9862c3d1c26d294b1c281c38bec6bc59ec665c5c 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -155,14 +155,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); /* * Locking: + * a) global sem_lock() for read/write * sem_undo.id_next, * sem_array.complex_count, - * sem_array.pending{_alter,_cont}, - * sem_array.sem_undo: global sem_lock() for read/write - * sem_undo.proc_next: only "current" is allowed to read/write that field. + * sem_array.complex_mode + * sem_array.pending{_alter,_const}, + * sem_array.sem_undo * + * b) global or semaphore sem_lock() for read/write: * sem_array.sem_base[i].pending_{const,alter}: - * global or semaphore sem_lock() for read/write + * sem_array.complex_mode (for read) + * + * c) special: + * sem_undo_list.list_proc: + * * undo_list->lock for write + * * rcu for read */ #define sc_semmsl sem_ctls[0] @@ -263,24 +270,25 @@ static void sem_rcu_free(struct rcu_head *head) #define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb() /* - * Wait until all currently ongoing simple ops have completed. + * Enter the mode suitable for non-simple operations: * Caller must own sem_perm.lock. - * New simple ops cannot start, because simple ops first check - * that sem_perm.lock is free. - * that a) sem_perm.lock is free and b) complex_count is 0. */ -static void sem_wait_array(struct sem_array *sma) +static void complexmode_enter(struct sem_array *sma) { int i; struct sem *sem; - if (sma->complex_count) { - /* The thread that increased sma->complex_count waited on - * all sem->lock locks. Thus we don't need to wait again. - */ + if (sma->complex_mode) { + /* We are already in complex_mode. 
Nothing to do */ return; } + /* We need a full barrier after seting complex_mode: + * The write to complex_mode must be visible + * before we read the first sem->lock spinlock state. + */ + smp_store_mb(sma->complex_mode, true); + for (i = 0; i < sma->sem_nsems; i++) { sem = sma->sem_base + i; spin_unlock_wait(&sem->lock); @@ -288,6 +296,28 @@ static void sem_wait_array(struct sem_array *sma) ipc_smp_acquire__after_spin_is_unlocked(); } +/* + * Try to leave the mode that disallows simple operations: + * Caller must own sem_perm.lock. + */ +static void complexmode_tryleave(struct sem_array *sma) +{ + if (sma->complex_count) { + /* Complex ops are sleeping. + * We must stay in complex mode + */ + return; + } + /* + * Immediately after setting complex_mode to false, + * a simple op can start. Thus: all memory writes + * performed by the current operation must be visible + * before we set complex_mode to false. + */ + smp_store_release(&sma->complex_mode, false); +} + +#define SEM_GLOBAL_LOCK (-1) /* * If the request contains only one semaphore operation, and there are * no complex transactions pending, lock only the semaphore involved. @@ -304,56 +334,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, /* Complex operation - acquire a full lock */ ipc_lock_object(&sma->sem_perm); - /* And wait until all simple ops that are processed - * right now have dropped their locks. - */ - sem_wait_array(sma); - return -1; + /* Prevent parallel simple ops */ + complexmode_enter(sma); + return SEM_GLOBAL_LOCK; } /* * Only one semaphore affected - try to optimize locking. - * The rules are: - * - optimized locking is possible if no complex operation - * is either enqueued or processed right now. - * - The test for enqueued complex ops is simple: - * sma->complex_count != 0 - * - Testing for complex ops that are processed right now is - * a bit more difficult. Complex ops acquire the full lock - * and first wait that the running simple ops have completed. 
- * (see above) - * Thus: If we own a simple lock and the global lock is free - * and complex_count is now 0, then it will stay 0 and - * thus just locking sem->lock is sufficient. + * Optimized locking is possible if no complex operation + * is either enqueued or processed right now. + * + * Both facts are tracked by complex_mode. */ sem = sma->sem_base + sops->sem_num; - if (sma->complex_count == 0) { + /* + * Initial check for complex_mode. Just an optimization, + * no locking, no memory barrier. + */ + if (!sma->complex_mode) { /* * It appears that no complex operation is around. * Acquire the per-semaphore lock. */ spin_lock(&sem->lock); - /* Then check that the global lock is free */ - if (!spin_is_locked(&sma->sem_perm.lock)) { - /* - * We need a memory barrier with acquire semantics, - * otherwise we can race with another thread that does: - * complex_count++; - * spin_unlock(sem_perm.lock); - */ - ipc_smp_acquire__after_spin_is_unlocked(); + /* + * See 51d7d5205d33 + * ("powerpc: Add smp_mb() to arch_spin_is_locked()"): + * A full barrier is required: the write of sem->lock + * must be visible before the read is executed + */ + smp_mb(); - /* - * Now repeat the test of complex_count: - * It can't change anymore until we drop sem->lock. - * Thus: if is now 0, then it will stay 0. - */ - if (sma->complex_count == 0) { - /* fast path successful! */ - return sops->sem_num; - } + if (!smp_load_acquire(&sma->complex_mode)) { + /* fast path successful! */ + return sops->sem_num; } spin_unlock(&sem->lock); } @@ -373,15 +389,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, /* Not a false alarm, thus complete the sequence for a * full lock. 
*/ - sem_wait_array(sma); - return -1; + complexmode_enter(sma); + return SEM_GLOBAL_LOCK; } } static inline void sem_unlock(struct sem_array *sma, int locknum) { - if (locknum == -1) { + if (locknum == SEM_GLOBAL_LOCK) { unmerge_queues(sma); + complexmode_tryleave(sma); ipc_unlock_object(&sma->sem_perm); } else { struct sem *sem = sma->sem_base + locknum; @@ -533,6 +550,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) } sma->complex_count = 0; + sma->complex_mode = true; /* dropped by sem_unlock below */ INIT_LIST_HEAD(&sma->pending_alter); INIT_LIST_HEAD(&sma->pending_const); INIT_LIST_HEAD(&sma->list_id); @@ -2186,10 +2204,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) /* * The proc interface isn't aware of sem_lock(), it calls * ipc_lock_object() directly (in sysvipc_find_ipc). - * In order to stay compatible with sem_lock(), we must wait until - * all simple semop() calls have left their critical regions. + * In order to stay compatible with sem_lock(), we must + * enter / leave complex_mode. 
*/ - sem_wait_array(sma); + complexmode_enter(sma); sem_otime = get_semotime(sma); @@ -2206,6 +2224,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) sem_otime, sma->sem_ctime); + complexmode_tryleave(sma); + return 0; } #endif diff --git a/kernel/audit.c b/kernel/audit.c index 5ffcbd354a520b88781ed2d66c7839a7aaa7f86d..34f690b9213abc7f42088329f1005e0be468e111 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -870,6 +870,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } if (s.mask & AUDIT_STATUS_PID) { + /* NOTE: we are using task_tgid_vnr() below because + * the s.pid value is relative to the namespace + * of the caller; at present this doesn't matter + * much since you can really only run auditd + * from the initial pid namespace, but something + * to keep in mind if this changes */ int new_pid = s.pid; if ((!new_pid) && (task_tgid_vnr(current) != audit_pid)) @@ -1896,7 +1902,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) " euid=%u suid=%u fsuid=%u" " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", task_ppid_nr(tsk), - task_pid_nr(tsk), + task_tgid_nr(tsk), from_kuid(&init_user_ns, audit_get_loginuid(tsk)), from_kuid(&init_user_ns, cred->uid), from_kgid(&init_user_ns, cred->gid), diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 656c7e93ac0d30d3e42a8f7e0dfc7dd071360d78..939945a5649c7980f871fac6829ac624683190ac 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -19,6 +19,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include @@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark) unsigned long ino; dev_t dev; - rcu_read_lock(); - exe_file = rcu_dereference(tsk->mm->exe_file); + exe_file = get_task_exe_file(tsk); + if (!exe_file) + return 0; ino = exe_file->f_inode->i_ino; dev = exe_file->f_inode->i_sb->s_dev; - rcu_read_unlock(); + 
fput(exe_file); return audit_mark_compare(mark, ino, dev); } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 48f45987dc6c7782555103e58e42e6f4743799e4..63f0e495f5176655f16bbd998ed70ab600cbc64c 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -458,7 +458,7 @@ static int audit_filter_rules(struct task_struct *tsk, switch (f->type) { case AUDIT_PID: - pid = task_pid_nr(tsk); + pid = task_tgid_nr(tsk); result = audit_comparator(pid, f->op, f->val); break; case AUDIT_PPID: @@ -1987,7 +1987,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; - audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); + audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid); audit_log_task_context(ab); audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", oldloginuid, loginuid, oldsessionid, sessionid, !rc); @@ -2212,7 +2212,7 @@ void __audit_ptrace(struct task_struct *t) { struct audit_context *context = current->audit_context; - context->target_pid = task_pid_nr(t); + context->target_pid = task_tgid_nr(t); context->target_auid = audit_get_loginuid(t); context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); @@ -2237,7 +2237,7 @@ int __audit_signal_info(int sig, struct task_struct *t) if (audit_pid && t->tgid == audit_pid) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { - audit_sig_pid = task_pid_nr(tsk); + audit_sig_pid = task_tgid_nr(tsk); if (uid_valid(tsk->loginuid)) audit_sig_uid = tsk->loginuid; else @@ -2337,7 +2337,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, void __audit_log_capset(const struct cred *new, const struct cred *old) { struct audit_context *context = current->audit_context; - context->capset.pid = task_pid_nr(current); + context->capset.pid = task_tgid_nr(current); context->capset.cap.effective = new->cap_effective; context->capset.cap.inheritable = 
new->cap_effective; context->capset.cap.permitted = new->cap_permitted; @@ -2369,7 +2369,7 @@ static void audit_log_task(struct audit_buffer *ab) from_kgid(&init_user_ns, gid), sessionid); audit_log_task_context(ab); - audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); + audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); audit_log_untrustedstring(ab, get_task_comm(comm, current)); audit_log_d_path_exe(ab, current->mm); } diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e94c3c18933895946e8727c9798b27463d7a57de..b05fc202b5488aab1d2b985c214683afe74ed80d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -211,6 +211,7 @@ static unsigned long have_free_callback __read_mostly; /* Ditto for the can_fork callback. */ static unsigned long have_canfork_callback __read_mostly; +static struct file_system_type cgroup2_fs_type; static struct cftype cgroup_dfl_base_files[]; static struct cftype cgroup_legacy_base_files[]; @@ -236,6 +237,9 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css, */ static bool cgroup_ssid_enabled(int ssid) { + if (CGROUP_SUBSYS_COUNT == 0) + return false; + return static_key_enabled(cgroup_subsys_enabled_key[ssid]); } @@ -1649,10 +1653,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) all_ss = true; continue; } - if (!strcmp(token, "__DEVEL__sane_behavior")) { - opts->flags |= CGRP_ROOT_SANE_BEHAVIOR; - continue; - } if (!strcmp(token, "noprefix")) { opts->flags |= CGRP_ROOT_NOPREFIX; continue; @@ -1719,15 +1719,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) return -ENOENT; } - if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { - pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); - if (nr_opts != 1) { - pr_err("sane_behavior: no other mount options allowed\n"); - return -EINVAL; - } - return 0; - } - /* * If the 'all' option was specified select all the subsystems, * otherwise if 'none', 
'name=' and a subsystem name options were @@ -2010,6 +2001,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { + bool is_v2 = fs_type == &cgroup2_fs_type; struct super_block *pinned_sb = NULL; struct cgroup_subsys *ss; struct cgroup_root *root = NULL; @@ -2026,6 +2018,17 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (!use_task_css_set_links) cgroup_enable_task_cg_lists(); + if (is_v2) { + if (data) { + pr_err("cgroup2: unknown option \"%s\"\n", (char *)data); + return ERR_PTR(-EINVAL); + } + cgrp_dfl_root_visible = true; + root = &cgrp_dfl_root; + cgroup_get(&root->cgrp); + goto out_mount; + } + mutex_lock(&cgroup_mutex); /* First find the desired set of subsystems */ @@ -2033,15 +2036,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto out_unlock; - /* look for a matching existing root */ - if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) { - cgrp_dfl_root_visible = true; - root = &cgrp_dfl_root; - cgroup_get(&root->cgrp); - ret = 0; - goto out_unlock; - } - /* * Destruction of cgroup root is asynchronous, so subsystems may * still be dying after the previous unmount. Let's drain the @@ -2152,9 +2146,10 @@ out_free: if (ret) return ERR_PTR(ret); - +out_mount: dentry = kernfs_mount(fs_type, flags, root->kf_root, - CGROUP_SUPER_MAGIC, &new_sb); + is_v2 ? 
CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC, + &new_sb); if (IS_ERR(dentry) || !new_sb) cgroup_put(&root->cgrp); @@ -2197,6 +2192,12 @@ static struct file_system_type cgroup_fs_type = { .kill_sb = cgroup_kill_sb, }; +static struct file_system_type cgroup2_fs_type = { + .name = "cgroup2", + .mount = cgroup_mount, + .kill_sb = cgroup_kill_sb, +}; + /** * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy * @task: target task @@ -2677,45 +2678,6 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp, return ret; } -int subsys_cgroup_allow_attach(struct cgroup_taskset *tset) -{ - const struct cred *cred = current_cred(), *tcred; - struct task_struct *task; - struct cgroup_subsys_state *css; - - if (capable(CAP_SYS_NICE)) - return 0; - - cgroup_taskset_for_each(task, css, tset) { - tcred = __task_cred(task); - - if (current != task && !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) - return -EACCES; - } - - return 0; -} - -static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) -{ - struct cgroup_subsys_state *css; - int i; - int ret; - - for_each_css(css, i, cgrp) { - if (css->ss->allow_attach) { - ret = css->ss->allow_attach(tset); - if (ret) - return ret; - } else { - return -EACCES; - } - } - - return 0; -} - static int cgroup_procs_write_permission(struct task_struct *task, struct cgroup *dst_cgrp, struct kernfs_open_file *of) @@ -2730,24 +2692,9 @@ static int cgroup_procs_write_permission(struct task_struct *task, */ if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) { - /* - * if the default permission check fails, give each - * cgroup a chance to extend the permission check - */ - struct cgroup_taskset tset = { - .src_csets = LIST_HEAD_INIT(tset.src_csets), - .dst_csets = LIST_HEAD_INIT(tset.dst_csets), - .csets = &tset.src_csets, - }; - struct css_set *cset; - cset = task_css_set(task); - list_add(&cset->mg_node, &tset.src_csets); - 
ret = cgroup_allow_attach(dst_cgrp, &tset); - list_del(&tset.src_csets); - if (ret) - ret = -EACCES; - } + !uid_eq(cred->euid, tcred->suid) && + !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) + ret = -EACCES; if (!ret && cgroup_on_dfl(dst_cgrp)) { struct super_block *sb = of->file->f_path.dentry->d_sb; @@ -5447,6 +5394,7 @@ int __init cgroup_init(void) WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup")); WARN_ON(register_filesystem(&cgroup_fs_type)); + WARN_ON(register_filesystem(&cgroup2_fs_type)); WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations)); return 0; diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config index c2de56ab0fce8b2193a2414afc2d4eb743510c74..7fa0c4ae6394f028fa09694b219314dd3d7d8731 100644 --- a/kernel/configs/tiny.config +++ b/kernel/configs/tiny.config @@ -1,4 +1,12 @@ +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set CONFIG_OPTIMIZE_INLINING=y +# CONFIG_SLAB is not set +# CONFIG_SLUB is not set CONFIG_SLOB=y diff --git a/kernel/cpu.c b/kernel/cpu.c index 25cfcc804077e8bc080a3dd9efa474876a9c6180..2432cc630ffb7aa0f9f2350b7552f1d94f1112d4 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -185,10 +185,17 @@ void cpu_hotplug_disable(void) } EXPORT_SYMBOL_GPL(cpu_hotplug_disable); +static void __cpu_hotplug_enable(void) +{ + if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n")) + return; + cpu_hotplug_disabled--; +} + void cpu_hotplug_enable(void) { cpu_maps_update_begin(); - WARN_ON(--cpu_hotplug_disabled < 0); + __cpu_hotplug_enable(); cpu_maps_update_done(); } EXPORT_SYMBOL_GPL(cpu_hotplug_enable); @@ -616,7 +623,7 @@ void enable_nonboot_cpus(void) /* Allow everyone to use the CPU hotplug again */ cpu_maps_update_begin(); - WARN_ON(--cpu_hotplug_disabled < 0); + __cpu_hotplug_enable(); 
if (cpumask_empty(frozen_cpus)) goto out; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e3c0f38acbe68ee43ad3b1402a6b00558eb96d6f..29c7240172d3eb8efbb7f015599f61f6fdabc0f7 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2095,21 +2095,18 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) mutex_unlock(&cpuset_mutex); } -static int cpuset_allow_attach(struct cgroup_taskset *tset) +/* + * Make sure the new task conform to the current state of its parent, + * which could have been changed by cpuset just after it inherits the + * state from the parent and before it sits on the cgroup's task list. + */ +void cpuset_fork(struct task_struct *task, void *priv) { - const struct cred *cred = current_cred(), *tcred; - struct task_struct *task; - struct cgroup_subsys_state *css; - - cgroup_taskset_for_each(task, css, tset) { - tcred = __task_cred(task); - - if ((current != task) && !capable(CAP_SYS_ADMIN) && - cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val) - return -EACCES; - } + if (task_css_is_root(task, cpuset_cgrp_id)) + return; - return 0; + set_cpus_allowed_ptr(task, ¤t->cpus_allowed); + task->mems_allowed = current->mems_allowed; } struct cgroup_subsys cpuset_cgrp_subsys = { @@ -2118,11 +2115,11 @@ struct cgroup_subsys cpuset_cgrp_subsys = { .css_offline = cpuset_css_offline, .css_free = cpuset_css_free, .can_attach = cpuset_can_attach, - .allow_attach = cpuset_allow_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, .post_attach = cpuset_post_attach, .bind = cpuset_bind, + .fork = cpuset_fork, .legacy_cftypes = files, .early_init = 1, }; diff --git a/kernel/events/core.c b/kernel/events/core.c index 424961e5bd806c0b8a01636a7f3ec29b21c7810a..8d2482d77c042324c17d4620c4397b8cfeb0acbc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1554,12 +1554,33 @@ static int __init perf_workqueue_init(void) core_initcall(perf_workqueue_init); -static inline int pmu_filter_match(struct perf_event 
*event) +static inline int __pmu_filter_match(struct perf_event *event) { struct pmu *pmu = event->pmu; return pmu->filter_match ? pmu->filter_match(event) : 1; } +/* + * Check whether we should attempt to schedule an event group based on + * PMU-specific filtering. An event group can consist of HW and SW events, + * potentially with a SW leader, so we must check all the filters, to + * determine whether a group is schedulable: + */ +static inline int pmu_filter_match(struct perf_event *event) +{ + struct perf_event *child; + + if (!__pmu_filter_match(event)) + return 0; + + list_for_each_entry(child, &event->sibling_list, group_entry) { + if (!__pmu_filter_match(child)) + return 0; + } + + return 1; +} + static inline int event_filter_match(struct perf_event *event) { diff --git a/kernel/fork.c b/kernel/fork.c index a46ce4505066ef3df3a30f2ecdfe85da9d2903c4..7de03658692b1514099dbe8919ecc89f26791348 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -768,6 +768,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm) } EXPORT_SYMBOL(get_mm_exe_file); +/** + * get_task_exe_file - acquire a reference to the task's executable file + * + * Returns %NULL if task's mm (if any) has no associated executable file or + * this is a kernel thread with borrowed mm (see the comment above get_task_mm). + * User must release file via fput(). + */ +struct file *get_task_exe_file(struct task_struct *task) +{ + struct file *exe_file = NULL; + struct mm_struct *mm; + + task_lock(task); + mm = task->mm; + if (mm) { + if (!(task->flags & PF_KTHREAD)) + exe_file = get_mm_exe_file(mm); + } + task_unlock(task); + return exe_file; +} +EXPORT_SYMBOL(get_task_exe_file); + /** * get_task_mm - acquire a reference to the task's mm * @@ -884,14 +907,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm(tsk, mm); /* - * If we're exiting normally, clear a user-space tid field if - * requested. 
We leave this alone when dying by signal, to leave - * the value intact in a core dump, and to save the unnecessary - * trouble, say, a killed vfork parent shouldn't touch this mm. - * Userland only wants this done for a sys_exit. + * Signal userspace if we're not exiting with a core dump + * because we want to leave the value intact for debugging + * purposes. */ if (tsk->clear_child_tid) { - if (!(tsk->flags & PF_SIGNALED) && + if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index abd286afbd2732544e54a4653053a4d9ce785286..a4775f3451b92cade312a0671e8b45dcf3a1aa0f 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c @@ -411,8 +411,29 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, } EXPORT_SYMBOL_GPL(irq_map_generic_chip); +static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq) +{ + struct irq_data *data = irq_domain_get_irq_data(d, virq); + struct irq_domain_chip_generic *dgc = d->gc; + unsigned int hw_irq = data->hwirq; + struct irq_chip_generic *gc; + int irq_idx; + + gc = irq_get_domain_generic_chip(d, hw_irq); + if (!gc) + return; + + irq_idx = hw_irq % dgc->irqs_per_chip; + + clear_bit(irq_idx, &gc->installed); + irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL, + NULL); + +} + struct irq_domain_ops irq_generic_chip_ops = { .map = irq_map_generic_chip, + .unmap = irq_unmap_generic_chip, .xlate = irq_domain_xlate_onetwocell, }; EXPORT_SYMBOL_GPL(irq_generic_chip_ops); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 4b21779d51632e2dd551d5871926d52fdd075adb..cd60090065106d42ed3794908da05deab9b90040 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -298,6 +298,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, ops->msi_finish(&arg, 0); for_each_msi_entry(desc, dev) { + virq = desc->irq; if 
(desc->nvec_used == 1) dev_dbg(dev, "irq %d for MSI\n", virq); else diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index b70ada0028d251d7171f13b0de7e9e47e1e6086c..6030efd4a188ae0b6ae25998a496eb8ea9f5bb39 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -934,7 +934,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min, return 0; out: vfree(pi->sechdrs); + pi->sechdrs = NULL; + vfree(pi->purgatory_buf); + pi->purgatory_buf = NULL; return ret; } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index b7dd5718836e2b76c0c0700251bae66364e72da8..3124cebaec31e182e3705f4dd94fc7a33b654158 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -299,12 +299,12 @@ static int create_image(int platform_mode) save_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true); error = swsusp_arch_suspend(); + /* Restore control flow magically appears here */ + restore_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); if (error) printk(KERN_ERR "PM: Error %d creating hibernation image\n", error); - /* Restore control flow magically appears here */ - restore_processor_state(); if (!in_suspend) events_check_enabled = false; diff --git a/kernel/power/main.c b/kernel/power/main.c index b2dd4d999900a26edd9cd26fb952b84b98ee411f..27946975eff004f210d563e73218f1fe6d0b9a9e 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -280,13 +280,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj, return pm_wakeup_irq ? 
sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA; } -static ssize_t pm_wakeup_irq_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t n) -{ - return -EINVAL; -} -power_attr(pm_wakeup_irq); +power_attr_ro(pm_wakeup_irq); #else /* !CONFIG_PM_SLEEP_DEBUG */ static inline void pm_print_times_init(void) {} @@ -564,14 +558,7 @@ static ssize_t pm_trace_dev_match_show(struct kobject *kobj, return show_trace_dev_match(buf, PAGE_SIZE); } -static ssize_t -pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr, - const char *buf, size_t n) -{ - return -EINVAL; -} - -power_attr(pm_trace_dev_match); +power_attr_ro(pm_trace_dev_match); #endif /* CONFIG_PM_TRACE */ diff --git a/kernel/power/power.h b/kernel/power/power.h index caadb566e82bb51a5348d6ba67a73bda8c99f37d..efe1b3b17c88d0eb793fa84ceec00e5b31604af4 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -77,6 +77,15 @@ static struct kobj_attribute _name##_attr = { \ .store = _name##_store, \ } +#define power_attr_ro(_name) \ +static struct kobj_attribute _name##_attr = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = S_IRUGO, \ + }, \ + .show = _name##_show, \ +} + /* Preferred image size in bytes (default 500 MB) */ extern unsigned long image_size; /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */ diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 3a970604308ff5365f431e8190d1b82f7ac306d1..f155c62f1f2c993952597c4ba0ceb4c27a48c9d1 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -765,9 +765,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) */ static bool rtree_next_node(struct memory_bitmap *bm) { - bm->cur.node = list_entry(bm->cur.node->list.next, - struct rtree_node, list); - if (&bm->cur.node->list != &bm->cur.zone->leaves) { + if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) { + bm->cur.node = list_entry(bm->cur.node->list.next, + struct 
rtree_node, list); bm->cur.node_pfn += BM_BITS_PER_BLOCK; bm->cur.node_bit = 0; touch_softlockup_watchdog(); @@ -775,9 +775,9 @@ static bool rtree_next_node(struct memory_bitmap *bm) } /* No more nodes, goto next zone */ - bm->cur.zone = list_entry(bm->cur.zone->list.next, + if (!list_is_last(&bm->cur.zone->list, &bm->zones)) { + bm->cur.zone = list_entry(bm->cur.zone->list.next, struct mem_zone_bm_rtree, list); - if (&bm->cur.zone->list != &bm->zones) { bm->cur.node = list_entry(bm->cur.zone->leaves.next, struct rtree_node, list); bm->cur.node_pfn = 0; diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index 084452e34a125ff24da375e6dce25c1224b46310..bdff5ed57f10a5ef57a015856830471422f3918a 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c @@ -203,8 +203,10 @@ static int __init test_suspend(void) /* RTCs have initialized by now too ... can we use one? */ dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); - if (dev) + if (dev) { rtc = rtc_class_open(dev_name(dev)); + put_device(dev); + } if (!rtc) { printk(warn_no_rtc); return 0; diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 12cd989dadf639c3276ca228fef1431284c862ec..160e1006640d585f417ae37ecab304e407971e67 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -36,6 +36,14 @@ #define HIBERNATE_SIG "S1SUSPEND" +/* + * When reading an {un,}compressed image, we may restore pages in place, + * in which case some architectures need these pages cleaning before they + * can be executed. We don't know which pages these may be, so clean the lot. + */ +static bool clean_pages_on_read; +static bool clean_pages_on_decompress; + /* * The swap map is a data structure used for keeping track of each page * written to a swap partition. 
It consists of many swap_map_page @@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio) if (bio_data_dir(bio) == WRITE) put_page(page); + else if (clean_pages_on_read) + flush_icache_range((unsigned long)page_address(page), + (unsigned long)page_address(page) + PAGE_SIZE); if (bio->bi_error && !hb->error) hb->error = bio->bi_error; @@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle, hib_init_batch(&hb); + clean_pages_on_read = true; printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", nr_to_read); m = nr_to_read / 10; @@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data) d->unc_len = LZO_UNC_SIZE; d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, d->unc, &d->unc_len); + if (clean_pages_on_decompress) + flush_icache_range((unsigned long)d->unc, + (unsigned long)d->unc + d->unc_len); + atomic_set(&d->stop, 1); wake_up(&d->done); } @@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle, } memset(crc, 0, offsetof(struct crc_data, go)); + clean_pages_on_decompress = true; + /* * Start the decompression threads. 
*/ diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c index 276762f3a46078141117e5810c1afc964e869ec9..d5760c42f042f4accfb65de23784bcaa877d9b00 100644 --- a/kernel/printk/braille.c +++ b/kernel/printk/braille.c @@ -9,10 +9,10 @@ char *_braille_console_setup(char **str, char **brl_options) { - if (!memcmp(*str, "brl,", 4)) { + if (!strncmp(*str, "brl,", 4)) { *brl_options = ""; *str += 4; - } else if (!memcmp(str, "brl=", 4)) { + } else if (!strncmp(*str, "brl=", 4)) { *brl_options = *str + 4; *str = strchr(*brl_options, ','); if (!*str) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 630c19772630cc0c2cac42e2cf751dec002e81e2..32cbe72bf5458b1f3d080541d864846c66eb8931 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2275,6 +2275,7 @@ static int rcu_nocb_kthread(void *arg) cl++; c++; local_bh_enable(); + cond_resched_rcu_qs(); list = next; } trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3fcadbae663d7576f08404fd46a0e7abb9c98370..312ffdad034ab5cc82a147c6235f31a8e86ae112 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2044,6 +2044,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) success = 1; /* we're going to change ->state */ + /* + * Ensure we load p->on_rq _after_ p->state, otherwise it would + * be possible to, falsely, observe p->on_rq == 0 and get stuck + * in smp_cond_load_acquire() below. + * + * sched_ttwu_pending() try_to_wake_up() + * [S] p->on_rq = 1; [L] P->state + * UNLOCK rq->lock -----. + * \ + * +--- RMB + * schedule() / + * LOCK rq->lock -----' + * UNLOCK rq->lock + * + * [task p] + * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq + * + * Pairs with the UNLOCK+LOCK on rq->lock from the + * last wakeup of our task and the schedule that got our task + * current. 
+ */ + smp_rmb(); if (p->on_rq && ttwu_remote(p, wake_flags)) goto stat; @@ -2321,6 +2343,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) __dl_clear_params(p); INIT_LIST_HEAD(&p->rt.run_list); + p->rt.timeout = 0; + p->rt.time_slice = sched_rr_timeslice; + p->rt.on_rq = 0; + p->rt.on_list = 0; #ifdef CONFIG_PREEMPT_NOTIFIERS INIT_HLIST_HEAD(&p->preempt_notifiers); @@ -3735,7 +3761,7 @@ EXPORT_SYMBOL(default_wake_function); */ void rt_mutex_setprio(struct task_struct *p, int prio) { - int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE; + int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE; struct rq *rq; const struct sched_class *prev_class; @@ -3763,11 +3789,15 @@ void rt_mutex_setprio(struct task_struct *p, int prio) trace_sched_pi_setprio(p, prio); oldprio = p->prio; + + if (oldprio == prio) + queue_flag &= ~DEQUEUE_MOVE; + prev_class = p->sched_class; queued = task_on_rq_queued(p); running = task_current(rq, p); if (queued) - dequeue_task(rq, p, DEQUEUE_SAVE); + dequeue_task(rq, p, queue_flag); if (running) put_prev_task(rq, p); @@ -3785,7 +3815,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) if (!dl_prio(p->normal_prio) || (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; - enqueue_flag |= ENQUEUE_REPLENISH; + queue_flag |= ENQUEUE_REPLENISH; } else p->dl.dl_boosted = 0; p->sched_class = &dl_sched_class; @@ -3793,7 +3823,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) if (dl_prio(oldprio)) p->dl.dl_boosted = 0; if (oldprio < prio) - enqueue_flag |= ENQUEUE_HEAD; + queue_flag |= ENQUEUE_HEAD; p->sched_class = &rt_sched_class; } else { if (dl_prio(oldprio)) @@ -3808,7 +3838,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) if (running) p->sched_class->set_curr_task(rq); if (queued) - enqueue_task(rq, p, enqueue_flag); + enqueue_task(rq, p, queue_flag); check_class_changed(rq, p, prev_class, oldprio); out_unlock: @@ -4164,6 +4194,7 @@ static 
int __sched_setscheduler(struct task_struct *p, const struct sched_class *prev_class; struct rq *rq; int reset_on_fork; + int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; /* may grab non-irq protected spin_locks */ BUG_ON(in_interrupt()); @@ -4346,17 +4377,14 @@ change: * itself. */ new_effective_prio = rt_mutex_get_effective_prio(p, newprio); - if (new_effective_prio == oldprio) { - __setscheduler_params(p, attr); - task_rq_unlock(rq, p, &flags); - return 0; - } + if (new_effective_prio == oldprio) + queue_flags &= ~DEQUEUE_MOVE; } queued = task_on_rq_queued(p); running = task_current(rq, p); if (queued) - dequeue_task(rq, p, DEQUEUE_SAVE); + dequeue_task(rq, p, queue_flags); if (running) put_prev_task(rq, p); @@ -4366,15 +4394,14 @@ change: if (running) p->sched_class->set_curr_task(rq); if (queued) { - int enqueue_flags = ENQUEUE_RESTORE; /* * We enqueue to tail when the priority of a task is * increased (user space view). */ - if (oldprio <= p->prio) - enqueue_flags |= ENQUEUE_HEAD; + if (oldprio < p->prio) + queue_flags |= ENQUEUE_HEAD; - enqueue_task(rq, p, enqueue_flags); + enqueue_task(rq, p, queue_flags); } check_class_changed(rq, p, prev_class, oldprio); @@ -8707,7 +8734,7 @@ void sched_move_task(struct task_struct *tsk) queued = task_on_rq_queued(tsk); if (queued) - dequeue_task(rq, tsk, DEQUEUE_SAVE); + dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE); if (unlikely(running)) put_prev_task(rq, tsk); @@ -8731,7 +8758,7 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->set_curr_task(rq); if (queued) - enqueue_task(rq, tsk, ENQUEUE_RESTORE); + enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE); task_rq_unlock(rq, tsk, &flags); } @@ -9523,7 +9550,6 @@ struct cgroup_subsys cpu_cgrp_subsys = { .fork = cpu_cgroup_fork, .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, - .allow_attach = subsys_cgroup_allow_attach, .legacy_cftypes = cpu_files, .early_init = 1, }; diff --git a/kernel/sched/cpufreq_sched.c 
b/kernel/sched/cpufreq_sched.c index f6f9b9b3a4a8367a2753c82b27a373a73b3f3ddd..d751bc2d0d6e50f0549551088913c8f5dcdb77ad 100644 --- a/kernel/sched/cpufreq_sched.c +++ b/kernel/sched/cpufreq_sched.c @@ -289,7 +289,7 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy) pr_debug("%s: throttle threshold = %u [ns]\n", __func__, gd->up_throttle_nsec); - rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr()); + rc = sysfs_create_group(&policy->kobj, get_sysfs_attr()); if (rc) { pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc); goto err; @@ -332,7 +332,7 @@ static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy) put_task_struct(gd->task); } - sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr()); + sysfs_remove_group(&policy->kobj, get_sysfs_attr()); policy->governor_data = NULL; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 00bbd91d67673fb52c8ffaee9d012010b9fc406e..cf55fc2663fb150987571b6fc73ec5a9bdea75d6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6845,17 +6845,19 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre if (new_util < cur_capacity) { if (cpu_rq(i)->nr_running) { - if(prefer_idle) { - // Find a target cpu with lowest - // utilization. + if (prefer_idle) { + /* Find a target cpu with highest + * utilization. + */ if (target_util == 0 || target_util < new_util) { target_cpu = i; target_util = new_util; } } else { - // Find a target cpu with highest - // utilization. + /* Find a target cpu with lowest + * utilization. 
+ */ if (target_util == 0 || target_util > new_util) { target_cpu = i; @@ -8382,7 +8384,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) mcc->cpu = cpu; #ifdef CONFIG_SCHED_DEBUG raw_spin_unlock_irqrestore(&mcc->lock, flags); - pr_info("CPU%d: update max cpu_capacity %lu\n", cpu, capacity); + printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n", + cpu, capacity); goto skip_unlock; #endif } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 7cc74e56fde4067487f19f433f9e018b6fa0be22..c30c48fde7e67582c40e9f56dc0c12a5eaa3d8cb 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -73,4 +73,8 @@ SCHED_FEAT(ATTACH_AGE_LOAD, true) * Energy aware scheduling. Use platform energy model to guide scheduling * decisions optimizing for energy efficiency. */ +#ifdef CONFIG_DEFAULT_USE_ENERGY_AWARE +SCHED_FEAT(ENERGY_AWARE, true) +#else SCHED_FEAT(ENERGY_AWARE, false) +#endif diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index b72352bbd75236a3a7c6eab7d3815eba3ff750c0..07b2c63e4983a96e993826af6512aa93b3e6210f 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -441,7 +441,7 @@ static void dequeue_top_rt_rq(struct rt_rq *rt_rq); static inline int on_rt_rq(struct sched_rt_entity *rt_se) { - return !list_empty(&rt_se->run_list); + return rt_se->on_rq; } #ifdef CONFIG_RT_GROUP_SCHED @@ -487,8 +487,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) return rt_se->my_q; } -static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head); -static void dequeue_rt_entity(struct sched_rt_entity *rt_se); +static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags); +static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags); static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) { @@ -504,7 +504,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) if (!rt_se) enqueue_top_rt_rq(rt_rq); else if (!on_rt_rq(rt_se)) - enqueue_rt_entity(rt_se, 
false); + enqueue_rt_entity(rt_se, 0); if (rt_rq->highest_prio.curr < curr->prio) resched_curr(rq); @@ -521,7 +521,7 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) if (!rt_se) dequeue_top_rt_rq(rt_rq); else if (on_rt_rq(rt_se)) - dequeue_rt_entity(rt_se); + dequeue_rt_entity(rt_se, 0); } static inline int rt_rq_throttled(struct rt_rq *rt_rq) @@ -1257,7 +1257,30 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) dec_rt_group(rt_se, rt_rq); } -static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) +/* + * Change rt_se->run_list location unless SAVE && !MOVE + * + * assumes ENQUEUE/DEQUEUE flags match + */ +static inline bool move_entity(unsigned int flags) +{ + if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) + return false; + + return true; +} + +static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array) +{ + list_del_init(&rt_se->run_list); + + if (list_empty(array->queue + rt_se_prio(rt_se))) + __clear_bit(rt_se_prio(rt_se), array->bitmap); + + rt_se->on_list = 0; +} + +static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); struct rt_prio_array *array = &rt_rq->active; @@ -1270,26 +1293,37 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) * get throttled and the current group doesn't have any other * active members. 
*/ - if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) + if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) { + if (rt_se->on_list) + __delist_rt_entity(rt_se, array); return; + } - if (head) - list_add(&rt_se->run_list, queue); - else - list_add_tail(&rt_se->run_list, queue); - __set_bit(rt_se_prio(rt_se), array->bitmap); + if (move_entity(flags)) { + WARN_ON_ONCE(rt_se->on_list); + if (flags & ENQUEUE_HEAD) + list_add(&rt_se->run_list, queue); + else + list_add_tail(&rt_se->run_list, queue); + + __set_bit(rt_se_prio(rt_se), array->bitmap); + rt_se->on_list = 1; + } + rt_se->on_rq = 1; inc_rt_tasks(rt_se, rt_rq); } -static void __dequeue_rt_entity(struct sched_rt_entity *rt_se) +static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); struct rt_prio_array *array = &rt_rq->active; - list_del_init(&rt_se->run_list); - if (list_empty(array->queue + rt_se_prio(rt_se))) - __clear_bit(rt_se_prio(rt_se), array->bitmap); + if (move_entity(flags)) { + WARN_ON_ONCE(!rt_se->on_list); + __delist_rt_entity(rt_se, array); + } + rt_se->on_rq = 0; dec_rt_tasks(rt_se, rt_rq); } @@ -1298,7 +1332,7 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se) * Because the prio of an upper entry depends on the lower * entries, we must remove entries top - down. 
*/ -static void dequeue_rt_stack(struct sched_rt_entity *rt_se) +static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags) { struct sched_rt_entity *back = NULL; @@ -1311,31 +1345,31 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se) for (rt_se = back; rt_se; rt_se = rt_se->back) { if (on_rt_rq(rt_se)) - __dequeue_rt_entity(rt_se); + __dequeue_rt_entity(rt_se, flags); } } -static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) +static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) { struct rq *rq = rq_of_rt_se(rt_se); - dequeue_rt_stack(rt_se); + dequeue_rt_stack(rt_se, flags); for_each_sched_rt_entity(rt_se) - __enqueue_rt_entity(rt_se, head); + __enqueue_rt_entity(rt_se, flags); enqueue_top_rt_rq(&rq->rt); } -static void dequeue_rt_entity(struct sched_rt_entity *rt_se) +static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) { struct rq *rq = rq_of_rt_se(rt_se); - dequeue_rt_stack(rt_se); + dequeue_rt_stack(rt_se, flags); for_each_sched_rt_entity(rt_se) { struct rt_rq *rt_rq = group_rt_rq(rt_se); if (rt_rq && rt_rq->rt_nr_running) - __enqueue_rt_entity(rt_se, false); + __enqueue_rt_entity(rt_se, flags); } enqueue_top_rt_rq(&rq->rt); } @@ -1351,7 +1385,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) if (flags & ENQUEUE_WAKEUP) rt_se->timeout = 0; - enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); + enqueue_rt_entity(rt_se, flags); inc_hmp_sched_stats_rt(rq, p); if (!task_current(rq, p) && p->nr_cpus_allowed > 1) @@ -1363,7 +1397,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) struct sched_rt_entity *rt_se = &p->rt; update_curr_rt(rq); - dequeue_rt_entity(rt_se); + dequeue_rt_entity(rt_se, flags); dec_hmp_sched_stats_rt(rq, p); dequeue_pushable_task(rq, p); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 360e298398fb6312f8b68e3cdb258db9bb87e12e..75500042fd326d795a7995b2283fe3ef86832b71 100644 
--- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1941,19 +1941,41 @@ static const u32 prio_to_wmult[40] = { /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, }; +/* + * {de,en}queue flags: + * + * DEQUEUE_SLEEP - task is no longer runnable + * ENQUEUE_WAKEUP - task just became runnable + * + * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks + * are in a known state which allows modification. Such pairs + * should preserve as much state as possible. + * + * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location + * in the runqueue. + * + * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) + * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) + * ENQUEUE_WAKING - sched_class::task_waking was called + * + */ + +#define DEQUEUE_SLEEP 0x01 +#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ +#define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */ + #define ENQUEUE_WAKEUP 0x01 -#define ENQUEUE_HEAD 0x02 +#define ENQUEUE_RESTORE 0x02 +#define ENQUEUE_MOVE 0x04 + +#define ENQUEUE_HEAD 0x08 +#define ENQUEUE_REPLENISH 0x10 #ifdef CONFIG_SMP -#define ENQUEUE_WAKING 0x04 /* sched_class::task_waking was called */ +#define ENQUEUE_WAKING 0x20 #else #define ENQUEUE_WAKING 0x00 #endif -#define ENQUEUE_REPLENISH 0x08 -#define ENQUEUE_RESTORE 0x10 -#define ENQUEUE_WAKEUP_NEW 0x20 - -#define DEQUEUE_SLEEP 0x01 -#define DEQUEUE_SAVE 0x02 +#define ENQUEUE_WAKEUP_NEW 0x40 #define RETRY_TASK ((void *)-1UL) diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index b2ff383d60624a2f82951d4e2d9415e9d6c8f076..b0c5fe6d1f3ba8442433486916a366d9ca3d728b 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -896,7 +896,6 @@ struct cgroup_subsys schedtune_cgrp_subsys = { .cancel_attach = schedtune_cancel_attach, .legacy_cftypes = files, .early_init = 1, - .allow_attach = subsys_cgroup_allow_attach, .attach = schedtune_attach, }; @@ -910,6 +909,7 @@ schedtune_init_cgroups(void) 
for_each_possible_cpu(cpu) { bg = &per_cpu(cpu_boost_groups, cpu); memset(bg, 0, sizeof(struct boost_groups)); + raw_spin_lock_init(&bg->lock); } pr_info("schedtune: configured to support %d boost groups\n", diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 07b7f84b37e22de0ce8fba89a33645523f865501..2ffb1680b380e542bd767ab3b8d75e87ff0f75e9 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -22,7 +22,6 @@ #include #include #include -#include #include "sched.h" #include "walt.h" @@ -188,10 +187,8 @@ update_window_start(struct rq *rq, u64 wallclock) delta = wallclock - rq->window_start; /* If the MPM global timer is cleared, set delta as 0 to avoid kernel BUG happening */ if (delta < 0) { - if (arch_timer_read_counter() == 0) - delta = 0; - else - BUG_ON(1); + delta = 0; + WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n"); } if (delta < walt_ravg_window) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ffa85996313cea04f3b1d2e329e9cd337cfe8cbb..51eef8e7df39f3e4445296f0d471ae14e4069409 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2371,6 +2371,21 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, return 0; } +static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp, + int *valp, + int write, void *data) +{ + if (write) { + if (*negp) + return -EINVAL; + *valp = *lvalp; + } else { + unsigned int val = *valp; + *lvalp = (unsigned long)val; + } + return 0; +} + static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, @@ -2498,8 +2513,27 @@ static int do_proc_dointvec(struct ctl_table *table, int write, int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return do_proc_dointvec(table,write,buffer,lenp,ppos, - NULL,NULL); + return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL); +} + +/** + * proc_douintvec - read a vector of unsigned integers + * 
@table: the sysctl table + * @write: %TRUE if this is a write to the sysctl file + * @buffer: the user buffer + * @lenp: the size of the user buffer + * @ppos: file position + * + * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer + * values from/to the user buffer, treated as an ASCII string. + * + * Returns 0 on success. + */ +int proc_douintvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return do_proc_dointvec(table, write, buffer, lenp, ppos, + do_proc_douintvec_conv, NULL); } /* @@ -3112,6 +3146,12 @@ int proc_dointvec(struct ctl_table *table, int write, return -ENOSYS; } +int proc_douintvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return -ENOSYS; +} + int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -3157,6 +3197,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, * exception granted :-) */ EXPORT_SYMBOL(proc_dointvec); +EXPORT_SYMBOL(proc_douintvec); EXPORT_SYMBOL(proc_dointvec_jiffies); EXPORT_SYMBOL(proc_dointvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 0cdc34ebd8d1434d44b5b9e705900f86ee6dc68d..2af5687b83c9027a6d131ecbfe924e26a3e095cc 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -412,12 +412,10 @@ static int alarmtimer_suspend(struct device *dev) now = rtc_tm_to_ktime(tm); now = ktime_add(now, min); if (poweron_alarm) { - struct rtc_time tm_val; - unsigned long secs; + uint64_t msec = 0; - tm_val = rtc_ktime_to_tm(min); - rtc_tm_to_time(&tm_val, &secs); - lpm_suspend_wake_time(secs); + msec = ktime_to_ms(min); + lpm_suspend_wake_time(msec); } else { /* Set alarm, if in the past reject suspend briefly to handle */ ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); diff --git a/kernel/time/timekeeping.c 
b/kernel/time/timekeeping.c index 4ff237dbc006162d2d71a18f87a7aa5557b020b2..ede4bf13d3e93649be0a474000607c6a3b898965 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -298,17 +298,34 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; static inline u32 arch_gettimeoffset(void) { return 0; } #endif +static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr, + cycle_t delta) +{ + s64 nsec; + + nsec = delta * tkr->mult + tkr->xtime_nsec; + nsec >>= tkr->shift; + + /* If arch requires, add in get_arch_timeoffset() */ + return nsec + arch_gettimeoffset(); +} + static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) { cycle_t delta; - s64 nsec; delta = timekeeping_get_delta(tkr); + return timekeeping_delta_to_ns(tkr, delta); +} - nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift; +static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, + cycle_t cycles) +{ + cycle_t delta; - /* If arch requires, add in get_arch_timeoffset() */ - return nsec + arch_gettimeoffset(); + /* calculate the delta since the last update_wall_time */ + delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask); + return timekeeping_delta_to_ns(tkr, delta); } /** @@ -385,8 +402,11 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) tkr = tkf->base + (seq & 0x01); now = ktime_to_ns(tkr->base); - now += clocksource_delta(tkr->read(tkr->clock), - tkr->cycle_last, tkr->mask); + now += timekeeping_delta_to_ns(tkr, + clocksource_delta( + tkr->read(tkr->clock), + tkr->cycle_last, + tkr->mask)); } while (read_seqcount_retry(&tkf->seq, seq)); return now; @@ -404,6 +424,35 @@ u64 ktime_get_raw_fast_ns(void) } EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); +/** + * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock. + * + * To keep it NMI safe since we're accessing from tracing, we're not using a + * separate timekeeper with updates to monotonic clock and boot offset + * protected with seqlocks. 
This has the following minor side effects: + * + * (1) Its possible that a timestamp be taken after the boot offset is updated + * but before the timekeeper is updated. If this happens, the new boot offset + * is added to the old timekeeping making the clock appear to update slightly + * earlier: + * CPU 0 CPU 1 + * timekeeping_inject_sleeptime64() + * __timekeeping_inject_sleeptime(tk, delta); + * timestamp(); + * timekeeping_update(tk, TK_CLEAR_NTP...); + * + * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be + * partially updated. Since the tk->offs_boot update is a rare event, this + * should be a rare occurrence which postprocessing should be able to handle. + */ +u64 notrace ktime_get_boot_fast_ns(void) +{ + struct timekeeper *tk = &tk_core.timekeeper; + + return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot)); +} +EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); + /* Suspend-time cycles value for halted fast timekeeper. */ static cycle_t cycles_at_suspend; diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 2acad4b6a92aaae8bf22deca1d8dcf49e7854452..2963266fb7bf0d345028f397911df85847df3055 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -1,4 +1,8 @@ +# We are fully aware of the dangers of __builtin_return_address() +FRAME_CFLAGS := $(call cc-disable-warning,frame-address) +KBUILD_CFLAGS += $(FRAME_CFLAGS) + # Do not instrument the tracer itself: ifdef CONFIG_FUNCTION_TRACER diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 069aa1aa82b602a4807bc386cdc6a4a863018db9..9b15d6eb1622df00f38c31288d38f6d0e738995b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -895,6 +895,7 @@ static struct { { trace_clock, "perf", 1 }, { ktime_get_mono_fast_ns, "mono", 1 }, { ktime_get_raw_fast_ns, "mono_raw", 1 }, + { ktime_get_boot_fast_ns, "boot", 1 }, ARCH_TRACE_CLOCKS }; @@ -4832,19 +4833,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, struct trace_iterator *iter = filp->private_data; 
ssize_t sret; - /* return any leftover data */ - sret = trace_seq_to_user(&iter->seq, ubuf, cnt); - if (sret != -EBUSY) - return sret; - - trace_seq_init(&iter->seq); - /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. */ mutex_lock(&iter->mutex); + + /* return any leftover data */ + sret = trace_seq_to_user(&iter->seq, ubuf, cnt); + if (sret != -EBUSY) + goto out; + + trace_seq_init(&iter->seq); + if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) @@ -5874,9 +5876,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, return -EBUSY; #endif - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - if (*ppos & (PAGE_SIZE - 1)) return -EINVAL; @@ -5886,6 +5885,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, len &= PAGE_MASK; } + if (splice_grow_spd(pipe, &spd)) + return -ENOMEM; + again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); @@ -5943,19 +5945,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, /* did we read anything? 
*/ if (!spd.nr_pages) { if (ret) - return ret; + goto out; + ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) - return -EAGAIN; + goto out; ret = wait_on_pipe(iter, true); if (ret) - return ret; + goto out; goto again; } ret = splice_to_pipe(pipe, &spd); +out: splice_shrink_spd(&spd); return ret; diff --git a/lib/genalloc.c b/lib/genalloc.c index 116a166b096f06eb64ed288e4d8b694648f854b8..27aa9c629d13408f6b8e849b21ac5a2bfbf46ebd 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -273,7 +273,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) struct gen_pool_chunk *chunk; unsigned long addr = 0; int order = pool->min_alloc_order; - int nbits, start_bit = 0, end_bit, remain; + int nbits, start_bit, end_bit, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); @@ -288,6 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) if (size > atomic_read(&chunk->avail)) continue; + start_bit = 0; end_bit = chunk_size(chunk) >> order; retry: start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 75232ad0a5e7ead00e5d8396ed34763d84a0685c..daca582a8ed072f0b4eb8b6871134a608e3192b1 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -297,26 +297,6 @@ done: return wanted - bytes; } -/* - * Fault in the first iovec of the given iov_iter, to a maximum length - * of bytes. Returns 0 on success, or non-zero if the memory could not be - * accessed (ie. because it is an invalid address). - * - * writev-intensive code may want this to prefault several iovecs -- that - * would be possible (callers must not rely on the fact that _only_ the - * first iovec will be faulted with the current implementation). 
- */ -int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) -{ - if (!(i->type & (ITER_BVEC|ITER_KVEC))) { - char __user *buf = i->iov->iov_base + i->iov_offset; - bytes = min(bytes, i->iov->iov_len - i->iov_offset); - return fault_in_pages_readable(buf, bytes); - } - return 0; -} -EXPORT_SYMBOL(iov_iter_fault_in_readable); - /* * Fault in one or more iovecs of the given iov_iter, to a maximum length of * bytes. For each iovec, fault in each page that constitutes the iovec. @@ -324,7 +304,7 @@ EXPORT_SYMBOL(iov_iter_fault_in_readable); * Return 0 on success, or non-zero if the memory could not be accessed (i.e. * because it is an invalid address). */ -int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes) +int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) { size_t skip = i->iov_offset; const struct iovec *iov; @@ -341,7 +321,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes) } return 0; } -EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable); +EXPORT_SYMBOL(iov_iter_fault_in_readable); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 94be244e844103d0fb6ed20c7bee905ca908fa12..d8a5cf66c316fe21eaecee8d973deac1eb3bd5d7 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -321,6 +321,70 @@ int kstrtos8(const char *s, unsigned int base, s8 *res) } EXPORT_SYMBOL(kstrtos8); +/** + * kstrtobool - convert common user inputs into boolean values + * @s: input string + * @res: result + * + * This routine returns 0 iff the first character is one of 'Yy1Nn0', or + * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value + * pointed to by res is updated upon finding a match. 
+ */ +int kstrtobool(const char *s, bool *res) +{ + if (!s) + return -EINVAL; + + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + return 0; + case 'n': + case 'N': + case '0': + *res = false; + return 0; + case 'o': + case 'O': + switch (s[1]) { + case 'n': + case 'N': + *res = true; + return 0; + case 'f': + case 'F': + *res = false; + return 0; + default: + break; + } + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(kstrtobool); + +/* + * Since "base" would be a nonsense argument, this open-codes the + * _from_user helper instead of using the helper macro below. + */ +int kstrtobool_from_user(const char __user *s, size_t count, bool *res) +{ + /* Longest string needed to differentiate, newline, terminator */ + char buf[4]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return kstrtobool(buf, res); +} +EXPORT_SYMBOL(kstrtobool_from_user); + #define kstrto_from_user(f, g, type) \ int f(const char __user *s, size_t count, unsigned int base, type *res) \ { \ diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index 5464c8744ea95647beeef44efc3722161f5b224b..e24388a863a76ff47afc2a7fc3680bc6ab6e8f4e 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) if (!esize) { /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 * depending on if MOD equals 1. */ - rp[0] = 1; res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 
0 : 1; + if (res->nlimbs) { + if (mpi_resize(res, 1) < 0) + goto enomem; + rp = res->d; + rp[0] = 1; + } res->sign = 0; goto leave; } diff --git a/lib/string.c b/lib/string.c index 0323c0d5629af31e674bcfa24624e0917e2bb80d..1a90db9bc6e1b474099ff9f967efd74101451596 100644 --- a/lib/string.c +++ b/lib/string.c @@ -630,35 +630,6 @@ bool sysfs_streq(const char *s1, const char *s2) } EXPORT_SYMBOL(sysfs_streq); -/** - * strtobool - convert common user inputs into boolean values - * @s: input string - * @res: result - * - * This routine returns 0 iff the first character is one of 'Yy1Nn0'. - * Otherwise it will return -EINVAL. Value pointed to by res is - * updated upon finding a match. - */ -int strtobool(const char *s, bool *res) -{ - switch (s[0]) { - case 'y': - case 'Y': - case '1': - *res = true; - break; - case 'n': - case 'N': - case '0': - *res = false; - break; - default: - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL(strtobool); - #ifndef __HAVE_ARCH_MEMSET /** * memset - Fill a region of memory with the given value diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 06ebe4efa1f2e28b3805765cd9001716233b6319..b4aacfc0b4b33d38d9a109b1149af2a11f208fd6 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -126,6 +127,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count) unsigned long max = max_addr - src_addr; long retval; + check_object_size(dst, count, false); user_access_begin(); retval = do_strncpy_from_user(dst, src, count, max); user_access_end(); diff --git a/mm/cma.c b/mm/cma.c index b55caef2454a2c8152e29dfc2d541ac61e219a61..20e661a501a6fd8299aec2b1d9fffeb015d2fe26 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -188,7 +188,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, return -EINVAL; /* ensure minimal alignment required by mm core */ - alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); + alignment 
= PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order); /* alignment should be aligned with order_per_bit */ if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) @@ -271,8 +272,8 @@ int __init cma_declare_contiguous(phys_addr_t base, * migratetype page by page allocator's buddy algorithm. In the case, * you couldn't get a contiguous memory, which is not what we want. */ - alignment = max(alignment, - (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); + alignment = max(alignment, (phys_addr_t)PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); diff --git a/mm/filemap.c b/mm/filemap.c index a322c035c6b9567e3f18591c217649cd7105f164..c46678d7d041939bc68d2f1437f1a89d2a086dc7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -109,6 +109,48 @@ * ->tasklist_lock (memory_failure, collect_procs_ao) */ +static int page_cache_tree_insert(struct address_space *mapping, + struct page *page, void **shadowp) +{ + struct radix_tree_node *node; + void **slot; + int error; + + error = __radix_tree_create(&mapping->page_tree, page->index, + &node, &slot); + if (error) + return error; + if (*slot) { + void *p; + + p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); + if (!radix_tree_exceptional_entry(p)) + return -EEXIST; + if (shadowp) + *shadowp = p; + mapping->nrshadows--; + if (node) + workingset_node_shadows_dec(node); + } + radix_tree_replace_slot(slot, page); + mapping->nrpages++; + if (node) { + workingset_node_pages_inc(node); + /* + * Don't track node that contains actual pages. + * + * Avoid acquiring the list_lru lock if already + * untracked. The list_empty() test is safe as + * node->private_list is protected by + * mapping->tree_lock. 
+ */ + if (!list_empty(&node->private_list)) + list_lru_del(&workingset_shadow_nodes, + &node->private_list); + } + return 0; +} + static void page_cache_tree_delete(struct address_space *mapping, struct page *page, void *shadow) { @@ -122,6 +164,14 @@ static void page_cache_tree_delete(struct address_space *mapping, __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); + if (!node) { + /* + * We need a node to properly account shadow + * entries. Don't plant any without. XXX + */ + shadow = NULL; + } + if (shadow) { mapping->nrshadows++; /* @@ -540,9 +590,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) memcg = mem_cgroup_begin_page_stat(old); spin_lock_irqsave(&mapping->tree_lock, flags); __delete_from_page_cache(old, NULL, memcg); - error = radix_tree_insert(&mapping->page_tree, offset, new); + error = page_cache_tree_insert(mapping, new, NULL); BUG_ON(error); - mapping->nrpages++; /* * hugetlb pages do not participate in page cache accounting. @@ -564,48 +613,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) } EXPORT_SYMBOL_GPL(replace_page_cache_page); -static int page_cache_tree_insert(struct address_space *mapping, - struct page *page, void **shadowp) -{ - struct radix_tree_node *node; - void **slot; - int error; - - error = __radix_tree_create(&mapping->page_tree, page->index, - &node, &slot); - if (error) - return error; - if (*slot) { - void *p; - - p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); - if (!radix_tree_exceptional_entry(p)) - return -EEXIST; - if (shadowp) - *shadowp = p; - mapping->nrshadows--; - if (node) - workingset_node_shadows_dec(node); - } - radix_tree_replace_slot(slot, page); - mapping->nrpages++; - if (node) { - workingset_node_pages_inc(node); - /* - * Don't track node that contains actual pages. - * - * Avoid acquiring the list_lru lock if already - * untracked. 
The list_empty() test is safe as - * node->private_list is protected by - * mapping->tree_lock. - */ - if (!list_empty(&node->private_list)) - list_lru_del(&workingset_shadow_nodes, - &node->private_list); - } - return 0; -} - static int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 125c7dd553228fa7b3a66a8b00495b7960ace306..4434cdd4cd9a6af734a32ff3cae884e9146cd90e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1416,12 +1416,13 @@ static void dissolve_free_huge_page(struct page *page) { spin_lock(&hugetlb_lock); if (PageHuge(page) && !page_count(page)) { - struct hstate *h = page_hstate(page); - int nid = page_to_nid(page); - list_del(&page->lru); + struct page *head = compound_head(page); + struct hstate *h = page_hstate(head); + int nid = page_to_nid(head); + list_del(&head->lru); h->free_huge_pages--; h->free_huge_pages_node[nid]--; - update_and_free_page(h, page); + update_and_free_page(h, head); } spin_unlock(&hugetlb_lock); } @@ -1429,7 +1430,8 @@ static void dissolve_free_huge_page(struct page *page) /* * Dissolve free hugepages in a given pfn range. Used by memory hotplug to * make specified memory blocks removable from the system. - * Note that start_pfn should aligned with (minimum) hugepage size. + * Note that this will dissolve a free gigantic hugepage completely, if any + * part of it lies within the given range. 
*/ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) { @@ -1438,7 +1440,6 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) if (!hugepages_supported()) return; - VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order)); for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) dissolve_free_huge_page(pfn_to_page(pfn)); } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 4f6c62e5c21edd2d800484b2f18025e45ce034fb..37ff0ab6a8ff7f1cb87ce1f6eea13de34538d05b 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -52,6 +52,9 @@ struct kasan_global { #if KASAN_ABI_VERSION >= 4 struct kasan_source_location *location; #endif +#if KASAN_ABI_VERSION >= 5 + char *odr_indicator; +#endif }; static inline const void *kasan_shadow_to_mem(const void *shadow_addr) diff --git a/mm/ksm.c b/mm/ksm.c index 7b59d9723adbbaaea46306b1925e898ebf7deeab..25cc13c1fe208b381e58ba7a961a5c0c8a84d3f4 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -301,7 +301,8 @@ static inline struct rmap_item *alloc_rmap_item(void) { struct rmap_item *rmap_item; - rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); + rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | + __GFP_NORETRY | __GFP_NOWARN); if (rmap_item) ksm_rmap_items++; return rmap_item; diff --git a/mm/list_lru.c b/mm/list_lru.c index afc71ea9a381f853faf65a05edc7f411a378d7e5..5d8dffd5b57c83575b2daf4b9ded58b63c5c6f6e 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, err = memcg_init_list_lru(lru, memcg_aware); if (err) { kfree(lru->node); + /* Do this so a list_lru_destroy() doesn't crash: */ + lru->node = NULL; goto out; } diff --git a/mm/memblock.c b/mm/memblock.c index 7f0a860a357ededc7b50664b3f89bac5bdb696a5..89e74fa822901110b5856d285991af75ce7af041 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -827,6 +827,17 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) return 
memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); } +/** + * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. + * @base: the base phys addr of the region + * @size: the size of the region + * + * Return 0 on success, -errno on failure. + */ +int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) +{ + return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); +} /** * __next_reserved_mem_region - next function for for_each_reserved_region() @@ -918,6 +929,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags, if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) continue; + /* skip nomap memory unless we were asked for it explicitly */ + if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) + continue; + if (!type_b) { if (out_start) *out_start = m_start; @@ -1027,6 +1042,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags, if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) continue; + /* skip nomap memory unless we were asked for it explicitly */ + if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) + continue; + if (!type_b) { if (out_start) *out_start = m_start; @@ -1538,6 +1557,15 @@ int __init_memblock memblock_is_memory(phys_addr_t addr) return memblock_search(&memblock.memory, addr) != -1; } +int __init_memblock memblock_is_map_memory(phys_addr_t addr) +{ + int i = memblock_search(&memblock.memory, addr); + + if (i == -1) + return false; + return !memblock_is_nomap(&memblock.memory.regions[i]); +} + #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int __init_memblock memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b62b33a3cfec78d190866a5b3c90523fb2c11d6c..5d9c8a3136bc0404e086487b7b4a87a102a2fe2c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2055,6 +2055,15 @@ retry: current->flags & PF_EXITING)) goto force; + /* + * Prevent unbounded recursion when reclaim 
operations need to + * allocate memory. This might exceed the limits temporarily, + * but we prefer facilitating memory reclaim and getting back + * under the limit over triggering OOM kills in these cases. + */ + if (unlikely(current->flags & PF_MEMALLOC)) + goto force; + if (unlikely(task_in_memcg_oom(current))) goto nomem; @@ -4972,11 +4981,6 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) return ret; } -static int mem_cgroup_allow_attach(struct cgroup_taskset *tset) -{ - return subsys_cgroup_allow_attach(tset); -} - static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { if (mc.to) @@ -5131,10 +5135,6 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; } -static int mem_cgroup_allow_attach(struct cgroup_taskset *tset) -{ - return 0; -} static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { } @@ -5352,7 +5352,6 @@ struct cgroup_subsys memory_cgrp_subsys = { .css_reset = mem_cgroup_css_reset, .can_attach = mem_cgroup_can_attach, .cancel_attach = mem_cgroup_cancel_attach, - .allow_attach = mem_cgroup_allow_attach, .post_attach = mem_cgroup_move_task, .bind = mem_cgroup_bind, .dfl_cftypes = memory_files, diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6b4fa64a5c9162bdbbf35e0af90eecd824da273f..15cb026ef807b7e74ef690796e5c88b4926d8b66 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) unsigned long nr_pages; nr_pages = zone_page_state(zone, NR_FREE_PAGES); - nr_pages -= min(nr_pages, zone->dirty_balance_reserve); + /* + * Pages reserved for the kernel should not be considered + * dirtyable, to prevent a situation where reclaim has to + * clean pages in order to balance the zones. 
+ */ + nr_pages -= min(nr_pages, zone->totalreserve_pages); nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); @@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void) unsigned long x; x = global_page_state(NR_FREE_PAGES); - x -= min(x, dirty_balance_reserve); + /* + * Pages reserved for the kernel should not be considered + * dirtyable, to prevent a situation where reclaim has to + * clean pages in order to balance the zones. + */ + x -= min(x, totalreserve_pages); x += global_page_state(NR_INACTIVE_FILE); x += global_page_state(NR_ACTIVE_FILE); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index af5901c65ba814e8b3ee29a17ad8ba210fa090d6..3048c5dbda16b8446e36df2190436642517d29fc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock); unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; -/* - * When calculating the number of globally allowed dirty pages, there - * is a certain number of per-zone reserves that should not be - * considered dirtyable memory. This is the sum of those reserves - * over all existing zones that contribute dirtyable memory. - */ -unsigned long dirty_balance_reserve __read_mostly; int percpu_pagelist_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; @@ -6090,20 +6083,12 @@ static void calculate_totalreserve_pages(void) if (max > zone->managed_pages) max = zone->managed_pages; + + zone->totalreserve_pages = max; + reserve_pages += max; - /* - * Lowmem reserves are not available to - * GFP_HIGHUSER page cache allocations and - * kswapd tries to balance zones to their high - * watermark. As a result, neither should be - * regarded as dirtyable memory, to prevent a - * situation where reclaim has to clean pages - * in order to balance the zones. 
- */ - zone->dirty_balance_reserve = max; } } - dirty_balance_reserve = reserve_pages; totalreserve_pages = reserve_pages; } diff --git a/mm/swapfile.c b/mm/swapfile.c index 3ae1225084c94f7006f628c81b346c739f4a3a00..45656508512b72eb1e5790918a32e137ceaa05de 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2268,6 +2268,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p, swab32s(&swap_header->info.version); swab32s(&swap_header->info.last_page); swab32s(&swap_header->info.nr_badpages); + if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) + return 0; for (i = 0; i < swap_header->info.nr_badpages; i++) swab32s(&swap_header->info.badpages[i]); } diff --git a/mm/usercopy.c b/mm/usercopy.c index 089328f2b9209ea3526c6caed032b95adad85d0b..b34996a3860b0bad6676f26eeb38a7469cc63de4 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include diff --git a/mm/vmscan.c b/mm/vmscan.c index 6801adacadb0b5a37af1162feb6ba9c2df918cd0..d82765ba44f43a104c7aa4c483460f039339d65f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2265,23 +2265,6 @@ out: } } -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH -static void init_tlb_ubc(void) -{ - /* - * This deliberately does not clear the cpumask as it's expensive - * and unnecessary. If there happens to be data in there then the - * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and - * then will be cleared. - */ - current->tlb_ubc.flush_required = false; -} -#else -static inline void init_tlb_ubc(void) -{ -} -#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ - /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
*/ @@ -2316,8 +2299,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness, scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && sc->priority == DEF_PRIORITY); - init_tlb_ubc(); - blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { @@ -3043,7 +3024,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, sc.may_writepage, sc.gfp_mask); + current->flags |= PF_MEMALLOC; nr_reclaimed = do_try_to_free_pages(zonelist, &sc); + current->flags &= ~PF_MEMALLOC; trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); diff --git a/mm/workingset.c b/mm/workingset.c index aa017133744b227bed7592ea6cc32f360c3e142c..df66f426fdcf523c4b0d6ccd9a63c2e4db823f17 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -341,21 +341,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, * no pages, so we expect to be able to remove them all and * delete and free the empty node afterwards. */ - - BUG_ON(!node->count); - BUG_ON(node->count & RADIX_TREE_COUNT_MASK); + BUG_ON(!workingset_node_shadows(node)); + BUG_ON(workingset_node_pages(node)); for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { if (node->slots[i]) { BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); node->slots[i] = NULL; - BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT)); - node->count -= 1U << RADIX_TREE_COUNT_SHIFT; + workingset_node_shadows_dec(node); BUG_ON(!mapping->nrshadows); mapping->nrshadows--; } } - BUG_ON(node->count); + BUG_ON(workingset_node_shadows(node)); inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM); if (!__radix_tree_delete_node(&mapping->page_tree, node)) BUG(); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index d2cd9de4b7241dcc8c1df5fd7832b3b317799923..ad8d6e6b87cab9c2c03c9e317513880d1b755b95 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, skb_gro_pull(skb, sizeof(*vhdr)); 
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 17851d3aaf224626dc758a2b9b161e1a847444f2..6282f021ddfb68fd2a4ead55431b3e79b1f96192 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -197,18 +197,12 @@ static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node) { struct hlist_node *node_tmp; struct batadv_neigh_ifinfo *neigh_ifinfo; - struct batadv_algo_ops *bao; - - bao = neigh_node->orig_node->bat_priv->bat_algo_ops; hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, &neigh_node->ifinfo_list, list) { batadv_neigh_ifinfo_free_ref(neigh_ifinfo); } - if (bao->bat_neigh_free) - bao->bat_neigh_free(neigh_node); - batadv_hardif_free_ref(neigh_node->if_incoming); kfree_rcu(neigh_node, rcu); diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index d260efd70499bd62ceb7b86b57684f45a96612e9..cbd347c2e4a581c36405cb0b258ef0ad4ecb3e1e 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1136,8 +1136,6 @@ struct batadv_forw_packet { * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or better * than neigh2 for their respective outgoing interface from the metric * prospective - * @bat_neigh_free: free the resources allocated by the routing algorithm for a - * neigh_node object * @bat_orig_print: print the originator table (optional) * @bat_orig_free: free the resources allocated by the routing algorithm for an * orig_node object @@ -1165,7 +1163,6 @@ struct batadv_algo_ops { struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2); - void (*bat_neigh_free)(struct batadv_neigh_node *neigh); /* orig_node handling API */ void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq, struct batadv_hard_iface 
*hard_iface); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 7173a685309ac921a6b17eb92b637b6f51074214..d80c15d028fe2a65e0a61d0d2fad447ecad13102 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -951,13 +951,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query) mod_timer(&query->timer, jiffies); } -void br_multicast_enable_port(struct net_bridge_port *port) +static void __br_multicast_enable_port(struct net_bridge_port *port) { struct net_bridge *br = port->br; - spin_lock(&br->multicast_lock); if (br->multicast_disabled || !netif_running(br->dev)) - goto out; + return; br_multicast_enable(&port->ip4_own_query); #if IS_ENABLED(CONFIG_IPV6) @@ -965,8 +964,14 @@ void br_multicast_enable_port(struct net_bridge_port *port) #endif if (port->multicast_router == 2 && hlist_unhashed(&port->rlist)) br_multicast_add_router(br, port); +} -out: +void br_multicast_enable_port(struct net_bridge_port *port) +{ + struct net_bridge *br = port->br; + + spin_lock(&br->multicast_lock); + __br_multicast_enable_port(port); spin_unlock(&br->multicast_lock); } @@ -1113,7 +1118,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, } else { err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, vid); - if (!err) + if (err) break; } } @@ -1905,8 +1910,9 @@ static void br_multicast_start_querier(struct net_bridge *br, int br_multicast_toggle(struct net_bridge *br, unsigned long val) { - int err = 0; struct net_bridge_mdb_htable *mdb; + struct net_bridge_port *port; + int err = 0; spin_lock_bh(&br->multicast_lock); if (br->multicast_disabled == !val) @@ -1934,10 +1940,9 @@ rollback: goto rollback; } - br_multicast_start_querier(br, &br->ip4_own_query); -#if IS_ENABLED(CONFIG_IPV6) - br_multicast_start_querier(br, &br->ip6_own_query); -#endif + br_multicast_open(br); + list_for_each_entry(port, &br->port_list, list) + __br_multicast_enable_port(port); unlock: spin_unlock_bh(&br->multicast_lock); diff --git 
a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index f6c3b2137eeaacdc5cf38d1eac03e018d8f2d37b..59ce1fcc220ce0a71fb57733be7cc91e6b8ac7fc 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -286,7 +286,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len) else skb_trim(skb, len); - return cfpkt_getlen(pkt); + return cfpkt_getlen(pkt); } /* Need to expand SKB */ diff --git a/net/can/bcm.c b/net/can/bcm.c index 6863310d6973ba616323f33a5d57a04a583a5430..8ef1afacad828600cc52bbc605d857a6d765f05d 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); + int ret = 0; if (len < sizeof(*addr)) return -EINVAL; - if (bo->bound) - return -EISCONN; + lock_sock(sk); + + if (bo->bound) { + ret = -EISCONN; + goto fail; + } /* bind a device to this socket */ if (addr->can_ifindex) { struct net_device *dev; dev = dev_get_by_index(&init_net, addr->can_ifindex); - if (!dev) - return -ENODEV; - + if (!dev) { + ret = -ENODEV; + goto fail; + } if (dev->type != ARPHRD_CAN) { dev_put(dev); - return -ENODEV; + ret = -ENODEV; + goto fail; } bo->ifindex = dev->ifindex; @@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, bo->ifindex = 0; } - bo->bound = 1; - if (proc_dir) { /* unique socket address as filename */ sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_data(bo->procname, 0644, proc_dir, &bcm_proc_fops, sk); + if (!bo->bcm_proc_read) { + ret = -ENOMEM; + goto fail; + } } - return 0; + bo->bound = 1; + +fail: + release_sock(sk); + + return ret; } static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, diff --git a/net/core/dev.c b/net/core/dev.c index a4c647893e52542d483a2274eb7f67db692c023c..ea08b75ad6952406a2e27bf58440a49d48cefd47 100644 --- a/net/core/dev.c +++ 
b/net/core/dev.c @@ -2464,7 +2464,7 @@ int skb_checksum_help(struct sk_buff *skb) goto out; } - *(__sum16 *)(skb->data + offset) = csum_fold(csum); + *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: @@ -2842,6 +2842,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d } return head; } +EXPORT_SYMBOL_GPL(validate_xmit_skb_list); static void qdisc_pkt_len_init(struct sk_buff *skb) { @@ -3727,6 +3728,22 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, return skb; } +/** + * netdev_is_rx_handler_busy - check if receive handler is registered + * @dev: device to check + * + * Check if a receive handler is already registered for a given device. + * Return true if there one. + * + * The caller must hold the rtnl_mutex. + */ +bool netdev_is_rx_handler_busy(struct net_device *dev) +{ + ASSERT_RTNL(); + return dev && rtnl_dereference(dev->rx_handler); +} +EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); + /** * netdev_rx_handler_register - register receive handler * @dev: device to register a handler for @@ -4231,7 +4248,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; - NAPI_GRO_CB(skb)->udp_mark = 0; + NAPI_GRO_CB(skb)->encap_mark = 0; + NAPI_GRO_CB(skb)->recursion_counter = 0; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ @@ -5219,6 +5237,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, static int __netdev_adjacent_dev_insert(struct net_device *dev, struct net_device *adj_dev, + u16 ref_nr, struct list_head *dev_list, void *private, bool master) { @@ -5228,7 +5247,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, adj = __netdev_find_adj(adj_dev, dev_list); if (adj) { - adj->ref_nr++; + adj->ref_nr += ref_nr; return 0; } @@ -5238,7 +5257,7 @@ static 
int __netdev_adjacent_dev_insert(struct net_device *dev, adj->dev = adj_dev; adj->master = master; - adj->ref_nr = 1; + adj->ref_nr = ref_nr; adj->private = private; dev_hold(adj_dev); @@ -5277,6 +5296,7 @@ free_adj: static void __netdev_adjacent_dev_remove(struct net_device *dev, struct net_device *adj_dev, + u16 ref_nr, struct list_head *dev_list) { struct netdev_adjacent *adj; @@ -5289,10 +5309,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, BUG(); } - if (adj->ref_nr > 1) { - pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name, - adj->ref_nr-1); - adj->ref_nr--; + if (adj->ref_nr > ref_nr) { + pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name, + ref_nr, adj->ref_nr-ref_nr); + adj->ref_nr -= ref_nr; return; } @@ -5311,21 +5331,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, static int __netdev_adjacent_dev_link_lists(struct net_device *dev, struct net_device *upper_dev, + u16 ref_nr, struct list_head *up_list, struct list_head *down_list, void *private, bool master) { int ret; - ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private, - master); + ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list, + private, master); if (ret) return ret; - ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private, - false); + ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list, + private, false); if (ret) { - __netdev_adjacent_dev_remove(dev, upper_dev, up_list); + __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); return ret; } @@ -5333,9 +5354,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev, } static int __netdev_adjacent_dev_link(struct net_device *dev, - struct net_device *upper_dev) + struct net_device *upper_dev, + u16 ref_nr) { - return __netdev_adjacent_dev_link_lists(dev, upper_dev, + return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr, &dev->all_adj_list.upper, &upper_dev->all_adj_list.lower, 
NULL, false); @@ -5343,17 +5365,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev, static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, struct net_device *upper_dev, + u16 ref_nr, struct list_head *up_list, struct list_head *down_list) { - __netdev_adjacent_dev_remove(dev, upper_dev, up_list); - __netdev_adjacent_dev_remove(upper_dev, dev, down_list); + __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); + __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); } static void __netdev_adjacent_dev_unlink(struct net_device *dev, - struct net_device *upper_dev) + struct net_device *upper_dev, + u16 ref_nr) { - __netdev_adjacent_dev_unlink_lists(dev, upper_dev, + __netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr, &dev->all_adj_list.upper, &upper_dev->all_adj_list.lower); } @@ -5362,17 +5386,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, struct net_device *upper_dev, void *private, bool master) { - int ret = __netdev_adjacent_dev_link(dev, upper_dev); + int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1); if (ret) return ret; - ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, + ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1, &dev->adj_list.upper, &upper_dev->adj_list.lower, private, master); if (ret) { - __netdev_adjacent_dev_unlink(dev, upper_dev); + __netdev_adjacent_dev_unlink(dev, upper_dev, 1); return ret; } @@ -5382,8 +5406,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, struct net_device *upper_dev) { - __netdev_adjacent_dev_unlink(dev, upper_dev); - __netdev_adjacent_dev_unlink_lists(dev, upper_dev, + __netdev_adjacent_dev_unlink(dev, upper_dev, 1); + __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, &dev->adj_list.upper, &upper_dev->adj_list.lower); } @@ -5435,7 +5459,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, 
list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { pr_debug("Interlinking %s with %s, non-neighbour\n", i->dev->name, j->dev->name); - ret = __netdev_adjacent_dev_link(i->dev, j->dev); + ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr); if (ret) goto rollback_mesh; } @@ -5445,7 +5469,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { pr_debug("linking %s's upper device %s with %s\n", upper_dev->name, i->dev->name, dev->name); - ret = __netdev_adjacent_dev_link(dev, i->dev); + ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr); if (ret) goto rollback_upper_mesh; } @@ -5454,7 +5478,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, list_for_each_entry(i, &dev->all_adj_list.lower, list) { pr_debug("linking %s's lower device %s with %s\n", dev->name, i->dev->name, upper_dev->name); - ret = __netdev_adjacent_dev_link(i->dev, upper_dev); + ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr); if (ret) goto rollback_lower_mesh; } @@ -5468,7 +5492,7 @@ rollback_lower_mesh: list_for_each_entry(i, &dev->all_adj_list.lower, list) { if (i == to_i) break; - __netdev_adjacent_dev_unlink(i->dev, upper_dev); + __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr); } i = NULL; @@ -5478,7 +5502,7 @@ rollback_upper_mesh: list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { if (i == to_i) break; - __netdev_adjacent_dev_unlink(dev, i->dev); + __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr); } i = j = NULL; @@ -5490,7 +5514,7 @@ rollback_mesh: list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { if (i == to_i && j == to_j) break; - __netdev_adjacent_dev_unlink(i->dev, j->dev); + __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr); } if (i == to_i) break; @@ -5574,16 +5598,16 @@ void netdev_upper_dev_unlink(struct net_device *dev, */ list_for_each_entry(i, &dev->all_adj_list.lower, list) list_for_each_entry(j, 
&upper_dev->all_adj_list.upper, list) - __netdev_adjacent_dev_unlink(i->dev, j->dev); + __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr); /* remove also the devices itself from lower/upper device * list */ list_for_each_entry(i, &dev->all_adj_list.lower, list) - __netdev_adjacent_dev_unlink(i->dev, upper_dev); + __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr); list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) - __netdev_adjacent_dev_unlink(dev, i->dev); + __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr); call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, &changeupper_info.info); diff --git a/net/core/filter.c b/net/core/filter.c index eedb05468fcbf4dfca9ce2cbbc44937858df0fec..e94355452166c8f9864a153c2f552ff37c9c756b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -85,7 +85,6 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) filter = rcu_dereference(sk->sk_filter); if (filter) { unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); - err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock(); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 4509cba1d0e6235d15c8a59c05ca79ea52f14b91..5ec5502d73607cc1897e3eb9139ac2867eebe4fd 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -132,7 +132,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_keyid *key_keyid; u8 ip_proto = 0; - bool ret = false; + bool ret; if (!data) { data = skb->data; @@ -527,12 +527,17 @@ ip_proto_again: out_good: ret = true; -out_bad: + key_control->thoff = (u16)nhoff; +out: key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; - key_control->thoff = (u16)nhoff; return ret; + +out_bad: + ret = false; + key_control->thoff = min_t(u16, nhoff, skb ? 
skb->len : hlen); + goto out; } EXPORT_SYMBOL(__skb_flow_dissect); @@ -979,4 +984,4 @@ static int __init init_default_flow_dissectors(void) return 0; } -late_initcall_sync(init_default_flow_dissectors); +core_initcall(init_default_flow_dissectors); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 2c2eb1b629b11d6911abfa885549e32d927815a6..2e9a1c2818c7ab2d378cd112e70cb27ba9f369a3 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -217,6 +217,8 @@ int peernet2id_alloc(struct net *net, struct net *peer) bool alloc; int id; + if (atomic_read(&net->count) == 0) + return NETNSA_NSID_NOT_ASSIGNED; spin_lock_irqsave(&net->nsid_lock, flags); alloc = atomic_read(&peer->count) == 0 ? false : true; id = __peernet2id_alloc(net, peer, &alloc); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 4da4d51a2ccfd6e8b8833c35cc0604991536ed68..b6327601f97974b7f7d1ccf31c3f0cfb9306d4fb 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -215,8 +215,8 @@ #define M_NETIF_RECEIVE 1 /* Inject packets into stack */ /* If lock -- protects updating of if_list */ -#define if_lock(t) spin_lock(&(t->if_lock)); -#define if_unlock(t) spin_unlock(&(t->if_lock)); +#define if_lock(t) mutex_lock(&(t->if_lock)); +#define if_unlock(t) mutex_unlock(&(t->if_lock)); /* Used to help with determining the pkts on receive */ #define PKTGEN_MAGIC 0xbe9be955 @@ -422,7 +422,7 @@ struct pktgen_net { }; struct pktgen_thread { - spinlock_t if_lock; /* for list of devices */ + struct mutex if_lock; /* for list of devices */ struct list_head if_list; /* All device here */ struct list_head th_list; struct task_struct *tsk; @@ -2002,11 +2002,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d { struct pktgen_thread *t; + mutex_lock(&pktgen_thread_lock); + list_for_each_entry(t, &pn->pktgen_threads, th_list) { struct pktgen_dev *pkt_dev; - rcu_read_lock(); - list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { + if_lock(t); + 
list_for_each_entry(pkt_dev, &t->if_list, list) { if (pkt_dev->odev != dev) continue; @@ -2021,8 +2023,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d dev->name); break; } - rcu_read_unlock(); + if_unlock(t); } + mutex_unlock(&pktgen_thread_lock); } static int pktgen_device_event(struct notifier_block *unused, @@ -2278,7 +2281,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) { - pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev); + pkt_dev->pkt_overhead = 0; pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32); pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev); pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev); @@ -2769,13 +2772,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, } static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, - struct pktgen_dev *pkt_dev, - unsigned int extralen) + struct pktgen_dev *pkt_dev) { + unsigned int extralen = LL_RESERVED_SPACE(dev); struct sk_buff *skb = NULL; - unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen + - pkt_dev->pkt_overhead; + unsigned int size; + size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead; if (pkt_dev->flags & F_NODE) { int node = pkt_dev->node >= 0 ? 
pkt_dev->node : numa_node_id(); @@ -2788,8 +2791,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT); } + /* the caller pre-fetches from skb->data and reserves for the mac hdr */ if (likely(skb)) - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, extralen - 16); return skb; } @@ -2822,16 +2826,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, mod_cur_headers(pkt_dev); queue_map = pkt_dev->cur_queue_map; - datalen = (odev->hard_header_len + 16) & ~0xf; - - skb = pktgen_alloc_skb(odev, pkt_dev, datalen); + skb = pktgen_alloc_skb(odev, pkt_dev); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; } prefetchw(skb->data); - skb_reserve(skb, datalen); + skb_reserve(skb, 16); /* Reserve for ethernet and IP header */ eth = (__u8 *) skb_push(skb, 14); @@ -2951,7 +2953,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, mod_cur_headers(pkt_dev); queue_map = pkt_dev->cur_queue_map; - skb = pktgen_alloc_skb(odev, pkt_dev, 16); + skb = pktgen_alloc_skb(odev, pkt_dev); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -3727,7 +3729,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) return -ENOMEM; } - spin_lock_init(&t->if_lock); + mutex_init(&t->if_lock); t->cpu = cpu; INIT_LIST_HEAD(&t->if_list); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 87b91ffbdec313a7c599c3892325ddf6b2b50262..b94e165a4f7911a5591c39380bba5b7c6a4bda57 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2600,7 +2600,10 @@ nla_put_failure: static inline size_t rtnl_fdb_nlmsg_size(void) { - return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); + return NLMSG_ALIGN(sizeof(struct ndmsg)) + + nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ + nla_total_size(sizeof(u16)) + /* NDA_VLAN */ + 0; } static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type) diff --git a/net/core/sock.c 
b/net/core/sock.c index fed6f3462c955cc6ff3f205270be1476d9670bea..5ba8a6795cd029c4c1f2101be4e5a9cabf889d2f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -745,7 +745,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, val = min_t(u32, val, sysctl_wmem_max); set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); /* Wake up sending tasks if we upped the value. */ sk->sk_write_space(sk); break; @@ -781,7 +781,7 @@ set_rcvbuf: * returning the value we actually used in getsockopt * is the most desirable behavior. */ - sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); + sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); break; case SO_RCVBUFFORCE: @@ -1574,6 +1574,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) } newsk->sk_err = 0; + newsk->sk_err_soft = 0; newsk->sk_priority = 0; newsk->sk_incoming_cpu = raw_smp_processor_id(); atomic64_set(&newsk->sk_cookie, 0); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 8be8f27bfaccdddf0dd1949d0dfd59ee63ad8ebc..0759f5b9180e56056d2a365c58ef7aa49a7567e5 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (struct iphdr *)skb->data; const u8 offset = iph->ihl << 2; - const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); + const struct dccp_hdr *dh; struct dccp_sock *dp; struct inet_sock *inet; const int type = icmp_hdr(skb)->type; @@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) int err; struct net *net = dev_net(skb->dev); - if (skb->len < offset + sizeof(*dh) || - skb->len < offset + __dccp_basic_hdr_len(dh)) { - ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); - return; - } + /* Only need dccph_dport & dccph_sport which are the first + * 4 bytes in dccp header. 
+ * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. + */ + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); + dh = (struct dccp_hdr *)(skb->data + offset); sk = __inet_lookup_established(net, &dccp_hashinfo, iph->daddr, dh->dccph_dport, @@ -696,6 +698,7 @@ int dccp_invalid_packet(struct sk_buff *skb) { const struct dccp_hdr *dh; unsigned int cscov; + u8 dccph_doff; if (skb->pkt_type != PACKET_HOST) return 1; @@ -717,18 +720,19 @@ int dccp_invalid_packet(struct sk_buff *skb) /* * If P.Data Offset is too small for packet type, drop packet and return */ - if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { - DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); + dccph_doff = dh->dccph_doff; + if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { + DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff); return 1; } /* * If P.Data Offset is too too large for packet, drop packet and return */ - if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { - DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); + if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) { + DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff); return 1; } - + dh = dccp_hdr(skb); /* * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet * has short sequence numbers), drop packet and return diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index b8608b71a66d34894722c17121754c403c74d946..27c4e81efa24c993d86a4547359fb3c423b2e79d 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; - const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); + const struct dccp_hdr *dh; struct dccp_sock *dp; struct ipv6_pinfo *np; struct sock *sk; @@ -78,12 +78,13 @@ static void dccp_v6_err(struct 
sk_buff *skb, struct inet6_skb_parm *opt, __u64 seq; struct net *net = dev_net(skb->dev); - if (skb->len < offset + sizeof(*dh) || - skb->len < offset + __dccp_basic_hdr_len(dh)) { - ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), - ICMP6_MIB_INERRORS); - return; - } + /* Only need dccph_dport & dccph_sport which are the first + * 4 bytes in dccp header. + * Our caller (icmpv6_notify()) already pulled 8 bytes for us. + */ + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); + dh = (struct dccp_hdr *)(skb->data + offset); sk = __inet6_lookup_established(net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport, @@ -947,6 +948,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), + .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 41e65804ddf59651c78ae58b697e7e5e603c9167..9fe25bf6329691ecf0acdc35df7278b074d446c1 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout) __kfree_skb(skb); } + /* If socket has been already reset kill it. 
*/ + if (sk->sk_state == DCCP_CLOSED) + goto adjudge_to_death; + if (data_was_unread) { /* Unread data was tossed, send an appropriate Reset Code */ DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 9e63f252a89ead08fda157e716607938452c0b49..de85d4e1cf43c10016e159248a94e7fd544e4c04 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, skb_gro_pull(skb, sizeof(*eh)); skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1021e770355bbfc75a9b68d76bbf7865f5345e83..fc65a0167fbeb5223aafc272888584ca955ebd16 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1400,7 +1400,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); @@ -1411,6 +1411,19 @@ out: return pp; } +static struct sk_buff **ipip_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->encap_mark) { + NAPI_GRO_CB(skb)->flush = 1; + return NULL; + } + + NAPI_GRO_CB(skb)->encap_mark = 1; + + return inet_gro_receive(head, skb); +} + int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { if (sk->sk_family == AF_INET) @@ -1453,6 +1466,13 @@ out_unlock: return err; } +static int ipip_gro_complete(struct sk_buff *skb, int nhoff) +{ + skb->encapsulation = 1; + skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP; + return inet_gro_complete(skb, nhoff); +} + int inet_ctl_sock_create(struct sock **sk, unsigned short family, unsigned short type, unsigned char protocol, struct net *net) @@ 
-1680,8 +1700,8 @@ static struct packet_offload ip_packet_offload __read_mostly = { static const struct net_offload ipip_offload = { .callbacks = { .gso_segment = inet_gso_segment, - .gro_receive = inet_gro_receive, - .gro_complete = inet_gro_complete, + .gro_receive = ipip_gro_receive, + .gro_complete = ipip_gro_complete, }, }; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index d95631d0924899f17549e69801567bed076491b9..20fb25e3027bbbf8b8c2068751caf40df939546d 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) esph = (void *)skb_push(skb, 4); *seqhi = esph->spi; esph->spi = esph->seq_no; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; aead_request_set_callback(req, 0, esp_input_done_esn, skb); } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 3990ee077060016356ee64106b14e9fd405b75ad..2059c2010f9cf6f3543852466a819406514d7731 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2453,29 +2453,22 @@ struct fib_route_iter { static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos) { - struct fib_table *tb = iter->main_tb; struct key_vector *l, **tp = &iter->tnode; - struct trie *t; t_key key; - /* use cache location of next-to-find key */ + /* use cached location of previously found key */ if (iter->pos > 0 && pos >= iter->pos) { - pos -= iter->pos; key = iter->key; } else { - t = (struct trie *)tb->tb_data; - iter->tnode = t->kv; - iter->pos = 0; + iter->pos = 1; key = 0; } - while ((l = leaf_walk_rcu(tp, key)) != NULL) { + pos -= iter->pos; + + while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { key = l->key + 1; iter->pos++; - - if (--pos <= 0) - break; - l = NULL; /* handle unlikely case of a key wrap */ @@ -2484,7 +2477,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, } if (l) - iter->key = key; /* remember it */ + iter->key = l->key; /* 
remember it */ else iter->pos = 0; /* forget it */ @@ -2505,14 +2498,14 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) return NULL; iter->main_tb = tb; + t = (struct trie *)tb->tb_data; + iter->tnode = t->kv; if (*pos != 0) return fib_route_get_idx(iter, *pos); - t = (struct trie *)tb->tb_data; - iter->tnode = t->kv; iter->pos = 0; - iter->key = 0; + iter->key = KEY_MAX; return SEQ_START_TOKEN; } @@ -2521,7 +2514,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_route_iter *iter = seq->private; struct key_vector *l = NULL; - t_key key = iter->key; + t_key key = iter->key + 1; ++*pos; @@ -2530,7 +2523,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) l = leaf_walk_rcu(&iter->tnode, key); if (l) { - iter->key = l->key + 1; + iter->key = l->key; iter->pos++; } else { iter->pos = 0; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index bd903fe0f7508d9d7a94c4ce1e0a825f3e35c398..08d8ee12453801653860354220a60fe043252d4d 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(struct sock *sk) return sk->sk_user_data; } -static void fou_recv_pull(struct sk_buff *skb, size_t len) +static int fou_recv_pull(struct sk_buff *skb, size_t len) { struct iphdr *iph = ip_hdr(skb); @@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff *skb, size_t len) __skb_pull(skb, len); skb_postpull_rcsum(skb, udp_hdr(skb), len); skb_reset_transport_header(skb); + return iptunnel_pull_offloads(skb); } static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) @@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) if (!fou) return 1; - fou_recv_pull(skb, sizeof(struct udphdr)); + if (fou_recv_pull(skb, sizeof(struct udphdr))) + goto drop; return -fou->protocol; + +drop: + kfree_skb(skb); + return 0; } static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, @@ -170,6 +176,9 @@ static int 
gue_udp_recv(struct sock *sk, struct sk_buff *skb) __skb_pull(skb, sizeof(struct udphdr) + hdrlen); skb_reset_transport_header(skb); + if (iptunnel_pull_offloads(skb)) + goto drop; + return -guehdr->proto_ctype; drop: @@ -192,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, if (!ops || !ops->callbacks.gro_receive) goto out_unlock; - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); @@ -351,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) goto out_unlock; - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 5a8ee3282550880a7749b8d6a9086dc413661519..79ae0d7becbf522250f31e84674a515b5ed5a7d4 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, struct packet_offload *ptype; __be16 type; + if (NAPI_GRO_CB(skb)->encap_mark) + goto out; + + NAPI_GRO_CB(skb)->encap_mark = 1; + off = skb_gro_offset(skb); hlen = off + sizeof(*greh); greh = skb_gro_header_fast(skb, off); @@ -214,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ skb_gro_postpull_rcsum(skb, greh, grehlen); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index a403a676d452d050f4518cec52631aa6823800cf..fcb83b2a61f00681e1d918c0ec862c051d537e71 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -44,6 +44,8 @@ struct inet_diag_entry { u16 dport; u16 family; u16 userlocks; + u32 ifindex; + u32 mark; }; static 
DEFINE_MUTEX(inet_diag_table_mutex); @@ -96,6 +98,7 @@ static size_t inet_sk_attr_size(void) + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + nla_total_size(1) /* INET_DIAG_TOS */ + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(4) /* INET_DIAG_MARK */ + nla_total_size(sizeof(struct inet_diag_meminfo)) + nla_total_size(sizeof(struct inet_diag_msg)) + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) @@ -108,7 +111,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct sk_buff *skb, const struct inet_diag_req_v2 *req, struct user_namespace *user_ns, u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + const struct nlmsghdr *unlh, + bool net_admin) { const struct inet_sock *inet = inet_sk(sk); const struct tcp_congestion_ops *ca_ops; @@ -158,6 +162,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, } #endif + if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark)) + goto errout; + r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->idiag_inode = sock_i_ino(sk); @@ -256,10 +263,11 @@ static int inet_csk_diag_fill(struct sock *sk, const struct inet_diag_req_v2 *req, struct user_namespace *user_ns, u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + const struct nlmsghdr *unlh, + bool net_admin) { - return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, - user_ns, portid, seq, nlmsg_flags, unlh); + return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns, + portid, seq, nlmsg_flags, unlh, net_admin); } static int inet_twsk_diag_fill(struct sock *sk, @@ -301,8 +309,9 @@ static int inet_twsk_diag_fill(struct sock *sk, static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + const struct nlmsghdr *unlh, bool net_admin) { + struct request_sock *reqsk = inet_reqsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo; @@ -316,7 +325,7 @@ static int 
inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, inet_diag_msg_common_fill(r, sk); r->idiag_state = TCP_SYN_RECV; r->idiag_timer = 1; - r->idiag_retrans = inet_reqsk(sk)->num_retrans; + r->idiag_retrans = reqsk->num_retrans; BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) != offsetof(struct sock, sk_cookie)); @@ -328,6 +337,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, r->idiag_uid = 0; r->idiag_inode = 0; + if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, + inet_rsk(reqsk)->ir_mark)) + return -EMSGSIZE; + nlmsg_end(skb, nlh); return 0; } @@ -336,7 +349,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, const struct inet_diag_req_v2 *r, struct user_namespace *user_ns, u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + const struct nlmsghdr *unlh, bool net_admin) { if (sk->sk_state == TCP_TIME_WAIT) return inet_twsk_diag_fill(sk, skb, portid, seq, @@ -344,10 +357,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, if (sk->sk_state == TCP_NEW_SYN_RECV) return inet_req_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh); + nlmsg_flags, unlh, net_admin); return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, - nlmsg_flags, unlh); + nlmsg_flags, unlh, net_admin); } struct sock *inet_diag_find_one_icsk(struct net *net, @@ -414,7 +427,8 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk), NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh); + nlh->nlmsg_seq, 0, nlh, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); if (err < 0) { WARN_ON(err == -EMSGSIZE); nlmsg_free(rep); @@ -552,6 +566,22 @@ static int inet_diag_bc_run(const struct nlattr *_bc, yes = 0; break; } + case INET_DIAG_BC_DEV_COND: { + u32 ifindex; + + ifindex = *((const u32 *)(op + 1)); + if (ifindex != entry->ifindex) + yes = 0; + break; + } + case INET_DIAG_BC_MARK_COND: { + struct inet_diag_markcond *cond; + + cond = (struct 
inet_diag_markcond *)(op + 1); + if ((entry->mark & cond->mask) != cond->mark) + yes = 0; + break; + } } if (yes) { @@ -594,7 +624,14 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) entry_fill_addrs(&entry, sk); entry.sport = inet->inet_num; entry.dport = ntohs(inet->inet_dport); + entry.ifindex = sk->sk_bound_dev_if; entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; + if (sk_fullsock(sk)) + entry.mark = sk->sk_mark; + else if (sk->sk_state == TCP_NEW_SYN_RECV) + entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark; + else + entry.mark = 0; return inet_diag_bc_run(bc, &entry); } @@ -617,6 +654,17 @@ static int valid_cc(const void *bc, int len, int cc) return 0; } +/* data is u32 ifindex */ +static bool valid_devcond(const struct inet_diag_bc_op *op, int len, + int *min_len) +{ + /* Check ifindex space. */ + *min_len += sizeof(u32); + if (len < *min_len) + return false; + + return true; +} /* Validate an inet_diag_hostcond. */ static bool valid_hostcond(const struct inet_diag_bc_op *op, int len, int *min_len) @@ -666,10 +714,25 @@ static bool valid_port_comparison(const struct inet_diag_bc_op *op, return true; } -static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) +static bool valid_markcond(const struct inet_diag_bc_op *op, int len, + int *min_len) +{ + *min_len += sizeof(struct inet_diag_markcond); + return len >= *min_len; +} + +static int inet_diag_bc_audit(const struct nlattr *attr, + const struct sk_buff *skb) { - const void *bc = bytecode; - int len = bytecode_len; + bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN); + const void *bytecode, *bc; + int bytecode_len, len; + + if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op)) + return -EINVAL; + + bytecode = bc = nla_data(attr); + len = bytecode_len = nla_len(attr); while (len > 0) { int min_len = sizeof(struct inet_diag_bc_op); @@ -681,6 +744,10 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) if (!valid_hostcond(bc, len, &min_len)) 
return -EINVAL; break; + case INET_DIAG_BC_DEV_COND: + if (!valid_devcond(bc, len, &min_len)) + return -EINVAL; + break; case INET_DIAG_BC_S_GE: case INET_DIAG_BC_S_LE: case INET_DIAG_BC_D_GE: @@ -688,6 +755,12 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) if (!valid_port_comparison(bc, len, &min_len)) return -EINVAL; break; + case INET_DIAG_BC_MARK_COND: + if (!net_admin) + return -EPERM; + if (!valid_markcond(bc, len, &min_len)) + return -EINVAL; + break; case INET_DIAG_BC_AUTO: case INET_DIAG_BC_JMP: case INET_DIAG_BC_NOP: @@ -716,7 +789,8 @@ static int inet_csk_diag_dump(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, - const struct nlattr *bc) + const struct nlattr *bc, + bool net_admin) { if (!inet_diag_bc_sk(bc, sk)) return 0; @@ -724,7 +798,8 @@ static int inet_csk_diag_dump(struct sock *sk, return inet_csk_diag_fill(sk, skb, r, sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); + cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, + net_admin); } static void twsk_build_assert(void) @@ -760,6 +835,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, struct net *net = sock_net(skb->sk); int i, num, s_i, s_num; u32 idiag_states = r->idiag_states; + bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); if (idiag_states & TCPF_SYN_RECV) idiag_states |= TCPF_NEW_SYN_RECV; @@ -801,7 +877,8 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, cb->args[3] > 0) goto next_listen; - if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) { + if (inet_csk_diag_dump(sk, skb, cb, r, + bc, net_admin) < 0) { spin_unlock_bh(&ilb->lock); goto done; } @@ -869,7 +946,7 @@ skip_listen_ht: sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh); + cb->nlh, net_admin); if (res < 0) { spin_unlock_bh(lock); goto done; @@ -976,13 +1053,13 @@ static 
int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) if (nlh->nlmsg_flags & NLM_F_DUMP) { if (nlmsg_attrlen(nlh, hdrlen)) { struct nlattr *attr; + int err; attr = nlmsg_find_attr(nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - if (!attr || - nla_len(attr) < sizeof(struct inet_diag_bc_op) || - inet_diag_bc_audit(nla_data(attr), nla_len(attr))) - return -EINVAL; + err = inet_diag_bc_audit(attr, skb); + if (err) + return err; } { struct netlink_dump_control c = { @@ -1007,13 +1084,13 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) h->nlmsg_flags & NLM_F_DUMP) { if (nlmsg_attrlen(h, hdrlen)) { struct nlattr *attr; + int err; attr = nlmsg_find_attr(h, hdrlen, INET_DIAG_REQ_BYTECODE); - if (!attr || - nla_len(attr) < sizeof(struct inet_diag_bc_op) || - inet_diag_bc_audit(nla_data(attr), nla_len(attr))) - return -EINVAL; + err = inet_diag_bc_audit(attr, skb); + if (err) + return err; } { struct netlink_dump_control c = { diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index dbf7f7ee2958a0b2bb01d83eddab0cf919012074..661bda968594acee601a85096f87f6b4fcff8768 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -102,6 +102,9 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) iph->tot_len = htons(skb->len); ip_send_check(iph); + + skb->protocol = htons(ETH_P_IP); + return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index a50124260f5a4aaa98a3e4a582dbcbdbc236e370..9ce202549e7a1d7de0dbc02f45aef0ed901c4e90 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) } static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, - int offset) + int tlen, int offset) { __wsum csum = skb->csum; @@ -106,7 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff 
*skb, return; if (offset != 0) - csum = csum_sub(csum, csum_partial(skb->data, offset, 0)); + csum = csum_sub(csum, + csum_partial(skb->data + tlen, + offset, 0)); put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); } @@ -152,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) } void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, - int offset) + int tlen, int offset) { struct inet_sock *inet = inet_sk(skb->sk); unsigned int flags = inet->cmsg_flags; @@ -215,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, } if (flags & IP_CMSG_CHECKSUM) - ip_cmsg_recv_checksum(msg, skb, offset); + ip_cmsg_recv_checksum(msg, skb, tlen, offset); } EXPORT_SYMBOL(ip_cmsg_recv_offset); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 6cb9009c3d96785e566a3cb2cd065860e05980e1..dbda0565781cfe8ae1486994b3fe0795d736f71e 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -116,7 +116,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) skb->vlan_tci = 0; skb_set_queue_mapping(skb, 0); skb->pkt_type = PACKET_HOST; - return 0; + + return iptunnel_pull_offloads(skb); } EXPORT_SYMBOL_GPL(iptunnel_pull_header); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 4d8f0b6987777ab6c096fadbce538c53cbcad24b..65036891e080e56434f914b27a3f7c03885a2666 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -540,6 +540,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { .get_link_net = ip_tunnel_get_link_net, }; +static bool is_vti_tunnel(const struct net_device *dev) +{ + return dev->netdev_ops == &vti_netdev_ops; +} + +static int vti_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ip_tunnel *tunnel = netdev_priv(dev); + + if (!is_vti_tunnel(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_DOWN: + if 
(!net_eq(tunnel->net, dev_net(dev))) + xfrm_garbage_collect(tunnel->net); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block vti_notifier_block __read_mostly = { + .notifier_call = vti_device_event, +}; + static int __init vti_init(void) { const char *msg; @@ -547,6 +574,8 @@ static int __init vti_init(void) pr_info("IPv4 over IPsec tunneling driver\n"); + register_netdevice_notifier(&vti_notifier_block); + msg = "tunnel device"; err = register_pernet_device(&vti_net_ops); if (err < 0) @@ -579,6 +608,7 @@ xfrm_proto_ah_failed: xfrm_proto_esp_failed: unregister_pernet_device(&vti_net_ops); pernet_dev_failed: + unregister_netdevice_notifier(&vti_notifier_block); pr_err("vti init: failed to register %s\n", msg); return err; } @@ -590,6 +620,7 @@ static void __exit vti_fini(void) xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); unregister_pernet_device(&vti_net_ops); + unregister_netdevice_notifier(&vti_notifier_block); } module_init(vti_init); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d1e555496e35fa878cb61491336391b93c5183f..8e77786549c614a8aa0d979dbab37cdb84d4cea8 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2192,7 +2192,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait) + struct rtmsg *rtm, int nowait, u32 portid) { struct mfc_cache *cache; struct mr_table *mrt; @@ -2237,6 +2237,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, return -ENOMEM; } + NETLINK_CB(skb2).portid = portid; skb_push(skb2, sizeof(struct iphdr)); skb_reset_network_header(skb2); iph = ip_hdr(skb2); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 76c86e7d5e1064c8c488a25e3d7df975efa69045..0f443831e85112908361875e65642d7bb18d85d3 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -660,6 +660,10 @@ int ping_common_sendmsg(int 
family, struct msghdr *msg, size_t len, if (len > 0xFFFF || len < icmph_len) return -EMSGSIZE; + /* Must have at least a full ICMP header. */ + if (len < icmph_len) + return -EINVAL; + /* * Check the flags. */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fb54659320d86af6a08355ebe2783ec4be3c97fe..f75b5658a3a0534d26d882db1fd86fff26d62070 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -749,7 +749,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow goto reject_redirect; } - n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); + n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); + if (!n) + n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); if (!IS_ERR(n)) { if (!(n->nud_state & NUD_VALID)) { neigh_event_send(n, NULL); @@ -2499,7 +2501,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { int err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, - r, nowait); + r, nowait, portid); + if (err <= 0) { if (!nowait) { if (err == 0) diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index a839dcbc0fd8143bec88ca6bd0b64fbf6ba29428..8233e27679f2603bc754efd937c6510a4129d869 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -101,11 +101,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low container_of(table->data, struct net, ipv4.ping_group_range.range); unsigned int seq; do { - seq = read_seqbegin(&net->ipv4.ip_local_ports.lock); + seq = read_seqbegin(&net->ipv4.ping_group_range.lock); *low = data[0]; *high = data[1]; - } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq)); + } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq)); } /* Update system visible IP port range */ @@ -114,10 +114,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig kgid_t *data = table->data; struct net *net = container_of(table->data, struct net, 
ipv4.ping_group_range.range); - write_seqlock(&net->ipv4.ip_local_ports.lock); + write_seqlock(&net->ipv4.ping_group_range.lock); data[0] = low; data[1] = high; - write_sequnlock(&net->ipv4.ip_local_ports.lock); + write_sequnlock(&net->ipv4.ping_group_range.lock); } /* Validate changes from /proc interface. */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d5bd460a13f1641c896d35a281043427d9088ab6..676566faf649ad0f16eb2962fdb66930492bafbd 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1218,7 +1218,7 @@ new_segment: if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { - if (i == sysctl_max_skb_frags || !sg) { + if (i >= sysctl_max_skb_frags || !sg) { tcp_mark_push(tp, skb); goto new_segment; } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 7e538f71f5fbae087c3e3e4367d60e08cd609ac5..55d7da1d2ce9912ad9efc654206a402913d8b831 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -56,6 +56,7 @@ struct dctcp { u32 next_seq; u32 ce_state; u32 delayed_ack_reserved; + u32 loss_cwnd; }; static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ @@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk) ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); ca->delayed_ack_reserved = 0; + ca->loss_cwnd = 0; ca->ce_state = 0; dctcp_reset(tp, ca); @@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk) static u32 dctcp_ssthresh(struct sock *sk) { - const struct dctcp *ca = inet_csk_ca(sk); + struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); + ca->loss_cwnd = tp->snd_cwnd; return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); } @@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, return 0; } +static u32 dctcp_cwnd_undo(struct sock *sk) +{ + const struct dctcp *ca = inet_csk_ca(sk); + + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); +} + static struct tcp_congestion_ops dctcp __read_mostly = { .init = dctcp_init, .in_ack_event = 
dctcp_update_alpha, .cwnd_event = dctcp_cwnd_event, .ssthresh = dctcp_ssthresh, .cong_avoid = tcp_reno_cong_avoid, + .undo_cwnd = dctcp_cwnd_undo, .set_state = dctcp_state, .get_info = dctcp_get_info, .flags = TCP_CONG_NEEDS_ECN, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5f0b6b0f0249dd994e9e9f50d4c68e28cdfb0314..e7e227f6760f9e247d4e84a0c415fd4d7c469a74 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2325,10 +2325,9 @@ static void DBGUNDO(struct sock *sk, const char *msg) } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { - struct ipv6_pinfo *np = inet6_sk(sk); pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, - &np->daddr, ntohs(inet->inet_dport), + &sk->sk_v6_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e3b0b55f2c928555a14312b460664db609a06032..b49a262b06f93623479fc8d138142d6af7adafb3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -808,8 +808,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq = (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt; + /* RFC 7323 2.3 + * The window field (SEG.WND) of every outgoing segment, with the + * exception of segments, MUST be right-shifted by + * Rcv.Wind.Shift bits: + */ tcp_v4_send_ack(sock_net(sk), skb, seq, - tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, + tcp_rsk(req)->rcv_nxt, + req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp, req->ts_recent, 0, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9eb81a4b0da20b55229d488f5e4e8a69e7f282d2..ca3731721d81dac1b73ef94aee51ac2b4e3834e4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1950,12 +1950,14 @@ static int tcp_mtu_probe(struct sock *sk) len = 0; tcp_for_write_queue_from_safe(skb, next, sk) { copy = min_t(int, skb->len, probe_size - len); - if (nskb->ip_summed) + if (nskb->ip_summed) { skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); - else - nskb->csum = skb_copy_and_csum_bits(skb, 0, - skb_put(nskb, copy), - copy, nskb->csum); + } else { + __wsum csum = skb_copy_and_csum_bits(skb, 0, + skb_put(nskb, copy), + copy, 0); + nskb->csum = csum_block_add(nskb->csum, csum, len); + } if (skb->len <= copy) { /* We've eaten all the data from this skb. @@ -2569,7 +2571,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) * copying overhead: fragmentation, tunneling, mangling etc. 
*/ if (atomic_read(&sk->sk_wmem_alloc) > - min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) + min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), + sk->sk_sndbuf)) return -EAGAIN; if (skb_still_in_host_queue(sk, skb)) diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 3e6a472e6b8831439d3f745b8efd43096612c39d..92ab5bc9159219d00914846460825bce9bd1743d 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -75,7 +75,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) if (!tcp_is_cwnd_limited(sk)) return; - if (tp->snd_cwnd <= tp->snd_ssthresh) + if (tcp_in_slow_start(tp)) tcp_slow_start(tp, acked); else if (!yeah->doing_reno_now) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6d98857faca06dfeb4e01d83971b8e81b615c870..7e37ad9e1a8047b782484fab758ab896d425f338 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1348,7 +1348,7 @@ try_again: *addr_len = sizeof(*sin); } if (inet->cmsg_flags) - ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr)); + ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off); err = copied; if (flags & MSG_TRUNC) @@ -2270,6 +2270,20 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) } EXPORT_SYMBOL(udp_poll); +int udp_abort(struct sock *sk, int err) +{ + lock_sock(sk); + + sk->sk_err = err; + sk->sk_error_report(sk); + udp_disconnect(sk, 0); + + release_sock(sk); + + return 0; +} +EXPORT_SYMBOL_GPL(udp_abort); + struct proto udp_prot = { .name = "UDP", .owner = THIS_MODULE, @@ -2301,6 +2315,7 @@ struct proto udp_prot = { .compat_getsockopt = compat_udp_getsockopt, #endif .clear_sk = sk_prot_clear_portaddr_nulls, + .diag_destroy = udp_abort, }; EXPORT_SYMBOL(udp_prot); diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 6116604bf6e8fd64d82b5d9496197cb7e4accef7..092aa60e8b92898a8d3612c447123a64604ac7aa 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -20,7 +20,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff 
*skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *req, - struct nlattr *bc) + struct nlattr *bc, bool net_admin) { if (!inet_diag_bc_sk(bc, sk)) return 0; @@ -28,7 +28,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, return inet_sk_diag_fill(sk, NULL, skb, req, sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); + cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin); } static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, @@ -75,7 +75,8 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, err = inet_sk_diag_fill(sk, NULL, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk), NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh); + nlh->nlmsg_seq, 0, nlh, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(rep); @@ -98,6 +99,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, { int num, s_num, slot, s_slot; struct net *net = sock_net(skb->sk); + bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); s_slot = cb->args[0]; num = s_num = cb->args[1]; @@ -132,7 +134,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, r->id.idiag_dport) goto next; - if (sk_diag_dump(sk, skb, cb, r, bc) < 0) { + if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) { spin_unlock_bh(&hslot->lock); goto done; } @@ -165,12 +167,88 @@ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, r->idiag_wqueue = sk_wmem_alloc_get(sk); } +#ifdef CONFIG_INET_DIAG_DESTROY +static int __udp_diag_destroy(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req, + struct udp_table *tbl) +{ + struct net *net = sock_net(in_skb->sk); + struct sock *sk; + int err; + + rcu_read_lock(); + + if (req->sdiag_family == AF_INET) + sk = __udp4_lib_lookup(net, + req->id.idiag_dst[0], req->id.idiag_dport, + req->id.idiag_src[0], req->id.idiag_sport, + req->id.idiag_if, tbl); +#if 
IS_ENABLED(CONFIG_IPV6) + else if (req->sdiag_family == AF_INET6) { + if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) && + ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src)) + sk = __udp4_lib_lookup(net, + req->id.idiag_dst[3], req->id.idiag_dport, + req->id.idiag_src[3], req->id.idiag_sport, + req->id.idiag_if, tbl); + + else + sk = __udp6_lib_lookup(net, + (struct in6_addr *)req->id.idiag_dst, + req->id.idiag_dport, + (struct in6_addr *)req->id.idiag_src, + req->id.idiag_sport, + req->id.idiag_if, tbl); + } +#endif + else { + rcu_read_unlock(); + return -EINVAL; + } + + if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) + sk = NULL; + + rcu_read_unlock(); + + if (!sk) + return -ENOENT; + + if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) { + sock_put(sk); + return -ENOENT; + } + + err = sock_diag_destroy(sk, ECONNABORTED); + + sock_put(sk); + + return err; +} + +static int udp_diag_destroy(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req) +{ + return __udp_diag_destroy(in_skb, req, &udp_table); +} + +static int udplite_diag_destroy(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req) +{ + return __udp_diag_destroy(in_skb, req, &udplite_table); +} + +#endif + static const struct inet_diag_handler udp_diag_handler = { .dump = udp_diag_dump, .dump_one = udp_diag_dump_one, .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDP, .idiag_info_size = 0, +#ifdef CONFIG_INET_DIAG_DESTROY + .destroy = udp_diag_destroy, +#endif }; static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, @@ -192,6 +270,9 @@ static const struct inet_diag_handler udplite_diag_handler = { .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDPLITE, .idiag_info_size = 0, +#ifdef CONFIG_INET_DIAG_DESTROY + .destroy = udplite_diag_destroy, +#endif }; static int __init udp_diag_init(void) diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 
f9386160cbee0288e294ea2cd8ba3b5be65cdbf6..6396f1c80ae9ef36469acf69c4ba496ffd85dec8 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -299,14 +299,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, unsigned int off = skb_gro_offset(skb); int flush = 1; - if (NAPI_GRO_CB(skb)->udp_mark || + if (NAPI_GRO_CB(skb)->encap_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; - /* mark that this skb passed once through the udp gro layer */ - NAPI_GRO_CB(skb)->udp_mark = 1; + /* mark that this skb passed once through the tunnel gro layer */ + NAPI_GRO_CB(skb)->encap_mark = 1; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); @@ -339,8 +339,8 @@ unflush: skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; - pp = uo_priv->offload->callbacks.gro_receive(head, skb, - uo_priv->offload); + pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive, + head, skb, uo_priv->offload); out_unlock: rcu_read_unlock(); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index bb9a2e90948ed6acd1e0ad8c96320adcfc3bac96..3ce9df8f86c830627ef3f5aaf7fddf38681e2383 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1902,6 +1902,7 @@ errdad: spin_unlock_bh(&ifp->lock); addrconf_mod_dad_work(ifp, 0); + in6_ifa_put(ifp); } /* Join to solicited addr multicast group. @@ -2957,7 +2958,7 @@ static void init_loopback(struct net_device *dev) * lo device down, release this obsolete dst and * reallocate a new router for ifa. 
*/ - if (sp_ifa->rt->dst.obsolete > 0) { + if (!atomic_read(&sp_ifa->rt->rt6i_ref)) { ip6_rt_put(sp_ifa->rt); sp_ifa->rt = NULL; } else { @@ -3653,6 +3654,7 @@ static void addrconf_dad_work(struct work_struct *w) addrconf_dad_begin(ifp); goto out; } else if (action == DAD_ABORT) { + in6_ifa_hold(ifp); addrconf_dad_stop(ifp, 1); goto out; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index f921368c32c99b79aec721eb85100b1cc97a4799..17df6bbebcffbecc1e8d883997c33766f6f30e33 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) esph = (void *)skb_push(skb, 4); *seqhi = esph->spi; esph->spi = esph->seq_no; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; aead_request_set_callback(req, 0, esp_input_done_esn, skb); } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4650c6824783ea29f687780aa61669a82e889858..17430f3410737fc1ae3fd0bd3375d18017454772 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -886,7 +886,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = skb->protocol; err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index f542438c70ce5b64529ae832d4da79b9a4a5775c..88f3d459bcfd9ca22b881c623888ed0dcbf77bab 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -244,7 +244,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, skb_gro_postpull_rcsum(skb, iph, nlen); - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); @@ -255,6 +255,19 @@ out: return pp; } +static struct sk_buff **sit_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->encap_mark) { + NAPI_GRO_CB(skb)->flush = 
1; + return NULL; + } + + NAPI_GRO_CB(skb)->encap_mark = 1; + + return ipv6_gro_receive(head, skb); +} + static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; @@ -299,7 +312,7 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { static const struct net_offload sit_offload = { .callbacks = { .gso_segment = ipv6_gso_segment, - .gro_receive = ipv6_gro_receive, + .gro_receive = sit_gro_receive, .gro_complete = sit_gro_complete, }, }; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 3991b21e24ad359e64c415e4108c485cf31a49cc..2994d1f1a6616070acb17598ebdf7e129634b216 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -246,6 +246,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ hash = HASH(&any, local); for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { if (ipv6_addr_equal(local, &t->parms.laddr) && + ipv6_addr_any(&t->parms.raddr) && (t->dev->flags & IFF_UP)) return t; } @@ -253,6 +254,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ hash = HASH(remote, &any); for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { if (ipv6_addr_equal(remote, &t->parms.raddr) && + ipv6_addr_any(&t->parms.laddr) && (t->dev->flags & IFF_UP)) return t; } @@ -1041,6 +1043,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, struct ipv6_tel_txoption opt; struct dst_entry *dst = NULL, *ndst = NULL; struct net_device *tdev; + bool use_cache = false; int mtu; unsigned int max_headroom = sizeof(struct ipv6hdr); u8 proto; @@ -1068,7 +1071,15 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); neigh_release(neigh); - } else if (!fl6->flowi6_mark) + } else if (!(t->parms.flags & + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { + /* enable the cache only only if the routing decision does + * not depend on the current inner header value + */ + use_cache = true; + } + + if (use_cache) dst = 
ip6_tnl_dst_get(t); if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) @@ -1132,7 +1143,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, skb = new_skb; } - if (!fl6->flowi6_mark && ndst) + if (use_cache && ndst) ip6_tnl_dst_set(t, ndst); skb_dst_set(skb, dst); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index e207cb2468dab0799422b2fc4620d729ed09a44b..d9843e5a667fe76c62eea32cfeae892b1a2663ef 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2276,8 +2276,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, return 1; } -int ip6mr_get_route(struct net *net, - struct sk_buff *skb, struct rtmsg *rtm, int nowait) +int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, + int nowait, u32 portid) { int err; struct mr6_table *mrt; @@ -2322,6 +2322,7 @@ int ip6mr_get_route(struct net *net, return -ENOMEM; } + NETLINK_CB(skb2).portid = portid; skb_reset_transport_header(skb2); skb_put(skb2, sizeof(struct ipv6hdr)); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 462f2a76b5c2270dba806e193a4c5bde57e14a39..1d184322a7b1eec00b1b3345b774c1fb363b9f9f 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -148,6 +148,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) ipv6_hdr(skb)->payload_len = htons(len); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); + skb->protocol = htons(ETH_P_IPV6); + return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 9411c8d770a53cf6cf367a567228b7c8e832879e..fa65e92e9510b3369bddf874f0d1a9a0681e36a2 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -152,8 +152,10 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rt = (struct rt6_info *) dst; np = inet6_sk(sk); - if (!np) - return -EBADF; + if (!np) { + err = -EBADF; + goto dst_err_out; + } pfh.icmph.type = user_icmph.icmp6_type; pfh.icmph.code = 
user_icmph.icmp6_code; @@ -183,6 +185,9 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } release_sock(sk); +dst_err_out: + dst_release(dst); + if (err) return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 161cdc072547820cfffb65ac373c56f86db2df34..fe9e22ac065a10490b6600066fb722316778fca2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3125,7 +3125,9 @@ static int rt6_fill_node(struct net *net, if (iif) { #ifdef CONFIG_IPV6_MROUTE if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { - int err = ip6mr_get_route(net, skb, rtm, nowait); + int err = ip6mr_get_route(net, skb, rtm, nowait, + portid); + if (err <= 0) { if (!nowait) { if (err == 0) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index ba3d2f3d66d2b6c1f4668e49b71cc42d67121654..3da2b16356eb64a9ff33fdc95809a768a81917f6 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -681,14 +681,15 @@ static int ipip6_rcv(struct sk_buff *skb) skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; - skb->protocol = htons(ETH_P_IPV6); + skb->dev = tunnel->dev; if (packet_is_spoofed(skb, iph, tunnel)) { tunnel->dev->stats.rx_errors++; goto out; } - __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); + if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6))) + goto out; err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8647afa77db4c8a063ba6987733615adcd8c3336..3bd3d19b45a0d5bd9cec2282442ce67382e07074 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -933,9 +933,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ + /* RFC 7323 2.3 + * The window field (SEG.WND) of every outgoing segment, with the + * exception of <SYN> segments, MUST be right-shifted by + * Rcv.Wind.Shift bits: + */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, - tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, + tcp_rsk(req)->rcv_nxt, + req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); @@ -1174,6 +1180,16 @@ out: return NULL; } +static void tcp_v6_restore_cb(struct sk_buff *skb) +{ + /* We need to move header back to the beginning if xfrm6_policy_check() + * and tcp_v6_fill_cb() are going to be called again. + * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. + */ + memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, + sizeof(struct inet6_skb_parm)); +} + /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * @@ -1303,6 +1319,7 @@ ipv6_pktoptions: np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { skb_set_owner_r(opt_skb, sk); + tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); @@ -1336,15 +1353,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, TCP_SKB_CB(skb)->sacked = 0; } -static void tcp_v6_restore_cb(struct sk_buff *skb) -{ - /* We need to move header back to the beginning if xfrm6_policy_check() - * and tcp_v6_fill_cb() are going to be called again. 
- */ - memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, - sizeof(struct inet6_skb_parm)); -} - static int tcp_v6_rcv(struct sk_buff *skb) { const struct tcphdr *th; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1a5b9322e7138d597aa88e9f365083c5b3fc68e0..1207379c1cce3a214afef4c2a39230360aa4bdc7 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -498,7 +498,8 @@ try_again: if (is_udp4) { if (inet->cmsg_flags) - ip_cmsg_recv(msg, skb); + ip_cmsg_recv_offset(msg, skb, + sizeof(struct udphdr), off); } else { if (np->rxopt.all) ip6_datagram_recv_specific_ctl(sk, msg, skb); @@ -1552,6 +1553,7 @@ struct proto udpv6_prot = { .compat_getsockopt = compat_udpv6_getsockopt, #endif .clear_sk = udp_v6_clear_sk, + .diag_destroy = udp_abort, }; static struct inet_protosw udpv6_protosw = { diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 4a7ae32afa09b90fab1d57bcb807ca2b8a1200c6..1138eaf5c6829ac08431a91c31fb3339dfe6213d 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, self->magic = IAS_MAGIC; self->mode = mode; - if (mode == IAS_CLIENT) - iriap_register_lsap(self, slsap_sel, mode); + if (mode == IAS_CLIENT) { + if (iriap_register_lsap(self, slsap_sel, mode)) { + kfree(self); + return NULL; + } + } self->confirm = callback; self->priv = priv; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 42de4ccd159f6f6853930afd44cea239e2011a54..d0e906d3964281d4cbcd4a9c2d97b25d6eb85b80 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) int ret; int chk_addr_ret; - if (!sock_flag(sk, SOCK_ZAPPED)) - return -EINVAL; if (addr_len < sizeof(struct sockaddr_l2tpip)) return -EINVAL; if (addr->l2tp_family != AF_INET) @@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) read_unlock_bh(&l2tp_ip_lock); lock_sock(sk); + if (!sock_flag(sk, SOCK_ZAPPED)) 
+ goto out; + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) goto out; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 9ee4ddb6b3973899995e6e3f5766f7e210f90370..3c4f867d3633144f38c2ddc5783dd0fac23832f8 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -266,8 +266,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) int addr_type; int err; - if (!sock_flag(sk, SOCK_ZAPPED)) - return -EINVAL; if (addr->l2tp_family != AF_INET6) return -EINVAL; if (addr_len < sizeof(*addr)) @@ -293,6 +291,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) lock_sock(sk); err = -EINVAL; + if (!sock_flag(sk, SOCK_ZAPPED)) + goto out_unlock; + if (sk->sk_state != TCP_CLOSE) goto out_unlock; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 803001a45aa16e6b5a372ab385dba8e9c09bd2f0..1b07578bedf336c53e3b6072c8c3324f7f18081b 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1545,7 +1545,8 @@ error: /* * Set up receiving multicast socket over UDP */ -static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id) +static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, + int ifindex) { /* multicast addr */ union ipvs_sockaddr mcast_addr; @@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id) set_sock_size(sock->sk, 0, result); get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); + sock->sk->sk_bound_dev_if = ifindex; result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); if (result < 0) { pr_err("Error binding to the multicast addr\n"); @@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, if (state == IP_VS_STATE_MASTER) sock = make_send_sock(ipvs, id); else - sock = make_receive_sock(ipvs, id); + sock = make_receive_sock(ipvs, id, dev->ifindex); if (IS_ERR(sock)) { result 
= PTR_ERR(sock); goto outtinfo; diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index a5d41dfa9f05d6363d0ea3253493a7539842df32..2c89f90cd7bc95037b6057d49efbe2e20ae12286 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -401,7 +401,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, size_t size = *lenp; int r = 0; int tindex = (unsigned long)table->extra1; - struct net *net = current->nsproxy->net_ns; + struct net *net = table->extra2; if (write) { if (size > sizeof(buf)) @@ -453,7 +453,6 @@ static int netfilter_log_sysctl_init(struct net *net) 3, "%d", i); nf_log_sysctl_table[i].procname = nf_log_sysctl_fnames[i]; - nf_log_sysctl_table[i].data = NULL; nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN; nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = @@ -463,6 +462,9 @@ static int netfilter_log_sysctl_init(struct net *net) } } + for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) + table[i].extra2 = net; + net->nf.nf_log_dir_header = register_net_sysctl(net, "net/netfilter/nf_log", table); diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 9dec3bd1b63cc01a784ec025f7cf97f42b38fd06..0a5df0cbaa286ad393b8a4b75242a3d5158e4721 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -140,7 +140,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) return -EINVAL; - timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])); + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( + tb[NFTA_DYNSET_TIMEOUT]))); } priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); @@ -227,7 +228,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) goto nla_put_failure; if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) goto nla_put_failure; - if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout))) + if (nla_put_be64(skb, 
NFTA_DYNSET_TIMEOUT, + cpu_to_be64(jiffies_to_msecs(priv->timeout)))) goto nla_put_failure; if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) goto nla_put_failure; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2b67ae1c53e49a115f812ff496bc4db87e8b7421..f4dd706c6cd1377f79ebb30c94833a83185d3404 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2554,7 +2554,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, /* Record the max length of recvmsg() calls for future allocations */ nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len); nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len, - 16384); + SKB_WITH_OVERHEAD(32768)); copied = data_skb->len; if (len < copied) { @@ -2807,14 +2807,13 @@ static int netlink_dump(struct sock *sk) if (alloc_min_size < nlk->max_recvmsg_len) { alloc_size = nlk->max_recvmsg_len; skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, - GFP_KERNEL | - __GFP_NOWARN | - __GFP_NORETRY); + (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) | + __GFP_NOWARN | __GFP_NORETRY); } if (!skb) { alloc_size = alloc_min_size; skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, - GFP_KERNEL); + (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM)); } if (!skb) goto errout_skb; diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 6acee01a419f543e7b40f8c74b9b43e6a04cea03..b0c1ddc97260394a9bba82a58f220d45b67bff6d 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -3,6 +3,7 @@ #include #include +#include #include #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) @@ -52,6 +53,7 @@ struct netlink_sock { #endif /* CONFIG_NETLINK_MMAP */ struct rhash_head node; + struct work_struct work; }; static inline struct netlink_sock *nlk_sk(struct sock *sk) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a86f26d05bc22e378ef96943f957724bbaca4140..f223d1c80ccf7f9251dce0f8f31f58c766576946 100644 --- a/net/packet/af_packet.c +++ 
b/net/packet/af_packet.c @@ -249,7 +249,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; - netdev_features_t features; + struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; @@ -257,9 +257,8 @@ static int packet_direct_xmit(struct sk_buff *skb) !netif_carrier_ok(dev))) goto drop; - features = netif_skb_features(skb); - if (skb_needs_linearize(skb, features) && - __skb_linearize(skb)) + skb = validate_xmit_skb_list(skb, dev); + if (skb != orig_skb) goto drop; txq = skb_get_tx_queue(dev, skb); @@ -279,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb) return ret; drop: atomic_long_inc(&dev->tx_dropped); - kfree_skb(skb); + kfree_skb_list(skb); return NET_XMIT_DROP; } @@ -3573,19 +3572,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: - po->tp_version = val; - return 0; + break; default: return -EINVAL; } + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_version = val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_RESERVE: { @@ -3855,6 +3860,7 @@ static int packet_notifier(struct notifier_block *this, } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); + fanout_release(sk); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); @@ -4067,6 +4073,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; + lock_sock(sk); /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { WARN(1, "Tx-ring is not 
supported.\n"); @@ -4148,7 +4155,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; } - lock_sock(sk); /* Detach socket from network */ spin_lock(&po->bind_lock); @@ -4197,11 +4203,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); } - release_sock(sk); if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: + release_sock(sk); return err; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index e38a7701f154c97db2070b1e0b8b54fabdb8b0f3..c3434e9024450e5cff700ae7b48d104360e535ce 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -104,6 +104,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind) kfree(keys); } +static bool offset_valid(struct sk_buff *skb, int offset) +{ + if (offset > 0 && offset > skb->len) + return false; + + if (offset < 0 && -offset > skb_headroom(skb)) + return false; + + return true; +} + static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { @@ -130,6 +141,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, if (tkey->offmask) { char *d, _d; + if (!offset_valid(skb, off + tkey->at)) { + pr_info("tc filter pedit 'at' offset %d out of bounds\n", + off + tkey->at); + goto bad; + } d = skb_header_pointer(skb, off + tkey->at, 1, &_d); if (!d) @@ -142,10 +158,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, " offset must be on 32 bit boundaries\n"); goto bad; } - if (offset > 0 && offset > skb->len) { - pr_info("tc filter pedit" - " offset %d can't exceed pkt length %d\n", - offset, skb->len); + + if (!offset_valid(skb, off + offset)) { + pr_info("tc filter pedit offset %d out of bounds\n", + offset); goto bad; } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 796785e0bf96b0e65f598d3b2dad8256485d034a..d7edba4536bd22258cdcb302ce1c863a6082c890 100644 --- a/net/sched/act_vlan.c +++ 
b/net/sched/act_vlan.c @@ -33,6 +33,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, bstats_update(&v->tcf_bstats, skb); action = v->tcf_action; + /* Ensure 'data' points at mac_header prior calling vlan manipulating + * functions. + */ + if (skb_at_tc_ingress(skb)) + skb_push_rcsum(skb, skb->mac_len); + switch (v->tcfv_action) { case TCA_VLAN_ACT_POP: err = skb_vlan_pop(skb); @@ -54,6 +60,9 @@ drop: action = TC_ACT_SHOT; v->tcf_qstats.drops++; unlock: + if (skb_at_tc_ingress(skb)) + skb_pull_rcsum(skb, skb->mac_len); + spin_unlock(&v->tcf_lock); return action; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a75864d93142153bfff4ab765620e10bcfab3e96..ecc1904e454f052d58a1d121c35a3a07aa1c6a48 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -315,7 +315,8 @@ replay: if (err == 0) { struct tcf_proto *next = rtnl_dereference(tp->next); - tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); + tfilter_notify(net, skb, n, tp, + t->tcm_handle, RTM_DELTFILTER); if (tcf_destroy(tp, false)) RCU_INIT_POINTER(*back, next); } diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 0b8c3ace671f1fff47cf2a12f7e6428bb5704b9f..1bf1f4517db623126952646fba50633929b300c4 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle) struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; - if (head == NULL) - return 0UL; - list_for_each_entry(f, &head->flist, link) { if (f->handle == handle) { l = (unsigned long) f; @@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force) tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, basic_delete_filter); } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 5faaa5425f7b72293fdd80e1b27405fcf479e207..3eef0215e53f35764ff02f5f6052f71d5972dde8 100644 --- a/net/sched/cls_bpf.c +++ 
b/net/sched/cls_bpf.c @@ -199,7 +199,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force) call_rcu(&prog->rcu, __cls_bpf_delete_prog); } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } @@ -210,9 +209,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) struct cls_bpf_prog *prog; unsigned long ret = 0UL; - if (head == NULL) - return 0UL; - list_for_each_entry(prog, &head->plist, link) { if (prog->handle == handle) { ret = (unsigned long) prog; diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 4c85bd3a750cbb02c743779f28cbde6ceacb5ecf..c104c2019feb6245154a9149d4f685b1ab23cd18 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -130,11 +130,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force) if (!force) return false; - - if (head) { - RCU_INIT_POINTER(tp->root, NULL); + /* Head can still be NULL due to cls_cgroup_init(). */ + if (head) call_rcu(&head->rcu, cls_cgroup_destroy_rcu); - } + return true; } diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index fbfec6a188390007fd30cf0a351e74f5b2d77057..d7ba2b4ff0f3ac1ef6011804c58e35a41d77ff4d 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -583,7 +583,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force) list_del_rcu(&f->list); call_rcu(&f->rcu, flow_destroy_filter); } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 95b021243233bd46a9ef836b07b7c4671faa9e15..e5a58c82728a485b9fb5abb307915a8f026d13c9 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -55,7 +56,10 @@ struct cls_fl_head { bool mask_assigned; struct list_head filters; struct rhashtable_params ht_params; - struct rcu_head rcu; + union { + struct work_struct work; + struct rcu_head rcu; + }; }; struct cls_fl_filter { @@ -165,6 +169,24 @@ static 
void fl_destroy_filter(struct rcu_head *head) kfree(f); } +static void fl_destroy_sleepable(struct work_struct *work) +{ + struct cls_fl_head *head = container_of(work, struct cls_fl_head, + work); + if (head->mask_assigned) + rhashtable_destroy(&head->ht); + kfree(head); + module_put(THIS_MODULE); +} + +static void fl_destroy_rcu(struct rcu_head *rcu) +{ + struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu); + + INIT_WORK(&head->work, fl_destroy_sleepable); + schedule_work(&head->work); +} + static bool fl_destroy(struct tcf_proto *tp, bool force) { struct cls_fl_head *head = rtnl_dereference(tp->root); @@ -177,10 +199,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force) list_del_rcu(&f->list); call_rcu(&f->rcu, fl_destroy_filter); } - RCU_INIT_POINTER(tp->root, NULL); - if (head->mask_assigned) - rhashtable_destroy(&head->ht); - kfree_rcu(head, rcu); + + __module_get(THIS_MODULE); + call_rcu(&head->rcu, fl_destroy_rcu); return true; } diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index f9c9fc075fe65402c531ada58be9e4b916d282cd..9992dfac693886bf5502f3aac8a549d09fc2bdd3 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; nhptr = ip_hdr(skb); #endif - + if (unlikely(!head)) + return -1; restart: #if RSVP_DST_LEN == 4 diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 944c8ff450558ca1d5475a5581410b2d5b69e3be..403746b202637f35ac1cfca5026e703ce194bcf2 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -503,7 +503,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force) walker.fn = tcindex_destroy_element; tcindex_walk(tp, &walker); - RCU_INIT_POINTER(tp->root, NULL); call_rcu(&p->rcu, __tcindex_destroy); return true; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 22c2bf367d7e8c7025065f33eabfd7e93a7f4021..29c7c43de1086e79e1888bd9398f9b24d2b634a8 100644 
--- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3426,6 +3426,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); + /* Report violation if chunk len overflows */ + ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); + if (ch_end > skb_tail_pointer(skb)) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + /* Now that we know we at least have a chunk header, * do things that are type appropriate. */ @@ -3457,12 +3463,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, } } - /* Report violation if chunk len overflows */ - ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); - if (ch_end > skb_tail_pointer(skb)) - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, - commands); - ch = (sctp_chunkhdr_t *) ch_end; } while (ch_end < skb_tail_pointer(skb)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index be1489fc3234bd2ed0281d50f7e11a2a205d38d1..b5fd4ab56156b09c33ec33b72125c34e221cac54 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1212,9 +1212,12 @@ static int __sctp_connect(struct sock *sk, timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); - err = sctp_wait_for_connect(asoc, &timeo); - if ((err == 0 || err == -EINPROGRESS) && assoc_id) + if (assoc_id) *assoc_id = asoc->assoc_id; + err = sctp_wait_for_connect(asoc, &timeo); + /* Note: the asoc may be freed after the return of + * sctp_wait_for_connect. + */ /* Don't free association on exit. 
*/ asoc = NULL; @@ -4371,7 +4374,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { - if (len <= 0) + if (len == 0) return -EINVAL; if (len > sizeof(struct sctp_event_subscribe)) len = sizeof(struct sctp_event_subscribe); @@ -5972,6 +5975,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; + if (len < 0) + return -EINVAL; + lock_sock(sk); switch (optname) { diff --git a/net/socket.c b/net/socket.c index 1cdfe02104a640fd832423d07181e236e2bcbdba..9016cadf8b144bb273e7a13d361e170cb36e773c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2087,6 +2087,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, if (err) break; ++datagrams; + if (msg_data_left(&msg_sys)) + break; } fput_light(sock->file, fput_needed); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index cc9852897395c6f6db42653c773788446a9c5d3e..c5b0cb4f4056c4da0a0adc556cf46bebb48d2e7a 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1188,11 +1188,17 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); /* Encode reply */ - if (test_bit(RQ_DROPME, &rqstp->rq_flags)) { + if (*statp == rpc_drop_reply || + test_bit(RQ_DROPME, &rqstp->rq_flags)) { if (procp->pc_release) procp->pc_release(rqstp, NULL, rqstp->rq_resp); goto dropit; } + if (*statp == rpc_autherr_badcred) { + if (procp->pc_release) + procp->pc_release(rqstp, NULL, rqstp->rq_resp); + goto err_bad_auth; + } if (*statp == rpc_success && (xdr = procp->pc_encode) && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1ba4172074659fb0ef86c214464c5a6e4b4e60ff..27b6f55fa43a87fe98fddb946aeac015ecd1f6ab 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -474,7 
+474,16 @@ static int xs_nospace(struct rpc_task *task) spin_unlock_bh(&xprt->transport_lock); /* Race breaker in case memory is freed before above code is called */ - sk->sk_write_space(sk); + if (ret == -EAGAIN) { + struct socket_wq *wq; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); + rcu_read_unlock(); + + sk->sk_write_space(sk); + } return ret; } diff --git a/net/tipc/link.c b/net/tipc/link.c index 91aea071ab27fee550e3c88ecc4097adac399d83..72268eac4ec7237787f53f2dadf886325f8e5cf6 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1262,6 +1262,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, /* fall thru' */ case ACTIVATE_MSG: + skb_linearize(skb); + hdr = buf_msg(skb); /* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index c07612bab95c0957b2e34434aa73b6ef02708fff..f51c8bdbea1c010a6f6405932c2d3e1144f75219 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -397,6 +397,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) spin_lock_bh(&tn->nametbl_lock); for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { + skb_linearize(skb); msg = buf_msg(skb); mtype = msg_type(msg); item = (struct distr_item *)msg_data(msg); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 9b713e0ce00dbb79487e979337f449248cc78a51..b26b7a1277736c1f8c780f25a9672fd98fe3383c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2111,7 +2111,8 @@ restart: TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, onode, dport, oport, TIPC_CONN_SHUTDOWN); - tipc_node_xmit_skb(net, skb, dnode, tsk->portid); + if (skb) + tipc_node_xmit_skb(net, skb, dnode, tsk->portid); } tsk->connected = 0; sock->state = SS_DISCONNECTING; diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 70c03271b798f429d8d6faa5142fb2654753d342..6af78c6276b4b185be00d62370932bb1d816bfd4 100644 --- 
a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -48,7 +48,6 @@ #include #include "core.h" #include "bearer.h" -#include "msg.h" /* IANA assigned UDP port */ #define UDP_PORT_DEFAULT 6118 @@ -224,10 +223,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) { struct udp_bearer *ub; struct tipc_bearer *b; - int usr = msg_user(buf_msg(skb)); - - if ((usr == LINK_PROTOCOL) || (usr == NAME_DISTRIBUTOR)) - skb_linearize(skb); ub = rcu_dereference_sk_user_data(sk); if (!ub) { diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 6579fd6e7459bc3e1b40f2008f7bf4f32e02c0ca..73f75258ce4694be1235078ff062c3f0e73a1e3b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val) { struct unix_sock *u = unix_sk(sk); - if (mutex_lock_interruptible(&u->readlock)) + if (mutex_lock_interruptible(&u->iolock)) return -EINTR; sk->sk_peek_off = val; - mutex_unlock(&u->readlock); + mutex_unlock(&u->iolock); return 0; } @@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) spin_lock_init(&u->lock); atomic_long_set(&u->inflight, 0); INIT_LIST_HEAD(&u->link); - mutex_init(&u->readlock); /* single task reading lock */ + mutex_init(&u->iolock); /* single task reading lock */ + mutex_init(&u->bindlock); /* single task binding lock */ init_waitqueue_head(&u->peer_wait); init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); unix_insert_socket(unix_sockets_unbound(sk), sk); @@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock) int err; unsigned int retries = 0; - err = mutex_lock_interruptible(&u->readlock); + err = mutex_lock_interruptible(&u->bindlock); if (err) return err; @@ -894,7 +895,7 @@ retry: spin_unlock(&unix_table_lock); err = 0; -out: mutex_unlock(&u->readlock); +out: mutex_unlock(&u->bindlock); return err; } @@ -953,20 +954,32 @@ fail: return NULL; } -static int unix_mknod(struct dentry *dentry, struct path *path, 
umode_t mode, - struct path *res) +static int unix_mknod(const char *sun_path, umode_t mode, struct path *res) { - int err; + struct dentry *dentry; + struct path path; + int err = 0; + /* + * Get the parent directory, calculate the hash for last + * component. + */ + dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); + err = PTR_ERR(dentry); + if (IS_ERR(dentry)) + return err; - err = security_path_mknod(path, dentry, mode, 0); + /* + * All right, let's create it. + */ + err = security_path_mknod(&path, dentry, mode, 0); if (!err) { - err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0); + err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0); if (!err) { - res->mnt = mntget(path->mnt); + res->mnt = mntget(path.mnt); res->dentry = dget(dentry); } } - + done_path_create(&path, dentry); return err; } @@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; char *sun_path = sunaddr->sun_path; - int err, name_err; + int err; unsigned int hash; struct unix_address *addr; struct hlist_head *list; - struct path path; - struct dentry *dentry; err = -EINVAL; if (sunaddr->sun_family != AF_UNIX) @@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; addr_len = err; - name_err = 0; - dentry = NULL; - if (sun_path[0]) { - /* Get the parent directory, calculate the hash for last - * component. - */ - dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); - - if (IS_ERR(dentry)) { - /* delay report until after 'already bound' check */ - name_err = PTR_ERR(dentry); - dentry = NULL; - } - } - - err = mutex_lock_interruptible(&u->readlock); + err = mutex_lock_interruptible(&u->bindlock); if (err) - goto out_path; + goto out; err = -EINVAL; if (u->addr) goto out_up; - if (name_err) { - err = name_err == -EEXIST ? 
-EADDRINUSE : name_err; - goto out_up; - } - err = -ENOMEM; addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL); if (!addr) @@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) addr->hash = hash ^ sk->sk_type; atomic_set(&addr->refcnt, 1); - if (dentry) { - struct path u_path; + if (sun_path[0]) { + struct path path; umode_t mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current_umask()); - err = unix_mknod(dentry, &path, mode, &u_path); + err = unix_mknod(sun_path, mode, &path); if (err) { if (err == -EEXIST) err = -EADDRINUSE; @@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out_up; } addr->hash = UNIX_HASH_SIZE; - hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); + hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1); spin_lock(&unix_table_lock); - u->path = u_path; + u->path = path; list = &unix_socket_table[hash]; } else { spin_lock(&unix_table_lock); @@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) out_unlock: spin_unlock(&unix_table_lock); out_up: - mutex_unlock(&u->readlock); -out_path: - if (dentry) - done_path_create(&path, dentry); - + mutex_unlock(&u->bindlock); out: return err; } @@ -1971,17 +1958,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, if (false) { alloc_skb: unix_state_unlock(other); - mutex_unlock(&unix_sk(other)->readlock); + mutex_unlock(&unix_sk(other)->iolock); newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, &err, 0); if (!newskb) goto err; } - /* we must acquire readlock as we modify already present + /* we must acquire iolock as we modify already present * skbs in the sk_receive_queue and mess with skb->len */ - err = mutex_lock_interruptible(&unix_sk(other)->readlock); + err = mutex_lock_interruptible(&unix_sk(other)->iolock); if (err) { err = flags & MSG_DONTWAIT ? 
-EAGAIN : -ERESTARTSYS; goto err; @@ -2048,7 +2035,7 @@ alloc_skb: } unix_state_unlock(other); - mutex_unlock(&unix_sk(other)->readlock); + mutex_unlock(&unix_sk(other)->iolock); other->sk_data_ready(other); scm_destroy(&scm); @@ -2057,7 +2044,7 @@ alloc_skb: err_state_unlock: unix_state_unlock(other); err_unlock: - mutex_unlock(&unix_sk(other)->readlock); + mutex_unlock(&unix_sk(other)->iolock); err: kfree_skb(newskb); if (send_sigpipe && !(flags & MSG_NOSIGNAL)) @@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, if (flags&MSG_OOB) goto out; - err = mutex_lock_interruptible(&u->readlock); + err = mutex_lock_interruptible(&u->iolock); if (unlikely(err)) { /* recvmsg() in non blocking mode is supposed to return -EAGAIN * sk_rcvtimeo is not honored by mutex_lock_interruptible() @@ -2198,7 +2185,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, out_free: skb_free_datagram(sk, skb); out_unlock: - mutex_unlock(&u->readlock); + mutex_unlock(&u->iolock); out: return err; } @@ -2207,7 +2194,8 @@ out: * Sleep until more data has arrived. But check for races.. 
*/ static long unix_stream_data_wait(struct sock *sk, long timeo, - struct sk_buff *last, unsigned int last_len) + struct sk_buff *last, unsigned int last_len, + bool freezable) { struct sk_buff *tail; DEFINE_WAIT(wait); @@ -2228,7 +2216,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); unix_state_unlock(sk); - timeo = freezable_schedule_timeout(timeo); + if (freezable) + timeo = freezable_schedule_timeout(timeo); + else + timeo = schedule_timeout(timeo); unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) @@ -2258,7 +2249,8 @@ struct unix_stream_read_state { unsigned int splice_flags; }; -static int unix_stream_read_generic(struct unix_stream_read_state *state) +static int unix_stream_read_generic(struct unix_stream_read_state *state, + bool freezable) { struct scm_cookie scm; struct socket *sock = state->socket; @@ -2293,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) /* Lock the socket to prevent queue disordering * while sleeps in memcpy_tomsg */ - mutex_lock(&u->readlock); + mutex_lock(&u->iolock); if (flags & MSG_PEEK) skip = sk_peek_offset(sk, flags); @@ -2334,10 +2326,10 @@ again: break; } - mutex_unlock(&u->readlock); + mutex_unlock(&u->iolock); timeo = unix_stream_data_wait(sk, timeo, last, - last_len); + last_len, freezable); if (signal_pending(current)) { err = sock_intr_errno(timeo); @@ -2345,7 +2337,7 @@ again: goto out; } - mutex_lock(&u->readlock); + mutex_lock(&u->iolock); continue; unlock: unix_state_unlock(sk); @@ -2448,7 +2440,7 @@ unlock: } } while (size); - mutex_unlock(&u->readlock); + mutex_unlock(&u->iolock); if (state->msg) scm_recv(sock, state->msg, &scm, flags); else @@ -2479,7 +2471,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, .flags = flags }; - return unix_stream_read_generic(&state); + return unix_stream_read_generic(&state, true); } static ssize_t skb_unix_socket_splice(struct sock *sk, @@ -2489,9 +2481,9 
@@ static ssize_t skb_unix_socket_splice(struct sock *sk, int ret; struct unix_sock *u = unix_sk(sk); - mutex_unlock(&u->readlock); + mutex_unlock(&u->iolock); ret = splice_to_pipe(pipe, spd); - mutex_lock(&u->readlock); + mutex_lock(&u->iolock); return ret; } @@ -2525,7 +2517,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, flags & SPLICE_F_NONBLOCK) state.flags = MSG_DONTWAIT; - return unix_stream_read_generic(&state); + return unix_stream_read_generic(&state, false); } static int unix_shutdown(struct socket *sock, int mode) diff --git a/net/wireless/core.h b/net/wireless/core.h index a972e386ad7f5aa5e26b625d12843e4ce104e258..46495dd821c87959bca3cfaf71f0b940b68c8d9b 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -71,6 +71,7 @@ struct cfg80211_registered_device { struct list_head bss_list; struct rb_root bss_tree; u32 bss_generation; + u32 bss_entries; struct cfg80211_scan_request *scan_req; /* protected by RTNL */ struct sk_buff *scan_msg; struct cfg80211_sched_scan_request __rcu *sched_scan_req; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5ec8e5cf8a8ed363a870a963107388baf84971da..dcae8ae9bfad24836d608cb89ba49cc9e8fdbdc3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7143,7 +7143,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) params.n_counter_offsets_presp = len / sizeof(u16); if (rdev->wiphy.max_num_csa_counters && - (params.n_counter_offsets_beacon > + (params.n_counter_offsets_presp > rdev->wiphy.max_num_csa_counters)) return -EINVAL; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 16c3424507c3ed91c009b3b603b25dea849da6ed..5b4906ad1b819db9742c436704dc6755d0d64ca4 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -56,6 +56,19 @@ * also linked into the probe response struct. */ +/* + * Limit the number of BSS entries stored in mac80211. 
Each one is + * a bit over 4k at most, so this limits to roughly 4-5M of memory. + * If somebody wants to really attack this though, they'd likely + * use small beacons, and only one type of frame, limiting each of + * the entries to a much smaller size (in order to generate more + * entries in total, so overhead is bigger.) + */ +static int bss_entries_limit = 1000; +module_param(bss_entries_limit, int, 0644); +MODULE_PARM_DESC(bss_entries_limit, + "limit to number of scan BSS entries (per wiphy, default 1000)"); + #define IEEE80211_SCAN_RESULT_EXPIRE (7 * HZ) static void bss_free(struct cfg80211_internal_bss *bss) @@ -136,6 +149,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, list_del_init(&bss->list); rb_erase(&bss->rbn, &rdev->bss_tree); + rdev->bss_entries--; + WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), + "rdev bss entries[%d]/list[empty:%d] corruption\n", + rdev->bss_entries, list_empty(&rdev->bss_list)); bss_ref_put(rdev, bss); return true; } @@ -162,6 +179,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, rdev->bss_generation++; } +static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) +{ + struct cfg80211_internal_bss *bss, *oldest = NULL; + bool ret; + + lockdep_assert_held(&rdev->bss_lock); + + list_for_each_entry(bss, &rdev->bss_list, list) { + if (atomic_read(&bss->hold)) + continue; + + if (!list_empty(&bss->hidden_list) && + !bss->pub.hidden_beacon_bss) + continue; + + if (oldest && time_before(oldest->ts, bss->ts)) + continue; + oldest = bss; + } + + if (WARN_ON(!oldest)) + return false; + + /* + * The callers make sure to increase rdev->bss_generation if anything + * gets removed (and a new entry added), so there's no need to also do + * it here. 
+ */ + + ret = __cfg80211_unlink_bss(rdev, oldest); + WARN_ON(!ret); + return ret; +} + void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message) { @@ -687,6 +738,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, const u8 *ie; int i, ssidlen; u8 fold = 0; + u32 n_entries = 0; ies = rcu_access_pointer(new->pub.beacon_ies); if (WARN_ON(!ies)) @@ -710,6 +762,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, /* This is the bad part ... */ list_for_each_entry(bss, &rdev->bss_list, list) { + /* + * we're iterating all the entries anyway, so take the + * opportunity to validate the list length accounting + */ + n_entries++; + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) continue; if (bss->pub.channel != new->pub.channel) @@ -738,6 +796,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, new->pub.beacon_ies); } + WARN_ONCE(n_entries != rdev->bss_entries, + "rdev bss entries[%d]/list[len:%d] corruption\n", + rdev->bss_entries, n_entries); + return true; } @@ -890,7 +952,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, } } + if (rdev->bss_entries >= bss_entries_limit && + !cfg80211_bss_expire_oldest(rdev)) { + kfree(new); + goto drop; + } + list_add_tail(&new->list, &rdev->bss_list); + rdev->bss_entries++; rb_insert_bss(rdev, new); found = new; } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index c753211cb83f8b0db3907af66e5bc708456000a6..b50ee5d622e14d4fb486ce0c533757e958702f54 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -955,29 +955,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, return private(dev, iwr, cmd, info, handler); } /* Old driver API : call driver ioctl handler */ - if (dev->netdev_ops->ndo_do_ioctl) { -#ifdef CONFIG_COMPAT - if (info->flags & IW_REQUEST_FLAG_COMPAT) { - int ret = 0; - struct iwreq iwr_lcl; - struct compat_iw_point 
*iwp_compat = (void *) &iwr->u.data; - - memcpy(&iwr_lcl, iwr, sizeof(struct iwreq)); - iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer); - iwr_lcl.u.data.length = iwp_compat->length; - iwr_lcl.u.data.flags = iwp_compat->flags; - - ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd); - - iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer); - iwp_compat->length = iwr_lcl.u.data.length; - iwp_compat->flags = iwr_lcl.u.data.flags; - - return ret; - } else -#endif - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); - } + if (dev->netdev_ops->ndo_do_ioctl) + return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); return -EOPNOTSUPP; } diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 727eb21c9c5624f2998f321d59db1017339c69a3..83795435bd991b117324424d7a1a2cb2985786ad 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -42,6 +42,11 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs) " ex1 = 0x%lx\n", p->addr, regs->pc, regs->ex1); #endif +#ifdef CONFIG_ARM64 + pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx," + " pstate = 0x%lx\n", + p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate); +#endif /* A dump_stack() here will give a stack backtrace */ return 0; @@ -67,6 +72,10 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs, printk(KERN_INFO "post_handler: p->addr = 0x%p, ex1 = 0x%lx\n", p->addr, regs->ex1); #endif +#ifdef CONFIG_ARM64 + pr_info("<%s> post_handler: p->addr = 0x%p, pstate = 0x%lx\n", + p->symbol_name, p->addr, (long)regs->pstate); +#endif } /* diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index 973e8c1415677eeba513543655e939de234521e3..17867e723a51a667fdec65194b9033346ff1b786 100755 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh @@ -1,6 +1,6 @@ #!/bin/sh -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c 
-c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" if [ "$?" -eq "0" ] ; then echo y else diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index dc0027b28b049fa86b83543e2ce0a0640e591c89..53426a6ee6dc535478e49d8a9a7f666447aa7e44 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -623,8 +623,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) /* released below */ cred = get_current_cred(); cxt = cred_cxt(cred); - profile = aa_cred_profile(cred); - previous_profile = cxt->previous; + profile = aa_get_newest_profile(aa_cred_profile(cred)); + previous_profile = aa_get_newest_profile(cxt->previous); if (unconfined(profile)) { info = "unconfined"; @@ -720,6 +720,8 @@ audit: out: aa_put_profile(hat); kfree(name); + aa_put_profile(profile); + aa_put_profile(previous_profile); put_cred(cred); return error; diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index 1d950fbb2aecbb21c62b28233dba7a707a116a6b..2d1fe34781fa70f79f139969e01da15d3fb10a29 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -202,7 +202,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint, } hash; if (xattr_value) - *xattr_len = ima_read_xattr(file->f_path.dentry, xattr_value); + *xattr_len = ima_read_xattr(file_dentry(file), xattr_value); if (!(iint->flags & IMA_COLLECTED)) { u64 i_version = file_inode(file)->i_version; diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 1873b5536f804c28cbb9452b5b1a8cc2f180828a..ed5a9c110b3a2238025a3be9d4a58787ceaea230 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -189,7 +189,7 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, { 
static const char op[] = "appraise_data"; char *cause = "unknown"; - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = file_dentry(file); struct inode *inode = d_backing_inode(dentry); enum integrity_status status = INTEGRITY_UNKNOWN; int rc = xattr_len, hash_start = 0; @@ -289,7 +289,7 @@ out: */ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = file_dentry(file); int rc = 0; /* do not collect and update hash for digital signatures */ diff --git a/security/lsm_audit.c b/security/lsm_audit.c index cccbf3068cdca800eb53e49caf1d29de9d93b035..45d927ab807d82425b470d1e597b786fa91b7259 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -220,7 +220,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, */ BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2); - audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); + audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm))); switch (a->type) { @@ -294,7 +294,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, case LSM_AUDIT_DATA_TASK: { struct task_struct *tsk = a->u.tsk; if (tsk) { - pid_t pid = task_pid_nr(tsk); + pid_t pid = task_tgid_nr(tsk); if (pid) { char comm[sizeof(tsk->comm)]; audit_log_format(ab, " opid=%d ocomm=", pid); diff --git a/sound/core/info.c b/sound/core/info.c index b5158b5673f1fb0895280fd9271f1f5874ec7c5d..79dee33b5035623caf1e1b8f88570e777b865a73 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -325,10 +325,15 @@ static ssize_t snd_info_text_entry_write(struct file *file, size_t next; int err = 0; + if (!entry->c.text.write) + return -EIO; pos = *offset; if (!valid_pos(pos, count)) return -EIO; next = pos + count; + /* don't handle too large text inputs */ + if (next > 16 * 1024) + return -EIO; mutex_lock(&entry->access); buf = data->wbuffer; if (!buf) { @@ 
-366,7 +371,9 @@ static int snd_info_seq_show(struct seq_file *seq, void *p) struct snd_info_private_data *data = seq->private; struct snd_info_entry *entry = data->entry; - if (entry->c.text.read) { + if (!entry->c.text.read) { + return -EIO; + } else { data->rbuffer->buffer = (char *)seq; /* XXX hack! */ entry->c.text.read(entry, data->rbuffer); } diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index b73133885384b6a0af89b0b4470e53638d2bffc2..2530669e2f94cfdebea44c591a36f4aa4691723f 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1901,8 +1901,8 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) snd_timer_interrupt(substream->timer, 1); #endif _end: - snd_pcm_stream_unlock_irqrestore(substream, flags); kill_fasync(&runtime->fasync, SIGIO, POLL_IN); + snd_pcm_stream_unlock_irqrestore(substream, flags); } EXPORT_SYMBOL(snd_pcm_period_elapsed); diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c index 36470af7eda70689619388f6b2e774cf850492cd..92b819e4f7290c7ac056d72e35ce553829dce64a 100644 --- a/sound/pci/ali5451/ali5451.c +++ b/sound/pci/ali5451/ali5451.c @@ -1408,6 +1408,7 @@ snd_ali_playback_pointer(struct snd_pcm_substream *substream) spin_unlock(&codec->reg_lock); dev_dbg(codec->card->dev, "playback pointer returned cso=%xh.\n", cso); + cso %= runtime->buffer_size; return cso; } @@ -1428,6 +1429,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream) cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2)); spin_unlock(&codec->reg_lock); + cso %= runtime->buffer_size; return cso; } diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c index 9c22f95838efbd31d23448cc26321918c03bd5e1..19d41da79f93cc95500859c9e1690a2d89935d23 100644 --- a/sound/pci/hda/dell_wmi_helper.c +++ b/sound/pci/hda/dell_wmi_helper.c @@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec, removefunc = true; if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) { 
dell_led_value = 0; - if (spec->gen.num_adc_nids > 1) + if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch) codec_dbg(codec, "Skipping micmute LED control due to several ADCs"); else { dell_old_cap_hook = spec->gen.cap_sync_hook; diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 22dbfa563919fca4ace4d3d50b7c822008c7bb34..5baf8b56b6e7604637bca89eae54ca63ce4e6232 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -956,7 +956,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) status = azx_readb(chip, RIRBSTS); if (status & RIRB_INT_MASK) { if (status & RIRB_INT_RESPONSE) { - if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) + if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) udelay(80); snd_hdac_bus_update_rirb(bus); } @@ -1055,11 +1055,6 @@ int azx_bus_init(struct azx *chip, const char *model, if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR) bus->core.corbrp_self_clear = true; - if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) { - dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n"); - bus->needs_damn_long_delay = 1; - } - if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) bus->core.align_bdle_4k = true; diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 7b635d68cfe1a2d76efd9826b2020c10f8c834a3..b17539537b2e0b90cb55f95a6d3e0689003822a8 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -32,8 +32,8 @@ #define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */ #define AZX_DCAPS_SNOOP_MASK (3 << 10) /* snoop type mask */ #define AZX_DCAPS_SNOOP_OFF (1 << 12) /* snoop default off */ -#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */ -#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */ +/* 13 unused */ +/* 14 unused */ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */ #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */ #define AZX_DCAPS_POSFIX_VIA (1 << 17) /* 
Use VIACOMBO as default */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index d4671973d8890626555eac72e24a10f74cea8423..ad4a1e9a3ae1be732bd2fa602cd01eea511a7160 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -334,8 +334,7 @@ enum { /* quirks for Nvidia */ #define AZX_DCAPS_PRESET_NVIDIA \ - (AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \ - AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\ + (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\ AZX_DCAPS_SNOOP_TYPE(NVIDIA)) #define AZX_DCAPS_PRESET_CTHDA \ @@ -1637,6 +1636,11 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, return err; } + if (chip->driver_type == AZX_DRIVER_NVIDIA) { + dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n"); + chip->bus.needs_damn_long_delay = 1; + } + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { dev_err(card->dev, "Error creating device [card]!\n"); @@ -1720,6 +1724,10 @@ static int azx_first_init(struct azx *chip) } } + /* NVidia hardware normally only supports up to 40 bits of DMA */ + if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA) + dma_bits = 40; + /* disable 64bit DMA address on some devices */ if (chip->driver_caps & AZX_DCAPS_NO_64BIT) { dev_dbg(card->dev, "Disabling 64bit DMA\n"); @@ -2406,14 +2414,12 @@ static const struct pci_device_id azx_ids[] = { .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, .class_mask = 0xffffff, .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | - AZX_DCAPS_NO_64BIT | - AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, + AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB }, #else /* this entry seems still valid -- i.e. 
without emu20kx chip */ { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | - AZX_DCAPS_NO_64BIT | - AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, + AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB }, #endif /* CM8888 */ { PCI_DEVICE(0x13f6, 0x5011), diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 58c0aad372842125be5529d07aecbbe98e2c8859..17fd81736d3d67f6a6fb67ef1ad624a6d96d93a9 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -464,6 +464,8 @@ static int hda_tegra_create(struct snd_card *card, if (err < 0) return err; + chip->bus.needs_damn_long_delay = 1; + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { dev_err(card->dev, "Error creating device\n"); @@ -481,8 +483,7 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match); static int hda_tegra_probe(struct platform_device *pdev) { - const unsigned int driver_flags = AZX_DCAPS_RIRB_DELAY | - AZX_DCAPS_CORBRP_SELF_CLEAR; + const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR; struct snd_card *card; struct azx *chip; struct hda_tegra *hda; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 600af5878e7545563384e387966062a9691bd33b..36cd715986bc53a40b61f4cd27bc927e114bec64 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -261,6 +261,7 @@ enum { CXT_FIXUP_HP_530, CXT_FIXUP_CAP_MIX_AMP_5047, CXT_FIXUP_MUTE_LED_EAPD, + CXT_FIXUP_HP_SPECTRE, }; /* for hda_fixup_thinkpad_acpi() */ @@ -765,6 +766,14 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_mute_led_eapd, }, + [CXT_FIXUP_HP_SPECTRE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + /* enable NID 0x1d for the speaker on top */ + { 0x1d, 0x91170111 }, + { } + } + }, }; static const struct snd_pci_quirk cxt5045_fixups[] = { @@ -814,6 +823,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x1025, 0x0543, 
"Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index eaee626ab18503b1d7c5477008a81654843cdff3..f0986cac82f1a5fb464e786b992f312345c3b6f8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5790,6 +5790,11 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {0x14, 0x90170110}, \ {0x15, 0x0221401f} +#define ALC295_STANDARD_PINS \ + {0x12, 0xb7a60130}, \ + {0x14, 0x90170110}, \ + {0x21, 0x04211020} + #define ALC298_STANDARD_PINS \ {0x12, 0x90a60130}, \ {0x21, 0x03211020} @@ -5829,10 +5834,22 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x21, 0x02211030}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x14, 0x90170110}, + {0x1b, 0x02011020}, + {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x14, 0x90170110}, + {0x1b, 0x01011020}, + {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170130}, {0x1b, 0x01014020}, {0x21, 0x0221103f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x14, 0x90170130}, + {0x1b, 0x01011020}, + {0x21, 0x0221103f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170130}, {0x1b, 0x02011020}, @@ -5894,6 +5911,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60180}, 
{0x14, 0x90170120}, {0x21, 0x02211030}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0xb7a60130}, + {0x14, 0x90170110}, + {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, @@ -6005,6 +6026,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x13, 0x90a60140}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC295_STANDARD_PINS, + {0x17, 0x21014020}, + {0x18, 0x21a19030}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC295_STANDARD_PINS, + {0x17, 0x21014040}, + {0x18, 0x21a19050}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x17, 0x90170110}), diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 0a4ad5feb82e7817f7036f86c973d02b1dfecb9b..12826ac0381f03f428a74216d52c1009837b2ae0 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -75,7 +75,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, removefunc = false; } if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { - if (spec->num_adc_nids > 1) + if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch) codec_dbg(codec, "Skipping micmute LED control due to several ADCs"); else { diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index e07807d96b68069b10c0e1319359b8ed52905ecc..3670086b9227c4f9b3344998e113cc636a611cd4 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c @@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"), }; static const struct snd_soc_dapm_route cs4270_dapm_routes[] = { - { "Capture", NULL, "AINA" }, - { "Capture", NULL, "AINB" }, + { "Capture", 
NULL, "AINL" }, + { "Capture", NULL, "AINR" }, - { "AOUTA", NULL, "Playback" }, - { "AOUTB", NULL, "Playback" }, + { "AOUTL", NULL, "Playback" }, + { "AOUTR", NULL, "Playback" }, }; /** diff --git a/sound/soc/intel/atom/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c index adb32fefd693a059574674b414abc7d2811985e7..b1e6b8f34a6a797d335e9630b4972842400977f2 100644 --- a/sound/soc/intel/atom/sst/sst_pvt.c +++ b/sound/soc/intel/atom/sst/sst_pvt.c @@ -279,17 +279,15 @@ int sst_prepare_and_post_msg(struct intel_sst_drv *sst, if (response) { ret = sst_wait_timeout(sst, block); - if (ret < 0) { + if (ret < 0) goto out; - } else if(block->data) { - if (!data) - goto out; - *data = kzalloc(block->size, GFP_KERNEL); - if (!(*data)) { + + if (data && block->data) { + *data = kmemdup(block->data, block->size, GFP_KERNEL); + if (!*data) { ret = -ENOMEM; goto out; - } else - memcpy(data, (void *) block->data, block->size); + } } } out: diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index caa69c4598a6f14eb27c91575b49aae9fd7d3e8a..b4844f78266f7612b768743941b2bfbbf88e6d15 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -464,8 +464,10 @@ static int skl_probe(struct pci_dev *pci, skl->nhlt = skl_nhlt_init(bus->dev); - if (skl->nhlt == NULL) + if (skl->nhlt == NULL) { + err = -ENODEV; goto out_free; + } pci_set_drvdata(skl->pci, ebus); diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c index e7b51c5c2c000901f220a3a37a01057859c978ce..6324e81759de184de2ac1feb28e85e33e88c657e 100644 --- a/sound/soc/msm/msm8998.c +++ b/sound/soc/msm/msm8998.c @@ -534,6 +534,7 @@ static struct wcd_mbhc_config wcd_mbhc_cfg = { .key_code[7] = 0, .linein_th = 5000, .moisture_en = true, + .mbhc_micbias = MIC_BIAS_2, .anc_micbias = MIC_BIAS_2, .enable_anc_mic_detect = false, }; diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c index b837265ac3e9afd90dc26e5e00b2b52d0d1d9239..8d0d45d330e762d4bc7b21aea779dab39f6a4c9a 
100644 --- a/sound/soc/omap/omap-mcpdm.c +++ b/sound/soc/omap/omap-mcpdm.c @@ -390,8 +390,8 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai) pm_runtime_get_sync(mcpdm->dev); omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00); - ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler, - 0, "McPDM", (void *)mcpdm); + ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM", + (void *)mcpdm); pm_runtime_put_sync(mcpdm->dev); @@ -416,6 +416,7 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai) { struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); + free_irq(mcpdm->irq, (void *)mcpdm); pm_runtime_disable(mcpdm->dev); return 0; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index b1980df07dce5c84c9b99dcadaeec8488f710e4b..8f76f551e22905317630d5f840783ccf1db44b62 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -826,6 +826,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w, case snd_soc_dapm_switch: case snd_soc_dapm_mixer: case snd_soc_dapm_pga: + case snd_soc_dapm_out_drv: wname_in_long_name = true; kcname_in_long_name = true; break; @@ -3030,6 +3031,9 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, } mutex_unlock(&card->dapm_mutex); + if (ret) + return ret; + if (invert) ucontrol->value.integer.value[0] = max - val; else @@ -3181,7 +3185,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, if (e->shift_l != e->shift_r) { if (item[1] > e->items) return -EINVAL; - val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l; + val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r; mask |= e->mask << e->shift_r; } diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 6963ba20991c10066fdad2ad65f3102fc5752424..70396d3f64728bad98d7f5a665ebfb66351bfdf9 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1484,6 +1484,7 @@ widget: if (widget == NULL) { dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n", w->name); + 
ret = -ENOMEM; goto hdr_err; } diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index 1bb896d78d09817eff6fc38af7e2d1d1b3622ef3..1a4999f9d56fa2b0058c7cb5e54a32c4c6e9e57d 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c @@ -575,11 +575,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev) card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!card) - return NULL; + return ERR_PTR(-ENOMEM); card->dai_link = sun4i_codec_create_link(dev, &card->num_links); if (!card->dai_link) - return NULL; + return ERR_PTR(-ENOMEM); card->dev = dev; card->name = "sun4i-codec"; @@ -661,7 +661,8 @@ static int sun4i_codec_probe(struct platform_device *pdev) } card = sun4i_codec_create_card(&pdev->dev); - if (!card) { + if (IS_ERR(card)) { + ret = PTR_ERR(card); dev_err(&pdev->dev, "Failed to create our card\n"); goto err_unregister_codec; } diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 81b7da8e56d39e352a5b629bbdf73aa460889b4d..183311cb849ede12bb5f2e9c68f9c7ff40e3281b 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -29,7 +29,7 @@ /* This is Line 6's MIDI manufacturer ID. */ -const unsigned char line6_midi_id[] = { +const unsigned char line6_midi_id[3] = { 0x00, 0x01, 0x0c }; EXPORT_SYMBOL_GPL(line6_midi_id); diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index f6c3bf79af9a7d9f7cabe6b83c3573a59964b826..04991b00913222f07f4d6dbcb0dc6b955db4f5a5 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -1831,6 +1831,7 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer, } static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, + struct usb_mixer_elem_info *cval, struct snd_kcontrol *kctl) { /* Approximation using 10 ranges based on output measurement on hw v1.2. 
@@ -1848,10 +1849,19 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, 41, 50, TLV_DB_MINMAX_ITEM(-441, 0), ); - usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n"); - kctl->tlv.p = scale; - kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; - kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; + if (cval->min == 0 && cval->max == 50) { + usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk (0-50 variant)\n"); + kctl->tlv.p = scale; + kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; + kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; + + } else if (cval->min == 0 && cval->max <= 1000) { + /* Some other clearly broken DragonFly variant. + * At least a 0..53 variant (hw v1.0) exists. + */ + usb_audio_info(mixer->chip, "ignoring too narrow dB range on a DragonFly device"); + kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; + } } void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, @@ -1860,8 +1870,8 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, { switch (mixer->chip->usb_id) { case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */ - if (unitid == 7 && cval->min == 0 && cval->max == 50) - snd_dragonfly_quirk_db_scale(mixer, kctl); + if (unitid == 7 && cval->control == UAC_FU_VOLUME) + snd_dragonfly_quirk_db_scale(mixer, cval, kctl); break; } } diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index c60a776e815d72f14b9b6345f2e8a0266f8ec1b6..8a59d4782a0f4d3c33b3e6840cbe265ba8ee4406 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"), AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"), AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), +/* Syntek STK1160 */ +{ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_CLASS | + USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x05e1, + .idProduct = 0x0408, + 
.bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Syntek", + .product_name = "STK1160", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_AUDIO_ALIGN_TRANSFER + } +}, + /* Digidesign Mbox */ { /* Thanks to Clemens Ladisch */ diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 34846e71fdbdd13ad946daae10f70f75f2a395cf..74c265e0ffa0f3088a88b07e353254694559c668 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -423,7 +423,7 @@ $(LIBTRACEEVENT)-clean: $(call QUIET_CLEAN, libtraceevent) $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null -install-traceevent-plugins: $(LIBTRACEEVENT) +install-traceevent-plugins: libtraceevent_plugins $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins $(LIBAPI): fixdep FORCE diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 3900386a3629410b903223642b449159ecd18e13..d802938644b5cbe2df67afcb4af2db273b7e9dd4 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -684,7 +684,6 @@ static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...) 
ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent); ui_browser__printf(arg->b, "%s", hpp->buf); - advance_hpp(hpp, ret); return ret; } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 9409d014b46c713de02df828cf9ad7ff726881e8..71df7acf86435bd000567b9a00acec9562b7e417 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -89,6 +89,7 @@ struct intel_pt_decoder { bool pge; bool have_tma; bool have_cyc; + bool fixup_last_mtc; uint64_t pos; uint64_t last_ip; uint64_t ip; @@ -584,10 +585,31 @@ struct intel_pt_calc_cyc_to_tsc_info { uint64_t tsc_timestamp; uint64_t timestamp; bool have_tma; + bool fixup_last_mtc; bool from_mtc; double cbr_cyc_to_tsc; }; +/* + * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower + * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC + * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA + * packet by copying the missing bits from the current MTC assuming the least + * difference between the two, and that the current MTC comes after last_mtc. 
+ */ +static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift, + uint32_t *last_mtc) +{ + uint32_t first_missing_bit = 1U << (16 - mtc_shift); + uint32_t mask = ~(first_missing_bit - 1); + + *last_mtc |= mtc & mask; + if (*last_mtc >= mtc) { + *last_mtc -= first_missing_bit; + *last_mtc &= 0xff; + } +} + static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) { struct intel_pt_decoder *decoder = pkt_info->decoder; @@ -617,6 +639,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) return 0; mtc = pkt_info->packet.payload; + if (decoder->mtc_shift > 8 && data->fixup_last_mtc) { + data->fixup_last_mtc = false; + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift, + &data->last_mtc); + } if (mtc > data->last_mtc) mtc_delta = mtc - data->last_mtc; else @@ -685,6 +712,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) data->ctc_delta = 0; data->have_tma = true; + data->fixup_last_mtc = true; return 0; @@ -751,6 +779,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder, .tsc_timestamp = decoder->tsc_timestamp, .timestamp = decoder->timestamp, .have_tma = decoder->have_tma, + .fixup_last_mtc = decoder->fixup_last_mtc, .from_mtc = from_mtc, .cbr_cyc_to_tsc = 0, }; @@ -1241,6 +1270,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder) } decoder->ctc_delta = 0; decoder->have_tma = true; + decoder->fixup_last_mtc = true; intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n", decoder->ctc_timestamp, decoder->last_mtc, ctc_rem); } @@ -1255,6 +1285,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder) mtc = decoder->packet.payload; + if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) { + decoder->fixup_last_mtc = false; + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift, + &decoder->last_mtc); + } + if (mtc > decoder->last_mtc) mtc_delta = mtc - decoder->last_mtc; else @@ -1323,6 +1359,8 @@ static void intel_pt_calc_cyc_timestamp(struct 
intel_pt_decoder *decoder) timestamp, decoder->timestamp); else decoder->timestamp = timestamp; + + decoder->timestamp_insn_cnt = 0; } /* Walk PSB+ packets when already in sync. */ diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 9227c2f076c36195ec476dc64d6236341026decd..89927b5beebf622cfb20c24cca22f599f74e3cbd 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -238,7 +238,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) } queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; - +next: buffer = auxtrace_buffer__next(queue, buffer); if (!buffer) { if (old_buffer) @@ -261,9 +261,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) return -ENOMEM; - if (old_buffer) - auxtrace_buffer__drop_data(old_buffer); - if (buffer->use_data) { b->len = buffer->use_size; b->buf = buffer->use_data; @@ -273,6 +270,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) } b->ref_timestamp = buffer->reference; + /* + * If in snapshot mode and the buffer has no usable data, get next + * buffer and again check overlap against old_buffer. + */ + if (ptq->pt->snapshot_mode && !b->len) + goto next; + + if (old_buffer) + auxtrace_buffer__drop_data(old_buffer); + if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode && !buffer->consecutive)) { b->consecutive = false; diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 4a3a72cb5805e256871abda351792ba23b2d30cd..6ce624cb7001249d703248e7aa566e7941117f04 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -311,6 +311,16 @@ int perf_stat_process_counter(struct perf_stat_config *config, aggr->val = aggr->ena = aggr->run = 0; + /* + * We calculate counter's data every interval, + * and the display code shows ps->res_stats + * avg value. 
We need to zero the stats for + * interval mode, otherwise overall avg running + * averages will be shown for each interval. + */ + if (config->interval) + init_stats(ps->res_stats); + if (counter->per_pkg) zero_per_pkg(counter); diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 475d88d0a1c9a772323b3218cfcf5f5900c0e809..27ae382feb2d780238eb3f0dfe083589070fb4c7 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1091,9 +1091,8 @@ new_symbol: * For misannotated, zeroed, ASM function sizes. */ if (nr > 0) { - if (!symbol_conf.allow_aliases) - symbols__fixup_duplicate(&dso->symbols[map->type]); symbols__fixup_end(&dso->symbols[map->type]); + symbols__fixup_duplicate(&dso->symbols[map->type]); if (kmap) { /* * We need to fixup this here too because we create new diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1d0d8bff4a5b94a1e92af5ab903df30dd73a2e25..754711be8b251b2c0e33d1a57053710b20dc8edf 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -151,6 +151,9 @@ void symbols__fixup_duplicate(struct rb_root *symbols) struct rb_node *nd; struct symbol *curr, *next; + if (symbol_conf.allow_aliases) + return; + nd = rb_first(symbols); while (nd) { @@ -1275,8 +1278,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, if (kallsyms__delta(map, filename, &delta)) return -1; - symbols__fixup_duplicate(&dso->symbols[map->type]); symbols__fixup_end(&dso->symbols[map->type]); + symbols__fixup_duplicate(&dso->symbols[map->type]); if (dso->kernel == DSO_TYPE_GUEST_KERNEL) dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c index 86e698d07e20d66931f7846b8061c82018927382..499b8819d4c6ab85b2e9ce677b0116b32fa8ff3e 100644 --- a/tools/vm/slabinfo.c +++ b/tools/vm/slabinfo.c @@ -510,10 +510,11 @@ static void slab_stats(struct slabinfo *s) s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total); } - if 
(s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) + if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) { printf("\nCmpxchg_double Looping\n------------------------\n"); printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n", s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail); + } } static void report(struct slabinfo *s) diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 487d6357b7e75039ff914077eb11d890f938e51d..453eafd4dd6e5fa3f48b458e2fd62df0792c3554 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -28,6 +28,7 @@ #include #include +#include #include /* These are for GICv2 emulation only */ @@ -36,18 +37,12 @@ #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) #define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1) -/* - * LRs are stored in reverse order in memory. make sure we index them - * correctly. - */ -#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) - static u32 ich_vtr_el2; static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) { struct vgic_lr lr_desc; - u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)]; if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) lr_desc.irq = val & ICH_LR_VIRTUALID_MASK; @@ -111,7 +106,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT; } - vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val; if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);