android/GKI_VERSION  +1 −1

-LTS_5.4.197_3970bc62738d
+LTS_5.4.197_26eb689452c8
arch/arm/configs/vendor/sdxlemur.config  +36 −0

@@ -468,3 +468,39 @@ CONFIG_NET_ACT_CT=y
 CONFIG_NET_TC_SKB_EXT=y
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCHED_ACT_MPLS_QGKI=y
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=y
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+# CONFIG_DM_UNSTRIPED is not set
+# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+# CONFIG_DM_WRITECACHE is not set
+# CONFIG_DM_ERA is not set
+# CONFIG_DM_CLONE is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_DUST is not set
+# CONFIG_DM_INIT is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_FLAKEY is not set
+CONFIG_DM_VERITY=y
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_AVB is not set
+# CONFIG_DM_VERITY_FEC is not set
+# CONFIG_DM_SWITCH is not set
+# CONFIG_DM_LOG_WRITES is not set
+# CONFIG_DM_INTEGRITY is not set
+# CONFIG_DM_BOW is not set
+# CONFIG_DEVMEM is not set
+CONFIG_DAX=y
+CONFIG_LSM_MMAP_MIN_ADDR=32768
drivers/android/binder.c  +98 −39

@@ -666,20 +666,26 @@ static int to_kernel_prio(int policy, int user_priority)
 	return MAX_USER_RT_PRIO - 1 - user_priority;
 }
 
-static void binder_do_set_priority(struct task_struct *task,
-				   struct binder_priority desired,
+static void binder_do_set_priority(struct binder_thread *thread,
+				   const struct binder_priority *desired,
 				   bool verify)
 {
+	struct task_struct *task = thread->task;
 	int priority; /* user-space prio value */
 	bool has_cap_nice;
-	unsigned int policy = desired.sched_policy;
+	unsigned int policy = desired->sched_policy;
 
-	if (task->policy == policy && task->normal_prio == desired.prio)
+	if (task->policy == policy && task->normal_prio == desired->prio) {
+		spin_lock(&thread->prio_lock);
+		if (thread->prio_state == BINDER_PRIO_PENDING)
+			thread->prio_state = BINDER_PRIO_SET;
+		spin_unlock(&thread->prio_lock);
 		return;
+	}
 
 	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
 
-	priority = to_userspace_prio(policy, desired.prio);
+	priority = to_userspace_prio(policy, desired->prio);
 
 	if (verify && is_rt_policy(policy) && !has_cap_nice) {
 		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
@@ -704,16 +710,30 @@ static void binder_do_set_priority(struct task_struct *task,
 		}
 	}
 
-	if (policy != desired.sched_policy ||
-	    to_kernel_prio(policy, priority) != desired.prio)
+	if (policy != desired->sched_policy ||
+	    to_kernel_prio(policy, priority) != desired->prio)
 		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
 			     "%d: priority %d not allowed, using %d instead\n",
-			      task->pid, desired.prio,
+			      task->pid, desired->prio,
 			      to_kernel_prio(policy, priority));
 
 	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
 				  to_kernel_prio(policy, priority),
-				  desired.prio);
+				  desired->prio);
 
+	spin_lock(&thread->prio_lock);
+	if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
+		/*
+		 * A new priority has been set by an incoming nested
+		 * transaction. Abort this priority restore and allow
+		 * the transaction to run at the new desired priority.
+		 */
+		spin_unlock(&thread->prio_lock);
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			     "%d: %s: aborting priority restore\n",
+			     thread->pid, __func__);
+		return;
+	}
+
 	/* Set the actual priority */
 	if (task->policy != policy || is_rt_policy(policy)) {
@@ -727,37 +747,42 @@ static void binder_do_set_priority(struct task_struct *task,
 	}
 	if (is_fair_policy(policy))
 		set_user_nice(task, priority);
+
+	thread->prio_state = BINDER_PRIO_SET;
+	spin_unlock(&thread->prio_lock);
 }
 
-static void binder_set_priority(struct task_struct *task,
-				struct binder_priority desired)
+static void binder_set_priority(struct binder_thread *thread,
+				const struct binder_priority *desired)
 {
-	binder_do_set_priority(task, desired, /* verify = */ true);
+	binder_do_set_priority(thread, desired, /* verify = */ true);
 }
 
-static void binder_restore_priority(struct task_struct *task,
-				    struct binder_priority desired)
+static void binder_restore_priority(struct binder_thread *thread,
+				    const struct binder_priority *desired)
 {
-	binder_do_set_priority(task, desired, /* verify = */ false);
+	binder_do_set_priority(thread, desired, /* verify = */ false);
 }
 
-static void binder_transaction_priority(struct task_struct *task,
+static void binder_transaction_priority(struct binder_thread *thread,
 					struct binder_transaction *t,
-					struct binder_priority node_prio,
-					bool inherit_rt)
+					struct binder_node *node)
 {
-	struct binder_priority desired_prio = t->priority;
+	struct task_struct *task = thread->task;
+	struct binder_priority desired = t->priority;
+	const struct binder_priority node_prio = {
+		.sched_policy = node->sched_policy,
+		.prio = node->min_priority,
+	};
 
 	if (t->set_priority_called)
 		return;
 
 	t->set_priority_called = true;
-	t->saved_priority.sched_policy = task->policy;
-	t->saved_priority.prio = task->normal_prio;
 
-	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
-		desired_prio.prio = NICE_TO_PRIO(0);
-		desired_prio.sched_policy = SCHED_NORMAL;
+	if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
+		desired.prio = NICE_TO_PRIO(0);
+		desired.sched_policy = SCHED_NORMAL;
 	}
 
 	if (node_prio.prio < t->priority.prio ||
@@ -770,10 +795,29 @@ static void binder_transaction_priority(struct task_struct *task,
 		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
 		 * run unbounded, unlike SCHED_RR.
 		 */
-		desired_prio = node_prio;
+		desired = node_prio;
 	}
 
-	binder_set_priority(task, desired_prio);
+	spin_lock(&thread->prio_lock);
+	if (thread->prio_state == BINDER_PRIO_PENDING) {
+		/*
+		 * Task is in the process of changing priorities
+		 * saving its current values would be incorrect.
+		 * Instead, save the pending priority and signal
+		 * the task to abort the priority restore.
+		 */
+		t->saved_priority = thread->prio_next;
+		thread->prio_state = BINDER_PRIO_ABORT;
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			     "%d: saved pending priority %d\n",
+			     current->pid, thread->prio_next.prio);
+	} else {
+		t->saved_priority.sched_policy = task->policy;
+		t->saved_priority.prio = task->normal_prio;
+	}
+	spin_unlock(&thread->prio_lock);
+
+	binder_set_priority(thread, &desired);
 	trace_android_vh_binder_set_priority(t, task);
 }
 
@@ -1486,6 +1530,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
 	}
 	ret = binder_inc_ref_olocked(ref, strong, target_list);
 	*rdata = ref->data;
+	if (ret && ref == new_ref) {
+		/*
+		 * Cleanup the failed reference here as the target
+		 * could now be dead and have already released its
+		 * references by now. Calling on the new reference
+		 * with strong=0 and a tmp_refs will not decrement
+		 * the node. The new_ref gets kfree'd below.
+		 */
+		binder_cleanup_ref_olocked(new_ref);
+		ref = NULL;
+	}
+
 	binder_proc_unlock(proc);
 	if (new_ref && ref != new_ref)
 		/*
@@ -2492,14 +2548,11 @@ static int binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_thread *thread)
 {
 	struct binder_node *node = t->buffer->target_node;
-	struct binder_priority node_prio;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
 	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
-	node_prio.prio = node->min_priority;
-	node_prio.sched_policy = node->sched_policy;
 
 	if (oneway) {
 		BUG_ON(thread);
@@ -2527,8 +2580,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
 		thread = binder_select_thread_ilocked(proc);
 
 	if (thread) {
-		binder_transaction_priority(thread->task, t, node_prio,
-					    node->inherit_rt);
+		binder_transaction_priority(thread, t, node);
 		binder_enqueue_thread_work_ilocked(thread, &t->work);
 	} else if (!pending_async) {
 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
@@ -2615,6 +2667,7 @@ static void binder_transaction(struct binder_proc *proc,
 	int t_debug_id = atomic_inc_return(&binder_last_id);
 	char *secctx = NULL;
 	u32 secctx_sz = 0;
+	bool is_nested = false;
 
 	e = binder_transaction_log_add(&binder_transaction_log);
 	e->debug_id = t_debug_id;
@@ -2791,6 +2844,7 @@ static void binder_transaction(struct binder_proc *proc,
 				atomic_inc(&from->tmp_ref);
 				target_thread = from;
 				spin_unlock(&tmp->lock);
+				is_nested = true;
 				break;
 			}
 			spin_unlock(&tmp->lock);
@@ -2855,6 +2909,7 @@ static void binder_transaction(struct binder_proc *proc,
 	t->to_thread = target_thread;
 	t->code = tr->code;
 	t->flags = tr->flags;
+	t->is_nested = is_nested;
 	if (!(t->flags & TF_ONE_WAY) &&
 	    binder_supported_policy(current->policy)) {
 		/* Inherit supported policies for synchronous transactions */
@@ -3192,9 +3247,15 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
 		target_proc->outstanding_txns++;
 		binder_inner_proc_unlock(target_proc);
+		if (in_reply_to->is_nested) {
+			spin_lock(&thread->prio_lock);
+			thread->prio_state = BINDER_PRIO_PENDING;
+			thread->prio_next = in_reply_to->saved_priority;
+			spin_unlock(&thread->prio_lock);
+		}
 		wake_up_interruptible_sync(&target_thread->wait);
 		trace_android_vh_binder_restore_priority(in_reply_to, current);
-		binder_restore_priority(current, in_reply_to->saved_priority);
+		binder_restore_priority(thread, &in_reply_to->saved_priority);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
@@ -3308,7 +3369,7 @@ static void binder_transaction(struct binder_proc *proc,
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
 		trace_android_vh_binder_restore_priority(in_reply_to, current);
-		binder_restore_priority(current, in_reply_to->saved_priority);
+		binder_restore_priority(thread, &in_reply_to->saved_priority);
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
 		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
@@ -3979,7 +4040,7 @@ static int binder_thread_read(struct binder_proc *proc,
 					binder_stop_on_user_error < 2);
 		}
 		trace_android_vh_binder_restore_priority(NULL, current);
-		binder_restore_priority(current, proc->default_priority);
+		binder_restore_priority(thread, &proc->default_priority);
 	}
 
 	if (non_block) {
@@ -4201,14 +4262,10 @@ static int binder_thread_read(struct binder_proc *proc,
 		BUG_ON(t->buffer == NULL);
 		if (t->buffer->target_node) {
 			struct binder_node *target_node = t->buffer->target_node;
-			struct binder_priority node_prio;
 
 			trd->target.ptr = target_node->ptr;
 			trd->cookie = target_node->cookie;
-			node_prio.sched_policy = target_node->sched_policy;
-			node_prio.prio = target_node->min_priority;
-			binder_transaction_priority(current, t, node_prio,
-						    target_node->inherit_rt);
+			binder_transaction_priority(thread, t, target_node);
 			cmd = BR_TRANSACTION;
 		} else {
 			trd->target.ptr = 0;
@@ -4443,6 +4500,8 @@ static struct binder_thread *binder_get_thread_ilocked(
 	thread->return_error.cmd = BR_OK;
 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
 	thread->reply_error.cmd = BR_OK;
+	spin_lock_init(&thread->prio_lock);
+	thread->prio_state = BINDER_PRIO_SET;
 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
 	return thread;
 }
drivers/android/binder_internal.h  +16 −0

@@ -366,6 +366,12 @@ struct binder_priority {
 	int prio;
 };
 
+enum binder_prio_state {
+	BINDER_PRIO_SET,	/* desired priority set */
+	BINDER_PRIO_PENDING,	/* initiated a saved priority restore */
+	BINDER_PRIO_ABORT,	/* abort the pending priority restore */
+};
+
 /**
  * struct binder_proc - binder process bookkeeping
  * @proc_node:            element for binder_procs list
@@ -526,6 +532,12 @@ static inline const struct cred *binder_get_cred(struct binder_proc *proc)
  *                        when outstanding transactions are cleaned up
  *                        (protected by @proc->inner_lock)
  * @task:                 struct task_struct for this thread
+ * @prio_lock:            protects thread priority fields
+ * @prio_next:            saved priority to be restored next
+ *                        (protected by @prio_lock)
+ * @prio_state:           state of the priority restore process as
+ *                        defined by enum binder_prio_state
+ *                        (protected by @prio_lock)
  *
  * Bookkeeping structure for binder threads.
  */
@@ -546,6 +558,9 @@
 	atomic_t tmp_ref;
 	bool is_dead;
 	struct task_struct *task;
+	spinlock_t prio_lock;
+	struct binder_priority prio_next;
+	enum binder_prio_state prio_state;
 };
 
 /**
@@ -582,6 +597,7 @@
 	struct binder_priority	priority;
 	struct binder_priority	saved_priority;
 	bool	set_priority_called;
+	bool	is_nested;
 	kuid_t	sender_euid;
 	struct list_head fd_fixups;
 	binder_uintptr_t security_ctx;
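The prio_lock/prio_state handshake introduced above is easiest to see in isolation. Below is a minimal userspace sketch of that state machine, not kernel code: a restore that is still pending when a nested transaction arrives gets flipped to BINDER_PRIO_ABORT, so the in-flight restore bails out and the nested transaction saves the pending value instead of the task's live one. Only the enum names mirror binder_internal.h; the struct, the pthread mutex, and the integer priorities are illustrative stand-ins.

/* Userspace model of the binder prio_state handshake; illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum binder_prio_state {
	BINDER_PRIO_SET,	/* desired priority set */
	BINDER_PRIO_PENDING,	/* initiated a saved priority restore */
	BINDER_PRIO_ABORT,	/* abort the pending priority restore */
};

struct model_thread {
	pthread_mutex_t prio_lock;
	enum binder_prio_state prio_state;
	int prio_next;		/* stand-in for struct binder_priority */
};

/* Reply path: a saved priority is about to be restored on this thread. */
static void begin_restore(struct model_thread *t, int saved)
{
	pthread_mutex_lock(&t->prio_lock);
	t->prio_state = BINDER_PRIO_PENDING;
	t->prio_next = saved;
	pthread_mutex_unlock(&t->prio_lock);
}

/* A nested transaction arrives before the restore completes: save the
 * pending value instead of the live one and ask the restore to abort. */
static int nested_transaction(struct model_thread *t, int live_prio)
{
	int saved;

	pthread_mutex_lock(&t->prio_lock);
	if (t->prio_state == BINDER_PRIO_PENDING) {
		saved = t->prio_next;
		t->prio_state = BINDER_PRIO_ABORT;
	} else {
		saved = live_prio;
	}
	pthread_mutex_unlock(&t->prio_lock);
	return saved;
}

/* The deferred restore: bail out if a nested transaction overrode it. */
static bool finish_restore(struct model_thread *t)
{
	bool aborted;

	pthread_mutex_lock(&t->prio_lock);
	aborted = (t->prio_state == BINDER_PRIO_ABORT);
	if (!aborted)
		t->prio_state = BINDER_PRIO_SET;
	pthread_mutex_unlock(&t->prio_lock);
	return !aborted;
}

int main(void)
{
	struct model_thread t = {
		.prio_lock = PTHREAD_MUTEX_INITIALIZER,
		.prio_state = BINDER_PRIO_SET,
	};

	begin_restore(&t, 120);		/* restore to a lower priority is pending */
	printf("nested txn saved prio %d\n", nested_transaction(&t, 110));
	printf("restore applied: %d\n", finish_restore(&t));	/* 0: aborted */
	return 0;
}

Built with cc -pthread, the sketch prints that the nested call saved the pending value (120) and that the deferred restore was then aborted, which is the ordering the binder.c hunks above are enforcing.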
drivers/base/arch_topology.c  +15 −5

@@ -278,6 +278,16 @@ core_initcall(free_raw_capacity);
 #endif
 
 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * This function returns the logic cpu number of the node.
+ * There are basically three kinds of return values:
+ * (1) logic cpu number which is > 0.
+ * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
+ * there is no possible logical CPU in the kernel to match. This happens
+ * when CONFIG_NR_CPUS is configure to be smaller than the number of
+ * CPU nodes in DT. We need to just ignore this case.
+ * (3) -1 if the node does not exist in the device tree
+ */
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -291,7 +301,8 @@ static int __init get_cpu_for_node(struct device_node *node)
 	if (cpu >= 0)
 		topology_parse_cpu_capacity(cpu_node, cpu);
 	else
-		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
+			cpu_node, cpumask_pr_args(cpu_possible_mask));
 
 	of_node_put(cpu_node);
 	return cpu;
@@ -316,9 +327,8 @@ static int __init parse_core(struct device_node *core, int package_id,
 			cpu_topology[cpu].package_id = package_id;
 			cpu_topology[cpu].core_id = core_id;
 			cpu_topology[cpu].thread_id = i;
-		} else {
-			pr_err("%pOF: Can't get CPU for thread\n",
-			       t);
+		} else if (cpu != -ENODEV) {
+			pr_err("%pOF: Can't get CPU for thread\n", t);
 			of_node_put(t);
 			return -EINVAL;
 		}
@@ -337,7 +347,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 
 		cpu_topology[cpu].package_id = package_id;
 		cpu_topology[cpu].core_id = core_id;
-	} else if (leaf) {
+	} else if (leaf && cpu != -ENODEV) {
 		pr_err("%pOF: Can't get CPU for leaf core\n", core);
 		return -EINVAL;
 	}
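The comment added to get_cpu_for_node() spells out three outcomes, and the parse_core() changes only treat the "node missing" case as fatal while -ENODEV (a DT CPU beyond CONFIG_NR_CPUS) is silently ignored. The standalone C sketch below models that return-value contract; the function signatures, arguments, and the NR_CPUS constant here are made up for illustration and are not the kernel API.

/* Stand-alone model of the three get_cpu_for_node() outcomes; illustrative only. */
#include <errno.h>
#include <stdio.h>

#define NR_CPUS 4	/* pretend CONFIG_NR_CPUS=4 */

/* Returns a logical cpu (>= 0), -ENODEV (node present but no possible cpu),
 * or -1 (node missing), mirroring the comment added by the patch. */
static int get_cpu_for_node(int dt_cpu_index, int node_exists)
{
	if (!node_exists)
		return -1;
	if (dt_cpu_index >= NR_CPUS)
		return -ENODEV;	/* DT describes more CPUs than the kernel can use */
	return dt_cpu_index;
}

static int parse_core(int dt_cpu_index, int node_exists)
{
	int cpu = get_cpu_for_node(dt_cpu_index, node_exists);

	if (cpu >= 0) {
		printf("cpu%d: topology recorded\n", cpu);
	} else if (cpu != -ENODEV) {
		/* Only the "node missing" case remains a hard error. */
		fprintf(stderr, "Can't get CPU for core\n");
		return -EINVAL;
	}
	/* -ENODEV falls through and is ignored, as the patch intends. */
	return 0;
}

int main(void)
{
	parse_core(1, 1);	/* normal CPU: recorded */
	parse_core(6, 1);	/* beyond NR_CPUS: ignored */
	parse_core(0, 0);	/* missing node: error */
	return 0;
}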