
Unverified commit 4220ba2d authored by Michael Bestas

Merge tag 'LA.UM.9.14.r1-20700-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4 into android13-5.4-lahaina

"LA.UM.9.14.r1-20700-LAHAINA.QSSI13.0"

* tag 'LA.UM.9.14.r1-20700-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4:
  msm: adsprpc: fix UAF process init_mem
  msm: ipa3: Add multi IDU support for external router mode FR
  cpu-topology: Don't error on more than CONFIG_NR_CPUS CPUs in device tree
  soc: qcom: socinfo: correct the name of softsku_id
  cnss2: Add code to fallback to non-contiguous FW mem allocation
  defconfig: sdxlemur: Enable configs on sdxlemur
  soc: qcom: socinfo: Get SKU ID from kernel command line
  BACKPORT: f2fs: do not set compression bit if kernel doesn't support
  UPSTREAM: f2fs: fix UAF in f2fs_available_free_memory
  ANDROID: f2fs: check nr_pages for readahead
  UPSTREAM: f2fs: guarantee to write dirty data when enabling checkpoint back
  FROMGIT: f2fs: flush data when enabling checkpoint back
  BACKPORT: f2fs: introduce FI_COMPRESS_RELEASED instead of using IMMUTABLE bit
  BACKPORT: f2fs: enforce the immutable flag on open files
  BACKPORT: f2fs: change i_compr_blocks of inode to atomic value
  BACKPORT: f2fs: make file immutable even if releasing zero compression block
  BACKPORT: f2fs: compress: remove unneeded preallocation
  ANDROID: binder: fix pending prio state for early exit
  ANDROID: binder: fix race in priority restore
  ANDROID: binder: switch task argument for binder_thread
  ANDROID: binder: pass desired priority by reference
  ANDROID: binder: fold common setup of node_prio
  BACKPORT: Bluetooth: L2CAP: Fix use-after-free caused by l2cap_chan_put
  FROMLIST: binder: fix UAF of ref->proc caused by race condition

Change-Id: I18e9ed7b3659a920fb9048976d171d3666fe4ed3
parents 982e25d1 676ce741
+1 −1
-LTS_5.4.197_3970bc62738d
+LTS_5.4.197_26eb689452c8
+36 −0
@@ -468,3 +468,39 @@ CONFIG_NET_ACT_CT=y
 CONFIG_NET_TC_SKB_EXT=y
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCHED_ACT_MPLS_QGKI=y
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=y
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+# CONFIG_DM_UNSTRIPED is not set
+# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+# CONFIG_DM_WRITECACHE is not set
+# CONFIG_DM_ERA is not set
+# CONFIG_DM_CLONE is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_DUST is not set
+# CONFIG_DM_INIT is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_FLAKEY is not set
+CONFIG_DM_VERITY=y
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_AVB is not set
+# CONFIG_DM_VERITY_FEC is not set
+# CONFIG_DM_SWITCH is not set
+# CONFIG_DM_LOG_WRITES is not set
+# CONFIG_DM_INTEGRITY is not set
+# CONFIG_DM_BOW is not set
+# CONFIG_DEVMEM is not set
+CONFIG_DAX=y
+CONFIG_LSM_MMAP_MIN_ADDR=32768
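
Note on the added device-mapper block: built-in (=y) symbols become compile-time constants, so kernel code can key off them without runtime probing. A minimal sketch, assuming a hypothetical built-in init hook (not part of this change):

#include <linux/init.h>
#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/printk.h>

/* Hypothetical: IS_ENABLED(CONFIG_DM_VERITY) evaluates to 1 when the
 * symbol is =y or =m, matching the CONFIG_DM_VERITY=y line above. */
static int __init dm_config_report_init(void)
{
	if (IS_ENABLED(CONFIG_DM_VERITY))
		pr_info("dm-verity compiled in\n");
	if (!IS_ENABLED(CONFIG_DM_CRYPT))
		pr_info("dm-crypt disabled in this defconfig\n");
	return 0;
}
late_initcall(dm_config_report_init);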
+98 −39
@@ -666,20 +666,26 @@ static int to_kernel_prio(int policy, int user_priority)
 		return MAX_USER_RT_PRIO - 1 - user_priority;
 }
 
-static void binder_do_set_priority(struct task_struct *task,
-				   struct binder_priority desired,
+static void binder_do_set_priority(struct binder_thread *thread,
+				   const struct binder_priority *desired,
 				   bool verify)
 {
+	struct task_struct *task = thread->task;
 	int priority; /* user-space prio value */
 	bool has_cap_nice;
-	unsigned int policy = desired.sched_policy;
+	unsigned int policy = desired->sched_policy;
 
-	if (task->policy == policy && task->normal_prio == desired.prio)
+	if (task->policy == policy && task->normal_prio == desired->prio) {
+		spin_lock(&thread->prio_lock);
+		if (thread->prio_state == BINDER_PRIO_PENDING)
+			thread->prio_state = BINDER_PRIO_SET;
+		spin_unlock(&thread->prio_lock);
 		return;
+	}
 
 	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
 
-	priority = to_userspace_prio(policy, desired.prio);
+	priority = to_userspace_prio(policy, desired->prio);
 
 	if (verify && is_rt_policy(policy) && !has_cap_nice) {
 		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
@@ -704,16 +710,30 @@ static void binder_do_set_priority(struct task_struct *task,
 		}
 	}
 
-	if (policy != desired.sched_policy ||
-	    to_kernel_prio(policy, priority) != desired.prio)
+	if (policy != desired->sched_policy ||
+	    to_kernel_prio(policy, priority) != desired->prio)
 		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
 			     "%d: priority %d not allowed, using %d instead\n",
-			      task->pid, desired.prio,
+			      task->pid, desired->prio,
 			      to_kernel_prio(policy, priority));
 
 	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
 				  to_kernel_prio(policy, priority),
-				  desired.prio);
+				  desired->prio);
+
+	spin_lock(&thread->prio_lock);
+	if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
+		/*
+		 * A new priority has been set by an incoming nested
+		 * transaction. Abort this priority restore and allow
+		 * the transaction to run at the new desired priority.
+		 */
+		spin_unlock(&thread->prio_lock);
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			"%d: %s: aborting priority restore\n",
+			thread->pid, __func__);
+		return;
+	}
 
 	/* Set the actual priority */
 	if (task->policy != policy || is_rt_policy(policy)) {
@@ -727,37 +747,42 @@ static void binder_do_set_priority(struct task_struct *task,
 	}
 	if (is_fair_policy(policy))
 		set_user_nice(task, priority);
+
+	thread->prio_state = BINDER_PRIO_SET;
+	spin_unlock(&thread->prio_lock);
 }
 
-static void binder_set_priority(struct task_struct *task,
-				struct binder_priority desired)
+static void binder_set_priority(struct binder_thread *thread,
+				const struct binder_priority *desired)
 {
-	binder_do_set_priority(task, desired, /* verify = */ true);
+	binder_do_set_priority(thread, desired, /* verify = */ true);
 }
 
-static void binder_restore_priority(struct task_struct *task,
-				    struct binder_priority desired)
+static void binder_restore_priority(struct binder_thread *thread,
+				    const struct binder_priority *desired)
 {
-	binder_do_set_priority(task, desired, /* verify = */ false);
+	binder_do_set_priority(thread, desired, /* verify = */ false);
 }
 
-static void binder_transaction_priority(struct task_struct *task,
+static void binder_transaction_priority(struct binder_thread *thread,
 					struct binder_transaction *t,
-					struct binder_priority node_prio,
-					bool inherit_rt)
+					struct binder_node *node)
 {
-	struct binder_priority desired_prio = t->priority;
+	struct task_struct *task = thread->task;
+	struct binder_priority desired = t->priority;
+	const struct binder_priority node_prio = {
+		.sched_policy = node->sched_policy,
+		.prio = node->min_priority,
+	};
 
 	if (t->set_priority_called)
 		return;
 
 	t->set_priority_called = true;
-	t->saved_priority.sched_policy = task->policy;
-	t->saved_priority.prio = task->normal_prio;
 
-	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
-		desired_prio.prio = NICE_TO_PRIO(0);
-		desired_prio.sched_policy = SCHED_NORMAL;
+	if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
+		desired.prio = NICE_TO_PRIO(0);
+		desired.sched_policy = SCHED_NORMAL;
 	}
 
 	if (node_prio.prio < t->priority.prio ||
@@ -770,10 +795,29 @@ static void binder_transaction_priority(struct task_struct *task,
 		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
 		 * run unbounded, unlike SCHED_RR.
 		 */
-		desired_prio = node_prio;
+		desired = node_prio;
 	}
 
-	binder_set_priority(task, desired_prio);
+	spin_lock(&thread->prio_lock);
+	if (thread->prio_state == BINDER_PRIO_PENDING) {
+		/*
+		 * Task is in the process of changing priorities
+		 * saving its current values would be incorrect.
+		 * Instead, save the pending priority and signal
+		 * the task to abort the priority restore.
+		 */
+		t->saved_priority = thread->prio_next;
+		thread->prio_state = BINDER_PRIO_ABORT;
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			"%d: saved pending priority %d\n",
+			current->pid, thread->prio_next.prio);
+	} else {
+		t->saved_priority.sched_policy = task->policy;
+		t->saved_priority.prio = task->normal_prio;
+	}
+	spin_unlock(&thread->prio_lock);
+
+	binder_set_priority(thread, &desired);
 	trace_android_vh_binder_set_priority(t, task);
 }
 
@@ -1486,6 +1530,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
 	}
 	ret = binder_inc_ref_olocked(ref, strong, target_list);
 	*rdata = ref->data;
+	if (ret && ref == new_ref) {
+		/*
+		 * Cleanup the failed reference here as the target
+		 * could now be dead and have already released its
+		 * references by now. Calling on the new reference
+		 * with strong=0 and a tmp_refs will not decrement
+		 * the node. The new_ref gets kfree'd below.
+		 */
+		binder_cleanup_ref_olocked(new_ref);
+		ref = NULL;
+	}
+
 	binder_proc_unlock(proc);
 	if (new_ref && ref != new_ref)
 		/*
@@ -2492,14 +2548,11 @@ static int binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_thread *thread)
 {
 	struct binder_node *node = t->buffer->target_node;
-	struct binder_priority node_prio;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
 	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
-	node_prio.prio = node->min_priority;
-	node_prio.sched_policy = node->sched_policy;
 
 	if (oneway) {
 		BUG_ON(thread);
@@ -2527,8 +2580,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
 		thread = binder_select_thread_ilocked(proc);
 
 	if (thread) {
-		binder_transaction_priority(thread->task, t, node_prio,
-					    node->inherit_rt);
+		binder_transaction_priority(thread, t, node);
 		binder_enqueue_thread_work_ilocked(thread, &t->work);
 	} else if (!pending_async) {
 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
@@ -2615,6 +2667,7 @@ static void binder_transaction(struct binder_proc *proc,
 	int t_debug_id = atomic_inc_return(&binder_last_id);
 	char *secctx = NULL;
 	u32 secctx_sz = 0;
+	bool is_nested = false;
 
 	e = binder_transaction_log_add(&binder_transaction_log);
 	e->debug_id = t_debug_id;
@@ -2791,6 +2844,7 @@ static void binder_transaction(struct binder_proc *proc,
 					atomic_inc(&from->tmp_ref);
 					target_thread = from;
 					spin_unlock(&tmp->lock);
+					is_nested = true;
 					break;
 				}
 				spin_unlock(&tmp->lock);
@@ -2855,6 +2909,7 @@ static void binder_transaction(struct binder_proc *proc,
 	t->to_thread = target_thread;
 	t->code = tr->code;
 	t->flags = tr->flags;
+	t->is_nested = is_nested;
 	if (!(t->flags & TF_ONE_WAY) &&
 	    binder_supported_policy(current->policy)) {
 		/* Inherit supported policies for synchronous transactions */
@@ -3192,9 +3247,15 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
 		target_proc->outstanding_txns++;
 		binder_inner_proc_unlock(target_proc);
+		if (in_reply_to->is_nested) {
+			spin_lock(&thread->prio_lock);
+			thread->prio_state = BINDER_PRIO_PENDING;
+			thread->prio_next = in_reply_to->saved_priority;
+			spin_unlock(&thread->prio_lock);
+		}
 		wake_up_interruptible_sync(&target_thread->wait);
 		trace_android_vh_binder_restore_priority(in_reply_to, current);
-		binder_restore_priority(current, in_reply_to->saved_priority);
+		binder_restore_priority(thread, &in_reply_to->saved_priority);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
@@ -3308,7 +3369,7 @@ static void binder_transaction(struct binder_proc *proc,
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
 		trace_android_vh_binder_restore_priority(in_reply_to, current);
-		binder_restore_priority(current, in_reply_to->saved_priority);
+		binder_restore_priority(thread, &in_reply_to->saved_priority);
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
 		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
@@ -3979,7 +4040,7 @@ static int binder_thread_read(struct binder_proc *proc,
 						 binder_stop_on_user_error < 2);
 		}
 		trace_android_vh_binder_restore_priority(NULL, current);
-		binder_restore_priority(current, proc->default_priority);
+		binder_restore_priority(thread, &proc->default_priority);
 	}
 
 	if (non_block) {
@@ -4201,14 +4262,10 @@ static int binder_thread_read(struct binder_proc *proc,
 		BUG_ON(t->buffer == NULL);
 		if (t->buffer->target_node) {
 			struct binder_node *target_node = t->buffer->target_node;
-			struct binder_priority node_prio;
 
 			trd->target.ptr = target_node->ptr;
 			trd->cookie =  target_node->cookie;
-			node_prio.sched_policy = target_node->sched_policy;
-			node_prio.prio = target_node->min_priority;
-			binder_transaction_priority(current, t, node_prio,
-						    target_node->inherit_rt);
+			binder_transaction_priority(thread, t, target_node);
 			cmd = BR_TRANSACTION;
 		} else {
 			trd->target.ptr = 0;
@@ -4443,6 +4500,8 @@ static struct binder_thread *binder_get_thread_ilocked(
 	thread->return_error.cmd = BR_OK;
 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
 	thread->reply_error.cmd = BR_OK;
+	spin_lock_init(&thread->prio_lock);
+	thread->prio_state = BINDER_PRIO_SET;
 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
 	return thread;
 }
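
Reviewer note on the binder hunks above: they close a race where a thread restoring its pre-transaction priority can be overtaken by a nested incoming transaction. A simplified userspace model of that handshake, with the kernel locking and driver context deliberately elided (illustrative only, not kernel code):

#include <stdio.h>

/* Mirrors enum binder_prio_state from the patch. */
enum prio_state { PRIO_SET, PRIO_PENDING, PRIO_ABORT };

struct model_thread {
	enum prio_state state;	/* models thread->prio_state */
	int prio_next;		/* models thread->prio_next.prio */
};

/* Reply delivery: mark the restore pending before waking the caller,
 * as the in_reply_to->is_nested branch in binder_transaction() does. */
static void begin_restore(struct model_thread *t, int saved)
{
	t->prio_next = saved;
	t->state = PRIO_PENDING;
}

/* A nested transaction arrives mid-restore: it takes the pending value
 * as its saved_priority and signals the restorer to abort, as
 * binder_transaction_priority() does. */
static int nested_txn_arrives(struct model_thread *t)
{
	t->state = PRIO_ABORT;
	return t->prio_next;
}

/* Restore path (binder_do_set_priority() with verify == false): bail
 * out if a nested transaction already installed a new priority. */
static void finish_restore(struct model_thread *t)
{
	if (t->state == PRIO_ABORT) {
		printf("aborting priority restore\n");
		return;
	}
	t->state = PRIO_SET;
}

int main(void)
{
	struct model_thread t = { PRIO_SET, 0 };

	begin_restore(&t, 120);	/* reply queued, restore pending */
	nested_txn_arrives(&t);	/* nested txn takes over prio 120 */
	finish_restore(&t);	/* prints the abort message */
	return 0;
}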
+16 −0
@@ -366,6 +366,12 @@ struct binder_priority {
 	int prio;
 };
 
+enum binder_prio_state {
+	BINDER_PRIO_SET,	/* desired priority set */
+	BINDER_PRIO_PENDING,	/* initiated a saved priority restore */
+	BINDER_PRIO_ABORT,	/* abort the pending priority restore */
+};
+
 /**
  * struct binder_proc - binder process bookkeeping
  * @proc_node:            element for binder_procs list
@@ -526,6 +532,12 @@ static inline const struct cred *binder_get_cred(struct binder_proc *proc)
  *                        when outstanding transactions are cleaned up
  *                        (protected by @proc->inner_lock)
  * @task:                 struct task_struct for this thread
+ * @prio_lock:            protects thread priority fields
+ * @prio_next:            saved priority to be restored next
+ *                        (protected by @prio_lock)
+ * @prio_state:           state of the priority restore process as
+ *                        defined by enum binder_prio_state
+ *                        (protected by @prio_lock)
  *
  * Bookkeeping structure for binder threads.
  */
@@ -546,6 +558,9 @@ struct binder_thread {
 	atomic_t tmp_ref;
 	bool is_dead;
 	struct task_struct *task;
+	spinlock_t prio_lock;
+	struct binder_priority prio_next;
+	enum binder_prio_state prio_state;
};
 
 /**
@@ -582,6 +597,7 @@ struct binder_transaction {
 	struct binder_priority	priority;
 	struct binder_priority	saved_priority;
 	bool    set_priority_called;
+	bool    is_nested;
 	kuid_t	sender_euid;
 	struct list_head fd_fixups;
 	binder_uintptr_t security_ctx;
+15 −5
@@ -278,6 +278,16 @@ core_initcall(free_raw_capacity);
 #endif
 
 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * This function returns the logic cpu number of the node.
+ * There are basically three kinds of return values:
+ * (1) logic cpu number which is > 0.
+ * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
+ * there is no possible logical CPU in the kernel to match. This happens
+ * when CONFIG_NR_CPUS is configure to be smaller than the number of
+ * CPU nodes in DT. We need to just ignore this case.
+ * (3) -1 if the node does not exist in the device tree
+ */
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -291,7 +301,8 @@ static int __init get_cpu_for_node(struct device_node *node)
 	if (cpu >= 0)
 		topology_parse_cpu_capacity(cpu_node, cpu);
 	else
-		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
+			cpu_node, cpumask_pr_args(cpu_possible_mask));
 
 	of_node_put(cpu_node);
 	return cpu;
@@ -316,9 +327,8 @@ static int __init parse_core(struct device_node *core, int package_id,
 				cpu_topology[cpu].package_id = package_id;
 				cpu_topology[cpu].core_id = core_id;
 				cpu_topology[cpu].thread_id = i;
-			} else {
-				pr_err("%pOF: Can't get CPU for thread\n",
-				       t);
+			} else if (cpu != -ENODEV) {
+				pr_err("%pOF: Can't get CPU for thread\n", t);
 				of_node_put(t);
 				return -EINVAL;
 			}
@@ -337,7 +347,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 
 		cpu_topology[cpu].package_id = package_id;
 		cpu_topology[cpu].core_id = core_id;
-	} else if (leaf) {
+	} else if (leaf && cpu != -ENODEV) {
 		pr_err("%pOF: Can't get CPU for leaf core\n", core);
 		return -EINVAL;
 	}
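
A caller-side sketch of the three-way return contract the new get_cpu_for_node() comment describes; the helper below is hypothetical (parse_core()/parse_cluster() are the real consumers), showing why -ENODEV is now skipped rather than treated as an error:

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical caller: one branch per return class of get_cpu_for_node(). */
static int __init example_consume(struct device_node *core)
{
	int cpu = get_cpu_for_node(core);

	if (cpu >= 0)
		return cpu;	/* (1) a valid logical CPU number */
	if (cpu == -ENODEV)
		return 0;	/* (2) CPU beyond CONFIG_NR_CPUS: ignore it */
	return -EINVAL;		/* (3) no cpu phandle in the DT: real error */
}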