Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 366e92d4 authored by Greg Kroah-Hartman
Browse files

Merge branch 'android11-5.4' into branch 'android11-5.4-lts'



Sync up with android11-5.4 for the following commits:

7e6cbbe7 Merge tag 'android11-5.4.210_r00' into android11-5.4
297aa834 ANDROID: incfs: Add check for ATTR_KILL_SUID and ATTR_MODE in incfs_setattr
26eb6894 BACKPORT: f2fs: do not set compression bit if kernel doesn't support
17bdd623 UPSTREAM: f2fs: fix UAF in f2fs_available_free_memory
7e4f9722 ANDROID: f2fs: check nr_pages for readahead
f711e743 UPSTREAM: f2fs: guarantee to write dirty data when enabling checkpoint back
d7b2931c FROMGIT: f2fs: flush data when enabling checkpoint back
601b2ed3 BACKPORT: f2fs: introduce FI_COMPRESS_RELEASED instead of using IMMUTABLE bit
d2972f90 BACKPORT: f2fs: enforce the immutable flag on open files
d2e72fa0 BACKPORT: f2fs: change i_compr_blocks of inode to atomic value
86290591 BACKPORT: f2fs: make file immutable even if releasing zero compression block
3cf0f824 BACKPORT: f2fs: compress: remove unneeded preallocation
012ab662 ANDROID: binder: fix pending prio state for early exit
da97a108 ANDROID: binder: fix race in priority restore
308230b9 ANDROID: binder: switch task argument for binder_thread
807b6742 ANDROID: binder: pass desired priority by reference
88c3fd64 ANDROID: binder: fold common setup of node_prio
fffb2b5b BACKPORT: Bluetooth: L2CAP: Fix use-after-free caused by l2cap_chan_put
458b37a8 FROMLIST: binder: fix UAF of ref->proc caused by race condition

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib2da4c15fc4b6ad84bc36ba9b2f8b3d0a2f84e0a
parents 79028819 7e6cbbe7
Loading
Loading
Loading
Loading
+86 −39
Original line number Diff line number Diff line
@@ -650,20 +650,26 @@ static int to_kernel_prio(int policy, int user_priority)
		return MAX_USER_RT_PRIO - 1 - user_priority;
}

static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
static void binder_do_set_priority(struct binder_thread *thread,
				   const struct binder_priority *desired,
				   bool verify)
{
	struct task_struct *task = thread->task;
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;
	unsigned int policy = desired->sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
	if (task->policy == policy && task->normal_prio == desired->prio) {
		spin_lock(&thread->prio_lock);
		if (thread->prio_state == BINDER_PRIO_PENDING)
			thread->prio_state = BINDER_PRIO_SET;
		spin_unlock(&thread->prio_lock);
		return;
	}

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);
	priority = to_userspace_prio(policy, desired->prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
@@ -688,16 +694,30 @@ static void binder_do_set_priority(struct task_struct *task,
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
	if (policy != desired->sched_policy ||
	    to_kernel_prio(policy, priority) != desired->prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			      task->pid, desired.prio,
			      task->pid, desired->prio,
			      to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);
				  desired->prio);

	spin_lock(&thread->prio_lock);
	if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
		/*
		 * A new priority has been set by an incoming nested
		 * transaction. Abort this priority restore and allow
		 * the transaction to run at the new desired priority.
		 */
		spin_unlock(&thread->prio_lock);
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			"%d: %s: aborting priority restore\n",
			thread->pid, __func__);
		return;
	}

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
@@ -711,37 +731,42 @@ static void binder_do_set_priority(struct task_struct *task,
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);

	thread->prio_state = BINDER_PRIO_SET;
	spin_unlock(&thread->prio_lock);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
static void binder_set_priority(struct binder_thread *thread,
				const struct binder_priority *desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
	binder_do_set_priority(thread, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
static void binder_restore_priority(struct binder_thread *thread,
				    const struct binder_priority *desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
	binder_do_set_priority(thread, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
static void binder_transaction_priority(struct binder_thread *thread,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
					struct binder_node *node)
{
	struct binder_priority desired_prio = t->priority;
	struct task_struct *task = thread->task;
	struct binder_priority desired = t->priority;
	const struct binder_priority node_prio = {
		.sched_policy = node->sched_policy,
		.prio = node->min_priority,
	};

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
		desired.prio = NICE_TO_PRIO(0);
		desired.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
@@ -754,10 +779,29 @@ static void binder_transaction_priority(struct task_struct *task,
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
		desired = node_prio;
	}

	spin_lock(&thread->prio_lock);
	if (thread->prio_state == BINDER_PRIO_PENDING) {
		/*
		 * Task is in the process of changing priorities;
		 * saving its current values would be incorrect.
		 * Instead, save the pending priority and signal
		 * the task to abort the priority restore.
		 */
		t->saved_priority = thread->prio_next;
		thread->prio_state = BINDER_PRIO_ABORT;
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			"%d: saved pending priority %d\n",
			current->pid, thread->prio_next.prio);
	} else {
		t->saved_priority.sched_policy = task->policy;
		t->saved_priority.prio = task->normal_prio;
	}
	spin_unlock(&thread->prio_lock);

	binder_set_priority(task, desired_prio);
	binder_set_priority(thread, &desired);
	trace_android_vh_binder_set_priority(t, task);
}

@@ -2488,14 +2532,11 @@ static int binder_proc_transaction(struct binder_transaction *t,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
@@ -2523,8 +2564,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_transaction_priority(thread, t, node);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
@@ -2611,6 +2651,7 @@ static void binder_transaction(struct binder_proc *proc,
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;
	bool is_nested = false;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
@@ -2787,6 +2828,7 @@ static void binder_transaction(struct binder_proc *proc,
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					is_nested = true;
					break;
				}
				spin_unlock(&tmp->lock);
@@ -2851,6 +2893,7 @@ static void binder_transaction(struct binder_proc *proc,
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->is_nested = is_nested;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
@@ -3188,9 +3231,15 @@ static void binder_transaction(struct binder_proc *proc,
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		if (in_reply_to->is_nested) {
			spin_lock(&thread->prio_lock);
			thread->prio_state = BINDER_PRIO_PENDING;
			thread->prio_next = in_reply_to->saved_priority;
			spin_unlock(&thread->prio_lock);
		}
		wake_up_interruptible_sync(&target_thread->wait);
		trace_android_vh_binder_restore_priority(in_reply_to, current);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_restore_priority(thread, &in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
@@ -3304,7 +3353,7 @@ static void binder_transaction(struct binder_proc *proc,
	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		trace_android_vh_binder_restore_priority(in_reply_to, current);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_restore_priority(thread, &in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
@@ -3975,7 +4024,7 @@ static int binder_thread_read(struct binder_proc *proc,
						 binder_stop_on_user_error < 2);
		}
		trace_android_vh_binder_restore_priority(NULL, current);
		binder_restore_priority(current, proc->default_priority);
		binder_restore_priority(thread, &proc->default_priority);
	}

	if (non_block) {
@@ -4197,14 +4246,10 @@ static int binder_thread_read(struct binder_proc *proc,
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			trd->target.ptr = target_node->ptr;
			trd->cookie =  target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			binder_transaction_priority(thread, t, target_node);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
@@ -4434,6 +4479,8 @@ static struct binder_thread *binder_get_thread_ilocked(
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	spin_lock_init(&thread->prio_lock);
	thread->prio_state = BINDER_PRIO_SET;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
+16 −0
Original line number Diff line number Diff line
@@ -366,6 +366,12 @@ struct binder_priority {
	int prio;
};

enum binder_prio_state {
	BINDER_PRIO_SET,	/* desired priority set */
	BINDER_PRIO_PENDING,	/* initiated a saved priority restore */
	BINDER_PRIO_ABORT,	/* abort the pending priority restore */
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
@@ -526,6 +532,12 @@ static inline const struct cred *binder_get_cred(struct binder_proc *proc)
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 * @prio_lock:            protects thread priority fields
 * @prio_next:            saved priority to be restored next
 *                        (protected by @prio_lock)
 * @prio_state:           state of the priority restore process as
 *                        defined by enum binder_prio_state
 *                        (protected by @prio_lock)
 *
 * Bookkeeping structure for binder threads.
 */
@@ -546,6 +558,9 @@ struct binder_thread {
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
	spinlock_t prio_lock;
	struct binder_priority prio_next;
	enum binder_prio_state prio_state;
};

/**
@@ -582,6 +597,7 @@ struct binder_transaction {
	struct binder_priority	priority;
	struct binder_priority	saved_priority;
	bool    set_priority_called;
	bool    is_nested;
	kuid_t	sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
+4 −24
Original line number Diff line number Diff line
@@ -809,6 +809,10 @@ static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cc->cluster_size &&
			!is_inode_flag_set(cc->inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
@@ -879,21 +883,16 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc, false);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;
@@ -949,25 +948,6 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
+4 −0
Original line number Diff line number Diff line
@@ -2359,6 +2359,10 @@ int f2fs_mpage_readpages(struct address_space *mapping,
	unsigned max_nr_pages = nr_pages;
	int ret = 0;

	/* this is real from f2fs_merkle_tree_readahead() in old kernel only. */
	if (!nr_pages)
		return 0;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
+22 −8
Original line number Diff line number Diff line
@@ -762,6 +762,7 @@ enum {
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_MAX,			/* max flag, never be used */
};

@@ -814,7 +815,7 @@ struct f2fs_inode_info {
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	u64 i_compr_blocks;			/* # of compressed blocks */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned int i_cluster_size;		/* cluster size */
@@ -2669,6 +2670,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}
@@ -2790,6 +2792,8 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
@@ -2810,6 +2814,8 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
@@ -3932,8 +3938,9 @@ static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
#endif

static inline void set_compress_context(struct inode *inode)
static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
@@ -3946,19 +3953,25 @@ static inline void set_compress_context(struct inode *inode)
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static inline u64 f2fs_disable_compressed_file(struct inode *inode)
static inline u32 f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 i_compr_blocks;

	if (!f2fs_compressed_file(inode))
		return 0;
	if (S_ISREG(inode->i_mode)) {
		if (get_dirty_pages(inode))
			return 1;
		if (fi->i_compr_blocks)
			return fi->i_compr_blocks;
		i_compr_blocks = atomic_read(&fi->i_compr_blocks);
		if (i_compr_blocks)
			return i_compr_blocks;
	}

	fi->i_flags &= ~F2FS_COMPR_FL;
@@ -4076,16 +4089,17 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !F2FS_I(inode)->i_compr_blocks)
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		F2FS_I(inode)->i_compr_blocks += diff;
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		F2FS_I(inode)->i_compr_blocks -= diff;
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
Loading