Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dbf37153 authored by Riley Andrews's avatar Riley Andrews Committed by alexax66
Browse files

android: binder: Disable preemption while holding the global binder lock.

Change-Id: I66ed44990d5c347d197e61dc49a37b5228c748d0

Used commit: https://github.com/omnirom/android_kernel_htc_flounder/commit/f681f0264fd2c51aa12190ff9be04622a7c8ca3f
parent 267a8f02
Loading
Loading
Loading
Loading
+114 −40
Original line number Diff line number Diff line
@@ -421,6 +421,7 @@ static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	preempt_disable();
	trace_binder_locked(tag);
}

@@ -428,8 +429,62 @@ static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
	preempt_enable();
}

/*
 * kzalloc() variant for callers that hold the global binder lock with
 * preemption disabled.  First tries an atomic (GFP_NOWAIT) allocation;
 * if that fails, preemption is briefly re-enabled so a sleeping
 * GFP_KERNEL allocation becomes legal, then disabled again before
 * returning.  Returns the zeroed allocation, or NULL if both attempts
 * fail.
 *
 * NOTE(review): preempt_enable_no_resched() deliberately skips the
 * reschedule check before the blocking call — presumably to avoid an
 * extra scheduler round-trip right before kzalloc() may sleep anyway.
 * Confirm against the kernel's preempt-locking rules; plain
 * preempt_enable() is the usual pairing.
 */
static inline void *kzalloc_preempt_disabled(size_t size)
{
	void *ptr;

	/* Fast path: atomic allocation that cannot sleep. */
	ptr = kzalloc(size, GFP_NOWAIT);
	if (ptr)
		return ptr;

	/* Slow path: drop preemption protection so kzalloc() may sleep. */
	preempt_enable_no_resched();
	ptr = kzalloc(size, GFP_KERNEL);
	preempt_disable();

	return ptr;
}

/*
 * copy_to_user() wrapper for code paths that run with preemption
 * disabled (under the global binder lock).  The copy may fault and
 * sleep, so preemption is dropped around it and re-disabled before
 * returning.  Returns the number of bytes that could not be copied
 * (0 on full success), exactly as copy_to_user() does.
 */
static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
{
	long uncopied;

	preempt_enable_no_resched();
	uncopied = copy_to_user(to, from, n);
	preempt_disable();

	return uncopied;
}

/*
 * copy_from_user() wrapper for code paths that run with preemption
 * disabled (under the global binder lock).  Preemption is dropped
 * around the copy, which may fault and sleep, then re-disabled.
 * Returns the number of bytes left uncopied (0 on full success).
 */
static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
{
	long uncopied;

	preempt_enable_no_resched();
	uncopied = copy_from_user(to, from, n);
	preempt_disable();

	return uncopied;
}

/*
 * get_user() wrapper usable while preemption is disabled: re-enables
 * preemption around the (possibly faulting/sleeping) user access and
 * disables it again.  Evaluates to get_user()'s result: 0 on success,
 * -EFAULT on a bad user pointer.  Must be a macro because get_user()
 * is polymorphic in the type of @x.
 */
#define get_user_preempt_disabled(x, ptr)	\
({						\
	int __ret;				\
	preempt_enable_no_resched();		\
	__ret = get_user(x, ptr);		\
	preempt_disable();			\
	__ret;					\
})

/*
 * put_user() wrapper usable while preemption is disabled: re-enables
 * preemption around the (possibly faulting/sleeping) user access and
 * disables it again.  Evaluates to put_user()'s result: 0 on success,
 * -EFAULT on a bad user pointer.  Macro for the same polymorphism
 * reason as get_user_preempt_disabled().
 */
#define put_user_preempt_disabled(x, ptr)	\
({						\
	int __ret;				\
	preempt_enable_no_resched();		\
	__ret = put_user(x, ptr);		\
	preempt_disable();			\
	__ret;					\
})

static void binder_set_nice(long nice)
{
	long min_nice;
@@ -563,6 +618,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
	else
		mm = get_task_mm(proc->tsk);

	preempt_enable_no_resched();

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
@@ -615,6 +672,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	preempt_disable();

	return 0;

free_range:
@@ -637,6 +697,9 @@ err_no_vma:
		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	preempt_disable();

	return -ENOMEM;
}

@@ -893,7 +956,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	node = kzalloc_preempt_disabled(sizeof(*node));
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
@@ -1030,7 +1093,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	new_ref = kzalloc_preempt_disabled(sizeof(*ref));
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
@@ -1418,14 +1481,14 @@ static void binder_transaction(struct binder_proc *proc,
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	t = kzalloc_preempt_disabled(sizeof(*t));
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
@@ -1482,14 +1545,14 @@ static void binder_transaction(struct binder_proc *proc,
	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
	if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
	if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
@@ -1683,9 +1746,7 @@ static void binder_transaction(struct binder_proc *proc,
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait) {
		if (reply || !(t->flags & TF_ONE_WAY)) {
			preempt_disable();
			wake_up_interruptible_sync(target_wait);
			sched_preempt_enable_no_resched();
		}
		else {
			wake_up_interruptible(target_wait);
@@ -1747,7 +1808,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
@@ -1765,7 +1826,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && binder_context_mgr_node &&
@@ -1815,10 +1876,10 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
			if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
@@ -1876,7 +1937,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
			if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

@@ -1920,7 +1981,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
@@ -1970,10 +2031,10 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target);
@@ -2002,7 +2063,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				death = kzalloc_preempt_disabled(sizeof(*death));
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2055,7 +2116,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
@@ -2134,7 +2195,7 @@ static int binder_thread_read(struct binder_proc *proc,
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
@@ -2145,7 +2206,7 @@ retry:

	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
			if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
@@ -2153,7 +2214,7 @@ retry:
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
		if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
@@ -2227,7 +2288,7 @@ retry:
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

@@ -2268,14 +2329,14 @@ retry:
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
				if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
				if (put_user_preempt_disabled(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
				if (put_user_preempt_disabled(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
@@ -2315,10 +2376,10 @@ retry:
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
			if (put_user_preempt_disabled(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
@@ -2384,10 +2445,10 @@ retry:
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
		if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
		if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

@@ -2429,7 +2490,7 @@ done:
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
@@ -2503,7 +2564,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		thread = kzalloc_preempt_disabled(sizeof(*thread));
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
@@ -2623,7 +2684,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
	if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
@@ -2638,7 +2699,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			trace_binder_write_done(ret);
			if (ret < 0) {
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
@@ -2649,7 +2710,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
@@ -2659,14 +2720,14 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			     proc->pid, thread->pid,
			     (u64)bwr.write_consumed, (u64)bwr.write_size,
			     (u64)bwr.read_consumed, (u64)bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
		if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
@@ -2711,7 +2772,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {

		if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION,
			    &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
@@ -2770,6 +2833,7 @@ static struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
@@ -2830,7 +2894,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
	/* binder_update_page_range assumes preemption is disabled */
	preempt_disable();
	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
	preempt_enable_no_resched();
	if (ret) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
@@ -3095,8 +3163,12 @@ static void binder_deferred_func(struct work_struct *work)

	int defer;
	do {
		binder_lock(__func__);
		trace_binder_lock(__func__);
		mutex_lock(&binder_main_lock);
		trace_binder_locked(__func__);

		mutex_lock(&binder_deferred_lock);
		preempt_disable();
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
@@ -3122,7 +3194,9 @@ static void binder_deferred_func(struct work_struct *work)
		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		trace_binder_unlock(__func__);
		mutex_unlock(&binder_main_lock);
		preempt_enable_no_resched();
		if (files)
			put_files_struct(files);
	} while (proc);
+9 −1
Original line number Diff line number Diff line
@@ -36,6 +36,12 @@
#define F2FS_NODE_INO(sbi)	(sbi->node_ino_num)
#define F2FS_META_INO(sbi)	(sbi->meta_ino_num)

#define F2FS_IO_SIZE(sbi)	(1 << (sbi)->write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi)	(1 << ((sbi)->write_io_size_bits + 2)) /* KB */
#define F2FS_IO_SIZE_BYTES(sbi)	(1 << ((sbi)->write_io_size_bits + 12)) /* B */
#define F2FS_IO_SIZE_BITS(sbi)	((sbi)->write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi)	(F2FS_IO_SIZE(sbi) - 1)

/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO		(GFP_NOFS | __GFP_ZERO)
#define GFP_F2FS_HIGH_ZERO	(GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
@@ -108,6 +114,7 @@ struct f2fs_super_block {
/*
 * For checkpoint
 */
#define CP_NAT_BITS_FLAG	0x00000080
#define CP_CRC_RECOVERY_FLAG	0x00000040
#define CP_FASTBOOT_FLAG	0x00000020
#define CP_FSCK_FLAG		0x00000010
@@ -271,7 +278,8 @@ struct f2fs_node {
/*
 * For NAT entries
 */
#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)

struct f2fs_nat_entry {
	__u8 version;		/* latest version of cached nat entry */
+146 −0
Original line number Diff line number Diff line
/*
 * fscrypt_common.h: common declarations for per-file encryption
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * Written by Michael Halcrow, 2015.
 * Modified by Jaegeuk Kim, 2015.
 */

#ifndef _LINUX_FSCRYPT_COMMON_H
#define _LINUX_FSCRYPT_COMMON_H

#include <linux/key.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <crypto/skcipher.h>
#include <uapi/linux/fs.h>

#define FS_CRYPTO_BLOCK_SIZE		16

struct fscrypt_info;

struct fscrypt_ctx {
	union {
		struct {
			struct page *bounce_page;	/* Ciphertext page */
			struct page *control_page;	/* Original page  */
		} w;
		struct {
			struct bio *bio;
			struct work_struct work;
		} r;
		struct list_head free_list;	/* Free list */
	};
	u8 flags;				/* Flags */
};

/**
 * For encrypted symlinks, the ciphertext length is stored at the beginning
 * of the string in little-endian format.
 */
struct fscrypt_symlink_data {
	__le16 len;
	char encrypted_path[1];
} __packed;

/**
 * This function is used to calculate the disk space required to
 * store a filename of length l in encrypted symlink format.
 */
/*
 * Disk space needed to store an encrypted symlink target whose
 * plaintext length is @l: the ciphertext is padded up to one crypto
 * block (FS_CRYPTO_BLOCK_SIZE), plus the fscrypt_symlink_data header;
 * the "- 1" cancels the 1-byte encrypted_path[] placeholder already
 * counted in sizeof(struct fscrypt_symlink_data).
 */
static inline u32 fscrypt_symlink_data_len(u32 l)
{
	const u32 ct_len = (l < FS_CRYPTO_BLOCK_SIZE) ? FS_CRYPTO_BLOCK_SIZE : l;

	return ct_len + sizeof(struct fscrypt_symlink_data) - 1;
}

/* A length-prefixed byte string; names here need not be NUL-terminated. */
struct fscrypt_str {
	unsigned char *name;	/* buffer, possibly ciphertext */
	u32 len;		/* number of valid bytes in @name */
};

/* A filename in both its user-supplied and on-disk representations. */
struct fscrypt_name {
	const struct qstr *usr_fname;	/* name as passed in by the user */
	struct fscrypt_str disk_name;	/* name as stored on disk */
	u32 hash;			/* directory-entry hash (encrypted dirs) */
	u32 minor_hash;			/* secondary hash (encrypted dirs) */
	struct fscrypt_str crypto_buf;	/* scratch buffer owned by fscrypt */
};

#define FSTR_INIT(n, l)		{ .name = n, .len = l }
#define FSTR_TO_QSTR(f)		QSTR_INIT((f)->name, (f)->len)
#define fname_name(p)		((p)->disk_name.name)
#define fname_len(p)		((p)->disk_name.len)

/*
 * fscrypt superblock flags
 */
#define FS_CFLG_OWN_PAGES (1U << 1)

/*
 * crypto operations provided by each filesystem that supports fscrypt
 */
struct fscrypt_operations {
	unsigned int flags;		/* FS_CFLG_* flags */
	const char *key_prefix;		/* keyring description prefix */
	int (*get_context)(struct inode *, void *, size_t);
	int (*prepare_context)(struct inode *);
	int (*set_context)(struct inode *, const void *, size_t, void *);
	int (*dummy_context)(struct inode *);
	bool (*is_encrypted)(struct inode *);
	bool (*empty_dir)(struct inode *);
	unsigned (*max_namelen)(struct inode *);
};

static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
	if (inode->i_sb->s_cop->dummy_context &&
				inode->i_sb->s_cop->dummy_context(inode))
		return true;
	return false;
}

/* Only AES-256-XTS is accepted for file contents encryption. */
static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
{
	return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
}

/* Only AES-256-CTS is accepted for filename encryption. */
static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
{
	return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
}

/*
 * True iff @str is one of the special directory entries "." or "..",
 * which are never encrypted.
 */
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
	const unsigned char *name = str->name;

	return (str->len == 1 && name[0] == '.') ||
	       (str->len == 2 && name[0] == '.' && name[1] == '.');
}

/*
 * Given a bounce (ciphertext) page, return the original pagecache page
 * it was encrypted from, read from the fscrypt_ctx stashed in the
 * page's private data.  Without CONFIG_FS_ENCRYPTION this should never
 * be reached: it warns once and returns an error pointer.
 */
static inline struct page *fscrypt_control_page(struct page *page)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
#else
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
#endif
}

/*
 * Non-zero iff @inode's encryption key has been set up (i_crypt_info
 * populated).  Always 0 when CONFIG_FS_ENCRYPTION is disabled.
 */
static inline int fscrypt_has_encryption_key(const struct inode *inode)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
	return (inode->i_crypt_info != NULL);
#else
	return 0;
#endif
}

#endif	/* _LINUX_FSCRYPT_COMMON_H */
+168 −0
Original line number Diff line number Diff line
/*
 * fscrypt_notsupp.h
 *
 * This stubs out the fscrypt functions for filesystems configured without
 * encryption support.
 */

#ifndef _LINUX_FSCRYPT_NOTSUPP_H
#define _LINUX_FSCRYPT_NOTSUPP_H

#include <linux/fscrypt_common.h>

/* crypto.c */
/*
 * With encryption support compiled out, every operation that would
 * produce or consume ciphertext fails with -EOPNOTSUPP, and the
 * bookkeeping hooks are no-ops.
 */
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
						  gfp_t gfp_flags)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	return;
}

static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
						struct page *page,
						unsigned int len,
						unsigned int offs,
						u64 lblk_num, gfp_t gfp_flags)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int fscrypt_decrypt_page(const struct inode *inode,
				       struct page *page,
				       unsigned int len, unsigned int offs,
				       u64 lblk_num)
{
	return -EOPNOTSUPP;
}


static inline void fscrypt_restore_control_page(struct page *page)
{
	return;
}

/* No encrypted dentries exist, so dentry hooks do nothing. */
static inline void fscrypt_set_d_op(struct dentry *dentry)
{
	return;
}

static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
{
	return;
}

/* policy.c */
/* Encryption policies cannot be set or queried without support. */
static inline int fscrypt_ioctl_set_policy(struct file *filp,
					   const void __user *arg)
{
	return -EOPNOTSUPP;
}

static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
{
	return -EOPNOTSUPP;
}

/*
 * NOTE(review): 0 here appears to mean "context not permitted"; with
 * encryption compiled out no encrypted directories can exist, so the
 * answer should never matter — confirm against callers' polarity.
 */
static inline int fscrypt_has_permitted_context(struct inode *parent,
						struct inode *child)
{
	return 0;
}

static inline int fscrypt_inherit_context(struct inode *parent,
					  struct inode *child,
					  void *fs_data, bool preload)
{
	return -EOPNOTSUPP;
}

/* keyinfo.c */
static inline int fscrypt_get_encryption_info(struct inode *inode)
{
	return -EOPNOTSUPP;
}

static inline void fscrypt_put_encryption_info(struct inode *inode,
					       struct fscrypt_info *ci)
{
	return;
}

/* fname.c */
/*
 * Without encryption the on-disk name is simply the user-supplied name.
 * Fails only if the filesystem nonetheless claims @dir is encrypted.
 */
static inline int fscrypt_setup_filename(struct inode *dir,
					 const struct qstr *iname,
					 int lookup, struct fscrypt_name *fname)
{
	if (dir->i_sb->s_cop->is_encrypted(dir))
		return -EOPNOTSUPP;

	memset(fname, 0, sizeof(struct fscrypt_name));
	fname->usr_fname = iname;
	fname->disk_name.name = (unsigned char *)iname->name;
	fname->disk_name.len = iname->len;
	return 0;
}

/* Nothing was allocated by fscrypt_setup_filename(), so nothing to free. */
static inline void fscrypt_free_filename(struct fscrypt_name *fname)
{
	return;
}

/*
 * Encrypted-name buffer helpers: unreachable or unsupported when
 * encryption is compiled out.
 */
static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
					       u32 ilen)
{
	/* never happens */
	WARN_ON(1);
	return 0;
}

static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
					     u32 ilen,
					     struct fscrypt_str *crypto_str)
{
	return -EOPNOTSUPP;
}

static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
	return;
}

static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
					    u32 hash, u32 minor_hash,
					    const struct fscrypt_str *iname,
					    struct fscrypt_str *oname)
{
	return -EOPNOTSUPP;
}

static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
					    const struct qstr *iname,
					    struct fscrypt_str *oname)
{
	return -EOPNOTSUPP;
}

/* bio.c */
/* No bios carry ciphertext, so the bio hooks are no-ops or errors. */
static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
					     struct bio *bio)
{
	return;
}

static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	return;
}

static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
					sector_t pblk, unsigned int len)
{
	return -EOPNOTSUPP;
}

#endif	/* _LINUX_FSCRYPT_NOTSUPP_H */
+66 −0
Original line number Diff line number Diff line
/*
 * fscrypt_supp.h
 *
 * This is included by filesystems configured with encryption support.
 */

#ifndef _LINUX_FSCRYPT_SUPP_H
#define _LINUX_FSCRYPT_SUPP_H

#include <linux/fscrypt_common.h>

/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
						unsigned int, unsigned int,
						u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
				unsigned int, u64);
extern void fscrypt_restore_control_page(struct page *);

extern const struct dentry_operations fscrypt_d_ops;

/* Install fscrypt's dentry_operations (revalidation for no-key names). */
static inline void fscrypt_set_d_op(struct dentry *dentry)
{
	d_set_d_op(dentry, &fscrypt_d_ops);
}

/*
 * Mark @dentry as having been created with the encryption key present.
 * d_flags is written under d_lock, which serializes d_flags updates.
 */
static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
}

/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
					void *, bool);
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);

/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
				int lookup, struct fscrypt_name *);
extern void fscrypt_free_filename(struct fscrypt_name *);
extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
				struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
			const struct fscrypt_str *, struct fscrypt_str *);
extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
			struct fscrypt_str *);

/* bio.c */
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
				 unsigned int);

#endif	/* _LINUX_FSCRYPT_SUPP_H */