
Commit f083441b authored by Jan Kara, committed by Linus Torvalds

fanotify: use fanotify event structure for permission response processing



Currently, fanotify creates a new structure to track the fact that a
permission event has been reported to userspace and someone is waiting
for a response to it.  As event structures are now completely in the
hands of each notification framework, we can use the event structure
itself for this tracking instead of allocating a separate structure.

Since this makes the event structures for normal and permission events
even more different, and the two have different lifetime rules, we split
them into two separate structures (with the permission event structure
embedding the structure for a normal event).  This makes normal events
8 bytes smaller and the code a bit cleaner.
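
The embedding works the way the fanotify.h hunk below spells out: the normal
event is the first member of the permission event, and a container_of() helper
recovers the outer structure from the embedded one.  As a rough illustration
only, here is a minimal, userspace-compilable sketch of that pattern; the
kernel types (struct fsnotify_event, struct path, struct pid) are reduced to
stand-ins, container_of is written out by hand instead of coming from
<linux/kernel.h>, and the main() driver is purely hypothetical, so the sketch
mirrors the shape of the real structures, not their contents.

/* Sketch: normal event embedded in permission event, recovered via container_of() */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fsnotify_event {			/* stand-in for the generic fsnotify event */
	unsigned int mask;
};

struct fanotify_event_info {		/* "normal" fanotify event */
	struct fsnotify_event fse;
	/* path and tgid omitted in this sketch */
};

struct fanotify_perm_event_info {	/* permission event embeds the normal one */
	struct fanotify_event_info fae;
	int response;			/* userspace answer to question */
	int fd;				/* fd passed to userspace for this event */
};

/* Given only the embedded generic event, get back the permission event. */
static struct fanotify_perm_event_info *FANOTIFY_PE(struct fsnotify_event *fse)
{
	return container_of(fse, struct fanotify_perm_event_info, fae.fse);
}

int main(void)
{
	struct fanotify_perm_event_info pevent = { .fd = 42, .response = 0 };
	struct fsnotify_event *fse = &pevent.fae.fse;

	printf("fd=%d\n", FANOTIFY_PE(fse)->fd);	/* prints: fd=42 */
	return 0;
}

Keeping the permission-only fields (response, fd) in the outer structure is
what lets ordinary events shrink by 8 bytes, while FANOTIFY_PE() gives the
permission paths back their extra fields from a plain struct fsnotify_event
pointer.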

[akpm@linux-foundation.org: fix build]
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Eric Paris <eparis@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3298cf37
fs/notify/fanotify/fanotify.c  +45 −18
@@ -60,8 +60,8 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static int fanotify_get_response_from_access(struct fsnotify_group *group,
					     struct fanotify_event_info *event)
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event)
{
	int ret;

@@ -142,6 +142,40 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
	return false;
}

struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
						 struct path *path)
{
	struct fanotify_event_info *event;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & FAN_ALL_PERM_EVENTS) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
					  GFP_KERNEL);
		if (!pevent)
			return NULL;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
#endif
	event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
	if (!event)
		return NULL;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}

static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 struct fsnotify_mark *inode_mark,
@@ -171,25 +205,11 @@ static int fanotify_handle_event(struct fsnotify_group *group,
	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
	event = fanotify_alloc_event(inode, mask, data);
	if (unlikely(!event))
		return -ENOMEM;

	fsn_event = &event->fse;
	fsnotify_init_event(fsn_event, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (data_type == FSNOTIFY_EVENT_PATH) {
		struct path *path = data;
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	event->response = 0;
#endif

	ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
@@ -202,7 +222,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & FAN_ALL_PERM_EVENTS) {
		ret = fanotify_get_response_from_access(group, event);
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event));
		fsnotify_destroy_event(group, fsn_event);
	}
#endif
@@ -225,6 +245,13 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event)
	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->tgid);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
#endif
	kmem_cache_free(fanotify_event_cachep, event);
}

fs/notify/fanotify/fanotify.h  +27 −7
@@ -3,13 +3,12 @@
#include <linux/slab.h>

extern struct kmem_cache *fanotify_event_cachep;
extern struct kmem_cache *fanotify_perm_event_cachep;

/*
 * Lifetime of the structure differs for normal and permission events. In both
 * cases the structure is allocated in fanotify_handle_event(). For normal
 * events the structure is freed immediately after reporting it to userspace.
 * For permission events we free it only after we receive response from
 * userspace.
 * Structure for normal fanotify events. It gets allocated in
 * fanotify_handle_event() and freed when the information is retrieved by
 * userspace
 */
struct fanotify_event_info {
	struct fsnotify_event fse;
@@ -19,12 +18,33 @@ struct fanotify_event_info {
	 */
	struct path path;
	struct pid *tgid;
};

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	u32 response;	/* userspace answer to question */
#endif
/*
 * Structure for permission fanotify events. It gets allocated and freed in
 * fanotify_handle_event() since we wait there for user response. When the
 * information is retrieved by userspace the structure is moved from
 * group->notification_list to group->fanotify_data.access_list to wait for
 * user response.
 */
struct fanotify_perm_event_info {
	struct fanotify_event_info fae;
	int response;	/* userspace answer to question */
	int fd;		/* fd we passed to userspace for this event */
};

static inline struct fanotify_perm_event_info *
FANOTIFY_PE(struct fsnotify_event *fse)
{
	return container_of(fse, struct fanotify_perm_event_info, fae.fse);
}
#endif

static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
{
	return container_of(fse, struct fanotify_event_info, fse);
}

struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
						 struct path *path);
fs/notify/fanotify/fanotify_user.c  +44 −79
@@ -28,14 +28,8 @@
extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fanotify_event_info *event;
};
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
@@ -135,33 +129,34 @@ static int fill_event_metadata(struct fsnotify_group *group,
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_response_event *re, *return_re = NULL;
	struct fanotify_perm_event_info *event, *return_e = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);
	pr_debug("%s: found return_re=%p\n", __func__, return_e);

	return return_re;
	return return_e;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
@@ -181,50 +176,15 @@ static int process_access_response(struct fsnotify_group *group,
	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	re->event->response = response;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = FANOTIFY_E(event);
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

#endif

static ssize_t copy_event_to_user(struct fsnotify_group *group,
@@ -247,9 +207,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		struct fanotify_perm_event_info *pevent;

		pevent = FANOTIFY_PE(event);
		pevent->fd = fd;
		mutex_lock(&group->fanotify_data.access_mutex);
		list_add_tail(&pevent->fae.fse.list,
			      &group->fanotify_data.access_list);
		mutex_unlock(&group->fanotify_data.access_mutex);
	}
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
@@ -263,7 +232,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		FANOTIFY_E(event)->response = FAN_DENY;
		FANOTIFY_PE(event)->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
@@ -312,8 +281,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
				break;
			ret = copy_event_to_user(group, kevent, buf);
			/*
			 * Permission events get destroyed after we
			 * receive response
			 * Permission events get queued to wait for response.
			 * Other events can be destroyed now.
			 */
			if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
				fsnotify_destroy_event(group, kevent);
@@ -375,20 +344,19 @@ static int fanotify_release(struct inode *ignored, struct file *file)
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;
	struct fanotify_perm_event_info *event, *next;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		kmem_cache_free(fanotify_response_event_cache, re);
		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

@@ -723,20 +691,15 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->tgid = get_pid(task_tgid(current));
	oevent->path.mnt = NULL;
	oevent->path.dentry = NULL;

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	oevent->response = 0;
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
@@ -912,9 +875,11 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}