
Commit 52cef755 authored by Eric Paris

inotify: separate new watch creation from updating existing watches



There is nothing known to be wrong with the inotify watch addition/modification
code, but this patch separates the two code paths so that each of them is easy
to verify as correct.
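
With the split, the top-level entry point becomes a small dispatcher; a
condensed view of the code added below (see the diff for the exact version):

	static int inotify_update_watch(struct fsnotify_group *group,
					struct inode *inode, u32 arg)
	{
		int ret = 0;

	retry:
		/* first try to update an existing watch with the new mask */
		ret = inotify_update_existing_watch(group, inode, arg);
		/* no mark present, try to add a new one */
		if (ret == -ENOENT)
			ret = inotify_new_watch(group, inode, arg);
		/*
		 * inotify_new_watch() can race with another thread adding a
		 * watch between the two calls above; if so, go back and
		 * update the mark that thread created.
		 */
		if (ret == -EEXIST)
			goto retry;

		return ret;
	}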

Signed-off-by: Eric Paris <eparis@redhat.com>
parent 1e23502c
+103 −69
@@ -431,15 +431,76 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
 	kmem_cache_free(inotify_inode_mark_cachep, ientry);
 }
 
-static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+static int inotify_update_existing_watch(struct fsnotify_group *group,
+					 struct inode *inode,
+					 u32 arg)
 {
-	struct fsnotify_mark_entry *entry = NULL;
+	struct fsnotify_mark_entry *entry;
 	struct inotify_inode_mark_entry *ientry;
-	struct inotify_inode_mark_entry *tmp_ientry;
-	int ret = 0;
 	__u32 old_mask, new_mask;
 	__u32 mask;
 	int add = (arg & IN_MASK_ADD);
+	int ret;
+
+	/* don't allow invalid bits: we don't want flags set */
+	mask = inotify_arg_to_mask(arg);
+	if (unlikely(!mask))
+		return -EINVAL;
+
+	spin_lock(&inode->i_lock);
+	entry = fsnotify_find_mark_entry(group, inode);
+	spin_unlock(&inode->i_lock);
+	if (!entry)
+		return -ENOENT;
+
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+
+	spin_lock(&entry->lock);
+
+	old_mask = entry->mask;
+	if (add) {
+		entry->mask |= mask;
+		new_mask = entry->mask;
+	} else {
+		entry->mask = mask;
+		new_mask = entry->mask;
+	}
+
+	spin_unlock(&entry->lock);
+
+	if (old_mask != new_mask) {
+		/* more bits in old than in new? */
+		int dropped = (old_mask & ~new_mask);
+		/* more bits in this entry than the inode's mask? */
+		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
+		/* more bits in this entry than the group? */
+		int do_group = (new_mask & ~group->mask);
+
+		/* update the inode with this new entry */
+		if (dropped || do_inode)
+			fsnotify_recalc_inode_mask(inode);
+
+		/* update the group mask with the new mask */
+		if (dropped || do_group)
+			fsnotify_recalc_group_mask(group);
+	}
+
+	/* return the wd */
+	ret = ientry->wd;
+
+	/* match the get from fsnotify_find_mark_entry() */
+	fsnotify_put_mark(entry);
+
+	return ret;
+}
+
+static int inotify_new_watch(struct fsnotify_group *group,
+			     struct inode *inode,
+			     u32 arg)
+{
+	struct inotify_inode_mark_entry *tmp_ientry;
+	__u32 mask;
+	__u32 old_mask, new_mask;
+	int ret;
 
 	/* don't allow invalid bits: we don't want flags set */
 	mask = inotify_arg_to_mask(arg);
@@ -449,17 +510,11 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
 	if (unlikely(!tmp_ientry))
 		return -ENOMEM;
-	/* we set the mask at the end after attaching it */
+
 	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->fsn_entry.mask = mask;
 	tmp_ientry->wd = -1;
 
-find_entry:
-	spin_lock(&inode->i_lock);
-	entry = fsnotify_find_mark_entry(group, inode);
-	spin_unlock(&inode->i_lock);
-	if (entry) {
-		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
-	} else {
 	ret = -ENOSPC;
 	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
 		goto out_err;
@@ -474,80 +529,59 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
 				&tmp_ientry->wd);
 	spin_unlock(&group->inotify_data.idr_lock);
 	if (ret) {
 		/* idr was out of memory allocate and try again */
 		if (ret == -EAGAIN)
 			goto retry;
 		goto out_err;
 	}
 
 	/* we are on the idr, now get on the inode */
 	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
 	if (ret) {
 		/* we failed to get on the inode, get off the idr */
 		inotify_remove_from_idr(group, tmp_ientry);
-			if (ret == -EEXIST)
-				goto find_entry;
 		goto out_err;
 	}
 
-		/* tmp_ientry has been added to the inode, so we are all set up.
-		 * now we just need to make sure tmp_ientry doesn't get freed and
-		 * we need to set up entry and ientry so the generic code can
-		 * do its thing. */
-		ientry = tmp_ientry;
-		entry = &ientry->fsn_entry;
-		tmp_ientry = NULL;
-
-		atomic_inc(&group->inotify_data.user->inotify_watches);
-
-		/* update the idr hint */
-		group->inotify_data.last_wd = ientry->wd;
-
 	/* we put the mark on the idr, take a reference */
-		fsnotify_get_mark(entry);
-	}
+	fsnotify_get_mark(&tmp_ientry->fsn_entry);
 
-	ret = ientry->wd;
-
-	spin_lock(&entry->lock);
+	/* update the idr hint, who cares about races, it's just a hint */
+	group->inotify_data.last_wd = tmp_ientry->wd;
 
-	old_mask = entry->mask;
-	if (add) {
-		entry->mask |= mask;
-		new_mask = entry->mask;
-	} else {
-		entry->mask = mask;
-		new_mask = entry->mask;
-	}
+	/* increment the number of watches the user has */
+	atomic_inc(&group->inotify_data.user->inotify_watches);
 
-	spin_unlock(&entry->lock);
+	/* return the watch descriptor for this new entry */
+	ret = tmp_ientry->wd;
 
-	if (old_mask != new_mask) {
-		/* more bits in old than in new? */
-		int dropped = (old_mask & ~new_mask);
-		/* more bits in this entry than the inode's mask? */
-		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
-		/* more bits in this entry than the group? */
-		int do_group = (new_mask & ~group->mask);
+	/* match the ref from fsnotify_init_markentry() */
+	fsnotify_put_mark(&tmp_ientry->fsn_entry);
 
-		/* update the inode with this new entry */
-		if (dropped || do_inode)
-			fsnotify_recalc_inode_mask(inode);
+out_err:
+	if (ret < 0)
+		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
 
-		/* update the group mask with the new mask */
-		if (dropped || do_group)
-			fsnotify_recalc_group_mask(group);
+	return ret;
 }
 
-	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
-	 * depending on which path we took... */
-	fsnotify_put_mark(entry);
+static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+{
+	int ret = 0;
 
-out_err:
-	/* could be an error, could be that we found an existing mark */
-	if (tmp_ientry) {
-		/* on the idr but didn't make it on the inode */
-		if (tmp_ientry->wd != -1)
-			inotify_remove_from_idr(group, tmp_ientry);
-		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
-	}
+retry:
+	/* try to update and existing watch with the new arg */
+	ret = inotify_update_existing_watch(group, inode, arg);
+	/* no mark present, try to add a new one */
+	if (ret == -ENOENT)
+		ret = inotify_new_watch(group, inode, arg);
+	/*
+	 * inotify_new_watch could race with another thread which did an
+	 * inotify_new_watch between the update_existing and the add watch
+	 * here, go back and try to update an existing mark again.
+	 */
+	if (ret == -EEXIST)
+		goto retry;
 
 	return ret;
 }