
Commit 12a5b529 authored by Al Viro

fix copy_tree() regression



Since 3.14 we had copy_tree() get the shadowing wrong - if we had one
vfsmount shadowing another (i.e. if A is a slave of B, C is mounted
on A/foo, then D got mounted on B/foo creating D' on A/foo shadowed
by C), copy_tree() of A would make a copy of D' shadow the copy of
C, not the other way around.

It's easy to fix, fortunately - just make sure that a mount follows
the one that shadows it in mnt_child as well as in mnt_hash, and when
copy_tree() decides to attach a new mount, check if the last child
it has added to the same parent should be shadowing the new one.
And if it should, just use the same logic commit_tree() has - put the
new mount into the hash and children lists right after the one that
should shadow it.

Cc: stable@vger.kernel.org [3.14 and later]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 60bb4529
fs/namespace.c: +24 −7
@@ -778,6 +778,20 @@ static void attach_mnt(struct mount *mnt,
 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 }
 
+static void attach_shadowed(struct mount *mnt,
+			struct mount *parent,
+			struct mount *shadows)
+{
+	if (shadows) {
+		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+		list_add(&mnt->mnt_child, &shadows->mnt_child);
+	} else {
+		hlist_add_head_rcu(&mnt->mnt_hash,
+				m_hash(&parent->mnt, mnt->mnt_mountpoint));
+		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	}
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -796,12 +810,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
 
 	list_splice(&head, n->list.prev);
 
-	if (shadows)
-		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
-	else
-		hlist_add_head_rcu(&mnt->mnt_hash,
-				m_hash(&parent->mnt, mnt->mnt_mountpoint));
-	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	attach_shadowed(mnt, parent, shadows);
 	touch_mnt_namespace(n);
 }
 
@@ -1474,6 +1483,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			continue;
 
 		for (s = r; s; s = next_mnt(s, r)) {
+			struct mount *t = NULL;
 			if (!(flag & CL_COPY_UNBINDABLE) &&
 			    IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
@@ -1495,7 +1505,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 				goto out;
 			lock_mount_hash();
 			list_add_tail(&q->mnt_list, &res->mnt_list);
-			attach_mnt(q, parent, p->mnt_mp);
+			mnt_set_mountpoint(parent, p->mnt_mp, q);
+			if (!list_empty(&parent->mnt_mounts)) {
+				t = list_last_entry(&parent->mnt_mounts,
+					struct mount, mnt_child);
+				if (t->mnt_mp != p->mnt_mp)
+					t = NULL;
+			}
+			attach_shadowed(q, parent, t);
 			unlock_mount_hash();
 		}
 	}
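
For reference, below is a minimal userspace sketch of the A/B/C/D scenario from the commit message, built with mount(2). It is not part of the commit: the paths /mnt/B, /mnt/A and /mnt/A2 are hypothetical pre-existing directories, it must run as root (ideally inside a private mount namespace, e.g. under unshare -m), and the final recursive bind is just one way to reach copy_tree() on A's subtree.

/*
 * Minimal sketch, not part of the commit: the topology described in the
 * commit message, assuming /mnt/B, /mnt/A and /mnt/A2 already exist.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/stat.h>

static void step(int err, const char *what)
{
	if (err) {
		perror(what);
		exit(1);
	}
}

int main(void)
{
	/* B: a tmpfs, made shared so it can have slaves */
	step(mount("none", "/mnt/B", "tmpfs", 0, NULL), "mount B");
	step(mount(NULL, "/mnt/B", NULL, MS_SHARED, NULL), "make-shared B");
	step(mkdir("/mnt/B/foo", 0755), "mkdir B/foo");

	/* A: a bind of B, then turned into a slave of B's peer group */
	step(mount("/mnt/B", "/mnt/A", NULL, MS_BIND, NULL), "bind A");
	step(mount(NULL, "/mnt/A", NULL, MS_SLAVE, NULL), "make-slave A");

	/* C: mounted directly on A/foo */
	step(mount("none", "/mnt/A/foo", "tmpfs", 0, NULL), "mount C");

	/* D: mounted on B/foo; propagation attaches a copy D' on A/foo,
	 * which ends up shadowed by the already-present C */
	step(mount("none", "/mnt/B/foo", "tmpfs", 0, NULL), "mount D");

	/* One way to exercise copy_tree() on A's subtree: a recursive
	 * bind.  Before this fix, the copy of D' wrongly shadowed the
	 * copy of C at /mnt/A2/foo. */
	step(mount("/mnt/A", "/mnt/A2", NULL, MS_BIND | MS_REC, NULL),
	     "rbind A -> A2");
	return 0;
}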