Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e0d7ea4 authored by Joel Fernandes (Google), committed by Joel Fernandes
Browse files

UPSTREAM: mm/memfd: make F_SEAL_FUTURE_WRITE seal more robust

A better way to do F_SEAL_FUTURE_WRITE seal was discussed [1] last week
where we don't need to modify core VFS structures to get the same
behavior of the seal. This solves several side-effects pointed out by
Andy [2].

[1] https://lore.kernel.org/lkml/20181111173650.GA256781@google.com/
[2] https://lore.kernel.org/lkml/69CE06CC-E47C-4992-848A-66EB23EE6C74@amacapital.net/



Suggested-by: Andy Lutomirski <luto@kernel.org>
Fixes: 5e653c2923fd ("mm: Add an F_SEAL_FUTURE_WRITE seal to memfd")
Change-Id: I5d2414cfcf8ac42d3632d0b0dc960c742d490e2f
Verified with test program at: https://lore.kernel.org/patchwork/patch/1008117/
Backport link: https://lore.kernel.org/patchwork/patch/1014892/


Bug: 113362644
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
parent 1dc8ca44
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -530,7 +530,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & F_SEAL_WRITE) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}
+0 −19
Original line number Diff line number Diff line
@@ -220,25 +220,6 @@ static int memfd_add_seals(struct file *file, unsigned int seals)
		}
	}

	if ((seals & F_SEAL_FUTURE_WRITE) &&
	    !(*file_seals & F_SEAL_FUTURE_WRITE)) {
		/*
		 * The FUTURE_WRITE seal also prevents growing and shrinking
		 * so we need them to be already set, or requested now.
		 */
		int test_seals = (seals | *file_seals) &
				 (F_SEAL_GROW | F_SEAL_SHRINK);

		if (test_seals != (F_SEAL_GROW | F_SEAL_SHRINK)) {
			error = -EINVAL;
			goto unlock;
		}

		spin_lock(&file->f_lock);
		file->f_mode &= ~(FMODE_WRITE | FMODE_PWRITE);
		spin_unlock(&file->f_lock);
	}

	*file_seals |= seals;
	error = 0;

+21 −3
Original line number Diff line number Diff line
@@ -2169,6 +2169,23 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shmem_inode_info *info = SHMEM_I(file_inode(file));

	/*
	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when "future
	 * write" seal active.
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE) &&
	    (info->seals & F_SEAL_FUTURE_WRITE))
		return -EPERM;

	/*
	 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED read-only
	 * mapping, take care to not allow mprotect to revert protections.
	 */
	if (info->seals & F_SEAL_FUTURE_WRITE)
		vma->vm_flags &= ~(VM_MAYWRITE);

	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
@@ -2422,8 +2439,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
	pgoff_t index = pos >> PAGE_SHIFT;

	/* i_mutex is held by caller */
	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
		if (info->seals & F_SEAL_WRITE)
	if (unlikely(info->seals & (F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
			return -EPERM;
		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
			return -EPERM;
@@ -2686,7 +2704,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		/* protected by i_mutex */
		if (info->seals & F_SEAL_WRITE) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			error = -EPERM;
			goto out;
		}