
Commit 6a37e940 authored by Linus Torvalds

Merge branch 'uaccess-work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull iov_iter hardening from Al Viro:
 "This is the iov_iter/uaccess/hardening pile.

  For one thing, it trims the inline part of copy_to_user/copy_from_user
  to the minimum that *does* need to be inlined - object size checks,
  basically. For another, it sanitizes the checks for iov_iter
  primitives. There are 4 groups of checks: access_ok(), might_fault(),
  object size and KASAN.

   - access_ok() had been verified by whoever had set the iov_iter up.
     However, that has happened in a function far away, so proving that
     there's no path to actual copying bypassing those checks is hard
     and proving that iov_iter has not been buggered in the meanwhile is
     also not pleasant. So we want those redone in actual
     copyin/copyout.

   - might_fault() is better off consolidated - we know whether it needs
     to be checked as soon as we enter iov_iter primitive and observe
     the iov_iter flavour. No need to wait until the copyin/copyout. The
     call chains are short enough to make sure we won't miss anything -
     in fact, it's more robust that way, since there are cases where we
     do e.g. forced fault-in before getting to copyin/copyout. It's not
     quite what we need to check (in particular, combination of
     iovec-backed and set_fs(KERNEL_DS) is almost certainly a bug, not a
     cause to skip checks), but that's for later series. For now let's
     keep might_fault().

   - KASAN checks belong in copyin/copyout - at the same level where
     other iov_iter flavours would've hit them in memcpy().

   - object size checks should apply to *all* iov_iter flavours, not
     just iovec-backed ones.
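
  To make that division of labor concrete, here is a condensed sketch of
  where the four groups land for a kernel-object primitive. It is
  assembled from the include/linux/uio.h and lib/iov_iter.c hunks below,
  not a verbatim quote of any single one of them:

	/* uio.h: inline wrapper - object size checks (3) run here, for
	 * every iov_iter flavour, while the compiler can still see the
	 * object behind 'addr'. */
	static __always_inline __must_check
	size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
	{
		if (unlikely(!check_copy_size(addr, bytes, true)))
			return bytes;
		return _copy_to_iter(addr, bytes, i);
	}

	/* iov_iter.c: out-of-line primitive - might_fault() (2) is decided
	 * once, on entry, from the flavour alone; the iovec case then goes
	 * through copyout(), which redoes access_ok() (1) and the KASAN
	 * annotation (4) right at the actual copy. */
	size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
	{
		if (iter_is_iovec(i))
			might_fault();
		/* ... iterate_and_advance(), using copyout() for iovecs ... */
		return bytes;
	}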

  There are two groups of primitives - one gets the kernel object
  described as pointer + size (copy_to_iter(), etc.) while another gets
  it as page + offset + size (copy_page_to_iter(), etc.)

  For the first group the checks are best done where we actually have a
  chance to find the object size. In other words, those belong in inline
  wrappers in uio.h, before calling into iov_iter.c. Same kind as we
  have for inlined part of copy_to_user().
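
  As an illustration (a hypothetical caller, not code from this series):
  because the check is inlined, the compiler can still see the size of
  the destination object, so a constant-size overflow fails the build,
  while a variable size only WARNs at runtime via copy_overflow() and
  skips the copy:

	/* hypothetical caller */
	static size_t read_hdr(struct iov_iter *from)
	{
		char hdr[8];

		/* __compiletime_object_size(hdr) == 8 and bytes == 16 is
		 * constant, so check_copy_size() resolves to
		 * __bad_copy_to() and the build fails with "copy
		 * destination size is too small". */
		return copy_from_iter(hdr, 16, from);
	}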

  For the second group there is no object to look at - offset in page is
  just a number, it bears no type information. So we do them in the
  common helper called by iov_iter.c primitives of that kind. All it
  currently does is checking that we are not trying to access outside of
  the compound page; eventually we might want to add some sanity checks
  on the page involved.
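
  That common helper is page_copy_sane() in the lib/iov_iter.c hunk
  below; restated with comments, the whole check is:

	static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
	{
		size_t v = n + offset;

		/* 'n <= v' doubles as an overflow check on the addition;
		 * the second test keeps offset + n within the (possibly
		 * compound) page, i.e. within PAGE_SIZE << 2 == 16K for an
		 * order-2 compound page with 4K pages. */
		if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
			return true;
		WARN_ON(1);
		return false;
	}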

  So the things we need in copyin/copyout part of iov_iter.c do not
  quite match anything in uaccess.h (we want no zeroing, we *do* want
  access_ok() and KASAN and we want no might_fault() or object size
  checks done on that level). OTOH, these needs are simple enough to
  provide a couple of helpers (static in iov_iter.c) doing just what we
  need..."

* 'uaccess-work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  iov_iter: saner checks on copyin/copyout
  iov_iter: sanity checks for copy to/from page primitives
  iov_iter/hardening: move object size checks to inlined part
  copy_{to,from}_user(): consolidate object size checks
  copy_{from,to}_user(): move kasan checks and might_fault() out-of-line
parents da029c11 09fc68dc
include/linux/thread_info.h  +27 −0
@@ -113,6 +113,33 @@ static inline void check_object_size(const void *ptr, unsigned long n,
{ }
#endif /* CONFIG_HARDENED_USERCOPY */

extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

static inline void copy_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __compiletime_object_size(addr);
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	check_object_size(addr, bytes, is_source);
	return true;
}

#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif
include/linux/uaccess.h  +10 −34
@@ -109,8 +109,11 @@ static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
-	if (likely(access_ok(VERIFY_READ, from, n)))
+	might_fault();
+	if (likely(access_ok(VERIFY_READ, from, n))) {
+		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
+	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
@@ -124,8 +127,11 @@ _copy_from_user(void *, const void __user *, unsigned long);
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
-	if (access_ok(VERIFY_WRITE, to, n))
+	might_fault();
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
+	}
	return n;
}
#else
@@ -133,49 +139,19 @@ extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
-	int sz = __compiletime_object_size(to);
-
-	might_fault();
-	kasan_check_write(to, n);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
+	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
-	int sz = __compiletime_object_size(from);
-
-	kasan_check_read(from, n);
-	might_fault();
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
+	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
	return n;
}
#ifdef CONFIG_COMPAT
include/linux/uio.h  +64 −10
@@ -10,6 +10,7 @@
#define __LINUX_UIO_H

#include <linux/kernel.h>
+#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
@@ -91,10 +92,58 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+static __always_inline __must_check
+size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, true)))
+		return bytes;
+	else
+		return _copy_to_iter(addr, bytes, i);
+}
+
+static __always_inline __must_check
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return bytes;
+	else
+		return _copy_from_iter(addr, bytes, i);
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return false;
+	else
+		return _copy_from_iter_full(addr, bytes, i);
+}
+
+static __always_inline __must_check
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return bytes;
+	else
+		return _copy_from_iter_nocache(addr, bytes, i);
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return false;
+	else
+		return _copy_from_iter_full_nocache(addr, bytes, i);
+}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
@@ -102,15 +151,20 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
-static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
-				       struct iov_iter *i)
+#define _copy_from_iter_flushcache _copy_from_iter_nocache
+#endif
+
+static __always_inline __must_check
+size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
-	return copy_from_iter_nocache(addr, bytes, i);
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return bytes;
+	else
+		return _copy_from_iter_flushcache(addr, bytes, i);
}
-#endif
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
lib/iov_iter.c  +69 −29
@@ -130,6 +130,24 @@
	}							\
}

+static int copyout(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = raw_copy_to_user(to, from, n);
+	}
+	return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+	if (access_ok(VERIFY_READ, from, n)) {
+		kasan_check_write(to, n);
+		n = raw_copy_from_user(to, from, n);
+	}
+	return n;
+}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
	if (unlikely(!bytes))
		return 0;

+	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
		from = kaddr + offset;

		/* first chunk, usually the only one */
-		left = __copy_to_user_inatomic(buf, from, copy);
+		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
-			left = __copy_to_user_inatomic(buf, from, copy);
+			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b

	kaddr = kmap(page);
	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
+	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
+		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
	if (unlikely(!bytes))
		return 0;

+	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
		to = kaddr + offset;

		/* first chunk, usually the only one */
-		left = __copy_from_user_inatomic(to, buf, copy);
+		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
-			left = __copy_from_user_inatomic(to, buf, copy);
+			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t

	kaddr = kmap(page);
	to = kaddr + offset;
-	left = __copy_from_user(to, buf, copy);
+	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user(to, buf, copy);
+		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
	return bytes;
}

-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
+	if (iter_is_iovec(i))
+		might_fault();
	iterate_and_advance(i, bytes, v,
-		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-			       v.iov_len),
+		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)

	return bytes;
}
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);

-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
+	if (iter_is_iovec(i))
+		might_fault();
	iterate_and_advance(i, bytes, v,
-		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-				 v.iov_len),
+		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)

	return bytes;
}
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);

-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(i->count < bytes))
		return false;

+	if (iter_is_iovec(i))
+		might_fault();
	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
	iov_iter_advance(i, bytes);
	return true;
}
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);

-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
@@ -613,10 +637,10 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)

	return bytes;
}
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
@@ -634,10 +658,10 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)

	return bytes;
}
-EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
@@ -659,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
	iov_iter_advance(i, bytes);
	return true;
}
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);

+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+	size_t v = n + offset;
+	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+		return true;
+	WARN_ON(1);
+	return false;
+}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -679,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
-		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
@@ -722,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
-		__clear_user(v.iov_base, v.iov_len),
+		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)
@@ -735,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+	if (unlikely(!page_copy_sane(page, offset, bytes))) {
+		kunmap_atomic(kaddr);
+		return 0;
+	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
-		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-					  v.iov_base, v.iov_len),
+		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
lib/usercopy.c  +8 −2
@@ -6,8 +6,11 @@
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
-	if (likely(access_ok(VERIFY_READ, from, n)))
+	might_fault();
+	if (likely(access_ok(VERIFY_READ, from, n))) {
+		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
+	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
@@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user);
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
{
-	if (likely(access_ok(VERIFY_WRITE, to, n)))
+	might_fault();
+	if (likely(access_ok(VERIFY_WRITE, to, n))) {
+		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
+	}
	return n;
}
EXPORT_SYMBOL(_copy_to_user);