Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3255aa2e authored by Ingo Molnar
Browse files

x86, mm: pass in 'total' to __copy_from_user_*nocache()



Impact: cleanup, enable future change

Add a 'total bytes copied' parameter to __copy_from_user_*nocache(),
and update all the callsites.

The parameter is not used yet - architecture code can use it to
more intelligently decide whether the copy should be cached or
non-temporal.

Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 95f66b37
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
}

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
		const void __user *from, unsigned long n, unsigned long total)
{
	might_fault();
	if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
				  unsigned long n, unsigned long total)
{
       return __copy_from_user_ll_nocache_nozero(to, from, n);
}
+2 −3
Original line number Diff line number Diff line
@@ -189,7 +189,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
				   unsigned size, unsigned long total)
{
	might_sleep();
	/*
@@ -205,8 +205,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
	    const void __user *src, unsigned size, unsigned total)
{
	if (likely(size >= PAGE_SIZE))
		return __copy_user_nocache(dst, src, size, 0);
+1 −1
Original line number Diff line number Diff line
@@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping,

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
						      user_data, length, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
+2 −2
Original line number Diff line number Diff line
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
		const void __user *from, unsigned long n, unsigned long total)
{
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
		const void __user *from, unsigned long n, unsigned long total)
{
	return __copy_from_user(to, from, n);
}
+6 −4
Original line number Diff line number Diff line
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;
	size_t copied = 0, left = 0, total = bytes;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
@@ -1851,8 +1851,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;

		left = __copy_from_user_inatomic_nocache(kaddr + offset,
							buf, bytes);
							buf, bytes, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1881,8 @@ size_t iov_iter_copy_from_user(struct page *page,
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);

		left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
Loading