Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 325fda71 authored by KAMEZAWA Hiroyuki's avatar KAMEZAWA Hiroyuki Committed by Linus Torvalds
Browse files

devmem: check vmalloc address on kmem read/write



Otherwise vmalloc_to_page() will BUG().

This also makes the kmem read/write implementation aligned with mem(4):
"References to nonexistent locations cause errors to be returned." Here we
return -ENXIO (inspired by Hugh) if no bytes have been transferred to/from
user space, otherwise return partial read/write results.

Signed-off-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: default avatarWu Fengguang <fengguang.wu@intel.com>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: <stable@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 931e80e4
Loading
Loading
Loading
Loading
+18 −10
Original line number Diff line number Diff line
@@ -395,6 +395,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
@@ -441,12 +442,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
@@ -456,7 +461,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
 	return read;
	return read ? read : err;
}


@@ -520,6 +525,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
@@ -540,12 +546,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				if (wrote + virtr)
				err = -EFAULT;
				break;
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			sz = vwrite(kbuf, (char *)p, sz);
			count -= sz;
@@ -557,7 +565,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
	}

	*ppos = p;
 	return virtr + wrote;
	return virtr + wrote ? : err;
}
#endif