
Commit 0aea86a2 authored by Catalin Marinas

arm64: User access library functions



This patch adds support for various user access functions. These
functions use the standard LDR/STR instructions rather than the
LDRT/STRT variants, so that kernel addresses are also accepted
(after set_fs(KERNEL_DS)).
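
As a quick illustration of why plain LDR/STR matters, the classic pattern this
permits is sketched below (illustrative only, not part of the patch; kbuf,
kaddr, len and ret are hypothetical):

	/* Illustrative sketch: widen the address limit so the uaccess
	 * helpers also accept kernel pointers. */
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* access_ok() now passes kernel addresses */
	ret = copy_from_user(kbuf, (const void __user *)kaddr, len);
	set_fs(old_fs);		/* always restore the previous limit */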

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
parent 2c020ed8
arch/arm64/include/asm/uaccess.h (+297 −0)
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
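
A minimal sketch of how such a table is typically consumed on a fault:
search_exception_tables() and instruction_pointer() are existing kernel
helpers, but this body is illustrative rather than the patch's own
implementation.

	/* Illustrative fixup: look up the faulting PC in the exception
	 * table; if an entry exists, resume execution at its fixup. */
	int fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(instruction_pointer(regs));
		if (fixup)
			regs->pc = fixup->fixup;
		return fixup != NULL;
	}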

#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a,b)	((a) == (b))

/*
 * Return 1 if addr < current->addr_limit, 0 otherwise.
 */
#define __addr_ok(addr)							\
({									\
	unsigned long flag;						\
	asm("cmp %1, %0; cset %0, lo"					\
		: "=&r" (flag)						\
		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
		: "cc");						\
	flag;								\
})
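
In plain C the asm above reduces to a single unsigned compare ("lo" is the
unsigned lower-than condition); a sketch, with an illustrative name:

	/* C-level equivalent of __addr_ok() (illustrative only). */
	static inline int addr_ok_c(unsigned long addr)
	{
		return addr < current_thread_info()->addr_limit;
	}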

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size < (u65)current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
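
An equivalent C formulation, with the wrap-around case from the 65-bit comment
made explicit (a sketch with an illustrative name, not the patch's code):

	/* C-level equivalent of __range_ok() (illustrative only): the sum
	 * must not wrap in 64 bits and must stay below addr_limit; the
	 * adds/ccmp/cset sequence computes this predicate without a branch. */
	static inline int range_ok_c(unsigned long addr, unsigned long size)
	{
		unsigned long sum = addr + size;

		return sum >= addr &&	/* no 64-bit overflow */
		       sum < current_thread_info()->addr_limit;
	}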

#define access_ok(type, addr, size)	__range_ok(addr, size)

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, reg, x, addr, err)			\
	asm volatile(							\
	"1:	" instr "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	"	.section __ex_table,\"a\"\n"				\
	"	.align	3\n"						\
	"	.quad	1b, 3b\n"					\
	"	.previous"						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "%",  __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	might_sleep();							\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?			\
		__get_user((x), (ptr)) :				\
		((x) = 0, -EFAULT);					\
})
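
A hypothetical caller, showing the checked/unchecked split described above
(function and variable names are illustrative):

	/* Illustrative only: get_user() performs its own access_ok()
	 * check, while __get_user() assumes the caller already did one. */
	static int read_word(int __user *uptr, int *out)
	{
		int v;

		if (get_user(v, uptr))		/* checked variant */
			return -EFAULT;

		*out = v;
		return 0;
	}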

#define __put_user_asm(instr, reg, x, addr, err)			\
	asm volatile(							\
	"1:	" instr "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	"	.section __ex_table,\"a\"\n"				\
	"	.align	3\n"						\
	"	.quad	1b, 3b\n"					\
	"	.previous"						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_user_asm("strh", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_user_asm("str",  "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_user_asm("str",  "%", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	might_sleep();							\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__put_user((x), (ptr)) :				\
		-EFAULT;						\
})
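
And a matching sketch for the store side (again hypothetical, not part of
the patch):

	/* Illustrative only: put_user() returns 0 on success, or -EFAULT
	 * if access_ok() fails or the store itself faults. */
	static int write_word(int __user *uptr, int v)
	{
		return put_user(v, uptr);
	}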

extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __must_check __strnlen_user(const char __user *s, long n);

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		res = __strncpy_from_user(dst, src, count);
	return res;
}

#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)

static inline long __must_check strnlen_user(const char __user *s, long n)
{
	unsigned long res = 0;

	if (__addr_ok(s))
		res = __strnlen_user(s, n);

	return res;
}

#endif /* __ASM_UACCESS_H */
arch/arm64/lib/clear_user.S (+58 −0)
/*
 * Based on arch/arm/lib/clear_user.S
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	.text

/* Prototype: int __clear_user(void *addr, size_t sz)
 * Purpose  : clear some user memory
 * Params   : addr - user memory address to clear
 *          : sz   - number of bytes to clear
 * Returns  : number of bytes NOT cleared
 *
 * Alignment fixed up by hardware.
 */
ENTRY(__clear_user)
	mov	x2, x1			// save the size for fixup return
	subs	x1, x1, #8
	b.mi	2f
1:
USER(9f, str	xzr, [x0], #8	)
	subs	x1, x1, #8
	b.pl	1b
2:	adds	x1, x1, #4
	b.mi	3f
USER(9f, str	wzr, [x0], #4	)
	sub	x1, x1, #4
3:	adds	x1, x1, #2
	b.mi	4f
USER(9f, strh	wzr, [x0], #2	)
	sub	x1, x1, #2
4:	adds	x1, x1, #1
	b.mi	5f
	strb	wzr, [x0]
5:	mov	x0, #0
	ret
ENDPROC(__clear_user)

	.section .fixup,"ax"
	.align	2
9:	mov	x0, x2			// return the original size
	ret
	.previous
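
For reference, a rough C model of the descending store-width cascade above
(illustrative only; the real routine relies on the USER() fixup annotations
for fault handling, which this model omits):

	/* Model of __clear_user(): 8-byte stores while at least 8 bytes
	 * remain, then at most one 4-, 2- and 1-byte store for the tail.
	 * Returns the number of bytes NOT cleared (0 when nothing faults). */
	unsigned long clear_user_model(char *addr, unsigned long sz)
	{
		unsigned long n = sz;

		for (; n >= 8; n -= 8, addr += 8)
			*(unsigned long *)addr = 0;		/* str xzr  */
		if (n >= 4) { *(unsigned int *)addr = 0; addr += 4; n -= 4; }
		if (n >= 2) { *(unsigned short *)addr = 0; addr += 2; n -= 2; }
		if (n)
			*addr = 0;				/* strb wzr */
		return 0;
	}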
arch/arm64/lib/copy_from_user.S (+66 −0)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Copy from user space to a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
ENTRY(__copy_from_user)
	add	x4, x1, x2			// upper user buffer boundary
	subs	x2, x2, #8
	b.mi	2f
1:
USER(9f, ldr	x3, [x1], #8	)
	subs	x2, x2, #8
	str	x3, [x0], #8
	b.pl	1b
2:	adds	x2, x2, #4
	b.mi	3f
USER(9f, ldr	w3, [x1], #4	)
	sub	x2, x2, #4
	str	w3, [x0], #4
3:	adds	x2, x2, #2
	b.mi	4f
USER(9f, ldrh	w3, [x1], #2	)
	sub	x2, x2, #2
	strh	w3, [x0], #2
4:	adds	x2, x2, #1
	b.mi	5f
USER(9f, ldrb	w3, [x1]	)
	strb	w3, [x0]
5:	mov	x0, #0
	ret
ENDPROC(__copy_from_user)

	.section .fixup,"ax"
	.align	2
9:	sub	x2, x4, x1
	mov	x3, x2
10:	strb	wzr, [x0], #1			// zero remaining buffer space
	subs	x3, x3, #1
	b.ne	10b
	mov	x0, x2				// bytes not copied
	ret
	.previous
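
One subtlety in the fixup path above is worth spelling out: when a user load
faults, the untouched tail of the kernel destination is zero-filled before the
shortfall is returned, so the caller never sees uninitialised kernel memory.
A model (names illustrative):

	/* Model of the 9:/10: fixup: x4 held from + n, so x4 - x1 is the
	 * count of bytes never copied; that many destination bytes are
	 * zeroed and the count is returned to the caller. */
	static unsigned long fixup_model(char *to, unsigned long not_copied)
	{
		memset(to, 0, not_copied);	/* zero the remaining buffer */
		return not_copied;		/* bytes not copied */
	}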
arch/arm64/lib/copy_in_user.S (+63 −0)
/*
 * Copy from user space to user space
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Copy from user space to user space (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
ENTRY(__copy_in_user)
	add	x4, x0, x2			// upper user buffer boundary
	subs	x2, x2, #8
	b.mi	2f
1:
USER(9f, ldr	x3, [x1], #8	)
	subs	x2, x2, #8
USER(9f, str	x3, [x0], #8	)
	b.pl	1b
2:	adds	x2, x2, #4
	b.mi	3f
USER(9f, ldr	w3, [x1], #4	)
	sub	x2, x2, #4
USER(9f, str	w3, [x0], #4	)
3:	adds	x2, x2, #2
	b.mi	4f
USER(9f, ldrh	w3, [x1], #2	)
	sub	x2, x2, #2
USER(9f, strh	w3, [x0], #2	)
4:	adds	x2, x2, #1
	b.mi	5f
USER(9f, ldrb	w3, [x1]	)
USER(9f, strb	w3, [x0]	)
5:	mov	x0, #0
	ret
ENDPROC(__copy_in_user)

	.section .fixup,"ax"
	.align	2
9:	sub	x0, x4, x0			// bytes not copied
	ret
	.previous
arch/arm64/lib/copy_to_user.S (+61 −0)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Copy to user space from a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
ENTRY(__copy_to_user)
	add	x4, x0, x2			// upper user buffer boundary
	subs	x2, x2, #8
	b.mi	2f
1:
	ldr	x3, [x1], #8
	subs	x2, x2, #8
USER(9f, str	x3, [x0], #8	)
	b.pl	1b
2:	adds	x2, x2, #4
	b.mi	3f
	ldr	w3, [x1], #4
	sub	x2, x2, #4
USER(9f, str	w3, [x0], #4	)
3:	adds	x2, x2, #2
	b.mi	4f
	ldrh	w3, [x1], #2
	sub	x2, x2, #2
USER(9f, strh	w3, [x0], #2	)
4:	adds	x2, x2, #1
	b.mi	5f
	ldrb	w3, [x1]
USER(9f, strb	w3, [x0]	)
5:	mov	x0, #0
	ret
ENDPROC(__copy_to_user)

	.section .fixup,"ax"
	.align	2
9:	sub	x0, x4, x0			// bytes not copied
	ret
	.previous