arch/mips/include/asm/uaccess.h  +263 −1

@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
-#include <asm-generic/uaccess.h>

 /*
  * The fs value determines whether argument validity checking should be

@@ -373,6 +372,269 @@ do {
 extern void __put_user_unknown(void);

+/*
+ * put_user_unaligned: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user_unaligned(x,ptr) \
+	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * get_user_unaligned: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user_unaligned(x,ptr) \
+	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * __put_user_unaligned: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user_unaligned(x,ptr) \
+	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * __get_user_unaligned: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user_unaligned(x,ptr) \
+	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * Yuck.  We need two variants, one for 64bit operation and one
+ * for 32 bit mode and old iron.
+ */
+#ifdef CONFIG_32BIT
+#define __GET_USER_UNALIGNED_DW(val, ptr) \
+	__get_user_unaligned_asm_ll32(val, ptr)
+#endif
+#ifdef CONFIG_64BIT
+#define __GET_USER_UNALIGNED_DW(val, ptr) \
+	__get_user_unaligned_asm(val, "uld", ptr)
+#endif
+
+extern void __get_user_unaligned_unknown(void);
+
+#define __get_user_unaligned_common(val, size, ptr)               \
+do {                                                              \
+	switch (size) {                                           \
+	case 1: __get_user_asm(val, "lb", ptr); break;            \
+	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
+	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
+	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;         \
+	default: __get_user_unaligned_unknown(); break;           \
+	}                                                         \
+} while (0)
+
+#define __get_user_unaligned_nocheck(x,ptr,size)                  \
+({                                                                \
+	int __gu_err;                                             \
+                                                                  \
+	__get_user_unaligned_common((x), size, ptr);              \
+	__gu_err;                                                 \
+})
+
+#define __get_user_unaligned_check(x,ptr,size)                    \
+({                                                                \
+	int __gu_err = -EFAULT;                                   \
+	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);       \
+                                                                  \
+	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))       \
+		__get_user_unaligned_common((x), size, __gu_ptr); \
+                                                                  \
+	__gu_err;                                                 \
+})
+
+#define __get_user_unaligned_asm(val, insn, addr)                 \
+{                                                                 \
+	long __gu_tmp;                                            \
+                                                                  \
+	__asm__ __volatile__(                                     \
+	"1:	" insn "	%1, %3			\n"       \
+	"2:						\n"       \
+	"	.section .fixup,\"ax\"			\n"       \
+	"3:	li	%0, %4				\n"       \
+	"	j	2b				\n"       \
+	"	.previous				\n"       \
+	"	.section __ex_table,\"a\"		\n"       \
+	"	"__UA_ADDR "\t1b, 3b			\n"       \
+	"	"__UA_ADDR "\t1b + 4, 3b		\n"       \
+	"	.previous				\n"       \
+	: "=r" (__gu_err), "=r" (__gu_tmp)                        \
+	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));               \
+                                                                  \
+	(val) = (__typeof__(*(addr))) __gu_tmp;                   \
+}
+
+/*
+ * Get a long long 64 using 32 bit registers.
+ */
+#define __get_user_unaligned_asm_ll32(val, addr)                  \
+{                                                                 \
+	unsigned long long __gu_tmp;                              \
+                                                                  \
+	__asm__ __volatile__(                                     \
+	"1:	ulw	%1, (%3)			\n"       \
+	"2:	ulw	%D1, 4(%3)			\n"       \
+	"	move	%0, $0				\n"       \
+	"3:	.section	.fixup,\"ax\"		\n"       \
+	"4:	li	%0, %4				\n"       \
+	"	move	%1, $0				\n"       \
+	"	move	%D1, $0				\n"       \
+	"	j	3b				\n"       \
+	"	.previous				\n"       \
+	"	.section	__ex_table,\"a\"	\n"       \
+	"	" __UA_ADDR "	1b, 4b			\n"       \
+	"	" __UA_ADDR "	1b + 4, 4b		\n"       \
+	"	" __UA_ADDR "	2b, 4b			\n"       \
+	"	" __UA_ADDR "	2b + 4, 4b		\n"       \
+	"	.previous				\n"       \
+	: "=r" (__gu_err), "=&r" (__gu_tmp)                       \
+	: "0" (0), "r" (addr), "i" (-EFAULT));                    \
+	(val) = (__typeof__(*(addr))) __gu_tmp;                   \
+}
+
+/*
+ * Yuck.  We need two variants, one for 64bit operation and one
+ * for 32 bit mode and old iron.
+ */
+#ifdef CONFIG_32BIT
+#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
+#endif
+#ifdef CONFIG_64BIT
+#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
+#endif
+
+#define __put_user_unaligned_nocheck(x,ptr,size)                   \
+({                                                                 \
+	__typeof__(*(ptr)) __pu_val;                               \
+	int __pu_err = 0;                                          \
+                                                                   \
+	__pu_val = (x);                                            \
+	switch (size) {                                            \
+	case 1: __put_user_asm("sb", ptr); break;                  \
+	case 2: __put_user_unaligned_asm("ush", ptr); break;       \
+	case 4: __put_user_unaligned_asm("usw", ptr); break;       \
+	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;               \
+	default: __put_user_unaligned_unknown(); break;            \
+	}                                                          \
+	__pu_err;                                                  \
+})
+
+#define __put_user_unaligned_check(x,ptr,size)                     \
+({                                                                 \
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);              \
+	__typeof__(*(ptr)) __pu_val = (x);                         \
+	int __pu_err = -EFAULT;                                    \
+                                                                   \
+	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {    \
+		switch (size) {                                    \
+		case 1: __put_user_asm("sb", __pu_addr); break;    \
+		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
+		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
+		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break; \
+		default: __put_user_unaligned_unknown(); break;    \
+		}                                                  \
+	}                                                          \
+	__pu_err;                                                  \
+})
+
+#define __put_user_unaligned_asm(insn, ptr)                        \
+{                                                                  \
+	__asm__ __volatile__(                                      \
+	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n" \
+	"2:						\n"        \
+	"	.section	.fixup,\"ax\"		\n"        \
+	"3:	li	%0, %4				\n"        \
+	"	j	2b				\n"        \
+	"	.previous				\n"        \
+	"	.section	__ex_table,\"a\"	\n"        \
+	"	" __UA_ADDR "	1b, 3b			\n"        \
+	"	.previous				\n"        \
+	: "=r" (__pu_err)                                          \
+	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                \
+	  "i" (-EFAULT));                                          \
+}
+
+#define __put_user_unaligned_asm_ll32(ptr)                         \
+{                                                                  \
+	__asm__ __volatile__(                                      \
+	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
+	"2:	sw	%D2, 4(%3)			\n"        \
+	"3:						\n"        \
+	"	.section	.fixup,\"ax\"		\n"        \
+	"4:	li	%0, %4				\n"        \
+	"	j	3b				\n"        \
+	"	.previous				\n"        \
+	"	.section	__ex_table,\"a\"	\n"        \
+	"	" __UA_ADDR "	1b, 4b			\n"        \
+	"	" __UA_ADDR "	1b + 4, 4b		\n"        \
+	"	" __UA_ADDR "	2b, 4b			\n"        \
+	"	" __UA_ADDR "	2b + 4, 4b		\n"        \
+	"	.previous"                                         \
+	: "=r" (__pu_err)                                          \
+	: "0" (0), "r" (__pu_val), "r" (ptr),                      \
+	  "i" (-EFAULT));                                          \
+}
+
+extern void __put_user_unaligned_unknown(void);
+
 /*
  * We're generating jump to subroutines which will be outside the range of
  * jump instructions
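
The documented entry points above behave like get_user()/put_user() except that the user address may be misaligned. A minimal usage sketch, assuming kernel context; the packed struct, its fields, and read_seq() are invented for illustration and are not part of this patch:

#include <linux/types.h>
#include <asm/uaccess.h>

/* A user-supplied header whose 32-bit field is only byte-aligned. */
struct pkt_hdr {
	u8  type;
	u32 seq;		/* at byte offset 1: misaligned */
} __attribute__((packed));

static int read_seq(struct pkt_hdr __user *hdr, u32 *seq)
{
	/* sizeof(hdr->seq) == 4, so this selects the "ulw" case;
	 * returns zero on success or -EFAULT on a bad pointer. */
	return get_user_unaligned(*seq, &hdr->seq);
}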
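The double-underscore variants skip the access_ok() range check that __get_user_unaligned_check()/__put_user_unaligned_check() perform internally, so a caller making several accesses can validate the whole range once up front. A sketch of that pattern; put_two() is hypothetical:

#include <linux/types.h>
#include <asm/uaccess.h>

static int put_two(u32 __user *dst, u32 a, u32 b)
{
	/* One range check covers both stores ... */
	if (!access_ok(VERIFY_WRITE, dst, 2 * sizeof(u32)))
		return -EFAULT;
	/* ... so the unchecked variants are safe (and cheaper) here. */
	if (__put_user_unaligned(a, &dst[0]))
		return -EFAULT;
	return __put_user_unaligned(b, &dst[1]);
}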
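ulh, ulw, usw and friends are MIPS assembler macros that typically expand to partial-word instruction pairs such as lwl/lwr, so the access succeeds at any alignment; uld/usd are the 64-bit forms and need 64-bit registers, which is why CONFIG_32BIT instead routes 8-byte accesses through the _ll32 macros and a register pair (%1 and %D1, or %2 and %D2). In portable C the same effect is a byte-wise copy. A userspace sketch of the idea, with invented function names:

#include <stdint.h>
#include <string.h>

/* What an unaligned 32-bit load amounts to without hardware help:
 * memcpy() assumes nothing about alignment and never traps. */
static inline uint32_t load_u32_any_alignment(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

/* A 64-bit value is one 8-byte copy here; on 32-bit CPUs the kernel
 * macro instead issues two 32-bit loads into a register pair. */
static inline uint64_t load_u64_any_alignment(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}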