Makefile  +1 −1

 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 59
 SUBLEVEL = 63
 EXTRAVERSION =
 NAME = Blurry Fish Butt

arch/arm/kvm/mmu.c  +10 −3

@@ -803,6 +803,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 	idx = srcu_read_lock(&kvm->srcu);
 	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 	slots = kvm_memslots(kvm);

@@ -810,6 +811,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }

@@ -1771,6 +1773,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 					(KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map

@@ -1814,8 +1817,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 			/* IO region dirty page logging not allowed */
 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
 				return -EINVAL;
 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 				ret = -EINVAL;
 				goto out;
 			}
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,

@@ -1827,7 +1832,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 	if (change == KVM_MR_FLAGS_ONLY)
 		return ret;
 		goto out;
 	spin_lock(&kvm->mmu_lock);
 	if (ret)

@@ -1835,6 +1840,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 out:
 	up_read(&current->mm->mmap_sem);
 	return ret;
 }
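Read together, the stage2_unmap_vm() hunks above nest the newly taken mmap_sem inside the existing SRCU section and outside the mmu_lock. A minimal sketch of the resulting shape, assuming the unchanged memslot-iteration body between the hunks stays as in the current source:

	/* Sketch only: lock nesting in stage2_unmap_vm() after this diff. */
	void stage2_unmap_vm(struct kvm *kvm)
	{
		struct kvm_memslots *slots;
		struct kvm_memory_slot *memslot;
		int idx;

		idx = srcu_read_lock(&kvm->srcu);
		down_read(&current->mm->mmap_sem);	/* added: taken before mmu_lock */
		spin_lock(&kvm->mmu_lock);

		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(memslot, slots)	/* assumed unchanged context */
			stage2_unmap_memslot(kvm, memslot);

		spin_unlock(&kvm->mmu_lock);
		up_read(&current->mm->mmap_sem);	/* added: dropped before SRCU */
		srcu_read_unlock(&kvm->srcu, idx);
	}

The same ordering is mirrored in kvm_arch_prepare_memory_region(), where the new out: label releases mmap_sem on every exit path.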
"29:\n" \ "SUB %3, %3, #32\n" \ "27:\n" \ "30:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "28:\n" \ "31:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "32:\n" \ "SUB %0, %0, #8\n" \ "29:\n" \ "33:\n" \ "SETL [%0++], D0.7, D1.7\n" \ "SUB %3, %3, #32\n" \ "1:" \ Loading Loading @@ -312,11 +315,15 @@ " .long 26b,3b\n" \ " .long 27b,3b\n" \ " .long 28b,3b\n" \ " .long 29b,4b\n" \ " .long 29b,3b\n" \ " .long 30b,3b\n" \ " .long 31b,3b\n" \ " .long 32b,3b\n" \ " .long 33b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ : "D1Ar1", "D0Ar2", "memory") : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * Loading @@ -342,7 +349,7 @@ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ "AND D0Ar2, D0Ar2, #0x7\n" \ "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ Loading Loading @@ -403,47 +410,55 @@ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "SUB %3, %3, #16\n" \ "23:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "24:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "SUB %3, %3, #16\n" \ "25:\n" \ "24:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "26:\n" \ "25:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "26:\n" \ "SUB %3, %3, #16\n" \ "27:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "28:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "29:\n" \ "SUB %3, %3, #16\n" \ "30:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "31:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "32:\n" \ "SUB %3, %3, #16\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ "29:\n" \ "33:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "30:\n" \ "34:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "35:\n" \ "SUB %3, %3, #16\n" \ "31:\n" \ "36:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "32:\n" \ "37:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "38:\n" \ "SUB %3, %3, #16\n" \ "33:\n" \ "39:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "34:\n" \ "40:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "41:\n" \ "SUB %3, %3, #16\n" \ "35:\n" \ "42:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "36:\n" \ "43:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "44:\n" \ "SUB %0, %0, #4\n" \ "37:\n" \ "45:\n" \ "SETD [%0++], D0.7\n" \ "SUB %3, %3, #16\n" \ "1:" \ Loading Loading @@ -483,11 +498,19 @@ " .long 34b,3b\n" \ " .long 35b,3b\n" \ " .long 36b,3b\n" \ " .long 37b,4b\n" \ " .long 37b,3b\n" \ " .long 38b,3b\n" \ " .long 39b,3b\n" \ " .long 40b,3b\n" \ " .long 41b,3b\n" \ " .long 42b,3b\n" \ " .long 43b,3b\n" \ " .long 44b,3b\n" \ " .long 45b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ : "D1Ar1", "D0Ar2", "memory") : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * Loading @@ -513,7 +536,7 @@ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ "AND D0Ar2, D0Ar2, #0x7\n" \ "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ Loading @@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, if ((unsigned long) src & 1) { __asm_copy_to_user_1(dst, src, retn); n--; 
arch/metag/lib/usercopy.c  +120 −192

@@ -29,7 +29,6 @@
 		COPY						\
 		"1:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
 		"	MOV D1Ar1,#0\n"				\
 		FIXUP						\
 		"	MOVT    D1Ar1,#HI(1b)\n"		\
 		"	JUMP    D1Ar1,#LO(1b)\n"		\

@@ -260,27 +259,31 @@
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"SUB	%3, %3, #32\n"				\
 		"23:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"SUB	%3, %3, #32\n"				\
 		"24:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"25:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"26:\n"						\
 		"SUB	%3, %3, #32\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 								\
 		"MOV	RAPF, %1\n"				\
 		"25:\n"						\
 		"27:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"26:\n"						\
 		"28:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"29:\n"						\
 		"SUB	%3, %3, #32\n"				\
 		"27:\n"						\
 		"30:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"28:\n"						\
 		"31:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"32:\n"						\
 		"SUB	%0, %0, #8\n"				\
 		"29:\n"						\
 		"33:\n"						\
 		"SETL	[%0++], D0.7, D1.7\n"			\
 		"SUB	%3, %3, #32\n"				\
 		"1:"						\

@@ -312,11 +315,15 @@
 		"	.long 26b,3b\n"	\
 		"	.long 27b,3b\n"	\
 		"	.long 28b,3b\n"	\
 		"	.long 29b,4b\n"	\
 		"	.long 29b,3b\n"	\
 		"	.long 30b,3b\n"	\
 		"	.long 31b,3b\n"	\
 		"	.long 32b,3b\n"	\
 		"	.long 33b,4b\n"	\
 		"	.previous\n"	\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
 		: "D1Ar1", "D0Ar2", "memory")
 		: "D1Ar1", "D0Ar2", "cc", "memory")

 /* rewind 'to' and 'from' pointers when a fault occurs
  *

@@ -342,7 +349,7 @@
 #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
 		"AND	D0Ar2, D0Ar2, #0x7\n"			\
 		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\

@@ -403,47 +410,55 @@
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"SUB	%3, %3, #16\n"				\
 		"23:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"24:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"SUB	%3, %3, #16\n"				\
 		"25:\n"						\
 		"24:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"26:\n"						\
 		"25:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"26:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"27:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"28:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"29:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"30:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"31:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"32:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 								\
 		"MOV	RAPF, %1\n"				\
 		"29:\n"						\
 		"33:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"30:\n"						\
 		"34:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"35:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"31:\n"						\
 		"36:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"32:\n"						\
 		"37:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"38:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"33:\n"						\
 		"39:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"34:\n"						\
 		"40:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"41:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"35:\n"						\
 		"42:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"36:\n"						\
 		"43:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"44:\n"						\
 		"SUB	%0, %0, #4\n"				\
 		"37:\n"						\
 		"45:\n"						\
 		"SETD	[%0++], D0.7\n"				\
 		"SUB	%3, %3, #16\n"				\
 		"1:"						\

@@ -483,11 +498,19 @@
 		"	.long 34b,3b\n"	\
 		"	.long 35b,3b\n"	\
 		"	.long 36b,3b\n"	\
 		"	.long 37b,4b\n"	\
 		"	.long 37b,3b\n"	\
 		"	.long 38b,3b\n"	\
 		"	.long 39b,3b\n"	\
 		"	.long 40b,3b\n"	\
 		"	.long 41b,3b\n"	\
 		"	.long 42b,3b\n"	\
 		"	.long 43b,3b\n"	\
 		"	.long 44b,3b\n"	\
 		"	.long 45b,4b\n"	\
 		"	.previous\n"	\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
 		: "D1Ar1", "D0Ar2", "memory")
 		: "D1Ar1", "D0Ar2", "cc", "memory")

 /* rewind 'to' and 'from' pointers when a fault occurs
  *

@@ -513,7 +536,7 @@
 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
 		"AND	D0Ar2, D0Ar2, #0x7\n"			\
 		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\

@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_to_user_1(dst, src, retn);
 		n--;
 		if (retn)
 			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
 		while (n > 0) {
 			__asm_copy_to_user_1(dst, src, retn);
 			n--;
 			if (retn)
 				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_to_user_2(dst, src, retn);
 		n -= 2;
 		if (retn)
 			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
 		while (n >= 2) {
 			__asm_copy_to_user_2(dst, src, retn);
 			n -= 2;
 			if (retn)
 				return retn + n;
 		}
 	}

@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
 				return retn + n;
 		}
 	}
 	if (n >= RAPF_MIN_BUF_SIZE) {

@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
 				return retn + n;
 		}
 	}
 #endif

@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	while (n >= 16) {
 		__asm_copy_to_user_16(dst, src, retn);
 		n -= 16;
 		if (retn)
 			return retn + n;
 	}
 	while (n >= 4) {
 		__asm_copy_to_user_4(dst, src, retn);
 		n -= 4;
 		if (retn)
 			return retn + n;
 	}
 	switch (n) {

@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		break;
 	}
 	/*
 	 * If we get here, retn correctly reflects the number of failing
 	 * bytes.
 	 */
 	return retn;
 }
 EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_user_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"	\
 		"2:	SETB [%0++],D1Ar1\n",	\
 		"3:	ADD  %2,%2,#1\n"	\
 		"	SETB [%0++],D1Ar1\n",	\
 		"3:	ADD  %2,%2,#1\n",	\
 		"	.long 2b,3b\n")

 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"2:	SETW [%0++],D1Ar1\n" COPY,	\
 		"3:	ADD  %2,%2,#2\n"		\
 		"	SETW [%0++],D1Ar1\n" FIXUP,	\
 		"3:	ADD  %2,%2,#2\n" FIXUP,	\
 		"	.long 2b,3b\n" TENTRY)

 #define __asm_copy_from_user_2(to, from, ret) \

@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"4:	SETB [%0++],D1Ar1\n",		\
 		"5:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"5:	ADD  %2,%2,#1\n",		\
 		"	.long 4b,5b\n")

 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"2:	SETD [%0++],D1Ar1\n" COPY,	\
 		"3:	ADD  %2,%2,#4\n"		\
 		"	SETD [%0++],D1Ar1\n" FIXUP,	\
 		"3:	ADD  %2,%2,#4\n" FIXUP,	\
 		"	.long 2b,3b\n" TENTRY)

 #define __asm_copy_from_user_4(to, from, ret) \
 	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_5(to, from, ret) \
 	__asm_copy_from_user_4x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"4:	SETB [%0++],D1Ar1\n",		\
 		"5:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"	.long 4b,5b\n")

 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_from_user_4x_cont(to, from, ret,	\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"4:	SETW [%0++],D1Ar1\n" COPY,	\
 		"5:	ADD  %2,%2,#2\n"		\
 		"	SETW [%0++],D1Ar1\n" FIXUP,	\
 		"	.long 4b,5b\n" TENTRY)

 #define __asm_copy_from_user_6(to, from, ret) \
 	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_7(to, from, ret) \
 	__asm_copy_from_user_6x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"6:	SETB [%0++],D1Ar1\n",		\
 		"7:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"	.long 6b,7b\n")

 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_from_user_4x_cont(to, from, ret,	\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"4:	SETD [%0++],D1Ar1\n" COPY,	\
 		"5:	ADD  %2,%2,#4\n"		\
 		"	SETD [%0++],D1Ar1\n" FIXUP,	\
 		"	.long 4b,5b\n" TENTRY)

 #define __asm_copy_from_user_8(to, from, ret) \
 	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_9(to, from, ret) \
 	__asm_copy_from_user_8x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"6:	SETB [%0++],D1Ar1\n",		\
 		"7:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"	.long 6b,7b\n")

 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_from_user_8x_cont(to, from, ret,	\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"6:	SETW [%0++],D1Ar1\n" COPY,	\
 		"7:	ADD  %2,%2,#2\n"		\
 		"	SETW [%0++],D1Ar1\n" FIXUP,	\
 		"	.long 6b,7b\n" TENTRY)

 #define __asm_copy_from_user_10(to, from, ret) \
 	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_11(to, from, ret) \
 	__asm_copy_from_user_10x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"8:	SETB [%0++],D1Ar1\n",		\
 		"9:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"	.long 8b,9b\n")

 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_from_user_8x_cont(to, from, ret,	\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"6:	SETD [%0++],D1Ar1\n" COPY,	\
 		"7:	ADD  %2,%2,#4\n"		\
 		"	SETD [%0++],D1Ar1\n" FIXUP,	\
 		"	.long 6b,7b\n" TENTRY)

 #define __asm_copy_from_user_12(to, from, ret) \
 	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_13(to, from, ret) \
 	__asm_copy_from_user_12x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"8:	SETB [%0++],D1Ar1\n",		\
 		"9:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"	.long 8b,9b\n")

 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_from_user_12x_cont(to, from, ret,	\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"8:	SETW [%0++],D1Ar1\n" COPY,	\
 		"9:	ADD  %2,%2,#2\n"		\
 		"	SETW [%0++],D1Ar1\n" FIXUP,	\
 		"	.long 8b,9b\n" TENTRY)

 #define __asm_copy_from_user_14(to, from, ret) \
 	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_15(to, from, ret) \
 	__asm_copy_from_user_14x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"10:	SETB [%0++],D1Ar1\n",		\
 		"11:	ADD  %2,%2,#1\n"		\
 		"	SETB [%0++],D1Ar1\n",		\
 		"	.long 10b,11b\n")

 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_from_user_12x_cont(to, from, ret,	\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"8:	SETD [%0++],D1Ar1\n" COPY,	\
 		"9:	ADD  %2,%2,#4\n"		\
 		"	SETD [%0++],D1Ar1\n" FIXUP,	\
 		"	.long 8b,9b\n" TENTRY)

 #define __asm_copy_from_user_16(to, from, ret) \
 	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

 #define __asm_copy_from_user_8x64(to, from, ret) \
 	asm volatile (					\
 		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
 		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"1:\n"					\
 		"	.section .fixup,\"ax\"\n"	\
 		"	MOV D1Ar1,#0\n"			\
 		"	MOV D0Ar2,#0\n"			\
 		"3:	ADD  %2,%2,#8\n"		\
 		"	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"	MOVT    D0Ar2,#HI(1b)\n"	\
 		"	JUMP    D0Ar2,#LO(1b)\n"	\
 		"	.previous\n"			\

@@ -789,35 +711,56 @@ EXPORT_SYMBOL(__copy_user);
  *
  *	Rationale:
  *		A fault occurs while reading from user buffer, which is the
  *		source. Since the fault is at a single address, we only
  *		need to rewind by 8 bytes.
  *		source.
  *		Since we don't write to kernel buffer until we read first,
  *		the kernel buffer is at the right state and needn't be
  *		corrected.
  *		corrected, but the source must be rewound to the beginning of
  *		the block, which is LSM_STEP*8 bytes.
  *		LSM_STEP is bits 10:8 in TXSTATUS which is already read
  *		and stored in D0Ar2
  *
  *		NOTE: If a fault occurs at the last operation in M{G,S}ETL
  *		LSM_STEP will be 0. ie: we do 4 writes in our case, if
  *		a fault happens at the 4th write, LSM_STEP will be 0
  *		instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
 		"SUB	%1, %1, #8\n")
 		"LSR	D0Ar2, D0Ar2, #5\n"				\
 		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
 		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
 		"SUB	%1, %1, D0Ar2\n")

 /* rewind 'from' pointer when a fault occurs
  *
  *	Rationale:
  *		A fault occurs while reading from user buffer, which is the
  *		source. Since the fault is at a single address, we only
  *		need to rewind by 4 bytes.
  *		source.
  *		Since we don't write to kernel buffer until we read first,
  *		the kernel buffer is at the right state and needn't be
  *		corrected.
  *		corrected, but the source must be rewound to the beginning of
  *		the block, which is LSM_STEP*4 bytes.
  *		LSM_STEP is bits 10:8 in TXSTATUS which is already read
  *		and stored in D0Ar2
  *
  *		NOTE: If a fault occurs at the last operation in M{G,S}ETL
  *		LSM_STEP will be 0. ie: we do 4 writes in our case, if
  *		a fault happens at the 4th write, LSM_STEP will be 0
  *		instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
 		"SUB	%1, %1, #4\n")
 		"LSR	D0Ar2, D0Ar2, #6\n"				\
 		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
 		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
 		"SUB	%1, %1, D0Ar2\n")
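The new LSR/ANDS/ADDZ/SUB sequences compute the rewind distance described in the comments above from the LSM_STEP field. Written out in C purely for illustration (the helper name and the idea of holding TXSTATUS in a plain variable are assumptions, not kernel code):

	/* Illustration only: rewind distance computed by the fixups above.
	 * Assumes txstatus holds the TXSTATUS value already fetched into D0Ar2. */
	static unsigned long rapf_rewind_bytes(unsigned long txstatus, unsigned long unit)
	{
		/* unit is 8 for the MGETL (64-bit) loop, 4 for the MGETD (32-bit) loop */
		unsigned long lsm_step = (txstatus >> 8) & 0x7;	/* bits 10:8 */
		unsigned long bytes = lsm_step * unit;		/* what LSR + ANDS produce */

		if (bytes == 0)			/* ADDZ case: LSM_STEP == 0 at the last op */
			bytes = 4 * unit;	/* rewind a whole block: 32 or 16 bytes */
		return bytes;			/* the asm then does: SUB %1, %1, bytes */
	}

The source pointer is pulled back by that many bytes so the partially transferred block is accounted for consistently instead of the fixed 8- or 4-byte rewind used before.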
 /* Copy from user to kernel, zeroing the bytes that were inaccessible in
    userland.  The return-value is the number of bytes that were
    inaccessible.  */
 unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 /*
  * Copy from user to kernel. The return-value is the number of bytes that were
  * inaccessible.
  */
 unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
 				  unsigned long n)
 {
 	register char *dst asm ("A0.2") = pdst;

@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_from_user_1(dst, src, retn);
 		n--;
 		if (retn)
 			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */

@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_1(dst, src, retn);
 			n--;
 			if (retn)
 				goto copy_exception_bytes;
 				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_from_user_2(dst, src, retn);
 		n -= 2;
 		if (retn)
 			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */

@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_2(dst, src, retn);
 			n -= 2;
 			if (retn)
 				goto copy_exception_bytes;
 				return retn + n;
 		}
 	}

 	/* We only need one check after the unalignment-adjustments,
 	   because if both adjustments were done, either both or
 	   neither reference had an exception. */
 	if (retn != 0)
 		goto copy_exception_bytes;

 #ifdef USE_RAPF
 	/* 64 bit copy loop */
 	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {

@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
 				goto copy_exception_bytes;
 				return retn + n;
 		}
 	}

@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
 				goto copy_exception_bytes;
 				return retn + n;
 		}
 	}
 #endif

@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 		n -= 4;
 		if (retn)
 			goto copy_exception_bytes;
 			return retn + n;
 	}

 	/* If we get here, there were no memory read faults. */

@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	/* If we get here, retn correctly reflects the number of failing
 	   bytes. */
 	return retn;

 copy_exception_bytes:
 	/* We already have "retn" bytes cleared, and need to clear the
 	   remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
 	   memset is preferred here, since this isn't speed-critical code and
 	   we'd rather have this a leaf-function than calling memset. */
 	{
 		char *endp;
 		for (endp = dst + n; dst < endp; dst++)
 			*dst = 0;
 	}

 	return retn + n;
 }
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(raw_copy_from_user);

 #define __asm_clear_8x64(to, ret) \
 	asm volatile (			\

arch/mips/Kconfig  +2 −1

@@ -9,6 +9,7 @@ config MIPS
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC

@@ -1463,7 +1464,7 @@ config CPU_MIPS64_R6
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_MSA
 	select GENERIC_CSUM
 	select MIPS_O32_FP64_SUPPORT if MIPS32_O32
 	select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
 	help
 	  Choose this option to build a kernel for release 6 or later of the
 	  MIPS64 architecture. New MIPS processors, starting with the Warrior