arch/alpha/include/asm/uaccess.h  (+5 −4)

@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
+	long res = n;
 	if (likely(__access_ok((unsigned long)from, n, get_fs())))
-		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
-	else
-		memset(to, 0, n);
-	return n;
+		res = __copy_from_user_inatomic(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 extern void __do_clear_user(void);
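Note on the pattern used throughout this series: the raw copy primitive returns the number of bytes it failed to copy, and the copy_from_user() wrapper then zeroes exactly that tail of the kernel buffer with memset(to + (n - res), 0, res), so no uninitialized kernel data survives a faulting copy. Below is a minimal user-space sketch of that post-condition, not kernel code; fake_raw_copy() is a made-up stand-in for the arch primitive (__copy_from_user_inatomic() here).

/*
 * Illustration only: simulate a copy that "faults" after `copied` bytes
 * and returns the number of bytes left uncopied, then zero only that tail.
 */
#include <stdio.h>
#include <string.h>

static unsigned long fake_raw_copy(void *to, const void *from,
				   unsigned long n, unsigned long copied)
{
	if (copied > n)
		copied = n;
	memcpy(to, from, copied);	/* the part that "succeeded" */
	return n - copied;		/* bytes not copied */
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];
	unsigned long n = sizeof(dst), res, i;

	memset(dst, 0x55, sizeof(dst));		/* stale data standing in for old kernel contents */
	res = fake_raw_copy(dst, src, n, 3);	/* pretend the fault hit after 3 bytes */
	if (res)				/* zero only the uncopied tail */
		memset(dst + (n - res), 0, res);

	for (i = 0; i < n; i++)
		printf("%02x ", (unsigned char)dst[i]);
	printf("\n");				/* prints: 41 42 43 00 00 00 00 00 */
	return 0;
}

The invariant being demonstrated is simply that every byte past the successfully copied prefix reads back as zero rather than as the stale 0x55 fill.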
arch/alpha/lib/copy_user.S  (+1 −15)

@@ -124,22 +124,8 @@ $65:
 	bis $31,$31,$0
 $41:
 $35:
-$exitout:
-	ret $31,($28),1
-
-$exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving
-	   random kernel data around to be copied elsewhere. */
-	mov $0,$1
-$101:
-	EXO ( ldq_u $2,0($6) )
-	subq $1,1,$1
-	mskbl $2,$6,$2
-	EXO ( stq_u $2,0($6) )
-	addq $6,1,$6
-	bgt $1,$101
+$exitout:
 	ret $31,($28),1
 
 	.end __copy_user
arch/alpha/lib/ev6-copy_user.S  (+1 −22)

@@ -227,33 +227,12 @@ $dirtyentry:
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 
 $zerolength:
+$exitin:
 $exitout:			# Destination for exception recovery(?)
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. E  .. ..
 	ret $31,($28),1		# L0 .. .. ..	: L U L U
 
-$exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving
-	   random kernel data around to be copied elsewhere. */
-	nop
-	nop
-	nop
-	mov $0,$1
-$101:
-	EXO ( stb $31,0($6) )	# L
-	subq $1,1,$1		# E
-	addq $6,1,$6		# E
-	bgt $1,$101		# U
-	nop
-	nop
-	nop
-	ret $31,($28),1		# L0
-
 	.end __copy_user
arch/arc/kernel/signal.c  (+4 −4)

@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	struct user_regs_struct uregs;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (!err)
-		set_current_blocked(&set);
-
 	err |= __copy_from_user(&uregs.scratch,
 				&(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+	if (err)
+		return err;
 
+	set_current_blocked(&set);
 	regs->bta	= uregs.scratch.bta;
 	regs->lp_start	= uregs.scratch.lp_start;
 	regs->lp_end	= uregs.scratch.lp_end;

@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	regs->r0	= uregs.scratch.r0;
 	regs->sp	= uregs.scratch.sp;
 
-	return err;
+	return 0;
 }
 
 static inline int is_do_ss_needed(unsigned int magic)
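The ARC change is about ordering rather than zeroing: set_current_blocked() is a side effect on the task's signal state, and after the patch it runs only once both __copy_from_user() calls have been checked, so a faulting sigframe no longer alters the blocked mask. A rough stand-alone sketch of that control flow follows; copy_a(), copy_b() and apply_side_effect() are invented stand-ins, not the kernel functions.

/*
 * Illustration only: attempt both copies first, bail out on any error,
 * and apply the side effect only after everything has been validated.
 */
#include <stdio.h>

static int copy_a(int fail) { return fail ? 1 : 0; }	/* stands in for the sigmask copy */
static int copy_b(int fail) { return fail ? 1 : 0; }	/* stands in for the register copy */
static void apply_side_effect(void) { puts("blocked mask updated"); }

static int restore(int fail_a, int fail_b)
{
	int err;

	err  = copy_a(fail_a);
	err |= copy_b(fail_b);
	if (err)
		return err;		/* no state has been touched yet */

	apply_side_effect();		/* reached only if both copies succeeded */
	return 0;
}

int main(void)
{
	printf("good frame -> %d\n", restore(0, 0));	/* side effect runs, returns 0 */
	printf("bad frame  -> %d\n", restore(0, 1));	/* no side effect, returns 1 */
	return 0;
}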
arch/arm/include/asm/uaccess.h  (+6 −5)

@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)