Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b26b5ef5 authored by Linus Torvalds
Browse files
Pull more misc uaccess and vfs updates from Al Viro:
 "The rest of the stuff from -next (more uaccess work) + assorted fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  score: traps: Add missing include file to fix build error
  fs/super.c: don't fool lockdep in freeze_super() and thaw_super() paths
  fs/super.c: fix race between freeze_super() and thaw_super()
  overlayfs: Fix setting IOP_XATTR flag
  iov_iter: kernel-doc import_iovec() and rw_copy_check_uvector()
  blackfin: no access_ok() for __copy_{to,from}_user()
  arm64: don't zero in __copy_from_user{,_inatomic}
  arm: don't zero in __copy_from_user_inatomic()/__copy_from_user()
  arc: don't leak bits of kernel stack into coredump
  alpha: get rid of tail-zeroing in __copy_user()
parents 87dbe42a 2692a71b
5 changed files (diff hunks reconstructed below; file names inferred from hunk content — verify against the commit):
- arch/alpha/include/asm/uaccess.h (+5 −4)
- arch/alpha/lib/copy_user.S (+1 −15)
- arch/alpha/lib/ev6-copy_user.S (+1 −22)
- arch/arc/kernel/signal.c (+4 −4)
- arch/blackfin/include/asm/uaccess.h (+6 −5)
arch/alpha/include/asm/uaccess.h (+5 −4) — inferred file name; verify against commit
@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
+	long res = n;
 	if (likely(__access_ok((unsigned long)from, n, get_fs())))
-		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
-	else
-		memset(to, 0, n);
-	return n;
+		res = __copy_from_user_inatomic(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 extern void __do_clear_user(void);
arch/alpha/lib/copy_user.S (+1 −15) — inferred file name; verify against commit
@@ -126,22 +126,8 @@ $65:
 	bis $31,$31,$0
 $41:
 $35:
-$exitout:
-	ret $31,($28),1
-
 $exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving 
-	   random kernel data around to be copied elsewhere.  */
-
-	mov $0,$1
-$101:
-	EXO ( ldq_u $2,0($6) )
-	subq $1,1,$1
-	mskbl $2,$6,$2
-	EXO ( stq_u $2,0($6) )
-	addq $6,1,$6
-	bgt $1,$101
+$exitout:
 	ret $31,($28),1
 
 	.end __copy_user
arch/alpha/lib/ev6-copy_user.S (+1 −22) — inferred file name; verify against commit
@@ -228,33 +228,12 @@ $dirtyentry:
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 
 $zerolength:
+$exitin:
 $exitout:			# Destination for exception recovery(?)
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. E  .. ..
 	ret $31,($28),1		# L0 .. .. ..	: L U L U
 
-$exitin:
-
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving 
-	   random kernel data around to be copied elsewhere.  */
-
-	nop
-	nop
-	nop
-	mov	$0,$1
-
-$101:
-	EXO ( stb $31,0($6) )	# L
-	subq $1,1,$1		# E
-	addq $6,1,$6		# E
-	bgt $1,$101		# U
-
-	nop
-	nop
-	nop
-	ret $31,($28),1		# L0
-
 	.end __copy_user
 	EXPORT_SYMBOL(__copy_user)
arch/arc/kernel/signal.c (+4 −4) — inferred file name; verify against commit
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	struct user_regs_struct uregs;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (!err)
-		set_current_blocked(&set);
-
 	err |= __copy_from_user(&uregs.scratch,
 				&(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+	if (err)
+		return err;
 
+	set_current_blocked(&set);
 	regs->bta	= uregs.scratch.bta;
 	regs->lp_start	= uregs.scratch.lp_start;
 	regs->lp_end	= uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	regs->r0	= uregs.scratch.r0;
 	regs->sp	= uregs.scratch.sp;
 
-	return err;
+	return 0;
 }
 
 static inline int is_do_ss_needed(unsigned int magic)
arch/blackfin/include/asm/uaccess.h (+6 −5) — inferred file name; verify against commit
@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)