Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 346ce1d7 authored by Linus Torvalds
Browse files
Pull parisc fixes from Helge Deller:
 "Al Viro reported that - in case of read faults - our copy_from_user()
  implementation may claim to have copied more bytes than it actually
  did. In order to fix this bug and because of the way how gcc optimizes
  register usage for inline assembly in C code, we had to replace our
  pa_memcpy() function with a pure assembler implementation.

  While fixing the memcpy bug we noticed some other issues with our
  get_user() and put_user() functions, e.g. nested faults may return
  wrong data. This is now fixed by a common fixup handler for
  get_user/put_user in the exception handler which additionally makes
  generated code smaller and faster.

  The third patch is a trivial one-line fix for a patch which went in
  during 4.11-rc and which avoids stalled CPU warnings after power
  shutdown (for parisc machines which can't plug power off themselves).

  Due to the rewrite of pa_memcpy() into assembly this patch got bigger
  than what I wanted to have sent at this stage.

  Those patches have been running in production during the last few days
  on our debian build servers without any further issues"

* 'parisc-4.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Avoid stalled CPU warnings after system shutdown
  parisc: Clean up fixup routines for get_user()/put_user()
  parisc: Fix access fault handling in pa_memcpy()
parents 7d34ddbe 476e75a4
Loading
Loading
Loading
Loading
+34 −25
Original line number Diff line number Diff line
@@ -64,6 +64,15 @@ struct exception_table_entry {
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
@@ -91,7 +100,7 @@ struct exception_data {
#define __get_user(x, ptr)                               \
({                                                       \
	register long __gu_err __asm__ ("r8") = 0;       \
	register long __gu_val __asm__ ("r9") = 0;       \
	register long __gu_val;				 \
							 \
	load_sr2();					 \
	switch (sizeof(*(ptr))) {			 \
@@ -107,22 +116,23 @@ struct exception_data {
})

#define __get_user_asm(ldx, ptr)                        \
	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"	\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");
		: "r"(ptr), "1"(__gu_err));

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr) 				\
	__asm__("\n1:\tldw 0(%%sr2,%2),%0"		\
		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");
		: "r"(ptr), "1"(__gu_err));

#endif /* !defined(CONFIG_64BIT) */

@@ -148,32 +158,31 @@ struct exception_data {
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)                         \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"	    \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
		"1: " stx " %2,0(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
		: "r1")
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {	    	    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%%sr2,%1)"		    \
		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		"1: stw %2,0(%%sr2,%1)\n"		    \
		"2: stw %R2,4(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err) \
		: "r1");				    \
		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */
+0 −10
Original line number Diff line number Diff line
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);

/* Global fixups - defined as int to avoid creation of function pointers */
extern int fixup_get_user_skip_1;
extern int fixup_get_user_skip_2;
extern int fixup_put_user_skip_1;
extern int fixup_put_user_skip_2;
EXPORT_SYMBOL(fixup_get_user_skip_1);
EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);
EXPORT_SYMBOL(fixup_put_user_skip_2);

#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
+2 −0
Original line number Diff line number Diff line
@@ -143,6 +143,8 @@ void machine_power_off(void)
	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");

	/* prevent soft lockup/stalled CPU messages for endless loop. */
	rcu_sysrq_start();
	for (;;);
}

+1 −1
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@
# Makefile for parisc-specific library files
#

lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
	   ucmpdi2.o delay.o

obj-y	:= iomap.o

arch/parisc/lib/fixup.S

deleted 100644 → 0
+0 −98
Original line number Diff line number Diff line
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 *  Copyright (C) 2004  Randolph Chung <tausq@debian.org>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * 
 * Fixup routines for kernel exception handling.
 */
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>

/*
 * get_fault_ip t1 t2
 *
 * Loads the faulting instruction address (exception_data.fault_ip,
 * saved by the page fault handler) into \t1, and restores the kernel
 * global pointer %r27 from exception_data.fault_gp.
 * \t2 is clobbered as a scratch register.  %r1 is clobbered by the
 * addil/LDREG sequences.
 *
 * On SMP the per-cpu copy of exception_data must be located via
 * __per_cpu_offset[smp_processor_id()]; on UP the single static copy
 * is addressed directly.
 */
#ifdef CONFIG_SMP
	.macro  get_fault_ip t1 t2
	loadgp
	/* t1 = &__per_cpu_offset[0] */
	addil LT%__per_cpu_offset,%r27
	LDREG RT%__per_cpu_offset(%r1),\t1
	/* t2 = smp_processor_id() */
	mfctl 30,\t2
	ldw TI_CPU(\t2),\t2
#ifdef CONFIG_64BIT
	/* zero-extend the 32-bit cpu number to 64 bits */
	extrd,u \t2,63,32,\t2
#endif
	/* t2 = &__per_cpu_offset[smp_processor_id()]; */
	LDREGX \t2(\t1),\t2 
	addil LT%exception_data,%r27
	LDREG RT%exception_data(%r1),\t1
	/* t1 = this_cpu_ptr(&exception_data) */
	add,l \t1,\t2,\t1
	/* %r27 = t1->fault_gp - restore gp */
	LDREG EXCDATA_GP(\t1), %r27
	/* t1 = t1->fault_ip */
	LDREG EXCDATA_IP(\t1), \t1
	.endm
#else
	.macro  get_fault_ip t1 t2
	loadgp
	/* t1 = this_cpu_ptr(&exception_data) */
	addil LT%exception_data,%r27
	LDREG RT%exception_data(%r2),\t2
	/* %r27 = t2->fault_gp - restore gp */
	LDREG EXCDATA_GP(\t2), %r27
	/* t1 = t2->fault_ip */
	LDREG EXCDATA_IP(\t2), \t1
	.endm
#endif

	.level LEVEL

	.text
	.section .fixup, "ax"

	/*
	 * get_user() fixups: resume after the faulting load with
	 * -EFAULT in %r8 (error) and 0 in %r9 (zeroed result value).
	 *
	 * skip_1: the exception table points here when a single load
	 * faulted; resume at fault_ip + 4, i.e. one instruction past it.
	 */
ENTRY_CFI(fixup_get_user_skip_1)
	get_fault_ip %r1,%r8
	/* %r1 = address of the instruction after the faulting load */
	ldo 4(%r1), %r1
	ldi -EFAULT, %r8
	bv %r0(%r1)
	/* delay slot: zero the get_user() value register */
	copy %r0, %r9
ENDPROC_CFI(fixup_get_user_skip_1)

	/*
	 * skip_2: used when the first of two consecutive loads faulted
	 * (the 32-bit __get_user_asm64 case); resume at fault_ip + 8 to
	 * skip both load instructions.  %r8 = -EFAULT, %r9 = 0.
	 */
ENTRY_CFI(fixup_get_user_skip_2)
	get_fault_ip %r1,%r8
	/* %r1 = address two instructions past the faulting load */
	ldo 8(%r1), %r1
	ldi -EFAULT, %r8
	bv %r0(%r1)
	/* delay slot: zero the get_user() value register */
	copy %r0, %r9
ENDPROC_CFI(fixup_get_user_skip_2)

	/*
	 * put_user() fixups: resume after the faulting store with
	 * -EFAULT in %r8.  No value register to zero, so the error
	 * load sits in the branch delay slot.
	 *
	 * skip_1: single faulting store; resume at fault_ip + 4.
	 */
ENTRY_CFI(fixup_put_user_skip_1)
	get_fault_ip %r1,%r8
	/* %r1 = address of the instruction after the faulting store */
	ldo 4(%r1), %r1
	bv %r0(%r1)
	/* delay slot: set the error return */
	ldi -EFAULT, %r8
ENDPROC_CFI(fixup_put_user_skip_1)

	/*
	 * skip_2: first of two consecutive stores faulted (the 32-bit
	 * __put_user_asm64 case); resume at fault_ip + 8 to skip both
	 * store instructions, with -EFAULT in %r8.
	 */
ENTRY_CFI(fixup_put_user_skip_2)
	get_fault_ip %r1,%r8
	/* %r1 = address two instructions past the faulting store */
	ldo 8(%r1), %r1
	bv %r0(%r1)
	/* delay slot: set the error return */
	ldi -EFAULT, %r8
ENDPROC_CFI(fixup_put_user_skip_2)
Loading