Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 04ed7d9c authored by Linus Torvalds
Browse files
Pull sparc fixes from David Miller:
 "Several sparc64 bug fixes here:

  1) Make the user copy routines on sparc64 return a properly accurate
     residual length when an exception occurs.

  2) We can get enormous kernel TLB range flush requests from vmalloc
     unmaps, so handle these more gracefully by doing full flushes
     instead of going page-by-page.

  3) Cope properly with negative branch offsets in sparc jump-label
     support, from James Clarke.

  4) Some old-style decl GCC warning fixups from Tobias Klauser"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Handle extremely large kernel TLB range flushes more gracefully.
  sparc64: Fix illegal relative branches in hypervisor patched TLB cross-call code.
  sparc64: Fix instruction count in comment for __hypervisor_flush_tlb_pending.
  sparc64: Handle extremely large kernel TSB range flushes sanely.
  sparc: Handle negative offsets in arch_jump_label_transform
  sparc64: Fix illegal relative branches in hypervisor patched TLB code.
  sparc64: Delete now unused user copy fixup functions.
  sparc64: Delete now unused user copy assembler helpers.
  sparc64: Convert U3copy_{from,to}_user to accurate exception reporting.
  sparc64: Convert NG2copy_{from,to}_user to accurate exception reporting.
  sparc64: Convert NGcopy_{from,to}_user to accurate exception reporting.
  sparc64: Convert NG4copy_{from,to}_user to accurate exception reporting.
  sparc64: Convert U1copy_{from,to}_user to accurate exception reporting.
  sparc64: Convert GENcopy_{from,to}_user to accurate exception reporting.
  sparc64: Convert copy_in_user to accurate exception reporting.
  sparc64: Prepare to move to more saner user copy exception handling.
  sparc64: Delete __ret_efault.
  sparc32: Fix old style declaration GCC warnings
  sparc64: Fix old style declaration GCC warnings
  sparc64: Setup a scheduling domain for highest level cache.
parents 2a26d99b a74ad5e6
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -24,9 +24,10 @@ typedef struct {
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	unsigned short	sock_id;
+	unsigned short	sock_id;	/* physical package */
 	unsigned short	core_id;
-	int		proc_id;
+	unsigned short  max_cache_id;	/* groupings of highest shared cache */
+	unsigned short	proc_id;	/* strand (aka HW thread) id */
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
+1 −1
Original line number Diff line number Diff line
@@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	st		%%g0, [%0]"
+6 −6
Original line number Diff line number Diff line
@@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

static void inline arch_read_lock(arch_rwlock_t *lock)
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

@@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
	: "memory");
}

static int inline arch_read_trylock(arch_rwlock_t *lock)
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int tmp1, tmp2;

@@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
	return tmp1;
}

static void inline arch_read_unlock(arch_rwlock_t *lock)
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

@@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
	: "memory");
}

static void inline arch_write_lock(arch_rwlock_t *lock)
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

@@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
	: "memory");
}

static void inline arch_write_unlock(arch_rwlock_t *lock)
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	stw		%%g0, [%0]"
@@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
	: "memory");
}

static int inline arch_write_trylock(arch_rwlock_t *lock)
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

+7 −1
Original line number Diff line number Diff line
@@ -44,14 +44,20 @@ int __node_distance(int, int);
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
 #define topology_core_cpumask(cpu)		(&cpu_core_sib_map[cpu])
+#define topology_core_cache_cpumask(cpu)	(&cpu_core_sib_cache_map[cpu])
 #define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern cpumask_t cpu_core_sib_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
+
+/**
+ * Return cores that shares the last level cache.
+ */
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-        return &cpu_core_map[cpu];
+	return &cpu_core_sib_cache_map[cpu];
 }
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
+3 −25
Original line number Diff line number Diff line
@@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
 	return 1;
 }
 
-void __ret_efault(void);
 void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
@@ -189,55 +188,34 @@ int __get_user_bad(void);
 unsigned long __must_check ___copy_from_user(void *to,
 					     const void __user *from,
 					     unsigned long size);
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
-				   unsigned long size);
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret;
-
 	check_object_size(to, size, false);
 
-	ret = ___copy_from_user(to, from, size);
-	if (unlikely(ret))
-		ret = copy_from_user_fixup(to, from, size);
-
-	return ret;
+	return ___copy_from_user(to, from, size);
 }
 #define __copy_from_user copy_from_user
 
 unsigned long __must_check ___copy_to_user(void __user *to,
 					   const void *from,
 					   unsigned long size);
-unsigned long copy_to_user_fixup(void __user *to, const void *from,
-				 unsigned long size);
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-	unsigned long ret;
-
 	check_object_size(from, size, true);
 
-	ret = ___copy_to_user(to, from, size);
-	if (unlikely(ret))
-		ret = copy_to_user_fixup(to, from, size);
-	return ret;
+	return ___copy_to_user(to, from, size);
 }
 #define __copy_to_user copy_to_user
 
 unsigned long __must_check ___copy_in_user(void __user *to,
 					   const void __user *from,
 					   unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
-				 unsigned long size);
 static inline unsigned long __must_check
 copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_in_user(to, from, size);
-
-	if (unlikely(ret))
-		ret = copy_in_user_fixup(to, from, size);
-	return ret;
+	return ___copy_in_user(to, from, size);
 }
 #define __copy_in_user copy_in_user
 
Loading