Commit a0e60b20 authored by David Gibson, committed by Paul Mackerras

[PATCH] powerpc: Merge bitops.h



Here's a revised version.  This re-introduces the set_bits() function
from ppc64, which I removed because I thought it was unused (it exists
on no other arch).  In fact it is used in the powermac interrupt code
(but not on pSeries).
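
For reference, set_bits() atomically sets all the bits in a mask, a
multi-bit generalisation of set_bit().  A minimal sketch of the 32-bit
flavour in the style of the old ppc code below (the merged header
would generate the 64-bit variant too, e.g. via the LARXL/STCXL macros
described in the list below; constraints simplified here):

/* Sketch only: atomically set all bits in 'mask' at 'addr'. */
static __inline__ void set_bits(unsigned long mask, volatile unsigned long *addr)
{
	unsigned long old;

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	or	%0,%0,%2 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=m" (*addr)
	: "r" (mask), "r" (addr), "m" (*addr)
	: "cc");
}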

- We use LARXL/STCXL macros to generate the right (32- or 64-bit)
  instructions, similar to LDL/STL from ppc_asm.h, used in fpu.S
  (see the macro sketch after this list)

- ppc32 previously used a full "sync" barrier at the end of
  test_and_*_bit(), whereas ppc64 used an "isync".  The merged version
  uses "isync", since I believe that's sufficient (barrier placement
  is sketched after this list).

- The ppc64 versions of the minix_*() bitmap functions have changed
  semantics.  Previously on ppc64, these functions were big-endian
  (that is, bit 0 was the LSB in the first 64-bit, big-endian word).
  On ppc32 (and x86, for that matter), they were little-endian.  As far
  as I can tell, the big-endian usage was simply wrong - I guess
  no-one ever tried to use minixfs on ppc64 (a small illustration
  follows this list).

- On ppc32 find_next_bit() and find_next_zero_bit() are no longer
  inline (they were already out-of-line on ppc64).

- For ppc64, sched_find_first_bit() has moved from mmu_context.h to
  the merged bitops.  What it was doing in mmu_context.h in the first
  place, I have no idea.

- The fls() function is now implemented using the cntlzw instruction
  on ppc64, instead of generic_fls(), as it already was on ppc32 (see
  the sketch after this list).

- For ARCH=ppc, this patch requires adding arch/powerpc/lib to the
  arch/ppc/Makefile.  This in turn requires some changes to
  arch/powerpc/lib/Makefile which didn't correctly handle ARCH=ppc.
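
The LARXL/STCXL idea from the first point, roughly (a sketch; only the
LARXL/STCXL names are taken from this patch, and the exact definitions
in the merged bitops.h may differ in detail):

/* Pick load-and-reserve / store-conditional by register width. */
#ifdef __powerpc64__
#define LARXL	"ldarx"		/* 64-bit load and reserve */
#define STCXL	"stdcx."	/* 64-bit store conditional */
#else
#define LARXL	"lwarx"		/* 32-bit load and reserve */
#define STCXL	"stwcx."	/* 32-bit store conditional */
#endif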
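
Barrier placement in the merged test_and_*_bit() then looks roughly
like this (a sketch; EIEIO_ON_SMP/ISYNC_ON_SMP are assumed helper
names expanding to barrier strings, not taken from this patch, and the
PPC405 workaround is omitted):

/* Sketch: "isync" after the reservation loop instead of a full "sync". */
static __inline__ int test_and_set_bit(unsigned long nr,
				       volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));
	unsigned long *p = ((unsigned long *)addr) + (nr / BITS_PER_LONG);

	__asm__ __volatile__(
	EIEIO_ON_SMP			/* order prior stores */
"1:"	LARXL "	%0,0,%4\n"
"	or	%1,%0,%3\n"
	STCXL "	%1,0,%4\n"
"	bne-	1b"
	ISYNC_ON_SMP			/* was a full "sync" on ppc32 */
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc", "memory");

	return (old & mask) != 0;
}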
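
The minix endianness point, concretely: with little-endian bitmap
numbering (the ppc32/x86 convention the merge adopts), bit N lives in
byte N/8 at position N%8 of that byte.  A hypothetical illustration
(the buffer here is invented for the example):

/* Only one bit is set: the LSB+1 of byte 0, i.e. bit 1 under LE numbering. */
unsigned char map[8] = { 0x02 };

minix_test_bit(1, map);	/* non-zero on ppc32, x86, and now ppc64 */
/* Under the old ppc64 scheme the map was read as big-endian 64-bit
 * words, so this same memory byte held bit 57, not bit 1. */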
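
And fls() via cntlzw, essentially the ppc32 version the merge keeps
(cntlzw counts the leading zero bits of a 32-bit word):

static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;	/* fls(1) == 1, fls(0x80000000) == 32, fls(0) == 0 */
}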

Built and running on G5.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 031ef0a7
+0 −9
@@ -81,15 +81,6 @@ EXPORT_SYMBOL(_prep_type);
 EXPORT_SYMBOL(ucSystemType);
 #endif
 
-#if !defined(__INLINE_BITOPS)
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-#endif /* __INLINE_BITOPS */
-
 EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strncpy);
 EXPORT_SYMBOL(strcat);
+5 −4
@@ -3,13 +3,14 @@
 #
 
 ifeq ($(CONFIG_PPC_MERGE),y)
-obj-y			:= string.o
+obj-y			:= string.o strcase.o
+obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
 endif
 
-obj-y			+= strcase.o
-obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
+obj-y			+= bitops.o
 obj-$(CONFIG_PPC64)	+= checksum_64.o copypage_64.o copyuser_64.o \
-			   memcpy_64.o usercopy_64.o mem_64.o string.o
+			   memcpy_64.o usercopy_64.o mem_64.o string.o \
+			   strcase.o
 obj-$(CONFIG_PPC_ISERIES) += e2a.o
 obj-$(CONFIG_XMON)	+= sstep.o

+50 −47
 /*
  * These are too big to be inlined.
  */
 
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
 #include <asm/byteorder.h>
-#include <asm/bitops.h>
 
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 			    unsigned long offset)
 {
-	const unsigned long *p = addr + (offset >> 6);
-	unsigned long result = offset & ~63UL;
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= 63UL;
+	offset %= BITS_PER_LONG;
 	if (offset) {
 		tmp = *(p++);
-		tmp |= ~0UL >> (64 - offset);
-		if (size < 64)
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
 			goto found_first;
-		if (~tmp)
+		if (tmp)
 			goto found_middle;
-		size -= 64;
-		result += 64;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
-	while (size & ~63UL) {
-		if (~(tmp = *(p++)))
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
 			goto found_middle;
-		result += 64;
-		size -= 64;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
 	if (!size)
 		return result;
 	tmp = *p;
 
 found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)	/* Are any bits zero? */
+	tmp &= (~0UL >> (64 - size));
+	if (tmp == 0UL)		/* Are any bits set? */
 		return result + size;	/* Nope. */
 found_middle:
-	return result + ffz(tmp);
+	return result + __ffs(tmp);
 }
+EXPORT_SYMBOL(find_next_bit);
 
-EXPORT_SYMBOL(find_next_zero_bit);
-
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 				 unsigned long offset)
 {
-	const unsigned long *p = addr + (offset >> 6);
-	unsigned long result = offset & ~63UL;
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= 63UL;
+	offset %= BITS_PER_LONG;
 	if (offset) {
 		tmp = *(p++);
-		tmp &= (~0UL << offset);
-		if (size < 64)
+		tmp |= ~0UL >> (BITS_PER_LONG - offset);
+		if (size < BITS_PER_LONG)
 			goto found_first;
-		if (tmp)
+		if (~tmp)
 			goto found_middle;
-		size -= 64;
-		result += 64;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
-	while (size & ~63UL) {
-		if ((tmp = *(p++)))
+	while (size & ~(BITS_PER_LONG-1)) {
+		if (~(tmp = *(p++)))
 			goto found_middle;
-		result += 64;
-		size -= 64;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
 	if (!size)
 		return result;
 	tmp = *p;
 
 found_first:
-	tmp &= (~0UL >> (64 - size));
-	if (tmp == 0UL)		/* Are any bits set? */
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
 		return result + size;	/* Nope. */
 found_middle:
-	return result + __ffs(tmp);
+	return result + ffz(tmp);
 }
-
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
 
 static inline unsigned int ext2_ilog2(unsigned int x)
 {
@@ -106,8 +110,8 @@ static inline unsigned int ext2_ffz(unsigned int x)
 	return rc;
 }
 
-unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size,
-				    unsigned long offset)
+unsigned long find_next_zero_le_bit(const unsigned long *addr,
+				    unsigned long size, unsigned long offset)
 {
 	const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5);
 	unsigned int result = offset & ~31;
@@ -143,5 +147,4 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size,
 found_middle:
 	return result + ext2_ffz(tmp);
 }
-
 EXPORT_SYMBOL(find_next_zero_le_bit);
+2 −1
@@ -66,7 +66,8 @@ head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
 core-y				+= arch/ppc/kernel/ arch/powerpc/kernel/ \
 				   arch/ppc/platforms/ \
 				   arch/ppc/mm/ arch/ppc/lib/ \
-				   arch/ppc/syslib/ arch/powerpc/sysdev/
+				   arch/ppc/syslib/ arch/powerpc/sysdev/ \
+				   arch/powerpc/lib/
 core-$(CONFIG_4xx)		+= arch/ppc/platforms/4xx/
 core-$(CONFIG_83xx)		+= arch/ppc/platforms/83xx/
 core-$(CONFIG_85xx)		+= arch/ppc/platforms/85xx/

arch/ppc/kernel/bitops.c

deleted file mode 100644
+0 −126
/*
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>

/*
 * If the bitops are not inlined in bitops.h, they are defined here.
 *  -- paulus
 */
#if !__INLINE_BITOPS
void set_bit(int nr, volatile void * addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	
	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%3 \n\
	or	%0,%0,%2 \n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc" );
}

void clear_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%3 \n\
	andc	%0,%0,%2 \n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

void change_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%3 \n\
	xor	%0,%0,%2 \n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%4 \n\
	or	%1,%0,%3 \n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%4 \n\
	andc	%1,%0,%3 \n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%4 \n\
	xor	%1,%0,%3 \n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}
#endif /* !__INLINE_BITOPS */