
Commit 0bc248f9 authored by Andy McFadden, committed by Android Git Automerger

am ac322da6: Atomic/SMP update.

parents 2bdcf63c ac322da6
atomic-inline.h (new file): +101 −0
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_INLINE_H
#define ANDROID_CUTILS_ATOMIC_INLINE_H

/*
 * Inline declarations and macros for some special-purpose atomic
 * operations.  These are intended for rare circumstances where a
 * memory barrier needs to be issued inline rather than as a function
 * call.
 *
 * Most code should not use these.
 *
 * Anything that does include this file must set ANDROID_SMP to either
 * 0 or 1, indicating compilation for UP or SMP, respectively.
 */

#if !defined(ANDROID_SMP)
# error "Must define ANDROID_SMP before including atomic-inline.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Define the full memory barrier for an SMP system.  This is
 * platform-specific.
 */

#ifdef __arm__
#include <machine/cpu-features.h>

/*
 * For ARMv6K we need to issue a specific MCR instead of the DMB, since
 * that wasn't added until v7.  For anything older, SMP isn't relevant.
 * Since we don't have an ARMv6K to test with, we're not going to deal
 * with that now.
 *
 * The DMB instruction is found in the ARM and Thumb2 instruction sets.
 * This will fail on plain 16-bit Thumb.
 */
#if defined(__ARM_HAVE_DMB)
# define __android_membar_full_smp() \
    do { __asm__ __volatile__ ("dmb" ::: "memory"); } while (0)
#else
# define __android_membar_full_smp()  ARM_SMP_defined_but_no_DMB()
#endif

#elif defined(__i386__) || defined(__x86_64__)
/*
 * For recent x86, we can use the SSE2 mfence instruction.
 */
# define __android_membar_full_smp() \
    do { __asm__ __volatile__ ("mfence" ::: "memory"); } while (0)

#else
/*
 * Implementation not defined for this platform.  Hopefully we're building
 * in uniprocessor mode.
 */
# define __android_membar_full_smp()  SMP_barrier_not_defined_for_platform()
#endif


/*
 * Full barrier.  On uniprocessors this is just a compiler reorder barrier,
 * which ensures that the statements appearing above the barrier in the C/C++
 * code will be issued before the statements appearing below the barrier.
 *
 * For SMP this also includes a memory barrier instruction.  On an ARM
 * CPU this means that the current core will flush pending writes, wait
 * for pending reads to complete, and discard any cached reads that could
 * be stale.  Other CPUs may do less, but the end result is equivalent.
 */
#if ANDROID_SMP != 0
# define android_membar_full() __android_membar_full_smp()
#else
# define android_membar_full() \
    do { __asm__ __volatile__ ("" ::: "memory"); } while (0)
#endif

#ifdef __cplusplus
} // extern "C"
#endif

#endif // ANDROID_CUTILS_ATOMIC_INLINE_H
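
To see how this header is meant to be consumed, here is a minimal publisher sketch (illustrative only, not part of the commit; the <cutils/atomic-inline.h> install path and all names are assumptions):

#define ANDROID_SMP 1              /* normally injected by the build; see Android.mk below */
#include <cutils/atomic-inline.h>

static int g_data;
static int g_ready;

/* Make the write to g_data visible to other cores before the flag is set. */
void publish(int value)
{
    g_data = value;
    android_membar_full();         /* dmb/mfence on SMP builds, compiler barrier on UP */
    g_ready = 1;
}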
atomic.h: +21 −22
@@ -25,10 +25,8 @@ extern "C" {
#endif

/*
- * NOTE: memory shared between threads is synchronized by all atomic operations
- * below, this means that no explicit memory barrier is required: all reads or
- * writes issued before android_atomic_* operations are guaranteed to complete
- * before the atomic operation takes place.
+ * Unless otherwise noted, the operations below perform a full fence before
+ * the atomic operation on SMP systems ("release" semantics).
 */

void android_atomic_write(int32_t value, volatile int32_t* addr);
@@ -37,7 +35,6 @@ void android_atomic_write(int32_t value, volatile int32_t* addr);
 * all these atomic operations return the previous value
 */


int32_t android_atomic_inc(volatile int32_t* addr);
int32_t android_atomic_dec(volatile int32_t* addr);

@@ -48,29 +45,31 @@ int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
int32_t android_atomic_swap(int32_t value, volatile int32_t* addr);

/*
- * NOTE: Two "quasiatomic" operations on the exact same memory address
- * are guaranteed to operate atomically with respect to each other,
- * but no guarantees are made about quasiatomic operations mixed with
- * non-quasiatomic operations on the same address, nor about
- * quasiatomic operations that are performed on partially-overlapping
- * memory.
+ * cmpxchg returns zero if the new value was successfully written.  This
+ * will only happen when *addr == oldvalue.
+ *
+ * (The return value is inverted from implementations on other platforms, but
+ * matches the ARM ldrex/strex semantics.  Note also this is a compare-and-set
+ * operation, not a compare-and-exchange operation, since we don't return
+ * the original value.)
 */

-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr);
-int64_t android_quasiatomic_read_64(volatile int64_t* addr);
+int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
+        volatile int32_t* addr);

/*
- * cmpxchg return a non zero value if the exchange was NOT performed,
- * in other words if oldvalue != *addr
+ * Same basic operation as android_atomic_cmpxchg, but with "acquire"
+ * semantics.  The memory barrier, if required, is performed after the
+ * new value is stored.  Useful for acquiring a spin lock.
 */

-int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
+int android_atomic_acquire_cmpxchg(int32_t oldvalue, int32_t newvalue,
        volatile int32_t* addr);

-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
-        volatile int64_t* addr);


+/*
+ * Perform an atomic store with "release" semantics.  The memory barrier,
+ * if required, is performed before the store instruction.  Useful for
+ * releasing a spin lock.
+ */
+#define android_atomic_release_store android_atomic_write

#ifdef __cplusplus
} // extern "C"
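
The spin-lock use case called out in the comments above would look roughly like this (a sketch against the declared API, not code from this commit; g_lock and the function names are invented):

#include <cutils/atomic.h>

static volatile int32_t g_lock = 0;   /* 0 = free, 1 = held */

void spin_lock(void)
{
    /* acquire_cmpxchg returns 0 only if it moved the lock from 0 to 1;
     * the barrier after the store keeps the critical section from being
     * hoisted above the acquisition. */
    while (android_atomic_acquire_cmpxchg(0, 1, &g_lock) != 0) {
        /* spin */
    }
}

void spin_unlock(void)
{
    /* release store: barrier first, then the write that frees the lock */
    android_atomic_release_store(0, &g_lock);
}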
Android.mk: +11 −0
@@ -16,6 +16,13 @@
LOCAL_PATH := $(my-dir)
include $(CLEAR_VARS)

+ifeq ($(TARGET_CPU_SMP),true)
+    targetSmpFlag := -DANDROID_SMP=1
+else
+    targetSmpFlag := -DANDROID_SMP=0
+endif
+hostSmpFlag := -DANDROID_SMP=0
+
commonSources := \
	array.c \
	hashmap.c \
@@ -80,6 +87,7 @@ LOCAL_MODULE := libcutils
LOCAL_SRC_FILES := $(commonSources) $(commonHostSources)
LOCAL_LDLIBS := -lpthread
LOCAL_STATIC_LIBRARIES := liblog
+LOCAL_CFLAGS += $(hostSmpFlag)
include $(BUILD_HOST_STATIC_LIBRARY)


@@ -92,6 +100,7 @@ LOCAL_MODULE := libcutils
LOCAL_SRC_FILES := $(commonSources) $(commonHostSources) memory.c dlmalloc_stubs.c
LOCAL_LDLIBS := -lpthread
LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_CFLAGS += $(targetSmpFlag)
include $(BUILD_SHARED_LIBRARY)

else #!sim
@@ -114,12 +123,14 @@ endif # !arm

LOCAL_C_INCLUDES := $(KERNEL_HEADERS)
LOCAL_STATIC_LIBRARIES := liblog
+LOCAL_CFLAGS += $(targetSmpFlag)
include $(BUILD_STATIC_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE := libcutils
LOCAL_WHOLE_STATIC_LIBRARIES := libcutils
LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_CFLAGS += $(targetSmpFlag)
include $(BUILD_SHARED_LIBRARY)

endif #!sim
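
TARGET_CPU_SMP itself is expected to come from the device configuration; a hypothetical BoardConfig.mk fragment opting in would be:

# Build target code with real memory barriers (-DANDROID_SMP=1).
# Devices that leave this unset get the -DANDROID_SMP=0 path.
TARGET_CPU_SMP := true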
ARM assembly implementation: +6 −8
@@ -14,6 +14,8 @@
 * limitations under the License.
 */

+/* TODO: insert memory barriers on SMP */
+
#include <machine/cpu-features.h>

/*
@@ -43,6 +45,8 @@
	
	.global android_atomic_cmpxchg
	.type android_atomic_cmpxchg, %function
+	.global android_atomic_acquire_cmpxchg
+	.type android_atomic_acquire_cmpxchg, %function

/*
 * ----------------------------------------------------------------------------
@@ -237,7 +241,7 @@ android_atomic_or:

/* replaced swp instruction with ldrex/strex for ARMv6 & ARMv7 */
android_atomic_swap:
-#if defined (_ARM_HAVE_LDREX_STREX)
+#if defined (__ARM_HAVE_LDREX_STREX)
1:  ldrex   r2, [r1]
    strex   r3, r0, [r1]
    teq     r3, #0
@@ -256,6 +260,7 @@ android_atomic_swap:
 * output: r0 = 0 (xchg done) or non-zero (xchg not done)
 */

+android_atomic_acquire_cmpxchg:
android_atomic_cmpxchg:
    .fnstart
    .save {r4, lr}
@@ -282,10 +287,3 @@ android_atomic_cmpxchg:
    bx      lr
    .fnend
-/*
- * ----------------------------------------------------------------------------
- * android_atomic_cmpxchg_64
- * input: r0-r1=oldvalue, r2-r3=newvalue, arg4 (on stack)=address
- * output: r0 = 0 (xchg done) or non-zero (xchg not done)
- */
-/* TODO: NEED IMPLEMENTATION FOR THIS ARCHITECTURE */
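
For reference, the ldrex/strex compare-and-set shape this file relies on for ARMv6 and later looks like the sketch below (illustrative only; register assignments follow the ARM calling convention, r0 = oldvalue, r1 = newvalue, r2 = addr, and the file's convention of returning 0 in r0 on success):

example_cmpxchg:
1:  ldrex   r3, [r2]        @ exclusive load of *addr
    cmp     r3, r0          @ still the expected old value?
    bne     2f              @ no: fail without storing
    strex   r3, r1, [r2]    @ try to publish newvalue
    cmp     r3, #0          @ strex yields non-zero if exclusivity was lost
    bne     1b              @ lost the reservation: retry
    mov     r0, #0          @ success
    bx      lr
2:  mov     r0, #1          @ *addr changed under us
    bx      lr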
C implementation of the atomic routines: +3 −37
@@ -118,42 +118,8 @@ int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
    return result;
}

-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
-    int64_t oldValue;
-    pthread_mutex_t*  lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-
-    oldValue = *addr;
-    *addr    = value;
-
-    pthread_mutex_unlock(lock);
-    return oldValue;
-}

-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
-        volatile int64_t* addr) {
-    int result;
-    pthread_mutex_t*  lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-
-    if (*addr == oldvalue) {
-        *addr  = newvalue;
-        result = 0;
-    } else {
-        result = 1;
-    }
-    pthread_mutex_unlock(lock);
-    return result;
+int android_atomic_acquire_cmpxchg(int32_t oldvalue, int32_t newvalue,
+                           volatile int32_t* addr) {
+    return android_atomic_cmpxchg(oldvalue, newvalue, addr);
}
-int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
-    int64_t result;
-    pthread_mutex_t*  lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-    result = *addr;
-    pthread_mutex_unlock(lock);
-    return result;
-}
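
SWAP_LOCK() is defined outside this hunk; a plausible reconstruction of the address-sharded mutex table it names follows (the names, table size, and shift are illustrative, and static zero-initialization of the mutexes assumes a platform where PTHREAD_MUTEX_INITIALIZER is all zeroes, as on bionic):

#include <pthread.h>
#include <stdint.h>

#define SWAP_LOCK_COUNT  32U
static pthread_mutex_t _swap_locks[SWAP_LOCK_COUNT];

/* Hash the address into the table so that independent 64-bit variables
 * rarely contend on the same mutex. */
#define SWAP_LOCK(addr) \
    (&_swap_locks[((uintptr_t)(addr) >> 3) % SWAP_LOCK_COUNT])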