
Commit 13d2a83d authored by Treehugger Robot, committed by Automerger Merge Worker

Merge "Collect Parcel statistics using std::atomics." am: fa05b521 am: 34a56502

Original change: https://android-review.googlesource.com/c/platform/frameworks/native/+/1479937

Change-Id: I10ae1d36de0e8cfec8044171131cdbbf34e5b814
parents 391a495c 34a56502
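
In short, the change drops the pthread mutex that guarded two global allocation-statistics counters and makes the counters std::atomic<size_t> instead. A minimal standalone sketch of the pattern (the names and main() below are illustrative, not the actual Parcel code):

#include <atomic>
#include <cstddef>
#include <cstdio>

// Stand-ins for gParcelGlobalAllocSize/gParcelGlobalAllocCount; statics
// of atomic type start at zero, so no explicit initializer is needed.
static std::atomic<std::size_t> gAllocSize;
static std::atomic<std::size_t> gAllocCount;

void trackAlloc(std::size_t capacity) {
    gAllocSize += capacity;  // atomic read-modify-write (fetch_add)
    gAllocCount++;           // likewise; no lock required
}

void trackFree(std::size_t capacity) {
    gAllocSize -= capacity;
    gAllocCount--;
}

int main() {
    trackAlloc(128);
    trackFree(64);
    std::printf("size=%zu count=%zu\n", gAllocSize.load(), gAllocCount.load());
    return 0;
}

Each statement on an atomic is itself atomic, but the two counters are no longer updated as one unit; for monitoring-only statistics that tradeoff is usually acceptable.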
libs/binder/Parcel.cpp  +12 −31
@@ -77,9 +77,8 @@ namespace android {
 // many things compile this into prebuilts on the stack
 static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);
 
-static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
-static size_t gParcelGlobalAllocSize = 0;
-static size_t gParcelGlobalAllocCount = 0;
+static std::atomic<size_t> gParcelGlobalAllocCount;
+static std::atomic<size_t> gParcelGlobalAllocSize;
 
 static size_t gMaxFds = 0;
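
A note on the new declarations above: the explicit "= 0" initializers could be dropped because objects with static storage duration are zero-initialized, so the atomics start at 0. A tiny sketch with an illustrative name:

#include <atomic>
#include <cassert>
#include <cstddef>

static std::atomic<std::size_t> gCounter;  // static storage: starts at 0

int main() {
    assert(gCounter.load() == 0);
    return 0;
}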

@@ -275,17 +274,11 @@ Parcel::~Parcel()
 }
 
 size_t Parcel::getGlobalAllocSize() {
-    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-    size_t size = gParcelGlobalAllocSize;
-    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
-    return size;
+    return gParcelGlobalAllocSize.load();
 }
 
 size_t Parcel::getGlobalAllocCount() {
-    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-    size_t count = gParcelGlobalAllocCount;
-    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
-    return count;
+    return gParcelGlobalAllocCount.load();
 }
 
 const uint8_t* Parcel::data() const
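
The new getters read with .load(), which defaults to std::memory_order_seq_cst. For counters that are purely informational, relaxed loads are a common alternative; this sketch shows that variant under the assumption that no ordering with other data is needed (it is not what the commit itself does):

#include <atomic>
#include <cstddef>
#include <cstdio>

static std::atomic<std::size_t> gSize;

// Still an atomic, tear-free read; relaxed just drops the
// synchronizes-with guarantees that a statistics read does not need.
std::size_t getSizeRelaxed() {
    return gSize.load(std::memory_order_relaxed);
}

int main() {
    gSize.store(42, std::memory_order_relaxed);
    std::printf("%zu\n", getSizeRelaxed());
    return 0;
}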
@@ -2630,16 +2623,8 @@ void Parcel::freeDataNoInit()
         releaseObjects();
         if (mData) {
             LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
-            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-            if (mDataCapacity <= gParcelGlobalAllocSize) {
-              gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
-            } else {
-              gParcelGlobalAllocSize = 0;
-            }
-            if (gParcelGlobalAllocCount > 0) {
-              gParcelGlobalAllocCount--;
-            }
-            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
+            gParcelGlobalAllocSize -= mDataCapacity;
+            gParcelGlobalAllocCount--;
             free(mData);
         }
         if (mObjects) free(mObjects);
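
The deleted block clamped the size at zero and only decremented a positive count; under the mutex, that check-then-update was atomic as a unit. On an atomic, the same guard would be racy anyway, and since each decrement here pairs with an earlier increment, the unconditional -=/-- stays balanced. A sketch of the pitfall, plus a lock-free clamped decrement if a guard were ever wanted again (hypothetical helpers, not part of the commit):

#include <atomic>
#include <cstddef>

static std::atomic<std::size_t> gCount;

// Racy on an atomic: another thread can change gCount between the
// load and the decrement, so the guard does not prevent underflow.
void guardedDecrementRacy() {
    if (gCount.load() > 0) {
        gCount--;
    }
}

// Lock-free clamped decrement via compare-exchange: retries until the
// decrement applies to the value actually observed, or stops at zero.
void guardedDecrementClamped() {
    std::size_t cur = gCount.load();
    while (cur > 0 && !gCount.compare_exchange_weak(cur, cur - 1)) {
        // compare_exchange_weak reloaded cur on failure; retry
    }
}

int main() {
    gCount = 2;
    guardedDecrementRacy();
    guardedDecrementClamped();
    return 0;
}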
@@ -2685,13 +2670,15 @@ status_t Parcel::restartWrite(size_t desired)
 
     if (data) {
         LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-        gParcelGlobalAllocSize += desired;
-        gParcelGlobalAllocSize -= mDataCapacity;
+        if (mDataCapacity > desired) {
+            gParcelGlobalAllocSize -= (mDataCapacity - desired);
+        } else {
+            gParcelGlobalAllocSize += (desired - mDataCapacity);
+        }
+
         if (!mData) {
             gParcelGlobalAllocCount++;
         }
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
         mData = data;
         mDataCapacity = desired;
     }
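
Why the branch rather than the old two-step "+= desired; -= mDataCapacity": each statement is now an independent atomic update, so between the two a concurrent reader would briefly see the total inflated by the old capacity (and the intermediate sum could in principle wrap size_t). Computing the net difference applies the change in a single read-modify-write. A sketch of the idea with illustrative names:

#include <atomic>
#include <cstddef>

static std::atomic<std::size_t> gSize;

// Apply a capacity change as one atomic delta so concurrent readers
// never observe the transient oldCap + newCap total.
void trackResize(std::size_t oldCap, std::size_t newCap) {
    if (oldCap > newCap) {
        gSize -= (oldCap - newCap);  // single fetch_sub
    } else {
        gSize += (newCap - oldCap);  // single fetch_add
    }
}

int main() {
    gSize = 100;
    trackResize(100, 40);   // gSize == 40
    trackResize(40, 160);   // gSize == 160
    return 0;
}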
@@ -2779,10 +2766,8 @@ status_t Parcel::continueWrite(size_t desired)
         mOwner = nullptr;
 
         LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
         gParcelGlobalAllocSize += desired;
         gParcelGlobalAllocCount++;
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
 
         mData = data;
         mObjects = objects;
@@ -2830,10 +2815,8 @@ status_t Parcel::continueWrite(size_t desired)
             if (data) {
                 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                         desired);
-                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                 gParcelGlobalAllocSize += desired;
                 gParcelGlobalAllocSize -= mDataCapacity;
-                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                 mData = data;
                 mDataCapacity = desired;
             } else {
@@ -2865,10 +2848,8 @@ status_t Parcel::continueWrite(size_t desired)
         }
 
         LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
         gParcelGlobalAllocSize += desired;
         gParcelGlobalAllocCount++;
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
 
         mData = data;
         mDataSize = mDataPos = 0;