
Commit 9793a8e0 authored by Elie Kheirallah, committed by Automerger Merge Worker

Merge "Fix for flaky test in binderLibTest" am: 1d06201d am: b8c51a24 am:...

Merge "Fix for flaky test in binderLibTest" am: 1d06201d am: b8c51a24 am: 3d47dfdc am: fee63697 am: 9ac519f8

Original change: https://android-review.googlesource.com/c/platform/frameworks/native/+/2109451



Change-Id: I1565e638d11a29c7cc4f406fe3a94c1749342fba
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
parents c090ab42 9ac519f8
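
In substance, the merged change makes BinderLibTest.ThreadPoolAvailableThreads tolerant of an in-flight thread spawn by accepting either kKernelThreads or kKernelThreads + 1, and leaves ProcessState::spawnPooledThread bumping mKernelStartedThreads under mThreadCountLock. It also renames the test service's blockMutex member to m_blockMutex, reworks the temporary-lock handler so its detached thread captures the delay by value and holds a strong sp to the service, and adds binderLibTest to the hwasan-presubmit group in TEST_MAPPING.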
libs/binder/ProcessState.cpp  +5 −2
@@ -35,14 +35,15 @@

#include <errno.h>
#include <fcntl.h>
-#include <mutex>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
-#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <unistd.h>
+
+#include <mutex>

#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2)
#define DEFAULT_MAX_BINDER_THREADS 15
@@ -399,7 +400,9 @@ void ProcessState::spawnPooledThread(bool isMain)
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = sp<PoolThread>::make(isMain);
        t->run(name.string());
+        pthread_mutex_lock(&mThreadCountLock);
        mKernelStartedThreads++;
+        pthread_mutex_unlock(&mThreadCountLock);
    }
}
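
The spawnPooledThread() hunk above keeps the mKernelStartedThreads update serialized behind mThreadCountLock, the same pthread mutex named in the diff. A minimal sketch of that guarded-counter pattern; ThreadCounter, mStarted, and onThreadSpawned are hypothetical names standing in for the ProcessState members, not binder API:

#include <cstddef>
#include <cstdio>
#include <pthread.h>

// Hypothetical stand-in for the ProcessState members touched above: a counter
// shared across threads, guarded by a pthread mutex.
struct ThreadCounter {
    pthread_mutex_t mLock = PTHREAD_MUTEX_INITIALIZER;
    size_t mStarted = 0;

    void onThreadSpawned() {
        // Same shape as the patched spawnPooledThread(): lock, bump, unlock.
        pthread_mutex_lock(&mLock);
        mStarted++;
        pthread_mutex_unlock(&mLock);
    }

    size_t count() {
        pthread_mutex_lock(&mLock);
        size_t n = mStarted;
        pthread_mutex_unlock(&mLock);
        return n;
    }
};

int main() {
    ThreadCounter counter;
    counter.onThreadSpawned();
    std::printf("started threads: %zu\n", counter.count());
    return 0;
}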

libs/binder/TEST_MAPPING  +5 −0
@@ -83,5 +83,10 @@
    {
      "name": "rustBinderSerializationTest"
    }
  ],
 "hwasan-presubmit": [
    {
      "name": "binderLibTest"
    }
  ]
}
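
The added block registers binderLibTest with the hwasan-presubmit test group, so the test also runs in HWASan-instrumented presubmit builds alongside whatever groups already reference it in this file.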
libs/binder/tests/binderLibTest.cpp  +10 −9
@@ -1280,7 +1280,7 @@ TEST_F(BinderLibTest, ThreadPoolAvailableThreads) {
                StatusEq(NO_ERROR));
    replyi = reply.readInt32();
    // No more than 16 threads should exist.
-    EXPECT_EQ(replyi, kKernelThreads + 1);
+    EXPECT_TRUE(replyi == kKernelThreads || replyi == kKernelThreads + 1);
}

size_t epochMillis() {
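
The assertion change above is the flakiness fix named in the commit message: the query can arrive while the most recently spawned pool thread is still being registered, so requiring exactly kKernelThreads + 1 intermittently fails, while accepting either value does not. A small self-contained illustration of the same race and the tolerant check, using plain std::thread and std::atomic rather than the binder test itself:

#include <atomic>
#include <cassert>
#include <thread>

int main() {
    std::atomic<int> started{0};
    const int before = started.load();

    // The worker registers itself some time after it has been launched,
    // just as a freshly spawned binder thread races with the test's query.
    std::thread worker([&] { started.fetch_add(1); });

    // An immediate reader may see the old or the new count, so an exact
    // equality check is flaky; accepting either value is stable.
    int observed = started.load();
    assert(observed == before || observed == before + 1);

    worker.join();
    return 0;
}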
@@ -1726,11 +1726,11 @@ public:
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_PROCESS_LOCK: {
-                blockMutex.lock();
+                m_blockMutex.lock();
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_LOCK_UNLOCK: {
-                std::lock_guard<std::mutex> _l(blockMutex);
+                std::lock_guard<std::mutex> _l(m_blockMutex);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_UNLOCK_AFTER_MS: {
@@ -1738,10 +1738,11 @@ public:
                return unlockInMs(ms);
            }
            case BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK: {
-                blockMutex.lock();
-                std::thread t([&] {
-                    unlockInMs(data.readInt32());
-                }); // start local thread to unlock in 1s
+                m_blockMutex.lock();
+                sp<BinderLibTestService> thisService = this;
+                int32_t value = data.readInt32();
+                // start local thread to unlock in 1s
+                std::thread t([=] { thisService->unlockInMs(value); });
                t.detach();
                return NO_ERROR;
            }
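
The rewritten BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK handler reads the delay out of the Parcel before starting the worker and captures a strong sp<BinderLibTestService> by value, so the detached thread no longer touches data or the raw this after the transaction returns. A minimal sketch of that capture-by-value pattern with standard-library types; std::shared_ptr stands in for android::sp, and Service, doLater, and onTimerFired are hypothetical names:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <thread>

// Read the request data up front, capture it and a strong reference to the
// owning object by value, then detach the worker.
struct Service : std::enable_shared_from_this<Service> {
    void doLater(int32_t delayMs) {
        auto self = shared_from_this();       // keeps *this alive for the worker
        std::thread([self, delayMs] {         // capture by value, not [&]
            std::this_thread::sleep_for(std::chrono::milliseconds(delayMs));
            self->onTimerFired();
        }).detach();
    }

    void onTimerFired() { std::printf("fired\n"); }
};

int main() {
    // The temporary shared_ptr is released immediately, but the copy held by
    // the lambda keeps the Service alive until the deferred call runs.
    std::make_shared<Service>()->doLater(10);
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    return 0;
}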
@@ -1752,7 +1753,7 @@ public:

    status_t unlockInMs(int32_t ms) {
        usleep(ms * 1000);
-        blockMutex.unlock();
+        m_blockMutex.unlock();
        return NO_ERROR;
    }

@@ -1766,7 +1767,7 @@ private:
    sp<IBinder> m_strongRef;
    sp<IBinder> m_callback;
    bool m_exitOnDestroy;
-    std::mutex blockMutex;
+    std::mutex m_blockMutex;
};

int run_server(int index, int readypipefd, bool usePoll)