Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit af4ca715 authored by Steven Moreland's avatar Steven Moreland
Browse files

binderRpcTest: use waitpid

Actually reap child processes. This gives us stronger guarantees (that
everything can shut down) and it avoids 'kill'.

Bug: 186661301
Test: binderRpcTest
Change-Id: If10f00de845eb8097813b4edbf8e2b8ffdc90c5f
parent f5174273
Loading
Loading
Loading
Loading
+6 −4
Original line number Original line Diff line number Diff line
@@ -193,10 +193,12 @@ bool RpcServer::shutdown() {


    mShutdownTrigger->trigger();
    mShutdownTrigger->trigger();
    while (mJoinThreadRunning || !mConnectingThreads.empty() || !mSessions.empty()) {
    while (mJoinThreadRunning || !mConnectingThreads.empty() || !mSessions.empty()) {
        ALOGI("Waiting for RpcServer to shut down. Join thread running: %d, Connecting threads: "
        if (std::cv_status::timeout == mShutdownCv.wait_for(_l, std::chrono::seconds(1))) {
              "%zu, Sessions: %zu",
            ALOGE("Waiting for RpcServer to shut down (1s w/o progress). Join thread running: %d, "
                  "Connecting threads: "
                  "%zu, Sessions: %zu. Is your server deadlocked?",
                  mJoinThreadRunning, mConnectingThreads.size(), mSessions.size());
                  mJoinThreadRunning, mConnectingThreads.size(), mSessions.size());
        mShutdownCv.wait(_l);
        }
    }
    }


    // At this point, we know join() is about to exit, but the thread that calls
    // At this point, we know join() is about to exit, but the thread that calls
+2 −1
Original line number Original line Diff line number Diff line
@@ -199,7 +199,8 @@ void RpcSession::join(unique_fd client) {
                state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));
                state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));


        if (error != OK) {
        if (error != OK) {
            ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
            LOG_RPC_DETAIL("Binder connection thread closing w/ status %s",
                           statusToString(error).c_str());
            break;
            break;
        }
        }
    }
    }
+5 −2
Original line number Original line Diff line number Diff line
@@ -239,8 +239,11 @@ bool RpcState::rpcRec(const base::unique_fd& fd, const sp<RpcSession>& session,


    if (status_t status = session->mShutdownTrigger->interruptableReadFully(fd.get(), data, size);
    if (status_t status = session->mShutdownTrigger->interruptableReadFully(fd.get(), data, size);
        status != OK) {
        status != OK) {
        if (status != -ECANCELED) {
            ALOGE("Failed to read %s (%zu bytes) on fd %d, error: %s", what, size, fd.get(),
            ALOGE("Failed to read %s (%zu bytes) on fd %d, error: %s", what, size, fd.get(),
                  statusToString(status).c_str());
                  statusToString(status).c_str());
        }

        return false;
        return false;
    }
    }


+1 −0
Original line number Original line Diff line number Diff line
@@ -55,6 +55,7 @@ interface IBinderRpcTest {
    oneway void sleepMsAsync(int ms);
    oneway void sleepMsAsync(int ms);


    void die(boolean cleanup);
    void die(boolean cleanup);
    void scheduleShutdown();


    void useKernelBinderCallingId();
    void useKernelBinderCallingId();
}
}
+24 −14
Original line number Original line Diff line number Diff line
@@ -194,6 +194,18 @@ public:
            _exit(1);
            _exit(1);
        }
        }
    }
    }

    // Requests an asynchronous shutdown of the owning RpcServer.
    // Returns EX_NULL_POINTER if the server has already been destroyed,
    // Status::ok() once the shutdown thread has been scheduled.
    Status scheduleShutdown() override {
        // 'server' is held weakly (promote() is the wp<> API); the server
        // may already be gone by the time this transaction arrives.
        sp<RpcServer> strongServer = server.promote();
        if (strongServer == nullptr) {
            return Status::fromExceptionCode(Status::EX_NULL_POINTER);
        }
        // Run shutdown() on a detached thread; the by-copy capture keeps a
        // strong reference alive until shutdown completes. NOTE(review):
        // presumably detached so this call can return before the server
        // tears down the session servicing it — confirm against
        // RpcServer::shutdown() semantics.
        std::thread([=] {
            LOG_ALWAYS_FATAL_IF(!strongServer->shutdown(), "Could not shutdown");
        }).detach();
        return Status::ok();
    }

    Status useKernelBinderCallingId() override {
    Status useKernelBinderCallingId() override {
        // this is WRONG! It does not make sense when using RPC binder, and
        // this is WRONG! It does not make sense when using RPC binder, and
        // because it is SO wrong, and so much code calls this, it should abort!
        // because it is SO wrong, and so much code calls this, it should abort!
@@ -225,11 +237,13 @@ public:
            prctl(PR_SET_PDEATHSIG, SIGHUP);
            prctl(PR_SET_PDEATHSIG, SIGHUP);


            f(&mPipe);
            f(&mPipe);

            exit(0);
        }
        }
    }
    }
    ~Process() {
    ~Process() {
        if (mPid != 0) {
        if (mPid != 0) {
            kill(mPid, SIGKILL);
            waitpid(mPid, nullptr, 0);
        }
        }
    }
    }
    Pipe* getPipe() { return &mPipe; }
    Pipe* getPipe() { return &mPipe; }
@@ -290,11 +304,11 @@ struct BinderRpcTestProcessSession {
    sp<IBinderRpcTest> rootIface;
    sp<IBinderRpcTest> rootIface;


    // whether session should be invalidated by end of run
    // whether session should be invalidated by end of run
    bool expectInvalid = false;
    bool expectAlreadyShutdown = false;


    BinderRpcTestProcessSession(BinderRpcTestProcessSession&&) = default;
    BinderRpcTestProcessSession(BinderRpcTestProcessSession&&) = default;
    ~BinderRpcTestProcessSession() {
    ~BinderRpcTestProcessSession() {
        if (!expectInvalid) {
        if (!expectAlreadyShutdown) {
            std::vector<int32_t> remoteCounts;
            std::vector<int32_t> remoteCounts;
            // calling over any sessions counts across all sessions
            // calling over any sessions counts across all sessions
            EXPECT_OK(rootIface->countBinders(&remoteCounts));
            EXPECT_OK(rootIface->countBinders(&remoteCounts));
@@ -302,6 +316,8 @@ struct BinderRpcTestProcessSession {
            for (auto remoteCount : remoteCounts) {
            for (auto remoteCount : remoteCounts) {
                EXPECT_EQ(remoteCount, 1);
                EXPECT_EQ(remoteCount, 1);
            }
            }

            EXPECT_OK(rootIface->scheduleShutdown());
        }
        }


        rootIface = nullptr;
        rootIface = nullptr;
@@ -373,6 +389,9 @@ public:
                    configure(server);
                    configure(server);


                    server->join();
                    server->join();

                    // Another thread calls shutdown. Wait for it to complete.
                    (void)server->shutdown();
                }),
                }),
        };
        };


@@ -424,15 +443,6 @@ public:
    }
    }
};
};


// Verifies that a server configured with no root object hands the client a
// null root binder on the first session.
TEST_P(BinderRpc, RootObjectIsNull) {
    auto proc = createRpcTestSocketServerProcess(1, 1, [](const sp<RpcServer>& server) {
        // this is the default, but to be explicit
        server->setRootObject(nullptr);
    });

    // The session's root handle should reflect the (null) root object.
    EXPECT_EQ(nullptr, proc.sessions.at(0).root);
}

TEST_P(BinderRpc, Ping) {
TEST_P(BinderRpc, Ping) {
    auto proc = createRpcTestSocketServerProcess(1);
    auto proc = createRpcTestSocketServerProcess(1);
    ASSERT_NE(proc.rootBinder, nullptr);
    ASSERT_NE(proc.rootBinder, nullptr);
@@ -900,7 +910,7 @@ TEST_P(BinderRpc, Die) {
        EXPECT_EQ(DEAD_OBJECT, proc.rootIface->die(doDeathCleanup).transactionError())
        EXPECT_EQ(DEAD_OBJECT, proc.rootIface->die(doDeathCleanup).transactionError())
                << "Do death cleanup: " << doDeathCleanup;
                << "Do death cleanup: " << doDeathCleanup;


        proc.expectInvalid = true;
        proc.expectAlreadyShutdown = true;
    }
    }
}
}


@@ -914,7 +924,7 @@ TEST_P(BinderRpc, UseKernelBinderCallingId) {
    // second time! we catch the error :)
    // second time! we catch the error :)
    EXPECT_EQ(DEAD_OBJECT, proc.rootIface->useKernelBinderCallingId().transactionError());
    EXPECT_EQ(DEAD_OBJECT, proc.rootIface->useKernelBinderCallingId().transactionError());


    proc.expectInvalid = true;
    proc.expectAlreadyShutdown = true;
}
}


TEST_P(BinderRpc, WorksWithLibbinderNdkPing) {
TEST_P(BinderRpc, WorksWithLibbinderNdkPing) {