Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 82de44ff authored by Devin Moore; committed by Gerrit Code Review
Browse files

Merge "Add linkToDeath support for RPC binder so a client can act on disconnect"

parents db8f9c58 66d5b7ac
Loading
Loading
Loading
Loading
+39 −27
Original line number Diff line number Diff line
@@ -343,11 +343,23 @@ status_t BpBinder::transact(
status_t BpBinder::linkToDeath(
    const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
{
    if (isRpcBinder()) return UNKNOWN_TRANSACTION;

    if constexpr (!kEnableKernelIpc) {
    if (isRpcBinder()) {
        if (rpcSession()->getMaxIncomingThreads() < 1) {
            LOG_ALWAYS_FATAL("Cannot register a DeathRecipient without any incoming connections.");
            return INVALID_OPERATION;
        }
    } else if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
    } else {
        if (ProcessState::self()->getThreadPoolMaxTotalThreadCount() == 0) {
            ALOGW("Linking to death on %s but there are no threads (yet?) listening to incoming "
                  "transactions. See ProcessState::startThreadPool and "
                  "ProcessState::setThreadPoolMaxThreadCount. Generally you should setup the "
                  "binder "
                  "threadpool before other initialization steps.",
                  String8(getInterfaceDescriptor()).c_str());
        }
    }

    Obituary ob;
@@ -358,14 +370,6 @@ status_t BpBinder::linkToDeath(
    LOG_ALWAYS_FATAL_IF(recipient == nullptr,
                        "linkToDeath(): recipient must be non-NULL");

    if (ProcessState::self()->getThreadPoolMaxTotalThreadCount() == 0) {
        ALOGW("Linking to death on %s but there are no threads (yet?) listening to incoming "
              "transactions. See ProcessState::startThreadPool and "
              "ProcessState::setThreadPoolMaxThreadCount. Generally you should setup the binder "
              "threadpool before other initialization steps.",
              String8(getInterfaceDescriptor()).c_str());
    }

    {
        AutoMutex _l(mLock);

@@ -376,11 +380,15 @@ status_t BpBinder::linkToDeath(
                    return NO_MEMORY;
                }
                ALOGV("Requesting death notification: %p handle %d\n", this, binderHandle());
                if (!isRpcBinder()) {
                    if constexpr (kEnableKernelIpc) {
                        getWeakRefs()->incWeak(this);
                        IPCThreadState* self = IPCThreadState::self();
                        self->requestDeathNotification(binderHandle(), this);
                        self->flushCommands();
                    }
                }
            }
            ssize_t res = mObituaries->add(ob);
            return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
        }
@@ -394,9 +402,7 @@ status_t BpBinder::unlinkToDeath(
    const wp<DeathRecipient>& recipient, void* cookie, uint32_t flags,
    wp<DeathRecipient>* outRecipient)
{
    if (isRpcBinder()) return UNKNOWN_TRANSACTION;

    if constexpr (!kEnableKernelIpc) {
    if (!kEnableKernelIpc && !isRpcBinder()) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
    }
@@ -419,9 +425,13 @@ status_t BpBinder::unlinkToDeath(
            mObituaries->removeAt(i);
            if (mObituaries->size() == 0) {
                ALOGV("Clearing death notification: %p handle %d\n", this, binderHandle());
                if (!isRpcBinder()) {
                    if constexpr (kEnableKernelIpc) {
                        IPCThreadState* self = IPCThreadState::self();
                        self->clearDeathNotification(binderHandle(), this);
                        self->flushCommands();
                    }
                }
                delete mObituaries;
                mObituaries = nullptr;
            }
@@ -434,9 +444,7 @@ status_t BpBinder::unlinkToDeath(

void BpBinder::sendObituary()
{
    LOG_ALWAYS_FATAL_IF(isRpcBinder(), "Cannot send obituary for remote binder.");

    if constexpr (!kEnableKernelIpc) {
    if (!kEnableKernelIpc && !isRpcBinder()) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return;
    }
@@ -451,9 +459,13 @@ void BpBinder::sendObituary()
    Vector<Obituary>* obits = mObituaries;
    if(obits != nullptr) {
        ALOGV("Clearing sent death notification: %p handle %d\n", this, binderHandle());
        if (!isRpcBinder()) {
            if constexpr (kEnableKernelIpc) {
                IPCThreadState* self = IPCThreadState::self();
                self->clearDeathNotification(binderHandle(), this);
                self->flushCommands();
            }
        }
        mObituaries = nullptr;
    }
    mObitsSent = 1;
+5 −0
Original line number Diff line number Diff line
@@ -223,6 +223,11 @@ bool RpcSession::shutdownAndWait(bool wait) {

    _l.unlock();

    if (status_t res = state()->sendObituaries(sp<RpcSession>::fromExisting(this)); res != OK) {
        ALOGE("Failed to send obituaries as the RpcSession is shutting down: %s",
              statusToString(res).c_str());
    }

    mRpcBinderState->clear();

    return true;
+24 −0
Original line number Diff line number Diff line
@@ -226,6 +226,30 @@ status_t RpcState::flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t
    return OK;
}

status_t RpcState::sendObituaries(const sp<RpcSession>& session) {
    // Promote every live remote binder to a strong reference while holding
    // the node lock. remoteBinder() only hands back a raw pointer, so these
    // strong refs are what keep the objects alive for the calls below.
    std::vector<sp<IBinder>> aliveBinders;
    {
        RpcMutexUniqueLock _l(mNodeMutex);
        for (const auto& [_, node] : mNodeForAddress) {
            if (sp<IBinder> strong = node.binder.promote()) {
                aliveBinders.push_back(std::move(strong));
            }
        }
    }

    // Deliver the obituaries — and let the strong refs drop — outside the
    // lock, so neither destructors nor onBinderDied callbacks run while
    // mNodeMutex is held.
    for (const auto& binder : aliveBinders) {
        auto* remote = binder->remoteBinder();
        if (remote != nullptr && remote->getPrivateAccessor().rpcSession() == session) {
            remote->sendObituary();
        }
    }
    return OK;
}

size_t RpcState::countBinders() {
    RpcMutexLockGuard _l(mNodeMutex);
    return mNodeForAddress.size();
+5 −0
Original line number Diff line number Diff line
@@ -140,6 +140,11 @@ public:
     */
    [[nodiscard]] status_t flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                                 const sp<IBinder>& binder);
    /**
     * Called when the RpcSession is shutdown.
     * Send obituaries for each known remote binder with this session.
     */
    [[nodiscard]] status_t sendObituaries(const sp<RpcSession>& session);

    size_t countBinders();
    void dump();
+120 −1
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ TEST(BinderRpcParcel, EntireParcelFormatted) {
    Parcel p;
    p.writeInt32(3);

    EXPECT_DEATH(p.markForBinder(sp<BBinder>::make()), "");
    EXPECT_DEATH(p.markForBinder(sp<BBinder>::make()), "format must be set before data is written");
}

class BinderRpcServerOnly : public ::testing::TestWithParam<std::tuple<RpcSecurity, uint32_t>> {
@@ -1035,6 +1035,125 @@ TEST_P(BinderRpc, Callbacks) {
    }
}

TEST_P(BinderRpc, SingleDeathRecipient) {
    if (singleThreaded() || !kEnableRpcThreads) {
        GTEST_SKIP() << "This test requires multiple threads";
    }
    // Recipient that records the death notification and wakes the waiter.
    class MyDeathRec : public IBinder::DeathRecipient {
    public:
        void binderDied(const wp<IBinder>& /* who */) override {
            // Hold the mutex while setting the flag: writing `dead` unlocked
            // races with the waiter's check-then-wait sequence, which could
            // miss the notify_one() entirely (lost wakeup) and reads `dead`
            // without synchronization (a data race).
            std::lock_guard<std::mutex> lock(mMtx);
            dead = true;
            mCv.notify_one();
        }
        std::mutex mMtx;
        std::condition_variable mCv;
        bool dead = false;  // guarded by mMtx
    };

    // Death recipient needs to have an incoming connection to be called
    auto proc = createRpcTestSocketServerProcess(
            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});

    auto dr = sp<MyDeathRec>::make();
    ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));

    if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
        EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
    }

    // Predicate form of wait_for is immune both to spurious wakeups and to
    // the notification arriving before we start waiting.
    std::unique_lock<std::mutex> lock(dr->mMtx);
    EXPECT_TRUE(dr->mCv.wait_for(lock, 1000ms, [&] { return dr->dead; }))
            << "Failed to receive the death notification.";

    // need to wait for the session to shutdown so we don't "Leak session"
    EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));
    proc.expectAlreadyShutdown = true;
}

TEST_P(BinderRpc, SingleDeathRecipientOnShutdown) {
    if (singleThreaded() || !kEnableRpcThreads) {
        GTEST_SKIP() << "This test requires multiple threads";
    }
    // Recipient that records the death notification and wakes the waiter.
    class MyDeathRec : public IBinder::DeathRecipient {
    public:
        void binderDied(const wp<IBinder>& /* who */) override {
            // Hold the mutex while setting the flag: writing `dead` unlocked
            // races with the waiter's check-then-wait sequence, which could
            // miss the notify_one() entirely (lost wakeup) and reads `dead`
            // without synchronization (a data race).
            std::lock_guard<std::mutex> lock(mMtx);
            dead = true;
            mCv.notify_one();
        }
        std::mutex mMtx;
        std::condition_variable mCv;
        bool dead = false;  // guarded by mMtx
    };

    // Death recipient needs to have an incoming connection to be called
    auto proc = createRpcTestSocketServerProcess(
            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});

    auto dr = sp<MyDeathRec>::make();
    EXPECT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));

    // Explicitly calling shutDownAndWait will cause the death recipients
    // to be called.
    EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));

    // Predicate form of wait_for is immune both to spurious wakeups and to
    // the notification arriving before we start waiting.
    std::unique_lock<std::mutex> lock(dr->mMtx);
    EXPECT_TRUE(dr->mCv.wait_for(lock, 1000ms, [&] { return dr->dead; }))
            << "Failed to receive the death notification.";

    proc.proc.host.terminate();
    proc.proc.host.setCustomExitStatusCheck([](int wstatus) {
        EXPECT_TRUE(WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGTERM)
                << "server process failed incorrectly: " << WaitStatusToString(wstatus);
    });
    proc.expectAlreadyShutdown = true;
}

TEST_P(BinderRpc, DeathRecipientFatalWithoutIncoming) {
    // The recipient is never expected to fire; registration itself must abort.
    class MyDeathRec : public IBinder::DeathRecipient {
    public:
        void binderDied(const wp<IBinder>& /* who */) override {}
    };

    // Zero incoming connections: there is no channel on which a death
    // notification could ever be delivered for this RPC binder.
    auto proc = createRpcTestSocketServerProcess(
            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 0});

    auto recipient = sp<MyDeathRec>::make();
    EXPECT_DEATH(proc.rootBinder->linkToDeath(recipient, (void*)1, 0),
                 "Cannot register a DeathRecipient without any incoming connections.");
}

TEST_P(BinderRpc, UnlinkDeathRecipient) {
    if (singleThreaded() || !kEnableRpcThreads) {
        GTEST_SKIP() << "This test requires multiple threads";
    }
    // A recipient that must never fire once it has been unlinked.
    class MyDeathRec : public IBinder::DeathRecipient {
    public:
        void binderDied(const wp<IBinder>& /* who */) override {
            GTEST_FAIL() << "This should not be called after unlinkToDeath";
        }
    };

    // Death recipient needs to have an incoming connection to be called
    auto proc = createRpcTestSocketServerProcess(
            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});

    // Link and then immediately unlink; the subsequent shutdown must not
    // invoke the recipient.
    auto recipient = sp<MyDeathRec>::make();
    ASSERT_EQ(OK, proc.rootBinder->linkToDeath(recipient, (void*)1, 0));
    ASSERT_EQ(OK, proc.rootBinder->unlinkToDeath(recipient, (void*)1, 0, nullptr));

    if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
        EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
    }

    // need to wait for the session to shutdown so we don't "Leak session"
    EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));
    proc.expectAlreadyShutdown = true;
}

TEST_P(BinderRpc, OnewayCallbackWithNoThread) {
    auto proc = createRpcTestSocketServerProcess({});
    auto cb = sp<MyBinderRpcCallback>::make();