Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 195edb85 authored by Steven Moreland's avatar Steven Moreland
Browse files

libbinder: handle ExclusiveSocket failure

Fixes the TODO here and adds a test case for the fatal abort, which
shouldn't be allowed to kill a server.

Bug: 167966510
Test: binderRpcTest
Change-Id: Id38049eda5c9199a11101d0ff0b7790a476afc44
parent c88b7fcc
Loading
Loading
Loading
Loading
+55 −36
Original line number Diff line number Diff line
@@ -104,12 +104,18 @@ bool RpcSession::addNullDebuggingClient() {
}

sp<IBinder> RpcSession::getRootObject() {
    // Acquire exclusive use of a client connection before talking to the
    // remote. If no connection can be obtained (e.g. the session has no
    // client connections), fail gracefully by returning a null root object
    // instead of aborting the process.
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return nullptr;
    return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
    // Acquire exclusive use of a client connection; propagate the failure
    // status (rather than aborting) if none is available.
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;
    return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
}

@@ -133,16 +139,22 @@ bool RpcSession::shutdownAndWait(bool wait) {

status_t RpcSession::transact(const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    // Oneway (async) transactions use a dedicated connection-use category so
    // that connection selection can rotate among client connections; see
    // ExclusiveConnection::find. A failure to obtain a connection is
    // returned to the caller instead of aborting.
    ExclusiveConnection connection;
    status_t status =
            ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                      (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                     : ConnectionUse::CLIENT,
                                      &connection);
    if (status != OK) return status;
    return state()->transact(connection.fd(), binder, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}

status_t RpcSession::sendDecStrong(const RpcAddress& address) {
    // Refcount-maintenance traffic gets its own connection-use category.
    // Propagate the failure status if no connection can be obtained.
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT_REFCOUNT, &connection);
    if (status != OK) return status;
    return state()->sendDecStrong(connection.fd(), sp<RpcSession>::fromExisting(this), address);
}

@@ -208,9 +220,12 @@ status_t RpcSession::readId() {

    int32_t id;

    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    status_t status =
            state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;

    status = state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %d", this, id);
@@ -493,13 +508,16 @@ bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
    return false;
}

RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& session,
                                                     ConnectionUse use)
      : mSession(session) {
status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, ConnectionUse use,
                                               ExclusiveConnection* connection) {
    connection->mSession = session;
    connection->mConnection = nullptr;
    connection->mReentrant = false;

    pid_t tid = gettid();
    std::unique_lock<std::mutex> _l(mSession->mMutex);
    std::unique_lock<std::mutex> _l(session->mMutex);

    mSession->mWaitingThreads++;
    session->mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;
@@ -507,8 +525,8 @@ RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& sessi
        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, mSession->mClientConnections,
                       mSession->mClientConnectionsOffset);
        findConnection(tid, &exclusive, &available, session->mClientConnections,
                       session->mClientConnectionsOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mServerConnections is excluded below.
@@ -521,8 +539,8 @@ RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& sessi
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            mSession->mClientConnectionsOffset =
                    (mSession->mClientConnectionsOffset + 1) % mSession->mClientConnections.size();
            session->mClientConnectionsOffset =
                    (session->mClientConnectionsOffset + 1) % session->mClientConnections.size();
        }

        // USE SERVING SOCKET (for nested transaction)
@@ -530,35 +548,36 @@ RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& sessi
        // asynchronous calls cannot be nested
        if (use != ConnectionUse::CLIENT_ASYNC) {
            // server connections are always assigned to a thread
            findConnection(tid, &exclusive, nullptr /*available*/, mSession->mServerConnections,
            findConnection(tid, &exclusive, nullptr /*available*/, session->mServerConnections,
                           0 /* index hint */);
        }

        // if our thread is already using a connection, prioritize using that
        if (exclusive != nullptr) {
            mConnection = exclusive;
            mReentrant = true;
            connection->mConnection = exclusive;
            connection->mReentrant = true;
            break;
        } else if (available != nullptr) {
            mConnection = available;
            mConnection->exclusiveTid = tid;
            connection->mConnection = available;
            connection->mConnection->exclusiveTid = tid;
            break;
        }

        // TODO(b/185167543): this should return an error, rather than crash a
        // server
        // in regular binder, this would usually be a deadlock :)
        LOG_ALWAYS_FATAL_IF(mSession->mClientConnections.size() == 0,
                            "Session has no client connections. This is required for an RPC server "
                            "to make any non-nested (e.g. oneway or on another thread) calls. "
                            "Use: %d. Server connections: %zu",
                            static_cast<int>(use), mSession->mServerConnections.size());
        if (session->mClientConnections.size() == 0) {
            ALOGE("Session has no client connections. This is required for an RPC server to make "
                  "any non-nested (e.g. oneway or on another thread) calls. Use: %d. Server "
                  "connections: %zu",
                  static_cast<int>(use), session->mServerConnections.size());
            return WOULD_BLOCK;
        }

        LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
                       mSession->mClientConnections.size(), mSession->mServerConnections.size());
        mSession->mAvailableConnectionCv.wait(_l);
                       session->mClientConnections.size(), session->mServerConnections.size());
        session->mAvailableConnectionCv.wait(_l);
    }
    mSession->mWaitingThreads--;
    session->mWaitingThreads--;

    return OK;
}

void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
@@ -592,7 +611,7 @@ RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a connection means something less deep in the call stack
    // is using this fd, and it retains the right to it. So, we don't give up
    // exclusive ownership, and no thread is freed.
    if (!mReentrant) {
    if (!mReentrant && mConnection != nullptr) {
        std::unique_lock<std::mutex> _l(mSession->mMutex);
        mConnection->exclusiveTid = std::nullopt;
        if (mSession->mWaitingThreads > 0) {
+4 −2
Original line number Diff line number Diff line
@@ -230,10 +230,12 @@ private:
        CLIENT_REFCOUNT,
    };

    // RAII object for session connection
    // Object representing exclusive access to a connection.
    class ExclusiveConnection {
    public:
        explicit ExclusiveConnection(const sp<RpcSession>& session, ConnectionUse use);
        static status_t find(const sp<RpcSession>& session, ConnectionUse use,
                             ExclusiveConnection* connection);

        ~ExclusiveConnection();
        const base::unique_fd& fd() { return mConnection->fd; }

+8 −0
Original line number Diff line number Diff line
@@ -1007,6 +1007,14 @@ TEST_P(BinderRpc, Callbacks) {
    }
}

// With a server process that has only one thread, a oneway callback issued
// back from the server has no free client connection to use; the call is
// expected to fail with WOULD_BLOCK rather than fatally aborting the server.
// (NOTE(review): exact threading semantics of createRpcTestSocketServerProcess(1)
// are defined elsewhere in this file — confirm against that helper.)
TEST_P(BinderRpc, OnewayCallbackWithNoThread) {
    auto proc = createRpcTestSocketServerProcess(1);
    auto cb = sp<MyBinderRpcCallback>::make();

    Status status = proc.rootIface->doCallback(cb, true /*oneway*/, false /*delayed*/, "anything");
    EXPECT_EQ(WOULD_BLOCK, status.transactionError());
}

TEST_P(BinderRpc, Die) {
    for (bool doDeathCleanup : {true, false}) {
        auto proc = createRpcTestSocketServerProcess(1);