Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 19fc9f79 authored by Steven Moreland
Browse files

libbinder: RPC disambiguate server/client

Now:
- server: RpcServer
- client: a client of an RpcServer
- incoming: a thread processing commands (either as part of an
  RpcServer's sessions or back to a client which has a threadpool)
- outgoing: a thread for sending commands (either to an RpcServer's
  sessions or back to a client which has a threadpool)

Bug: 167966510
Test: binderRpcTest
Change-Id: Iea286ab0ff6f9fb775994247003b8d29c999e10a
parent c7d40135
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -301,7 +301,7 @@ void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clie
        }

        if (reverse) {
            LOG_ALWAYS_FATAL_IF(!session->addClientConnection(std::move(clientFd)),
            LOG_ALWAYS_FATAL_IF(!session->addOutgoingConnection(std::move(clientFd)),
                                "server state must already be initialized");
            return;
        }
@@ -350,7 +350,7 @@ bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) {
    return true;
}

void RpcServer::onSessionLockedAllServerThreadsEnded(const sp<RpcSession>& session) {
void RpcServer::onSessionLockedAllIncomingThreadsEnded(const sp<RpcSession>& session) {
    auto id = session->mId;
    LOG_ALWAYS_FATAL_IF(id == std::nullopt, "Server sessions must be initialized with ID");
    LOG_RPC_DETAIL("Dropping session %d", *id);
@@ -362,7 +362,7 @@ void RpcServer::onSessionLockedAllServerThreadsEnded(const sp<RpcSession>& sessi
    (void)mSessions.erase(it);
}

void RpcServer::onSessionServerThreadEnded() {
void RpcServer::onSessionIncomingThreadEnded() {
    mShutdownCv.notify_all();
}

+38 −37
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    std::lock_guard<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mServerConnections.size() != 0,
    LOG_ALWAYS_FATAL_IF(mIncomingConnections.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}

@@ -61,10 +61,10 @@ sp<RpcSession> RpcSession::make() {

void RpcSession::setMaxThreads(size_t threads) {
    std::lock_guard<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(!mClientConnections.empty() || !mServerConnections.empty(),
    LOG_ALWAYS_FATAL_IF(!mOutgoingConnections.empty() || !mIncomingConnections.empty(),
                        "Must set max threads before setting up connections, but has %zu client(s) "
                        "and %zu server(s)",
                        mClientConnections.size(), mServerConnections.size());
                        mOutgoingConnections.size(), mIncomingConnections.size());
    mMaxThreads = threads;
}

@@ -100,7 +100,7 @@ bool RpcSession::addNullDebuggingClient() {
        return false;
    }

    return addClientConnection(std::move(serverFd));
    return addOutgoingConnection(std::move(serverFd));
}

sp<IBinder> RpcSession::getRootObject() {
@@ -233,13 +233,13 @@ status_t RpcSession::readId() {
    return OK;
}

void RpcSession::WaitForShutdownListener::onSessionLockedAllServerThreadsEnded(
void RpcSession::WaitForShutdownListener::onSessionLockedAllIncomingThreadsEnded(
        const sp<RpcSession>& session) {
    (void)session;
    mShutdown = true;
}

void RpcSession::WaitForShutdownListener::onSessionServerThreadEnded() {
void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() {
    mCv.notify_all();
}

@@ -263,7 +263,7 @@ void RpcSession::preJoinThreadOwnership(std::thread thread) {
RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(base::unique_fd fd) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignServerToThisThread(std::move(fd));
    sp<RpcConnection> connection = assignIncomingConnectionToThisThread(std::move(fd));

    status_t status = mState->readConnectionInit(connection, sp<RpcSession>::fromExisting(this));

@@ -291,7 +291,7 @@ void RpcSession::join(sp<RpcSession>&& session, PreJoinSetupResult&& setupResult
              statusToString(setupResult.status).c_str());
    }

    LOG_ALWAYS_FATAL_IF(!session->removeServerConnection(connection),
    LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection),
                        "bad state: connection object guaranteed to be in list");

    sp<RpcSession::EventListener> listener;
@@ -308,7 +308,7 @@ void RpcSession::join(sp<RpcSession>&& session, PreJoinSetupResult&& setupResult
    session = nullptr;

    if (listener != nullptr) {
        listener->onSessionServerThreadEnded();
        listener->onSessionIncomingThreadEnded();
    }
}

@@ -319,9 +319,9 @@ wp<RpcServer> RpcSession::server() {
bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    {
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mClientConnections.size() != 0,
        LOG_ALWAYS_FATAL_IF(mOutgoingConnections.size() != 0,
                            "Must only setup session once, but already has %zu clients",
                            mClientConnections.size());
                            mOutgoingConnections.size());
    }

    if (!setupOneSocketConnection(addr, RPC_SESSION_ID_NEW, false /*reverse*/)) return false;
@@ -427,7 +427,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, int32_t
            LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
            return true;
        } else {
            return addClientConnection(std::move(serverFd));
            return addOutgoingConnection(std::move(serverFd));
        }
    }

@@ -435,7 +435,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, int32_t
    return false;
}

bool RpcSession::addClientConnection(unique_fd fd) {
bool RpcSession::addOutgoingConnection(unique_fd fd) {
    sp<RpcConnection> connection = sp<RpcConnection>::make();
    {
        std::lock_guard<std::mutex> _l(mMutex);
@@ -450,7 +450,7 @@ bool RpcSession::addClientConnection(unique_fd fd) {

        connection->fd = std::move(fd);
        connection->exclusiveTid = gettid();
        mClientConnections.push_back(connection);
        mOutgoingConnections.push_back(connection);
    }

    status_t status = mState->sendConnectionInit(connection, sp<RpcSession>::fromExisting(this));
@@ -480,25 +480,26 @@ bool RpcSession::setForServer(const wp<RpcServer>& server, const wp<EventListene
    return true;
}

sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) {
sp<RpcSession::RpcConnection> RpcSession::assignIncomingConnectionToThisThread(unique_fd fd) {
    std::lock_guard<std::mutex> _l(mMutex);
    sp<RpcConnection> session = sp<RpcConnection>::make();
    session->fd = std::move(fd);
    session->exclusiveTid = gettid();
    mServerConnections.push_back(session);
    mIncomingConnections.push_back(session);

    return session;
}

bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
    std::lock_guard<std::mutex> _l(mMutex);
    if (auto it = std::find(mServerConnections.begin(), mServerConnections.end(), connection);
        it != mServerConnections.end()) {
        mServerConnections.erase(it);
        if (mServerConnections.size() == 0) {
    if (auto it = std::find(mIncomingConnections.begin(), mIncomingConnections.end(), connection);
        it != mIncomingConnections.end()) {
        mIncomingConnections.erase(it);
        if (mIncomingConnections.size() == 0) {
            sp<EventListener> listener = mEventListener.promote();
            if (listener) {
                listener->onSessionLockedAllServerThreadsEnded(sp<RpcSession>::fromExisting(this));
                listener->onSessionLockedAllIncomingThreadsEnded(
                        sp<RpcSession>::fromExisting(this));
            }
        }
        return true;
@@ -523,11 +524,11 @@ status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, Co
        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, session->mClientConnections,
                       session->mClientConnectionsOffset);
        findConnection(tid, &exclusive, &available, session->mOutgoingConnections,
                       session->mOutgoingConnectionsOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mServerConnections is excluded below.
        // a transaction, as mIncomingConnections is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
@@ -537,29 +538,29 @@ status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, Co
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            session->mClientConnectionsOffset =
                    (session->mClientConnectionsOffset + 1) % session->mClientConnections.size();
            session->mOutgoingConnectionsOffset = (session->mOutgoingConnectionsOffset + 1) %
                    session->mOutgoingConnections.size();
        }

        // USE SERVING SOCKET (e.g. nested transaction)
        if (use != ConnectionUse::CLIENT_ASYNC) {
            sp<RpcConnection> exclusiveServer;
            sp<RpcConnection> exclusiveIncoming;
            // server connections are always assigned to a thread
            findConnection(tid, &exclusiveServer, nullptr /*available*/,
                           session->mServerConnections, 0 /* index hint */);
            findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
                           session->mIncomingConnections, 0 /* index hint */);

            // asynchronous calls cannot be nested, we currently allow ref count
            // calls to be nested (so that you can use this without having extra
            // threads). Note 'drainCommands' is used so that these ref counts can't
            // build up.
            if (exclusiveServer != nullptr) {
                if (exclusiveServer->allowNested) {
            if (exclusiveIncoming != nullptr) {
                if (exclusiveIncoming->allowNested) {
                    // guaranteed to be processed as nested command
                    exclusive = exclusiveServer;
                    exclusive = exclusiveIncoming;
                } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) {
                    // prefer available socket, but if we don't have one, don't
                    // wait for one
                    exclusive = exclusiveServer;
                    exclusive = exclusiveIncoming;
                }
            }
        }
@@ -575,16 +576,16 @@ status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, Co
            break;
        }

        if (session->mClientConnections.size() == 0) {
        if (session->mOutgoingConnections.size() == 0) {
            ALOGE("Session has no client connections. This is required for an RPC server to make "
                  "any non-nested (e.g. oneway or on another thread) calls. Use: %d. Server "
                  "connections: %zu",
                  static_cast<int>(use), session->mServerConnections.size());
                  static_cast<int>(use), session->mIncomingConnections.size());
            return WOULD_BLOCK;
        }

        LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
                       session->mClientConnections.size(), session->mServerConnections.size());
                       session->mOutgoingConnections.size(), session->mIncomingConnections.size());
        session->mAvailableConnectionCv.wait(_l);
    }
    session->mWaitingThreads--;
+7 −7
Original line number Diff line number Diff line
@@ -272,7 +272,7 @@ status_t RpcState::rpcRec(const sp<RpcSession::RpcConnection>& connection,

status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                      const sp<RpcSession>& session) {
    RpcClientConnectionInit init{
    RpcOutgoingConnectionInit init{
            .msg = RPC_CONNECTION_INIT_OKAY,
    };
    return rpcSend(connection, session, "connection init", &init, sizeof(init));
@@ -280,7 +280,7 @@ status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& conne

status_t RpcState::readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                      const sp<RpcSession>& session) {
    RpcClientConnectionInit init;
    RpcOutgoingConnectionInit init;
    if (status_t status = rpcRec(connection, session, "connection init", &init, sizeof(init));
        status != OK)
        return status;
@@ -470,7 +470,7 @@ status_t RpcState::waitForReply(const sp<RpcSession::RpcConnection>& connection,

        if (command.command == RPC_COMMAND_REPLY) break;

        if (status_t status = processServerCommand(connection, session, command, CommandType::ANY);
        if (status_t status = processCommand(connection, session, command, CommandType::ANY);
            status != OK)
            return status;
    }
@@ -539,7 +539,7 @@ status_t RpcState::getAndExecuteCommand(const sp<RpcSession::RpcConnection>& con
        status != OK)
        return status;

    return processServerCommand(connection, session, command, type);
    return processCommand(connection, session, command, type);
}

status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection,
@@ -553,7 +553,7 @@ status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection
    return OK;
}

status_t RpcState::processServerCommand(const sp<RpcSession::RpcConnection>& connection,
status_t RpcState::processCommand(const sp<RpcSession::RpcConnection>& connection,
                                  const sp<RpcSession>& session, const RpcWireHeader& command,
                                  CommandType type) {
    IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull();
+3 −3
Original line number Diff line number Diff line
@@ -145,7 +145,7 @@ private:

    [[nodiscard]] status_t waitForReply(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session, Parcel* reply);
    [[nodiscard]] status_t processServerCommand(const sp<RpcSession::RpcConnection>& connection,
    [[nodiscard]] status_t processCommand(const sp<RpcSession::RpcConnection>& connection,
                                          const sp<RpcSession>& session,
                                          const RpcWireHeader& command, CommandType type);
    [[nodiscard]] status_t processTransact(const sp<RpcSession::RpcConnection>& connection,
+1 −1
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ struct RpcConnectionHeader {
 * transaction. The main use of this is in order to control the timing for when
 * a reverse connection is setup.
 */
struct RpcClientConnectionInit {
struct RpcOutgoingConnectionInit {
    char msg[4];
    uint8_t reserved[4];
};
Loading