Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3c4d869e authored by Steven Moreland's avatar Steven Moreland
Browse files

RPC Binder: RpcState centralize transactAddress

Previously:
- transact 1 takes binder as argument
           2 validates parcel (should happen in other case)
           3 updates state for binder transfer out
           4 calls transactAddress
- transactAddress - does transaction

Now, 2/3 are moved into transactAddress. The main goal
of this is to enable us to do transaction validation
before we do (3). That way, if we reject the transaction, we won't
leak the binder by having already recorded (in step 3) that we sent
the binder out.

Bug: 424526253
Test: binderRpcTest
Flag: EXEMPT .

Change-Id: Ib518fdcffc92360fc960e4cdae47c361187554b6
parent 912d5ad8
Loading
Loading
Loading
Loading
+34 −26
Original line number Diff line number Diff line
@@ -524,8 +524,8 @@ sp<IBinder> RpcState::getRootObject(const sp<RpcSession::RpcConnection>& connect
    data.markForRpc(session);
    Parcel reply;

    status_t status =
            transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_ROOT, data, session, &reply, 0);
    status_t status = transactInternal(connection, nullptr, RPC_SPECIAL_TRANSACT_GET_ROOT, data,
                                       session, &reply, 0);
    if (status != OK) {
        ALOGE("Error getting root object: %s", statusToString(status).c_str());
        return nullptr;
@@ -540,8 +540,8 @@ status_t RpcState::getMaxThreads(const sp<RpcSession::RpcConnection>& connection
    data.markForRpc(session);
    Parcel reply;

    status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
                                      session, &reply, 0);
    status_t status = transactInternal(connection, nullptr, RPC_SPECIAL_TRANSACT_GET_MAX_THREADS,
                                       data, session, &reply, 0);
    if (status != OK) {
        ALOGE("Error getting max threads: %s", statusToString(status).c_str());
        return status;
@@ -565,8 +565,8 @@ status_t RpcState::getSessionId(const sp<RpcSession::RpcConnection>& connection,
    data.markForRpc(session);
    Parcel reply;

    status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
                                      session, &reply, 0);
    status_t status = transactInternal(connection, nullptr, RPC_SPECIAL_TRANSACT_GET_SESSION_ID,
                                       data, session, &reply, 0);
    if (status != OK) {
        ALOGE("Error getting session ID: %s", statusToString(status).c_str());
        return status;
@@ -578,16 +578,10 @@ status_t RpcState::getSessionId(const sp<RpcSession::RpcConnection>& connection,
status_t RpcState::transact(const sp<RpcSession::RpcConnection>& connection,
                            const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                            const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
    std::string errorMsg;
    if (status_t status = validateParcel(session, data, &errorMsg); status != OK) {
        ALOGE("Refusing to send RPC on binder %p code %" PRIu32 ": Parcel %p failed validation: %s",
              binder.get(), code, &data, errorMsg.c_str());
        return status;
    }
    uint64_t address;
    if (status_t status = onBinderLeaving(session, binder, &address); status != OK) return status;
    // only internal callers of transactInternal can use special transactions
    LOG_ALWAYS_FATAL_IF(binder == nullptr, "Binder should not be null");

    if (status_t status = transactAddress(connection, address, code, data, session, reply, flags);
    if (status_t status = transactInternal(connection, binder, code, data, session, reply, flags);
        status != OK) {
        // TODO(b/414720799): this log is added to debug this bug, but it could be a bit noisy, and
        // we may only want to log it from some cases moving forward.
@@ -599,11 +593,25 @@ status_t RpcState::transact(const sp<RpcSession::RpcConnection>& connection,
    return OK;
}

status_t RpcState::transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                   uint64_t address, uint32_t code, const Parcel& data,
                                   const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
    LOG_ALWAYS_FATAL_IF(!data.isForRpc());
    LOG_ALWAYS_FATAL_IF(data.objectsCount() != 0);
status_t RpcState::transactInternal(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<IBinder>& maybeBinder, uint32_t code,
                                    const Parcel& data, const sp<RpcSession>& session,
                                    Parcel* reply, uint32_t flags) {
    std::string errorMsg;
    if (status_t status = validateParcel(session, data, &errorMsg); status != OK) {
        ALOGE("Refusing to send RPC on binder %p code %" PRIu32 ": Parcel %p failed validation: %s",
              maybeBinder.get(), code, &data, errorMsg.c_str());
        return status;
    }

    uint64_t address;
    if (maybeBinder) {
        if (status_t status = onBinderLeaving(session, maybeBinder, &address); status != OK) {
            return status;
        }
    } else {
        address = RPC_SPECIAL_TRANSACTION_ADDRESS;
    }

    if (!(flags & IBinder::FLAG_ONEWAY)) {
        LOG_ALWAYS_FATAL_IF(reply == nullptr,
@@ -612,7 +620,7 @@ status_t RpcState::transactAddress(const sp<RpcSession::RpcConnection>& connecti

    uint64_t asyncNumber = 0;

    if (address != 0) {
    if (address != RPC_SPECIAL_TRANSACTION_ADDRESS) {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(address);
@@ -1005,7 +1013,7 @@ processTransactInternalTailCall:
    bool oneway = transaction->flags & IBinder::FLAG_ONEWAY;

    status_t replyStatus = OK;
    if (addr != 0) {
    if (addr != RPC_SPECIAL_TRANSACTION_ADDRESS) {
        if (!target) {
            replyStatus = onBinderEntering(session, addr, &target);
        }
@@ -1128,8 +1136,8 @@ processTransactInternalTailCall:
                connection->allowNested = origAllowNested;
            } else {
                LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
                LOG_ALWAYS_FATAL_IF(addr != 0,
                                    "!target && replyStatus == OK should imply addr == 0");
                LOG_ALWAYS_FATAL_IF(addr != RPC_SPECIAL_TRANSACTION_ADDRESS,
                                    "!target && replyStatus == OK should imply special address");

                switch (transaction->code) {
                    case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
@@ -1219,7 +1227,7 @@ processTransactInternalTailCall:

        // done processing all the async commands on this binder that we can, so
        // write decstrongs on the binder
        if (addr != 0 && target != nullptr) {
        if (addr != RPC_SPECIAL_TRANSACTION_ADDRESS && target != nullptr) {
            return flushExcessBinderRefs(session, addr, target);
        }

@@ -1231,7 +1239,7 @@ processTransactInternalTailCall:
    // be a leak, but the more fundamental problem is the error.
    // Binder refs are flushed for oneway calls only after all calls which are
    // built up are executed. Otherwise, they fill up the binder buffer.
    if (addr != 0 && target != nullptr) {
    if (addr != RPC_SPECIAL_TRANSACTION_ADDRESS && target != nullptr) {
        // if this fails, we are broken out of the protocol, so just shutdown. There
        // is no chance we could write the status to the other side.
        if (status_t status = flushExcessBinderRefs(session, addr, target); status != OK) {
+8 −5
Original line number Diff line number Diff line
@@ -92,11 +92,6 @@ public:
    [[nodiscard]] status_t transact(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<IBinder>& address, uint32_t code, const Parcel& data,
                                    const sp<RpcSession>& session, Parcel* reply, uint32_t flags);
    [[nodiscard]] status_t transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                           uint64_t address, uint32_t code, const Parcel& data,
                                           const sp<RpcSession>& session, Parcel* reply,
                                           uint32_t flags);

    /**
     * The ownership model here carries an implicit strong refcount whenever a
     * binder is sent across processes. Since we have a local strong count in
@@ -229,6 +224,14 @@ private:
    [[nodiscard]] static status_t validateParcel(const sp<RpcSession>& session,
                                                 const Parcel& parcel, std::string* errorMsg);

    // Exactly the same as transact, but you can do a special transaction which we
    // don't want to export outside of RpcState. A special transaction is on address
    // RPC_SPECIAL_TRANSACTION_ADDRESS (0), such as getting the root object.
    [[nodiscard]] status_t transactInternal(const sp<RpcSession::RpcConnection>& connection,
                                            const sp<IBinder>& maybeBinder, uint32_t code,
                                            const Parcel& data, const sp<RpcSession>& session,
                                            Parcel* reply, uint32_t flags);

    struct BinderNode {
        // Two cases:
        // A - local binder we are serving
+4 −0
Original line number Diff line number Diff line
@@ -97,6 +97,10 @@ enum : uint32_t {
    RPC_COMMAND_DEC_STRONG,
};

// Reserved wire-address values for RpcWireTransaction.
enum : uint64_t {
    // Address 0 carries no binder target; it is used for "special" transactions
    // (the RPC_SPECIAL_TRANSACT_* codes below), e.g. getting the root object.
    RPC_SPECIAL_TRANSACTION_ADDRESS = 0,
};

/**
 * These commands are used when the address in an RpcWireTransaction is zero'd
 * out (no address). This allows the transact/reply flow to be used for