Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 860a932b authored by android-build-team Robot
Browse files

Snap for 7357013 from 23cb5990 to sc-d1-release

Change-Id: I53e078ecc5212f825b03bfdbbaa77b33598a6aaa
parents 41089600 23cb5990
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -1964,6 +1964,8 @@ static void DumpstateTelephonyOnly(const std::string& calling_package) {

    RunDumpsys("DUMPSYS", {"connectivity"}, CommandOptions::WithTimeout(90).Build(),
               SEC_TO_MSEC(10));
    RunDumpsys("DUMPSYS", {"vcn_management"}, CommandOptions::WithTimeout(90).Build(),
               SEC_TO_MSEC(10));
    if (include_sensitive_info) {
        // Carrier apps' services will be dumped below in dumpsys activity service all-non-platform.
        RunDumpsys("DUMPSYS", {"carrier_config"}, CommandOptions::WithTimeout(90).Build(),
@@ -3187,6 +3189,11 @@ Dumpstate::RunStatus Dumpstate::CopyBugreportIfUserConsented(int32_t calling_uid
        // Since we do not have user consent to share the bugreport it does not get
        // copied over to the calling app but remains in the internal directory from
        // where the user can manually pull it.
        std::string final_path = GetPath(".zip");
        bool copy_succeeded = android::os::CopyFileToFile(path_, final_path);
        if (copy_succeeded) {
            android::os::UnlinkAndLogOnError(path_);
        }
        return Dumpstate::RunStatus::USER_CONSENT_TIMED_OUT;
    }
    // Unknown result; must be a programming error.
+54 −30
Original line number Diff line number Diff line
@@ -132,21 +132,49 @@ void RpcServer::join() {
        }
        LOG_RPC_DETAIL("accept4 on fd %d yields fd %d", mServer.get(), clientFd.get());

        // TODO(b/183988761): cannot trust this simple ID, should not block this
        // thread
        {
            std::lock_guard<std::mutex> _l(mLock);
            std::thread thread =
                    std::thread(&RpcServer::establishConnection, this,
                                std::move(sp<RpcServer>::fromExisting(this)), std::move(clientFd));
            mConnectingThreads[thread.get_id()] = std::move(thread);
        }
    }
}

// Snapshot the set of live sessions.
//
// Takes mLock so the copy is consistent with concurrent session
// registration/removal; returns strong (sp) references, so the caller may
// hold the result after the lock is released.
std::vector<sp<RpcSession>> RpcServer::listSessions() {
    std::lock_guard<std::mutex> _l(mLock);
    std::vector<sp<RpcSession>> sessions;
    // mSessions maps session ID -> session; only the sessions are reported.
    for (const auto& entry : mSessions) {
        sessions.push_back(entry.second);
    }
    return sessions;
}

void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clientFd) {
    LOG_ALWAYS_FATAL_IF(this != server.get(), "Must pass same ownership object");

    // TODO(b/183988761): cannot trust this simple ID
    LOG_ALWAYS_FATAL_IF(!mAgreedExperimental, "no!");
    int32_t id;
    if (sizeof(id) != read(clientFd.get(), &id, sizeof(id))) {
        ALOGE("Could not read ID from fd %d", clientFd.get());
            continue;
        return;
    }

    std::thread thisThread;
    sp<RpcSession> session;
    {
        std::lock_guard<std::mutex> _l(mLock);

            sp<RpcSession> session;
        auto threadId = mConnectingThreads.find(std::this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(threadId == mConnectingThreads.end(),
                            "Must establish connection on owned thread");
        thisThread = std::move(threadId->second);
        mConnectingThreads.erase(threadId);

        if (id == RPC_SESSION_ID_NEW) {
                // new client!
            LOG_ALWAYS_FATAL_IF(mSessionIdCounter >= INT32_MAX, "Out of session IDs");
            mSessionIdCounter++;

@@ -158,24 +186,20 @@ void RpcServer::join() {
            auto it = mSessions.find(id);
            if (it == mSessions.end()) {
                ALOGE("Cannot add thread, no record of session with ID %d", id);
                    continue;
                return;
            }
            session = it->second;
        }

            session->startThread(std::move(clientFd));
        }
    }
    }

std::vector<sp<RpcSession>> RpcServer::listSessions() {
    std::lock_guard<std::mutex> _l(mLock);
    std::vector<sp<RpcSession>> sessions;
    for (auto& [id, session] : mSessions) {
        (void)id;
        sessions.push_back(session);
    }
    return sessions;
    // avoid strong cycle
    server = nullptr;
    //
    //
    // DO NOT ACCESS MEMBER VARIABLES BELOW
    //

    session->join(std::move(thisThread), std::move(clientFd));
}

bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) {
+20 −22
Original line number Diff line number Diff line
@@ -84,7 +84,7 @@ bool RpcSession::addNullDebuggingClient() {
        return false;
    }

    addClient(std::move(serverFd));
    addClientConnection(std::move(serverFd));
    return true;
}

@@ -93,7 +93,7 @@ sp<IBinder> RpcSession::getRootObject() {
    return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getMaxThreads(size_t* maxThreads) {
status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
}
@@ -131,24 +131,14 @@ status_t RpcSession::readId() {
    return OK;
}

void RpcSession::startThread(unique_fd client) {
    std::lock_guard<std::mutex> _l(mMutex);
    sp<RpcSession> holdThis = sp<RpcSession>::fromExisting(this);
    int fd = client.release();
    auto thread = std::thread([=] {
        holdThis->join(unique_fd(fd));
void RpcSession::join(std::thread thread, unique_fd client) {
    LOG_ALWAYS_FATAL_IF(thread.get_id() != std::this_thread::get_id(), "Must own this thread");

    {
            std::lock_guard<std::mutex> _l(holdThis->mMutex);
            auto it = mThreads.find(std::this_thread::get_id());
            LOG_ALWAYS_FATAL_IF(it == mThreads.end());
            it->second.detach();
            mThreads.erase(it);
        }
    });
        std::lock_guard<std::mutex> _l(mMutex);
        mThreads[thread.get_id()] = std::move(thread);
    }

void RpcSession::join(unique_fd client) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignServerToThisThread(std::move(client));
@@ -165,6 +155,14 @@ void RpcSession::join(unique_fd client) {

    LOG_ALWAYS_FATAL_IF(!removeServerConnection(connection),
                        "bad state: connection object guaranteed to be in list");

    {
        std::lock_guard<std::mutex> _l(mMutex);
        auto it = mThreads.find(std::this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(it == mThreads.end());
        it->second.detach();
        mThreads.erase(it);
    }
}

void RpcSession::terminateLocked() {
@@ -201,7 +199,7 @@ bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    // instead of all at once.
    // TODO(b/186470974): first risk of blocking
    size_t numThreadsAvailable;
    if (status_t status = getMaxThreads(&numThreadsAvailable); status != OK) {
    if (status_t status = getRemoteMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session to %s: %s", addr.toString().c_str(),
              statusToString(status).c_str());
        return false;
@@ -255,7 +253,7 @@ bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id)

        LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());

        addClient(std::move(serverFd));
        addClientConnection(std::move(serverFd));
        return true;
    }

@@ -263,7 +261,7 @@ bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id)
    return false;
}

void RpcSession::addClient(unique_fd fd) {
void RpcSession::addClientConnection(unique_fd fd) {
    std::lock_guard<std::mutex> _l(mMutex);
    sp<RpcConnection> session = sp<RpcConnection>::make();
    session->fd = std::move(fd);
+3 −0
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <utils/RefBase.h>

#include <mutex>
#include <thread>

// WARNING: This is a feature which is still in development, and it is subject
// to radical change. Any production use of this may subject your code to any
@@ -115,6 +116,7 @@ private:
    friend sp<RpcServer>;
    RpcServer();

    void establishConnection(sp<RpcServer>&& session, base::unique_fd clientFd);
    bool setupSocketServer(const RpcSocketAddress& address);

    bool mAgreedExperimental = false;
@@ -123,6 +125,7 @@ private:
    base::unique_fd mServer; // socket we are accepting sessions on

    std::mutex mLock; // for below
    std::map<std::thread::id, std::thread> mConnectingThreads;
    sp<IBinder> mRootObject;
    std::map<int32_t, sp<RpcSession>> mSessions;
    int32_t mSessionIdCounter = 0;
+3 −4
Original line number Diff line number Diff line
@@ -81,7 +81,7 @@ public:
     * Query the other side of the session for the maximum number of threads
     * it supports (maximum number of concurrent non-nested synchronous transactions)
     */
    status_t getMaxThreads(size_t* maxThreads);
    status_t getRemoteMaxThreads(size_t* maxThreads);

    [[nodiscard]] status_t transact(const RpcAddress& address, uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags);
@@ -114,8 +114,7 @@ private:

    status_t readId();

    void startThread(base::unique_fd client);
    void join(base::unique_fd client);
    void join(std::thread thread, base::unique_fd client);
    void terminateLocked();

    struct RpcConnection : public RefBase {
@@ -128,7 +127,7 @@ private:

    bool setupSocketClient(const RpcSocketAddress& address);
    bool setupOneSocketClient(const RpcSocketAddress& address, int32_t sessionId);
    void addClient(base::unique_fd fd);
    void addClientConnection(base::unique_fd fd);
    void setForServer(const wp<RpcServer>& server, int32_t sessionId);
    sp<RpcConnection> assignServerToThisThread(base::unique_fd fd);
    bool removeServerConnection(const sp<RpcConnection>& connection);
Loading