Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0a7fc3d6 authored by Connor O'Brien's avatar Connor O'Brien Committed by Android (Google) Code Review
Browse files

Merge "libtimeinstate: support concurrent_{active,policy}_time"

parents 862e50c5 daceef75
Loading
Loading
Loading
Loading
+148 −17
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
#include <sys/sysinfo.h>

#include <mutex>
#include <numeric>
#include <optional>
#include <set>
#include <string>
@@ -53,7 +54,8 @@ static uint32_t gNCpus = 0;
static std::vector<std::vector<uint32_t>> gPolicyFreqs;
static std::vector<std::vector<uint32_t>> gPolicyCpus;
static std::set<uint32_t> gAllFreqs;
static unique_fd gMapFd;
static unique_fd gTisMapFd;
static unique_fd gConcurrentMapFd;

static std::optional<std::vector<uint32_t>> readNumbersFromFile(const std::string &path) {
    std::string data;
@@ -122,8 +124,12 @@ static bool initGlobals() {
        gPolicyCpus.emplace_back(*cpus);
    }

    gMapFd = unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_times_map")};
    if (gMapFd < 0) return false;
    gTisMapFd = unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_time_in_state_map")};
    if (gTisMapFd < 0) return false;

    gConcurrentMapFd =
            unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
    if (gConcurrentMapFd < 0) return false;

    gInitialized = true;
    return true;
@@ -143,7 +149,7 @@ static bool attachTracepointProgram(const std::string &eventType, const std::str
// process dies then it must be called again to resume tracking.
// This function should *not* be called while tracking is already active; doing so is unnecessary
// and can lead to accounting errors.
bool startTrackingUidCpuFreqTimes() {
bool startTrackingUidTimes() {
    if (!initGlobals()) return false;

    unique_fd fd(bpf_obj_get(BPF_FS_PATH "map_time_in_state_cpu_policy_map"));
@@ -174,7 +180,7 @@ bool startTrackingUidCpuFreqTimes() {
            attachTracepointProgram("power", "cpu_frequency");
}

// Retrieve the times in ns that uid spent running at each CPU frequency and store in freqTimes.
// Retrieve the times in ns that uid spent running at each CPU frequency.
// Return contains no value on error, otherwise it contains a vector of vectors using the format:
// [[t0_0, t0_1, ...],
//  [t1_0, t1_1, ...], ...]
@@ -189,11 +195,11 @@ std::optional<std::vector<std::vector<uint64_t>>> getUidCpuFreqTimes(uint32_t ui
        out.emplace_back(freqList.size(), 0);
    }

    std::vector<val_t> vals(gNCpus);
    std::vector<tis_val_t> vals(gNCpus);
    time_key_t key = {.uid = uid};
    for (uint32_t i = 0; i <= (maxFreqCount - 1) / FREQS_PER_ENTRY; ++i) {
        key.bucket = i;
        if (findMapEntry(gMapFd, &key, vals.data())) {
        if (findMapEntry(gTisMapFd, &key, vals.data())) {
            if (errno != ENOENT) return {};
            continue;
        }
@@ -214,7 +220,7 @@ std::optional<std::vector<std::vector<uint64_t>>> getUidCpuFreqTimes(uint32_t ui
    return out;
}

// Retrieve the times in ns that each uid spent running at each CPU freq and store in freqTimeMap.
// Retrieve the times in ns that each uid spent running at each CPU freq.
// Return contains no value on error, otherwise it contains a map from uids to vectors of vectors
// using the format:
// { uid0 -> [[t0_0_0, t0_0_1, ...], [t0_1_0, t0_1_1, ...], ...],
@@ -225,7 +231,7 @@ getUidsCpuFreqTimes() {
    if (!gInitialized && !initGlobals()) return {};
    time_key_t key, prevKey;
    std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>> map;
    if (getFirstMapKey(gMapFd, &key)) {
    if (getFirstMapKey(gTisMapFd, &key)) {
        if (errno == ENOENT) return map;
        return std::nullopt;
    }
@@ -233,9 +239,9 @@ getUidsCpuFreqTimes() {
    std::vector<std::vector<uint64_t>> mapFormat;
    for (const auto &freqList : gPolicyFreqs) mapFormat.emplace_back(freqList.size(), 0);

    std::vector<val_t> vals(gNCpus);
    std::vector<tis_val_t> vals(gNCpus);
    do {
        if (findMapEntry(gMapFd, &key, vals.data())) return {};
        if (findMapEntry(gTisMapFd, &key, vals.data())) return {};
        if (map.find(key.uid) == map.end()) map.emplace(key.uid, mapFormat);

        auto offset = key.bucket * FREQS_PER_ENTRY;
@@ -250,13 +256,129 @@ getUidsCpuFreqTimes() {
            }
        }
        prevKey = key;
    } while (!getNextMapKey(gMapFd, &prevKey, &key));
    } while (!getNextMapKey(gTisMapFd, &prevKey, &key));
    if (errno != ENOENT) return {};
    return map;
}

// Check that a concurrent_time_t is internally consistent: the sum of all active times
// must equal the sum of all per-policy times. A mismatch indicates the map was read while
// an update was in flight (active and policy halves are written separately).
static bool verifyConcurrentTimes(const concurrent_time_t &ct) {
    uint64_t totalActive = 0;
    for (uint64_t t : ct.active) totalActive += t;

    uint64_t totalPolicy = 0;
    for (const auto &clusterTimes : ct.policy) {
        for (uint64_t t : clusterTimes) totalPolicy += t;
    }
    return totalActive == totalPolicy;
}

// Retrieve the times in ns that uid spent running concurrently with each possible number of other
// tasks on each cluster (policy times) and overall (active times).
// Return contains no value on error, otherwise it contains a concurrent_time_t with the format:
// {.active = [a0, a1, ...], .policy = [[p0_0, p0_1, ...], [p1_0, p1_1, ...], ...]}
// where ai is the ns spent running concurrently with tasks on i other cpus and pi_j is the ns spent
// running on the ith cluster, concurrently with tasks on j other cpus in the same cluster
std::optional<concurrent_time_t> getUidConcurrentTimes(uint32_t uid, bool retry) {
    if (!gInitialized && !initGlobals()) return {};
    // Output shape: one active slot per CPU overall, one policy slot per CPU in each cluster.
    concurrent_time_t ret = {.active = std::vector<uint64_t>(gNCpus, 0)};
    for (const auto &cpuList : gPolicyCpus) ret.policy.emplace_back(cpuList.size(), 0);
    std::vector<concurrent_val_t> vals(gNCpus);
    time_key_t key = {.uid = uid};
    // Each map entry ("bucket") covers CPUS_PER_ENTRY concurrency-count slots, so walk
    // every bucket needed to cover gNCpus slots.
    for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
        if (findMapEntry(gConcurrentMapFd, &key, vals.data())) {
            // ENOENT only means this uid has no entry for this bucket yet;
            // any other errno is a real lookup failure.
            if (errno != ENOENT) return {};
            continue;
        }
        // Range of output slots this bucket contributes to.
        auto offset = key.bucket * CPUS_PER_ENTRY;
        auto nextOffset = (key.bucket + 1) * CPUS_PER_ENTRY;

        auto activeBegin = ret.active.begin() + offset;
        // Last bucket may be partially filled: clamp the end to the vector's end.
        auto activeEnd = nextOffset < gNCpus ? activeBegin + CPUS_PER_ENTRY : ret.active.end();

        // Active times are stored per CPU in the map value; sum across all CPUs.
        for (uint32_t cpu = 0; cpu < gNCpus; ++cpu) {
            std::transform(activeBegin, activeEnd, std::begin(vals[cpu].active), activeBegin,
                           std::plus<uint64_t>());
        }

        for (uint32_t policy = 0; policy < gNPolicies; ++policy) {
            // A cluster with fewer CPUs than `offset` has no slots in this bucket.
            if (offset >= gPolicyCpus[policy].size()) continue;
            auto policyBegin = ret.policy[policy].begin() + offset;
            auto policyEnd = nextOffset < gPolicyCpus[policy].size() ? policyBegin + CPUS_PER_ENTRY
                                                                     : ret.policy[policy].end();

            // Only CPUs belonging to this cluster contribute to its policy times.
            for (const auto &cpu : gPolicyCpus[policy]) {
                std::transform(policyBegin, policyEnd, std::begin(vals[cpu].policy), policyBegin,
                               std::plus<uint64_t>());
            }
        }
    }
    // An active/policy total mismatch suggests we raced with an in-progress update:
    // re-read once (retry=false on the recursive call prevents unbounded recursion).
    if (!verifyConcurrentTimes(ret) && retry)  return getUidConcurrentTimes(uid, false);
    return ret;
}

// Retrieve the times in ns that each uid spent running concurrently with each possible number of
// other tasks on each cluster (policy times) and overall (active times).
// Return contains no value on error, otherwise it contains a map from uids to concurrent_time_t's
// using the format:
// { uid0 -> {.active = [a0, a1, ...], .policy = [[p0_0, p0_1, ...], [p1_0, p1_1, ...], ...] }, ...}
// where ai is the ns spent running concurrently with tasks on i other cpus and pi_j is the ns spent
// running on the ith cluster, concurrently with tasks on j other cpus in the same cluster.
std::optional<std::unordered_map<uint32_t, concurrent_time_t>> getUidsConcurrentTimes() {
    if (!gInitialized && !initGlobals()) return {};
    time_key_t key, prevKey;
    std::unordered_map<uint32_t, concurrent_time_t> ret;
    if (getFirstMapKey(gConcurrentMapFd, &key)) {
        // An empty map is not an error: report "no uids" rather than failure.
        if (errno == ENOENT) return ret;
        return {};
    }

    // Template entry with correctly-sized zeroed vectors, copied for each new uid.
    concurrent_time_t retFormat = {.active = std::vector<uint64_t>(gNCpus, 0)};
    for (const auto &cpuList : gPolicyCpus) retFormat.policy.emplace_back(cpuList.size(), 0);

    std::vector<concurrent_val_t> vals(gNCpus);
    std::vector<uint64_t>::iterator activeBegin, activeEnd, policyBegin, policyEnd;

    do {
        if (findMapEntry(gConcurrentMapFd, &key, vals.data())) return {};
        if (ret.find(key.uid) == ret.end()) ret.emplace(key.uid, retFormat);

        // Range of per-uid output slots covered by this bucket's CPUS_PER_ENTRY entries.
        auto offset = key.bucket * CPUS_PER_ENTRY;
        auto nextOffset = (key.bucket + 1) * CPUS_PER_ENTRY;

        // Fix: start at this bucket's offset, matching getUidConcurrentTimes(). The iterator
        // previously always began at .begin(), so on systems with more than CPUS_PER_ENTRY
        // CPUs the bucket>0 active times were accumulated into the wrong slots.
        activeBegin = ret[key.uid].active.begin() + offset;
        activeEnd = nextOffset < gNCpus ? activeBegin + CPUS_PER_ENTRY : ret[key.uid].active.end();

        // Active times are stored per CPU in the map value; sum across all CPUs.
        for (uint32_t cpu = 0; cpu < gNCpus; ++cpu) {
            std::transform(activeBegin, activeEnd, std::begin(vals[cpu].active), activeBegin,
                           std::plus<uint64_t>());
        }

        for (uint32_t policy = 0; policy < gNPolicies; ++policy) {
            // A cluster with fewer CPUs than `offset` has no slots in this bucket.
            if (offset >= gPolicyCpus[policy].size()) continue;
            policyBegin = ret[key.uid].policy[policy].begin() + offset;
            policyEnd = nextOffset < gPolicyCpus[policy].size() ? policyBegin + CPUS_PER_ENTRY
                                                                : ret[key.uid].policy[policy].end();

            // Only CPUs belonging to this cluster contribute to its policy times.
            for (const auto &cpu : gPolicyCpus[policy]) {
                std::transform(policyBegin, policyEnd, std::begin(vals[cpu].policy), policyBegin,
                               std::plus<uint64_t>());
            }
        }
        prevKey = key;
    } while (!getNextMapKey(gConcurrentMapFd, &prevKey, &key));
    // getNextMapKey sets ENOENT when iteration completes normally.
    if (errno != ENOENT) return {};
    // Any uid whose totals don't reconcile was likely read mid-update: re-read it once.
    // (Binding renamed from `key` to avoid shadowing the time_key_t above.)
    for (const auto &[uid, times] : ret) {
        if (!verifyConcurrentTimes(times)) {
            auto val = getUidConcurrentTimes(uid, false);
            if (val.has_value()) ret[uid] = val.value();
        }
    }
    return ret;
}

// Clear all time in state data for a given uid. Returns false on error, true otherwise.
bool clearUidCpuFreqTimes(uint32_t uid) {
// This is only suitable for clearing data when an app is uninstalled; if called on a UID with
// running tasks it will cause time in state vs. concurrent time totals to be inconsistent for that
// UID.
bool clearUidTimes(uint32_t uid) {
    if (!gInitialized && !initGlobals()) return false;

    time_key_t key = {.uid = uid};
@@ -266,11 +388,20 @@ bool clearUidCpuFreqTimes(uint32_t uid) {
        if (freqList.size() > maxFreqCount) maxFreqCount = freqList.size();
    }

    val_t zeros = {0};
    std::vector<val_t> vals(gNCpus, zeros);
    tis_val_t zeros = {0};
    std::vector<tis_val_t> vals(gNCpus, zeros);
    for (key.bucket = 0; key.bucket <= (maxFreqCount - 1) / FREQS_PER_ENTRY; ++key.bucket) {
        if (writeToMapEntry(gMapFd, &key, vals.data(), BPF_EXIST) && errno != ENOENT) return false;
        if (deleteMapEntry(gMapFd, &key) && errno != ENOENT) return false;
        if (writeToMapEntry(gTisMapFd, &key, vals.data(), BPF_EXIST) && errno != ENOENT)
            return false;
        if (deleteMapEntry(gTisMapFd, &key) && errno != ENOENT) return false;
    }

    concurrent_val_t czeros = {.policy = {0}, .active = {0}};
    std::vector<concurrent_val_t> cvals(gNCpus, czeros);
    for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
        if (writeToMapEntry(gConcurrentMapFd, &key, cvals.data(), BPF_EXIST) && errno != ENOENT)
            return false;
        if (deleteMapEntry(gConcurrentMapFd, &key) && errno != ENOENT) return false;
    }
    return true;
}
+10 −2
Original line number Diff line number Diff line
@@ -22,11 +22,19 @@
namespace android {
namespace bpf {

bool startTrackingUidCpuFreqTimes();
bool startTrackingUidTimes();
std::optional<std::vector<std::vector<uint64_t>>> getUidCpuFreqTimes(uint32_t uid);
std::optional<std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>>>
    getUidsCpuFreqTimes();
bool clearUidCpuFreqTimes(unsigned int uid);

// Aggregated concurrent-runtime totals for one uid (see getUidConcurrentTimes()).
struct concurrent_time_t {
    // active[i]: ns spent running concurrently with tasks on i other CPUs, system-wide.
    std::vector<uint64_t> active;
    // policy[c][i]: ns spent running on cluster c concurrently with tasks on i other CPUs
    // in that same cluster.
    std::vector<std::vector<uint64_t>> policy;
};

std::optional<concurrent_time_t> getUidConcurrentTimes(uint32_t uid, bool retry = true);
std::optional<std::unordered_map<uint32_t, concurrent_time_t>> getUidsConcurrentTimes();
bool clearUidTimes(unsigned int uid);

} // namespace bpf
} // namespace android
+210 −8
Original line number Diff line number Diff line
@@ -3,6 +3,7 @@

#include <sys/sysinfo.h>

#include <numeric>
#include <unordered_map>
#include <vector>

@@ -21,13 +22,83 @@ static constexpr uint64_t NSEC_PER_YEAR = NSEC_PER_SEC * 60 * 60 * 24 * 365;

using std::vector;

TEST(TimeInStateTest, SingleUid) {
TEST(TimeInStateTest, SingleUidTimeInState) {
    auto times = getUidCpuFreqTimes(0);
    ASSERT_TRUE(times.has_value());
    EXPECT_FALSE(times->empty());
}

TEST(TimeInStateTest, AllUid) {
// Concurrent times for uid 0 (root, always present) must be retrievable, and the total
// number of per-cluster policy slots must equal the number of active slots.
TEST(TimeInStateTest, SingleUidConcurrentTimes) {
    auto concurrentTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concurrentTimes.has_value());
    ASSERT_FALSE(concurrentTimes->active.empty());
    ASSERT_FALSE(concurrentTimes->policy.empty());

    // Each CPU contributes exactly one slot to its cluster's policy vector, so the
    // slot counts must match (active has one slot per CPU).
    uint64_t policyEntries = 0;
    for (const auto &policyTimeVec : concurrentTimes->policy) policyEntries += policyTimeVec.size();
    ASSERT_EQ(concurrentTimes->active.size(), policyEntries);
}

// Check internal consistency of one uid's concurrent times: scanning concurrency levels
// from each end, the running total of active time must never exceed the running total of
// policy time (every active ns is attributed to some cluster).
static void TestConcurrentTimesConsistent(const struct concurrent_time_t &concurrentTime) {
    size_t maxPolicyCpus = 0;
    for (const auto &vec : concurrentTime.policy) {
        maxPolicyCpus = std::max(maxPolicyCpus, vec.size());
    }
    // Forward pass: prefix sums of policy time must dominate active time.
    uint64_t policySum = 0;
    for (size_t i = 0; i < maxPolicyCpus; ++i) {
        for (const auto &vec : concurrentTime.policy) {
            if (i < vec.size()) policySum += vec[i];
        }
        ASSERT_LE(concurrentTime.active[i], policySum);
        policySum -= concurrentTime.active[i];
    }
    // Backward pass: the same invariant starting from the highest concurrency level.
    policySum = 0;
    for (size_t i = 0; i < concurrentTime.active.size(); ++i) {
        for (const auto &vec : concurrentTime.policy) {
            if (i < vec.size()) policySum += vec[vec.size() - 1 - i];
        }
        auto activeSum = concurrentTime.active[concurrentTime.active.size() - 1 - i];
        // This check is slightly flaky because we may read a map entry in the middle of an update
        // when active times have been updated but policy times have not. This happens infrequently
        // and can be distinguished from more serious bugs by re-running the test: if the underlying
        // data itself is inconsistent, the test will fail every time.
        ASSERT_LE(activeSum, policySum);
        policySum -= activeSum;
    }
}

// Cross-check a uid's time-in-state data against its concurrent-time data: per-cluster
// totals must agree to within one second of skew, and total active time must equal total
// policy time exactly.
static void TestUidTimesConsistent(const std::vector<std::vector<uint64_t>> &timeInState,
                                   const struct concurrent_time_t &concurrentTime) {
    ASSERT_NO_FATAL_FAILURE(TestConcurrentTimesConsistent(concurrentTime));
    // One time-in-state vector per cluster, matching the policy vectors.
    ASSERT_EQ(timeInState.size(), concurrentTime.policy.size());
    uint64_t policySum = 0;
    for (uint32_t i = 0; i < timeInState.size(); ++i) {
        uint64_t tisSum =
                std::accumulate(timeInState[i].begin(), timeInState[i].end(), (uint64_t)0);
        uint64_t concurrentSum = std::accumulate(concurrentTime.policy[i].begin(),
                                                 concurrentTime.policy[i].end(), (uint64_t)0);
        // The two maps are updated at slightly different moments, so allow up to 1s of skew
        // in either direction.
        if (tisSum < concurrentSum)
            ASSERT_LE(concurrentSum - tisSum, NSEC_PER_SEC);
        else
            ASSERT_LE(tisSum - concurrentSum, NSEC_PER_SEC);
        policySum += concurrentSum;
    }
    uint64_t activeSum = std::accumulate(concurrentTime.active.begin(), concurrentTime.active.end(),
                                         (uint64_t)0);
    EXPECT_EQ(activeSum, policySum);
}

// The two single-uid APIs (time-in-state and concurrent times) must agree for uid 0.
TEST(TimeInStateTest, SingleUidTimesConsistent) {
    auto times = getUidCpuFreqTimes(0);
    ASSERT_TRUE(times.has_value());

    auto concurrentTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concurrentTimes.has_value());

    ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(*times, *concurrentTimes));
}

TEST(TimeInStateTest, AllUidTimeInState) {
    vector<size_t> sizes;
    auto map = getUidsCpuFreqTimes();
    ASSERT_TRUE(map.has_value());
@@ -43,7 +114,7 @@ TEST(TimeInStateTest, AllUid) {
    }
}

TEST(TimeInStateTest, SingleAndAllUidConsistent) {
TEST(TimeInStateTest, SingleAndAllUidTimeInStateConsistent) {
    auto map = getUidsCpuFreqTimes();
    ASSERT_TRUE(map.has_value());
    ASSERT_FALSE(map->empty());
@@ -64,6 +135,40 @@ TEST(TimeInStateTest, SingleAndAllUidConsistent) {
    }
}

// Every uid returned by getUidsConcurrentTimes() must report identically-shaped data
// (same number of active slots, clusters, and per-cluster slots).
TEST(TimeInStateTest, AllUidConcurrentTimes) {
    auto map = getUidsConcurrentTimes();
    ASSERT_TRUE(map.has_value());
    ASSERT_FALSE(map->empty());

    // Use an arbitrary entry as the reference shape.
    auto firstEntry = map->begin()->second;
    for (const auto &kv : *map) {
        ASSERT_EQ(kv.second.active.size(), firstEntry.active.size());
        ASSERT_EQ(kv.second.policy.size(), firstEntry.policy.size());
        for (size_t i = 0; i < kv.second.policy.size(); ++i) {
            ASSERT_EQ(kv.second.policy[i].size(), firstEntry.policy[i].size());
        }
    }
}

// Per-uid results from the single-uid API must agree with the all-uids snapshot to within
// one second (the single-uid values are read later, so they may be slightly larger).
TEST(TimeInStateTest, SingleAndAllUidConcurrentTimesConsistent) {
    auto map = getUidsConcurrentTimes();
    ASSERT_TRUE(map.has_value());
    for (const auto &kv : *map) {
        uint32_t uid = kv.first;
        auto times1 = kv.second;
        auto times2 = getUidConcurrentTimes(uid);
        ASSERT_TRUE(times2.has_value());
        for (uint32_t i = 0; i < times1.active.size(); ++i) {
            // Unsigned subtraction: relies on times2 >= times1 (times never decrease).
            ASSERT_LE(times2->active[i] - times1.active[i], NSEC_PER_SEC);
        }
        for (uint32_t i = 0; i < times1.policy.size(); ++i) {
            for (uint32_t j = 0; j < times1.policy[i].size(); ++j) {
                ASSERT_LE(times2->policy[i][j] - times1.policy[i][j], NSEC_PER_SEC);
            }
        }
    }
}

void TestCheckDelta(uint64_t before, uint64_t after) {
    // Times should never decrease
    ASSERT_LE(before, after);
@@ -71,7 +176,7 @@ void TestCheckDelta(uint64_t before, uint64_t after) {
    ASSERT_LE(after - before, NSEC_PER_SEC * 2 * get_nprocs_conf());
}

TEST(TimeInStateTest, AllUidMonotonic) {
TEST(TimeInStateTest, AllUidTimeInStateMonotonic) {
    auto map1 = getUidsCpuFreqTimes();
    ASSERT_TRUE(map1.has_value());
    sleep(1);
@@ -92,7 +197,35 @@ TEST(TimeInStateTest, AllUidMonotonic) {
    }
}

TEST(TimeInStateTest, AllUidSanityCheck) {
// Concurrent times must be monotonically non-decreasing across two snapshots taken one
// second apart, and must not grow faster than wall time (bounded by TestCheckDelta).
TEST(TimeInStateTest, AllUidConcurrentTimesMonotonic) {
    auto map1 = getUidsConcurrentTimes();
    ASSERT_TRUE(map1.has_value());
    ASSERT_FALSE(map1->empty());
    sleep(1);
    auto map2 = getUidsConcurrentTimes();
    ASSERT_TRUE(map2.has_value());
    ASSERT_FALSE(map2->empty());

    for (const auto &kv : *map1) {
        uint32_t uid = kv.first;
        auto times = kv.second;
        // Uids cannot disappear between snapshots.
        ASSERT_NE(map2->find(uid), map2->end());
        for (uint32_t i = 0; i < times.active.size(); ++i) {
            auto before = times.active[i];
            auto after = (*map2)[uid].active[i];
            ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
        }
        for (uint32_t policy = 0; policy < times.policy.size(); ++policy) {
            for (uint32_t idx = 0; idx < times.policy[policy].size(); ++idx) {
                auto before = times.policy[policy][idx];
                auto after = (*map2)[uid].policy[policy][idx];
                ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
            }
        }
    }
}

TEST(TimeInStateTest, AllUidTimeInStateSanityCheck) {
    auto map = getUidsCpuFreqTimes();
    ASSERT_TRUE(map.has_value());

@@ -110,6 +243,48 @@ TEST(TimeInStateTest, AllUidSanityCheck) {
    ASSERT_TRUE(foundLargeValue);
}

// Sanity-check magnitudes: no recorded time exceeds a year, and values larger than 32 bits
// appear in both halves of the data (proving the full pipeline is 64-bit).
TEST(TimeInStateTest, AllUidConcurrentTimesSanityCheck) {
    auto concurrentMap = getUidsConcurrentTimes();
    ASSERT_TRUE(concurrentMap);

    bool activeFoundLargeValue = false;
    bool policyFoundLargeValue = false;
    for (const auto &kv : *concurrentMap) {
        for (const auto &time : kv.second.active) {
            ASSERT_LE(time, NSEC_PER_YEAR);
            if (time > UINT32_MAX) activeFoundLargeValue = true;
        }
        for (const auto &policyTimeVec : kv.second.policy) {
            for (const auto &time : policyTimeVec) {
                ASSERT_LE(time, NSEC_PER_YEAR);
                if (time > UINT32_MAX) policyFoundLargeValue = true;
            }
        }
    }
    // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is using
    // uint64_t as expected, we should have some times higher than that.
    ASSERT_TRUE(activeFoundLargeValue);
    ASSERT_TRUE(policyFoundLargeValue);
}

// The time-in-state and concurrent-time maps must cover the same uids, with mutually
// consistent totals per uid (delegated to TestUidTimesConsistent).
TEST(TimeInStateTest, AllUidTimesConsistent) {
    auto tisMap = getUidsCpuFreqTimes();
    ASSERT_TRUE(tisMap.has_value());

    auto concurrentMap = getUidsConcurrentTimes();
    ASSERT_TRUE(concurrentMap.has_value());

    ASSERT_EQ(tisMap->size(), concurrentMap->size());
    for (const auto &kv : *tisMap) {
        uint32_t uid = kv.first;
        auto times = kv.second;
        ASSERT_NE(concurrentMap->find(uid), concurrentMap->end());

        auto concurrentTimes = (*concurrentMap)[uid];
        ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(times, concurrentTimes));
    }
}

TEST(TimeInStateTest, RemoveUid) {
    uint32_t uid = 0;
    {
@@ -122,31 +297,58 @@ TEST(TimeInStateTest, RemoveUid) {
    }
    {
        // Add a map entry for our fake UID by copying a real map entry
        android::base::unique_fd fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_times_map")};
        android::base::unique_fd fd{
                bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_time_in_state_map")};
        ASSERT_GE(fd, 0);
        time_key_t k;
        ASSERT_FALSE(getFirstMapKey(fd, &k));
        std::vector<val_t> vals(get_nprocs_conf());
        std::vector<tis_val_t> vals(get_nprocs_conf());
        ASSERT_FALSE(findMapEntry(fd, &k, vals.data()));
        uint32_t copiedUid = k.uid;
        k.uid = uid;
        ASSERT_FALSE(writeToMapEntry(fd, &k, vals.data(), BPF_NOEXIST));

        android::base::unique_fd fd2{
                bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
        k.uid = copiedUid;
        k.bucket = 0;
        std::vector<concurrent_val_t> cvals(get_nprocs_conf());
        ASSERT_FALSE(findMapEntry(fd2, &k, cvals.data()));
        k.uid = uid;
        ASSERT_FALSE(writeToMapEntry(fd2, &k, cvals.data(), BPF_NOEXIST));
    }
    auto times = getUidCpuFreqTimes(uid);
    ASSERT_TRUE(times.has_value());
    ASSERT_FALSE(times->empty());

    auto concurrentTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concurrentTimes.has_value());
    ASSERT_FALSE(concurrentTimes->active.empty());
    ASSERT_FALSE(concurrentTimes->policy.empty());

    uint64_t sum = 0;
    for (size_t i = 0; i < times->size(); ++i) {
        for (auto x : (*times)[i]) sum += x;
    }
    ASSERT_GT(sum, (uint64_t)0);

    ASSERT_TRUE(clearUidCpuFreqTimes(uid));
    uint64_t activeSum = 0;
    for (size_t i = 0; i < concurrentTimes->active.size(); ++i) {
        activeSum += concurrentTimes->active[i];
    }
    ASSERT_GT(activeSum, (uint64_t)0);

    ASSERT_TRUE(clearUidTimes(uid));

    auto allTimes = getUidsCpuFreqTimes();
    ASSERT_TRUE(allTimes.has_value());
    ASSERT_FALSE(allTimes->empty());
    ASSERT_EQ(allTimes->find(uid), allTimes->end());

    auto allConcurrentTimes = getUidsConcurrentTimes();
    ASSERT_TRUE(allConcurrentTimes.has_value());
    ASSERT_FALSE(allConcurrentTimes->empty());
    ASSERT_EQ(allConcurrentTimes->find(uid), allConcurrentTimes->end());
}

} // namespace bpf
+7 −1
Original line number Diff line number Diff line
@@ -19,16 +19,22 @@
#define BPF_FS_PATH "/sys/fs/bpf/"

#define FREQS_PER_ENTRY 32
#define CPUS_PER_ENTRY 8

// Key for the per-uid BPF maps. A uid's data is split across multiple entries
// ("buckets") because each map value holds only a fixed-size array.
struct time_key_t {
    uint32_t uid;
    uint32_t bucket;
};

struct val_t {
// Time-in-state map value: times (ns) for up to FREQS_PER_ENTRY frequencies; additional
// frequencies spill into the next bucket (see time_key_t).
struct tis_val_t {
    uint64_t ar[FREQS_PER_ENTRY];
};

// Concurrent-times map value: times (ns) for up to CPUS_PER_ENTRY concurrency levels,
// overall (active) and within the entry's cluster (policy); additional levels spill into
// the next bucket (see time_key_t).
struct concurrent_val_t {
    uint64_t active[CPUS_PER_ENTRY];
    uint64_t policy[CPUS_PER_ENTRY];
};

struct freq_idx_key_t {
    uint32_t policy;
    uint32_t freq;