cmds/statsd/src/metrics/ValueMetricProducer.cpp  +11 −10

@@ -652,7 +652,7 @@ void ValueMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
             (unsigned long)mCurrentSlicedBucket.size());
     if (verbose) {
         for (const auto& it : mCurrentSlicedBucket) {
-            for (const auto& interval : it.second) {
+            for (const auto& interval : it.second.intervals) {
                 fprintf(out, "\t(what)%s\t(states)%s (value)%s\n",
                         it.first.getDimensionKeyInWhat().toString().c_str(),
                         it.first.getStateValuesKey().toString().c_str(),
@@ -804,7 +804,8 @@ void ValueMetricProducer::onMatchedLogEventInternalLocked(
     // We need to get the intervals stored with the previous state key so we can
     // close these value intervals.
     const auto oldStateKey = baseInfos[0].currentState;
-    vector<Interval>& intervals = mCurrentSlicedBucket[MetricDimensionKey(whatKey, oldStateKey)];
+    vector<Interval>& intervals =
+            mCurrentSlicedBucket[MetricDimensionKey(whatKey, oldStateKey)].intervals;
     if (intervals.size() < mFieldMatchers.size()) {
         VLOG("Resizing number of intervals to %d", (int)mFieldMatchers.size());
         intervals.resize(mFieldMatchers.size());
@@ -990,7 +991,7 @@ void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs,
     bool bucketHasData = false;
     // The current bucket is large enough to keep.
     for (const auto& slice : mCurrentSlicedBucket) {
-        ValueBucket bucket = buildPartialBucket(bucketEndTime, slice.second);
+        PastValueBucket bucket = buildPartialBucket(bucketEndTime, slice.second.intervals);
         bucket.mConditionTrueNs = conditionTrueDuration;
         // it will auto create new vector of ValuebucketInfo if the key is not found.
         if (bucket.valueIndex.size() > 0) {
@@ -1030,9 +1031,9 @@ void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs,
     mCurrentBucketNum += numBucketsForward;
 }

-ValueBucket ValueMetricProducer::buildPartialBucket(int64_t bucketEndTime,
+PastValueBucket ValueMetricProducer::buildPartialBucket(int64_t bucketEndTime,
                                                     const std::vector<Interval>& intervals) {
-    ValueBucket bucket;
+    PastValueBucket bucket;
     bucket.mBucketStartNs = mCurrentBucketStartTimeNs;
     bucket.mBucketEndNs = bucketEndTime;
     for (const auto& interval : intervals) {
@@ -1059,7 +1060,7 @@ void ValueMetricProducer::initCurrentSlicedBucket(int64_t nextBucketStartTimeNs)
     // Cleanup data structure to aggregate values.
     for (auto it = mCurrentSlicedBucket.begin(); it != mCurrentSlicedBucket.end();) {
         bool obsolete = true;
-        for (auto& interval : it->second) {
+        for (auto& interval : it->second.intervals) {
             interval.hasValue = false;
             interval.sampleSize = 0;
             if (interval.seenNewData) {
@@ -1107,7 +1108,7 @@ void ValueMetricProducer::appendToFullBucket(const bool isFullBucketReached) {
                 continue;
             }
             // TODO: fix this when anomaly can accept double values
-            auto& interval = slice.second[0];
+            auto& interval = slice.second.intervals[0];
             if (interval.hasValue) {
                 mCurrentFullBucket[slice.first] += interval.value.long_value;
             }
@@ -1126,7 +1127,7 @@ void ValueMetricProducer::appendToFullBucket(const bool isFullBucketReached) {
             for (auto& tracker : mAnomalyTrackers) {
                 if (tracker != nullptr) {
                     // TODO: fix this when anomaly can accept double values
-                    auto& interval = slice.second[0];
+                    auto& interval = slice.second.intervals[0];
                     if (interval.hasValue) {
                         tracker->addPastBucket(slice.first, interval.value.long_value,
                                                mCurrentBucketNum);
@@ -1139,7 +1140,7 @@ void ValueMetricProducer::appendToFullBucket(const bool isFullBucketReached) {
         // Accumulate partial bucket.
         for (const auto& slice : mCurrentSlicedBucket) {
             // TODO: fix this when anomaly can accept double values
-            auto& interval = slice.second[0];
+            auto& interval = slice.second.intervals[0];
             if (interval.hasValue) {
                 mCurrentFullBucket[slice.first] += interval.value.long_value;
             }

cmds/statsd/src/metrics/ValueMetricProducer.h  +15 −8

@@ -31,7 +31,7 @@
 namespace android {
 namespace os {
 namespace statsd {

-struct ValueBucket {
+struct PastValueBucket {
     int64_t mBucketStartNs;
     int64_t mBucketEndNs;
     std::vector<int> valueIndex;
@@ -41,7 +41,6 @@ struct ValueBucket {
     int64_t mConditionTrueNs;
 };

-
 // Aggregates values within buckets.
 //
 // There are different events that might complete a bucket
@@ -173,7 +172,7 @@ private:
     // if this is pulled metric
     const bool mIsPulled;

-    // internal state of an ongoing aggregation bucket.
+    // Tracks the value information of one value field.
     typedef struct {
         // Index in multi value aggregation.
         int valueIndex;
@@ -188,6 +187,12 @@
         bool seenNewData = false;
     } Interval;

+    // Internal state of an ongoing aggregation bucket.
+    typedef struct CurrentValueBucket {
+        // Value information for each value field of the metric.
+        std::vector<Interval> intervals;
+    } CurrentValueBucket;
+
     typedef struct {
         // Holds current base value of the dimension. Take diff and update if necessary.
         Value base;
@@ -199,14 +204,16 @@
         bool hasCurrentState;
     } BaseInfo;

-    std::unordered_map<MetricDimensionKey, std::vector<Interval>> mCurrentSlicedBucket;
+    // Tracks the internal state in the ongoing aggregation bucket for each DimensionsInWhat
+    // key and StateValuesKey pair.
+    std::unordered_map<MetricDimensionKey, CurrentValueBucket> mCurrentSlicedBucket;

     std::unordered_map<HashableDimensionKey, std::vector<BaseInfo>> mCurrentBaseInfo;

     std::unordered_map<MetricDimensionKey, int64_t> mCurrentFullBucket;

     // Save the past buckets and we can clear when the StatsLogReport is dumped.
-    std::unordered_map<MetricDimensionKey, std::vector<ValueBucket>> mPastBuckets;
+    std::unordered_map<MetricDimensionKey, std::vector<PastValueBucket>> mPastBuckets;

     const int64_t mMinBucketSizeNs;
@@ -224,7 +231,7 @@ private:
     void accumulateEvents(const std::vector<std::shared_ptr<LogEvent>>& allData,
                           int64_t originalPullTimeNs, int64_t eventElapsedTimeNs);

-    ValueBucket buildPartialBucket(int64_t bucketEndTime,
+    PastValueBucket buildPartialBucket(int64_t bucketEndTime,
                                    const std::vector<Interval>& intervals);

     void initCurrentSlicedBucket(int64_t nextBucketStartTimeNs);
@@ -234,7 +241,7 @@ private:
     // Reset diff base and mHasGlobalBase
     void resetBase();

-    static const size_t kBucketSize = sizeof(ValueBucket{});
+    static const size_t kBucketSize = sizeof(PastValueBucket{});

     const size_t mDimensionSoftLimit;

cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp  +136 −136
(Preview size limit exceeded; test changes collapsed and not shown.)
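To make the shape of this refactor easier to see outside the diff, here is a minimal standalone C++ sketch of the data-structure change: the ongoing bucket's per-dimension state moves from a bare std::vector<Interval> to a CurrentValueBucket wrapper, while closed buckets keep the renamed PastValueBucket. The types below (a string stand-in for MetricDimensionKey, trimmed Interval/PastValueBucket fields) are simplifications for illustration, not the real statsd definitions.

// Standalone sketch; the real MetricDimensionKey, Interval, and
// PastValueBucket definitions live in statsd's headers.
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

using MetricDimensionKey = std::string;  // hypothetical stand-in key type

struct Interval {
    int valueIndex = 0;
    int64_t value = 0;  // the real Interval stores a statsd Value union
    bool hasValue = false;
};

// New: the ongoing bucket's state is a named struct rather than a bare
// vector, so more per-bucket fields can be added later without changing
// the map's value type at every call site again.
struct CurrentValueBucket {
    std::vector<Interval> intervals;
};

// Renamed from ValueBucket: holds the aggregates of a closed (past) bucket.
struct PastValueBucket {
    int64_t mBucketStartNs = 0;
    int64_t mBucketEndNs = 0;
    std::vector<int> valueIndex;
};

int main() {
    // Before: std::unordered_map<MetricDimensionKey, std::vector<Interval>>
    std::unordered_map<MetricDimensionKey, CurrentValueBucket> currentSlicedBucket;

    // Call sites now reach through .intervals; operator[] still
    // default-constructs an empty CurrentValueBucket for a new key.
    std::vector<Interval>& intervals = currentSlicedBucket["dim-key"].intervals;
    intervals.resize(2);
    intervals[0] = {0, 42, true};

    for (const auto& slice : currentSlicedBucket) {
        const auto& interval = slice.second.intervals[0];  // was slice.second[0]
        (void)interval;
    }
    return 0;
}

Because operator[] default-constructs the wrapper just as it did the bare vector, the access-pattern change is purely mechanical (appending .intervals), which is why the .cpp side of the diff touches many lines but no logic.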