
Commit 0bc37ffb authored by Colin Cross, committed by Gerrit Code Review

Merge changes Ie9594b6e,I2e050a37,Ia2bf2ccf,I4e4db704,Ifbfd14e2, ...

* changes:
  Fix mac build
  Add ABI to leak report
  Combine leaks with same stacktrace
  Fix allocator::map template argument ordering
  Fold leaks that are referenced by other leaks
  Compile some tests for the host
parents 4006add7 33e601ac
libmemunreachable/Allocator.cpp  +2 −2

@@ -370,11 +370,11 @@ void* HeapImpl::Alloc(size_t size) {
 }
 
 void* HeapImpl::AllocLocked(size_t size) {
-  if (__predict_false(size > kMaxBucketAllocationSize)) {
+  if (size > kMaxBucketAllocationSize) {
     return MapAlloc(size);
   }
   int bucket = size_to_bucket(size);
-  if (__predict_false(free_chunks_[bucket].empty())) {
+  if (free_chunks_[bucket].empty()) {
     Chunk *chunk = new Chunk(this, bucket);
     free_chunks_[bucket].insert(chunk->node_);
   }
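
Note: the dropped __predict_false annotations are bionic-specific; bionic's <sys/cdefs.h> defines them around __builtin_expect, and they are not available when this code is compiled for a Mac or glibc host (see "Fix mac build" and "Compile some tests for the host" above). A minimal sketch of how a portable hint could have been kept instead; this is illustration only, not part of the change:

    // Sketch only: a compiler-guarded branch hint. The change instead
    // removes the hint outright, trading a micro-optimization for
    // portability to non-bionic hosts.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNLIKELY(cond) __builtin_expect(!!(cond), 0)
    #else
    #define UNLIKELY(cond) (cond)
    #endif
    // if (UNLIKELY(size > kMaxBucketAllocationSize)) return MapAlloc(size);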
libmemunreachable/Allocator.h  +5 −1

@@ -24,6 +24,7 @@
 #include <map>
 #include <memory>
 #include <set>
+#include <unordered_map>
 #include <unordered_set>
 #include <vector>
 extern std::atomic<int> heap_count;
@@ -209,9 +210,12 @@ using vector = std::vector<T, Allocator<T>>;
 template<class T>
 using list = std::list<T, Allocator<T>>;
 
-template<class T, class Key, class Compare = std::less<Key>>
+template<class Key, class T, class Compare = std::less<Key>>
 using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
 
+template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
+
 template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
 using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
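
This is the "Fix allocator::map template argument ordering" change: the old alias took the mapped type before the key (template<class T, class Key, ...>), the reverse of std::map<Key, T>, so call sites had to pass their arguments backwards. The new unordered_map alias follows the corrected <Key, T> convention. The effect is visible at the alias's one call site in the HeapWalker.h diff below:

    // Old ordering forced the arguments to be swapped relative to std::map:
    //   using RangeMap = allocator::map<RangeInfo, Range, compare_range>;
    // Fixed ordering reads like std::map<Key, T, Compare>:
    //   using AllocationMap = allocator::map<Range, AllocationInfo, compare_range>;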


libmemunreachable/Android.mk  +22 −0

@@ -3,6 +3,7 @@ LOCAL_PATH := $(call my-dir)
 memunreachable_srcs := \
    Allocator.cpp \
    HeapWalker.cpp \
+   LeakFolding.cpp \
    LeakPipe.cpp \
    LineBuffer.cpp \
    MemUnreachable.cpp \
@@ -12,7 +13,9 @@ memunreachable_srcs := \
 
 memunreachable_test_srcs := \
    tests/Allocator_test.cpp \
+   tests/DisableMalloc_test.cpp \
    tests/HeapWalker_test.cpp \
+   tests/LeakFolding_test.cpp \
    tests/MemUnreachable_test.cpp \
    tests/ThreadCapture_test.cpp \
@@ -41,3 +44,22 @@ LOCAL_CLANG := true
 LOCAL_SHARED_LIBRARIES := libmemunreachable libbase liblog
 
 include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := memunreachable_test
+LOCAL_SRC_FILES := \
+   Allocator.cpp \
+   HeapWalker.cpp  \
+   LeakFolding.cpp \
+   tests/Allocator_test.cpp \
+   tests/HeapWalker_test.cpp \
+   tests/HostMallocStub.cpp \
+   tests/LeakFolding_test.cpp \
+
+LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
+LOCAL_CLANG := true
+LOCAL_SHARED_LIBRARIES := libbase liblog
+LOCAL_MODULE_HOST_OS := linux
+
+include $(BUILD_HOST_NATIVE_TEST)
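
The new memunreachable_test module is the "Compile some tests for the host" change: BUILD_HOST_NATIVE_TEST builds the allocator, heap-walker, and leak-folding unit tests as a Linux host binary, so they can run without a device. tests/HostMallocStub.cpp presumably exists to satisfy link-time references to bionic's heap-freezing hooks, which glibc lacks; a sketch of what such a stub would contain, under that assumption:

    // tests/HostMallocStub.cpp (sketch, assuming the host build only
    // needs bionic's hooks to exist, not to do anything):
    #include "bionic.h"

    void malloc_disable() {}  // on-device: freezes the malloc heap
    void malloc_enable() {}   // on-device: thaws it; no-op on the host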
libmemunreachable/HeapWalker.cpp  +28 −28

@@ -21,17 +21,19 @@
 
 #include "Allocator.h"
 #include "HeapWalker.h"
+#include "LeakFolding.h"
 #include "log.h"
 
 bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
   if (end == begin) {
     end = begin + 1;
   }
-  auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
+  Range range{begin, end};
+  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
   if (inserted.second) {
     valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
     valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
-    allocation_bytes_ += end - begin;
+    allocation_bytes_ += range.size();
     return true;
   } else {
     Range overlap = inserted.first->first;
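
Two details in Allocation() are worth noting: a zero-byte allocation is widened to one byte (end = begin + 1) so it still occupies a non-empty half-open Range and remains findable by the overlap-based map lookup, and the new range.size() accessor replaces the inline end - begin arithmetic. With hypothetical addresses, for illustration:

    // walker.Allocation(0x1000, 0x1010);  // 16 bytes; range.size() == 16
    // walker.Allocation(0x2000, 0x2000);  // zero bytes; stored as [0x2000, 0x2001)
    // walker.Allocation(0x1008, 0x1018);  // overlaps the first entry: the
    //                                     // insert fails and false is returned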
@@ -44,27 +46,30 @@ bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
   }
 }
 
-void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
-  allocator::vector<Range> to_do(1, range, allocator_);
-  while (!to_do.empty()) {
-    Range range = to_do.back();
-    to_do.pop_back();
-    uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
-    // TODO(ccross): we might need to consider a pointer to the end of a buffer
-    // to be inside the buffer, which means the common case of a pointer to the
-    // beginning of a buffer may keep two ranges live.
-    for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
-      uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
-      if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
-        RangeMap::iterator it = allocations_.find(Range{val, val + 1});
-        if (it != allocations_.end()) {
-          if (!(it->second.*flag)) {
-            to_do.push_back(it->first);
-            it->second.*flag = true;
-          }
-        }
-      }
-    }
-  }
-}
+bool HeapWalker::IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info) {
+  if (ptr >= valid_allocations_range_.begin && ptr < valid_allocations_range_.end) {
+    AllocationMap::iterator it = allocations_.find(Range{ptr, ptr + 1});
+    if (it != allocations_.end()) {
+      *range = it->first;
+      *info = &it->second;
+      return true;
+    }
+  }
+  return false;
+}
+
+void HeapWalker::RecurseRoot(const Range& root) {
+  allocator::vector<Range> to_do(1, root, allocator_);
+  while (!to_do.empty()) {
+    Range range = to_do.back();
+    to_do.pop_back();
+
+    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
+      if (!ref_info->referenced_from_root) {
+        ref_info->referenced_from_root = true;
+        to_do.push_back(ref_range);
+      }
+    });
+  }
+}
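This is the core of the refactor. The old Walk() took a pointer-to-member flag so one traversal could mark either referenced_from_root or referenced_from_leak; it is now split into IsAllocationPtr(), which checks whether a word points into a live allocation, and RecurseRoot(), an iterative depth-first mark phase. The explicit to_do stack is backed by the custom allocator, so the walker never calls into the malloc heap it is inspecting, and each allocation is marked and pushed at most once. The leak-to-leak pass moves into the new LeakFolding component ("Fold leaks that are referenced by other leaks"). A condensed, self-contained sketch of the same mark-phase pattern, with hypothetical types in place of Range/AllocationInfo:

    #include <vector>

    struct Node { bool marked = false; std::vector<Node*> refs; };

    void MarkFromRoot(Node* root) {
      std::vector<Node*> to_do{root};   // explicit stack instead of recursion
      while (!to_do.empty()) {
        Node* n = to_do.back();
        to_do.pop_back();
        for (Node* ref : n->refs) {
          if (!ref->marked) {           // visit each node once, like
            ref->marked = true;         // referenced_from_root above
            to_do.push_back(ref);
          }
        }
      }
    }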


@@ -85,27 +90,22 @@ size_t HeapWalker::AllocationBytes() {
 }
 
 bool HeapWalker::DetectLeaks() {
+  // Recursively walk pointers from roots to mark referenced allocations
   for (auto it = roots_.begin(); it != roots_.end(); it++) {
-    Walk(*it, &RangeInfo::referenced_from_root);
+    RecurseRoot(*it);
   }
 
   Range vals;
   vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
   vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
-  Walk(vals, &RangeInfo::referenced_from_root);
 
-  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
-    if (!it->second.referenced_from_root) {
-      Walk(it->first, &RangeInfo::referenced_from_leak);
-    }
-  }
+  RecurseRoot(vals);
 
   return true;
 }
 
 bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
     size_t* num_leaks_out, size_t* leak_bytes_out) {
-  DetectLeaks();
   leaked.clear();
 
   size_t num_leaks = 0;
@@ -120,7 +120,7 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
   size_t n = 0;
   for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
     if (!it->second.referenced_from_root) {
-      if (n++ <= limit) {
+      if (n++ < limit) {
         leaked.push_back(it->first);
       }
     }
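
The last hunk fixes an off-by-one: n++ <= limit admitted limit + 1 ranges into leaked, while n++ < limit caps the list at exactly limit entries (the full count is still reported separately through num_leaks). Note also that Leaked() no longer calls DetectLeaks() itself; detection now runs as a separate, explicit step before leak folding. Worked through for limit == 2 and three leaked allocations:

    // old: n = 0, 1, 2 all satisfy n <= 2  -> 3 ranges pushed, one too many
    // new: n = 0, 1 satisfy n < 2          -> exactly 2 ranges pushed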
libmemunreachable/HeapWalker.h  +42 −7

@@ -20,11 +20,14 @@
 #include "android-base/macros.h"
 
 #include "Allocator.h"
+#include "Tarjan.h"
 
 // A range [begin, end)
 struct Range {
   uintptr_t begin;
   uintptr_t end;
+
+  size_t size() const { return end - begin; };
 };
 
 // Comparator for Ranges that returns equivalence for overlapping ranges
@@ -34,7 +37,6 @@ struct compare_range {
   }
 };
 
-
 class HeapWalker {
  public:
   HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
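
compare_range is unchanged here, but it is what makes the new IsAllocationPtr() work: because overlapping ranges compare as equivalent, allocations_.find(Range{ptr, ptr + 1}) locates the stored allocation containing ptr. The comparator body is elided in this diff; the sketch below assumes the usual a.end <= b.begin ordering to show the lookup in isolation:

    // Sketch of overlap-equivalent lookup (comparator body is an assumed,
    // typical implementation; the real one is not shown in this diff):
    #include <stdint.h>
    #include <map>

    struct Range { uintptr_t begin, end; };

    struct compare_range {
      bool operator()(const Range& a, const Range& b) const {
        return a.end <= b.begin;  // neither precedes the other => overlap
      }
    };

    int main() {
      std::map<Range, int, compare_range> m;
      m[{0x1000, 0x1010}] = 1;
      // A one-byte probe inside [0x1000, 0x1010) compares equivalent to it:
      auto it = m.find(Range{0x1008, 0x1009});
      return it != m.end() ? 0 : 1;  // found, returns 0
    }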
@@ -55,16 +57,25 @@ class HeapWalker {
   size_t Allocations();
   size_t AllocationBytes();
 
- private:
-  struct RangeInfo {
+  template<class F>
+  void ForEachPtrInRange(const Range& range, F&& f);
+
+  template<class F>
+  void ForEachAllocation(F&& f);
+
+  struct AllocationInfo {
     bool referenced_from_root;
-    bool referenced_from_leak;
   };
-  void Walk(const Range& range, bool RangeInfo::* flag);
+
+ private:
+
+  void RecurseRoot(const Range& root);
+  bool IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
 
   DISALLOW_COPY_AND_ASSIGN(HeapWalker);
   Allocator<HeapWalker> allocator_;
-  using RangeMap = allocator::map<RangeInfo, Range, compare_range>;
-  RangeMap allocations_;
+  using AllocationMap = allocator::map<Range, AllocationInfo, compare_range>;
+  AllocationMap allocations_;
   size_t allocation_bytes_;
   Range valid_allocations_range_;


@@ -72,4 +83,28 @@ class HeapWalker {
   allocator::vector<uintptr_t> root_vals_;
 };
 
+template<class F>
+inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
+  uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
+  // TODO(ccross): we might need to consider a pointer to the end of a buffer
+  // to be inside the buffer, which means the common case of a pointer to the
+  // beginning of a buffer may keep two ranges live.
+  for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
+    Range ref_range;
+    AllocationInfo* ref_info;
+    if (IsAllocationPtr(*reinterpret_cast<uintptr_t*>(i), &ref_range, &ref_info)) {
+      f(ref_range, ref_info);
+    }
+  }
+}
+
+template<class F>
+inline void HeapWalker::ForEachAllocation(F&& f) {
+  for (auto& it : allocations_) {
+    const Range& range = it.first;
+    HeapWalker::AllocationInfo& allocation = it.second;
+    f(range, allocation);
+  }
+}
+
 #endif
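
ForEachPtrInRange is defined inline in the header because its callback is a template parameter. It rounds the start of the range up to pointer alignment, reinterprets each word as a candidate pointer, and invokes the callback only for words that land inside a known allocation; RecurseRoot() in HeapWalker.cpp and the new LeakFolding code are the intended callers. The usage pattern, mirroring RecurseRoot:

    // walker.ForEachPtrInRange(range,
    //     [&](Range& ref_range, HeapWalker::AllocationInfo* ref_info) {
    //   if (!ref_info->referenced_from_root) {
    //     ref_info->referenced_from_root = true;
    //     to_do.push_back(ref_range);
    //   }
    // });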