
Commit c3dc3304 authored by Colin Cross, committed by Android (Google) Code Review

Merge changes from topic 'unreachable' into nyc-dev

* changes:
  libmemunreachable: fix long timeout on error
  imprecise mark and sweep native memory leak detector
parents 78d27661 de42af01
libmemunreachable/Allocator.cpp  +480 −0
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For the minimum allocation size (8 bytes), the free bitmap can track
// allocations covering up to 4032*8*8 = 258048 bytes, which is the 256KiB
// chunk minus its 4KiB header page
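//
// Worked check (derived from the free_bitmap_ declaration below): the bitmap
// is kUsableChunkSize / kMinBucketAllocationSize / 32 = 1008 uint32_t words,
// i.e. 4032 bytes; 4032 bytes * 8 bits/byte * 8 bytes/allocation = 258048
// bytes = kUsableChunkSize.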

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "anon_vma_naming.h"
#include "Allocator.h"
#include "LinkedList.h"

// runtime interfaces used:
// abort
// assert (may call fprintf and mmap)
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}

#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))

static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize)
    - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize
    / kPageSize;
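
// Sanity checks on the derived constants above (added for illustration; each
// value follows directly from the definitions).
static_assert(const_log2(kMinBucketAllocationSize) == 3, "log2(8) is 3");
static_assert(kNumBuckets == 14, "14 buckets cover 8 bytes to 64KiB");
static_assert(kUsablePagesPerChunk == 63, "63 usable pages per 256KiB chunk");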

std::atomic<int> heap_count;

class Chunk;

class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void *ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down. Undefined for n == 0, since
// __builtin_clzll(0) is undefined, so callers must guard against zero
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

static inline unsigned int size_to_bucket(size_t size) {
  if (size < kMinBucketAllocationSize)
    return 0; // clamp to the smallest (8-byte) bucket, not the size itself
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}
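
// Examples of the bucket mapping (added for illustration):
//   size_to_bucket(8)    == 0  (8-byte bucket)
//   size_to_bucket(9)    == 1  (rounds up to the 16-byte bucket)
//   size_to_bucket(4096) == 9
//   bucket_to_size(kNumBuckets - 1) == kMaxBucketAllocationSize  (64KiB)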

static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr)
        - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
        map_size - size);
  }

#define PR_SET_VMA   0x53564d41
#define PR_SET_VMA_ANON_NAME    0
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
      reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");

  return ptr;
}
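
// Example of the over-allocate-and-trim scheme above (illustration only):
// MapAligned(kChunkSize, kChunkSize) maps 2*256KiB - 4KiB, unmaps the
// misaligned head (if any) and the tail past 256KiB, and returns a mapping
// that starts on a 256KiB boundary -- the invariant Chunk::ptr_to_chunk()
// depends on.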

class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void *Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
        & ~(kChunkSize - 1));
  }
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }
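  // Both helpers rely on every Chunk being mapped at a kChunkSize-aligned
  // address (see Chunk::operator new): masking the low bits of an interior
  // pointer recovers its chunk header, while a chunk-aligned pointer can
  // only be a large direct mmap allocation.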

  unsigned int free_count() {
    return free_count_;
  }
  HeapImpl* heap() {
    return heap_;
  }
  LinkedList<Chunk*> node_; // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_; // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_; // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_; // index into bitmap for first non-full entry
  unsigned int free_count_; // number of available allocations
  unsigned int frees_since_purge_; // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations.
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr)
        - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) {
    return data_ + n * allocation_size_;
  }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort(); //throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to munmap the kChunkSize mapping
void Chunk::operator delete(void *ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket) :
    node_(this), heap_(heap), bucket_(bucket), allocation_size_(
        bucket_to_size(bucket)), max_allocations_(
        kUsableChunkSize / allocation_size_), first_free_bitmap_(0), free_count_(
        max_allocations_), frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

void* Chunk::Alloc() {
  assert(free_count_ > 0);

  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0)
    i++;
  assert(i < ARRAY_SIZE(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < ARRAY_SIZE(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < ARRAY_SIZE(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

void Chunk::Purge() {
  frees_since_purge_ = 0;

  //unsigned int allocsPerPage = kPageSize / allocation_size_;
}
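
// A sketch of the purge pass the stub above leaves out (an assumption for
// illustration; this commit only resets the counter): walk dirty_pages_, and
// for each dirty page whose overlapping slots are all set in free_bitmap_,
// round the page's span inward to page boundaries, release it with
// madvise(MADV_DONTNEED), and clear its dirty bit. frees_since_purge_
// already throttles how often Free() would trigger such a pass.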

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
    noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort(); //throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void *ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() :
    free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*> *it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*> *it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk *chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk *chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

void* HeapImpl::AllocLocked(size_t size) {
  if (__predict_false(size > kMaxBucketAllocationSize)) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (__predict_false(free_chunks_[bucket].empty())) {
    Chunk *chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void *ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void *ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
      sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort(); //throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void *ptr) {
  MapAllocation **allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr)
    allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  // Unlink the node before freeing it so it is never read after FreeLocked
  // returns it to the heap
  MapAllocation *to_free = *allocation;
  *allocation = to_free->next;

  munmap(to_free->ptr, to_free->size);
  FreeLocked(to_free);
}

void HeapImpl::MoveToFreeList(Chunk *chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk *chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

void HeapImpl::MoveToList(Chunk *chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*> *node = head;
  // Insert into the new list, sorted by lowest free count. The list head is
  // a sentinel with null data, so the scan must look ahead at node->next().
  while (node->next() != head && node->next()->data() != nullptr
      && node->next()->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}
libmemunreachable/Allocator.h  +224 −0
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBMEMUNREACHABLE_ALLOCATOR_H_
#define LIBMEMUNREACHABLE_ALLOCATOR_H_

#include <atomic>
#include <cstddef>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <unordered_set>
#include <vector>
extern std::atomic<int> heap_count;

class HeapImpl;

template<typename T>
class Allocator;


// Non-templated class that wraps HeapImpl to keep the implementation
// out of the header file
class Heap {
public:
  Heap();
  ~Heap();

  // Copy constructor that does not take ownership of impl_
  Heap(const Heap& other) : impl_(other.impl_), owns_impl_(false) {}

  // Assignment disabled
  Heap& operator=(const Heap&) = delete;

  // Allocate size bytes
  void* allocate(size_t size);

  // Deallocate allocation returned by allocate
  void deallocate(void*);

  bool empty();

  static void deallocate(HeapImpl* impl, void* ptr);

  // Allocate a class of type T
  template<class T>
  T* allocate() {
    return reinterpret_cast<T*>(allocate(sizeof(T)));
  }

  // Comparators, copied objects will be equal
  bool operator ==(const Heap& other) const {
    return impl_ == other.impl_;
  }
  bool operator !=(const Heap& other) const {
    return !(*this == other);
  }

  // std::unique_ptr wrapper that allocates using allocate and deletes using
  // deallocate
  template<class T>
  using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;

  template<class T, class... Args>
  unique_ptr<T> make_unique(Args&&... args) {
    HeapImpl* impl = impl_;
    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...),
        [impl](void* ptr) {
          reinterpret_cast<T*>(ptr)->~T();
          deallocate(impl, ptr);
        });
  }

  // std::shared_ptr wrapper that allocates using allocate and deletes using
  // deallocate
  template<class T>
  using shared_ptr = std::shared_ptr<T>;

  template<class T, class... Args>
  shared_ptr<T> make_shared(Args&&... args);

protected:
  HeapImpl* impl_;
  bool owns_impl_;
};

// STLAllocator implements the std allocator interface on top of a Heap
template<typename T>
class STLAllocator {
public:
  using value_type = T;
  ~STLAllocator() {
  }

  // Construct an STLAllocator on top of a Heap
  STLAllocator(const Heap& heap) :
      heap_(heap) {
  }

  // Rebind an STLAllocator from another STLAllocator
  template<typename U>
  STLAllocator(const STLAllocator<U>& other) :
      heap_(other.heap_) {
  }

  STLAllocator(const STLAllocator&) = default;
  STLAllocator<T>& operator=(const STLAllocator<T>&) = default;

  T* allocate(std::size_t n) {
    return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T)));
  }

  void deallocate(T* ptr, std::size_t) {
    heap_.deallocate(ptr);
  }

  template<typename U>
  bool operator ==(const STLAllocator<U>& other) const {
    return heap_ == other.heap_;
  }
  template<typename U>
  inline bool operator !=(const STLAllocator<U>& other) const {
    return !(*this == other);
  }

  template<typename U>
  friend class STLAllocator;

protected:
  Heap heap_;
};


// Allocator extends STLAllocator with some convenience methods for allocating
// a single object and for constructing unique_ptr and shared_ptr objects with
// appropriate deleters.
template<class T>
class Allocator : public STLAllocator<T> {
 public:
  ~Allocator() {}

  Allocator(const Heap& other) :
      STLAllocator<T>(other) {
  }

  template<typename U>
  Allocator(const STLAllocator<U>& other) :
      STLAllocator<T>(other) {
  }

  Allocator(const Allocator&) = default;
  Allocator<T>& operator=(const Allocator<T>&) = default;

  using STLAllocator<T>::allocate;
  using STLAllocator<T>::deallocate;
  using STLAllocator<T>::heap_;

  T* allocate() {
    return STLAllocator<T>::allocate(1);
  }
  void deallocate(void* ptr) {
    heap_.deallocate(ptr);
  }

  using shared_ptr = Heap::shared_ptr<T>;

  template<class... Args>
  shared_ptr make_shared(Args&& ...args) {
    return heap_.template make_shared<T>(std::forward<Args>(args)...);
  }

  using unique_ptr = Heap::unique_ptr<T>;

  template<class... Args>
  unique_ptr make_unique(Args&& ...args) {
    return heap_.template make_unique<T>(std::forward<Args>(args)...);
  }
};

// std::shared_ptr factory that allocates using allocate and deletes using
// deallocate.  Implemented outside the class definition in order to pass
// Allocator<T> to std::allocate_shared.
template<class T, class... Args>
inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
  return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
      std::forward<Args>(args)...);
}

namespace allocator {

template<class T>
using vector = std::vector<T, Allocator<T>>;

template<class T>
using list = std::list<T, Allocator<T>>;

template<class T, class Key, class Compare = std::less<Key>>
using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;

template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;

template<class Key, class Compare = std::less<Key>>
using set = std::set<Key, Compare, Allocator<Key>>;

using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
}
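
// Illustrative usage (an assumption, not part of this commit): containers
// that must not touch malloc during leak detection can be backed by a
// private Heap, e.g.
//
//   Heap heap;
//   allocator::vector<int> v{Allocator<int>(heap)};
//   v.push_back(42);  // memory comes from heap, not the malloc heap
//   auto p = Allocator<int>(heap).make_unique(7);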

#endif
libmemunreachable/Android.mk  +43 −0
LOCAL_PATH := $(call my-dir)

memunreachable_srcs := \
   Allocator.cpp \
   HeapWalker.cpp \
   LeakPipe.cpp \
   LineBuffer.cpp \
   MemUnreachable.cpp \
   ProcessMappings.cpp \
   PtracerThread.cpp \
   ThreadCapture.cpp \

memunreachable_test_srcs := \
   tests/Allocator_test.cpp \
   tests/HeapWalker_test.cpp \
   tests/MemUnreachable_test.cpp \
   tests/ThreadCapture_test.cpp \

include $(CLEAR_VARS)

LOCAL_MODULE := libmemunreachable
LOCAL_SRC_FILES := $(memunreachable_srcs)
LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_SHARED_LIBRARIES := libbase liblog
LOCAL_STATIC_LIBRARIES := libc_malloc_debug_backtrace libc_logging
# Only need this for arm since libc++ uses its own unwind code that
# doesn't mix with the other default unwind code.
LOCAL_STATIC_LIBRARIES_arm := libunwind_llvm
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
LOCAL_CLANG := true

include $(BUILD_SHARED_LIBRARY)

include $(CLEAR_VARS)

LOCAL_MODULE := memunreachable_test
LOCAL_SRC_FILES := $(memunreachable_test_srcs)
LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_CLANG := true
LOCAL_SHARED_LIBRARIES := libmemunreachable libbase liblog

include $(BUILD_NATIVE_TEST)
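
# Typical invocation (an assumption, not part of this commit): from an AOSP
# tree, running `mmma` on this project's directory builds both
# libmemunreachable and memunreachable_test.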
libmemunreachable/HeapWalker.cpp  +137 −0
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>

#include <algorithm>
#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "log.h"

bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += end - begin;
    return true;
  } else {
    Range overlap = inserted.first->first;
    ALOGE("range %p-%p overlaps with existing range %p-%p",
        reinterpret_cast<void*>(begin),
        reinterpret_cast<void*>(end),
        reinterpret_cast<void*>(overlap.begin),
        reinterpret_cast<void*>(overlap.end));
    return false;
  }
}

void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
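  // Conservative scan: every pointer-aligned word in the range is treated as
  // a potential pointer, and an explicit worklist replaces recursion so the
  // mark phase uses bounded stack.  `flag` selects which mark bit
  // (referenced_from_root or referenced_from_leak) this pass sets.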
  allocator::vector<Range> to_do(1, range, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();
    uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
    // TODO(ccross): we might need to consider a pointer to the end of a buffer
    // to be inside the buffer, which means the common case of a pointer to the
    // beginning of a buffer may keep two ranges live.
    for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
      uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
      if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
        RangeMap::iterator it = allocations_.find(Range{val, val + 1});
        if (it != allocations_.end()) {
          if (!(it->second.*flag)) {
            to_do.push_back(it->first);
            it->second.*flag = true;
          }
        }
      }
    }
  }
}

void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

bool HeapWalker::DetectLeaks() {
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    Walk(*it, &RangeInfo::referenced_from_root);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
  Walk(vals, &RangeInfo::referenced_from_root);

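  // Anything still unmarked after the root walks is a leak.  Re-walking each
  // unmarked allocation with referenced_from_leak lets callers distinguish
  // leak "roots" from allocations only reachable through other leaks.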
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      Walk(it->first, &RangeInfo::referenced_from_leak);
    }
  }

  return true;
}

bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
    size_t* num_leaks_out, size_t* leak_bytes_out) {
  DetectLeaks();
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}
+75 −0

File added; preview collapsed (size limit exceeded).