Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8e8f34c5 authored by Colin Cross's avatar Colin Cross
Browse files

Fold leaks that are referenced by other leaks

Find leaks that have no references at all, or are only referenced by
other leaks in the same strongly connected component, and hide all
referenced leaks.

Bug: 27208635
Change-Id: Ifbfd14e24e2ba0f8af7c1b887e57f34362720f2d
parent b8e20f55
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -3,6 +3,7 @@ LOCAL_PATH := $(call my-dir)
memunreachable_srcs := \
   Allocator.cpp \
   HeapWalker.cpp \
   LeakFolding.cpp \
   LeakPipe.cpp \
   LineBuffer.cpp \
   MemUnreachable.cpp \
@@ -14,6 +15,7 @@ memunreachable_test_srcs := \
   tests/Allocator_test.cpp \
   tests/DisableMalloc_test.cpp \
   tests/HeapWalker_test.cpp \
   tests/LeakFolding_test.cpp \
   tests/MemUnreachable_test.cpp \
   tests/ThreadCapture_test.cpp \

@@ -49,9 +51,11 @@ LOCAL_MODULE := memunreachable_test
LOCAL_SRC_FILES := \
   Allocator.cpp \
   HeapWalker.cpp  \
   LeakFolding.cpp \
   tests/Allocator_test.cpp \
   tests/HeapWalker_test.cpp \
   tests/HostMallocStub.cpp \
   tests/LeakFolding_test.cpp \

LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_CLANG := true
+28 −28
Original line number Diff line number Diff line
@@ -21,17 +21,19 @@

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "log.h"

bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
  Range range{begin, end};
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += end - begin;
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
@@ -44,27 +46,30 @@ bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  }
}

void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
  allocator::vector<Range> to_do(1, range, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();
    uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
    // TODO(ccross): we might need to consider a pointer to the end of a buffer
    // to be inside the buffer, which means the common case of a pointer to the
    // beginning of a buffer may keep two ranges live.
    for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
      uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
      if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
        RangeMap::iterator it = allocations_.find(Range{val, val + 1});
bool HeapWalker::IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info) {
  if (ptr >= valid_allocations_range_.begin && ptr < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{ptr, ptr + 1});
    if (it != allocations_.end()) {
          if (!(it->second.*flag)) {
            to_do.push_back(it->first);
            it->second.*flag = true;
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
  }
}

@@ -85,27 +90,22 @@ size_t HeapWalker::AllocationBytes() {
}

bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    Walk(*it, &RangeInfo::referenced_from_root);
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
  Walk(vals, &RangeInfo::referenced_from_root);

  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      Walk(it->first, &RangeInfo::referenced_from_leak);
    }
  }
  RecurseRoot(vals);

  return true;
}

bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
    size_t* num_leaks_out, size_t* leak_bytes_out) {
  DetectLeaks();
  leaked.clear();

  size_t num_leaks = 0;
@@ -120,7 +120,7 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ <= limit) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
+42 −7
Original line number Diff line number Diff line
@@ -20,11 +20,14 @@
#include "android-base/macros.h"

#include "Allocator.h"
#include "Tarjan.h"

// A half-open range of addresses, [begin, end).
struct Range {
  uintptr_t begin;
  uintptr_t end;

  // Number of bytes covered by the range.
  size_t size() const { return end - begin; }
};

// Comparator for Ranges that returns equivalence for overlapping ranges
@@ -34,7 +37,6 @@ struct compare_range {
  }
};


class HeapWalker {
 public:
  HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
@@ -55,16 +57,25 @@ class HeapWalker {
  size_t Allocations();
  size_t AllocationBytes();

 private:
  struct RangeInfo {
  template<class F>
  void ForEachPtrInRange(const Range& range, F&& f);

  template<class F>
  void ForEachAllocation(F&& f);

  struct AllocationInfo {
    bool referenced_from_root;
    bool referenced_from_leak;
  };
  void Walk(const Range& range, bool RangeInfo::* flag);

 private:

  void RecurseRoot(const Range& root);
  bool IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);

  DISALLOW_COPY_AND_ASSIGN(HeapWalker);
  Allocator<HeapWalker> allocator_;
  using RangeMap = allocator::map<RangeInfo, Range, compare_range>;
  RangeMap allocations_;
  using AllocationMap = allocator::map<AllocationInfo, Range, compare_range>;
  AllocationMap allocations_;
  size_t allocation_bytes_;
  Range valid_allocations_range_;

@@ -72,4 +83,28 @@ class HeapWalker {
  allocator::vector<uintptr_t> root_vals_;
};

// Scans `range` word-by-word, invoking f(ref_range, ref_info) for every word
// whose value is a pointer into a registered allocation.
template<class F>
inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
  // Round the start address up to the next pointer-aligned boundary.
  constexpr uintptr_t align_mask = sizeof(uintptr_t) - 1;
  uintptr_t word = (range.begin + align_mask) & ~align_mask;
  // TODO(ccross): we might need to consider a pointer to the end of a buffer
  // to be inside the buffer, which means the common case of a pointer to the
  // beginning of a buffer may keep two ranges live.
  while (word < range.end) {
    Range ref_range;
    AllocationInfo* ref_info;
    if (IsAllocationPtr(*reinterpret_cast<uintptr_t*>(word), &ref_range, &ref_info)) {
      f(ref_range, ref_info);
    }
    word += sizeof(uintptr_t);
  }
}

// Invokes f(range, info) once for every allocation registered with this
// HeapWalker.
template<class F>
inline void HeapWalker::ForEachAllocation(F&& f) {
  for (auto& entry : allocations_) {
    f(entry.first, entry.second);
  }
}

#endif
+143 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "Tarjan.h"
#include "log.h"

// Converts the possibly-cyclic graph of leaks to a DAG by combining each
// strongly-connected component into a single SCCInfo object, stored in the
// scc pointer of every leak in that component.
void LeakFolding::ComputeDAG() {
  SCCList<LeakInfo> scc_list{allocator_};
  Tarjan(leak_graph_, scc_list);

  Allocator<SCCInfo> scc_allocator = allocator_;

  // Create one SCCInfo per strongly-connected component, point each member
  // leak at it, and accumulate the component's leak count and byte size.
  for (auto& scc_nodes: scc_list) {
    Allocator<SCCInfo>::unique_ptr leak_scc;
    leak_scc = scc_allocator.make_unique(scc_allocator);

    for (auto& node: scc_nodes) {
      node->ptr->scc = leak_scc.get();
      leak_scc->count++;
      leak_scc->size += node->ptr->range.size();
    }

    // leak_scc_ keeps ownership; the nodes hold raw back-pointers.
    leak_scc_.emplace_back(std::move(leak_scc));
  }

  // Add SCC-level edges for every cross-component reference. References
  // between leaks in the same component are dropped, which is what makes
  // the resulting SCC graph acyclic.
  for (auto& it : leak_map_) {
    LeakInfo& leak = it.second;
    for (auto& ref: leak.node.references_out) {
      if (leak.scc != ref->ptr->scc) {
        leak.scc->node.Edge(&ref->ptr->scc->node);
      }
    }
  }
}

// Adds the size and count of every SCC reachable from `dominator` (including
// the dominator itself) into the dominator's cuumulative_size and
// cuumulative_count fields. The accumulator field doubles as a per-dominator
// visited marker, so each SCC contributes at most once per call.
void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
  // A named std::function (rather than a bare lambda) lets the lambda call
  // itself recursively; the std::allocator_arg constructor keeps the
  // closure's storage inside allocator_.
  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
      [&](SCCInfo* scc) {
        if (scc->accumulator != dominator) {
          scc->accumulator = dominator;
          dominator->cuumulative_size += scc->size;
          dominator->cuumulative_count += scc->count;
          scc->node.Foreach([&](SCCInfo* ref) {
            walk(ref);
          });
        }
      });
  walk(dominator);
}

// Builds a reference graph of all leaked allocations, folds its
// strongly-connected components into a DAG, and accumulates cumulative
// sizes/counts onto the dominator SCCs (those with no incoming references).
// Always returns true.
bool LeakFolding::FoldLeaks() {
  // NOTE(review): leak_allocator appears unused in this function — confirm
  // whether it can be removed, or whether the copy itself matters.
  Allocator<LeakInfo> leak_allocator = allocator_;

  // Find all leaked allocations insert them into leak_map_ and leak_graph_
  heap_walker_.ForEachAllocation(
      [&](const Range& range, HeapWalker::AllocationInfo& allocation) {
        if (!allocation.referenced_from_root) {
          // piecewise_construct builds the LeakInfo in place; LeakInfo is
          // non-copyable (see LeakFolding.h).
          auto it = leak_map_.emplace(std::piecewise_construct,
              std::forward_as_tuple(range),
              std::forward_as_tuple(range, allocator_));
          LeakInfo& leak = it.first->second;
          leak_graph_.push_back(&leak.node);
        }
      });

  // Find references between leaked allocations and connect them in leak_graph_
  for (auto& it : leak_map_) {
    LeakInfo& leak = it.second;
    heap_walker_.ForEachPtrInRange(leak.range,
        [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
          if (!ptr_info->referenced_from_root) {
            // Every unreferenced allocation was inserted above, so at() is
            // expected to succeed.
            LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
            leak.node.Edge(&ptr_leak->node);
          }
        });
  }

  // Convert the cyclic graph to a DAG by grouping strongly connected components
  ComputeDAG();

  // Compute dominators and cumulative sizes. An SCC with no incoming
  // references is a dominator: nothing else keeps it alive.
  for (auto& scc : leak_scc_) {
    if (scc->node.references_in.size() == 0) {
      scc->dominator = true;
      AccumulateLeaks(scc.get());
    }
  }

  return true;
}

// Reports folded leaks. Only dominator leaks are appended to `leaked` (up to
// `limit` of them), each annotated with the count and byte size of the other
// leaks it keeps alive. The out parameters receive totals over ALL leaks,
// folded or not. Note this appends without clearing `leaked`; callers are
// expected to pass an empty vector — TODO confirm. Always returns true.
bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
    size_t limit, size_t* num_leaks_out, size_t* leak_bytes_out) {
  size_t total_leaks = 0;
  size_t total_bytes = 0;
  size_t reported = 0;

  // Single pass: tally every leak, and emit the dominators as we go.
  for (auto& entry : leak_map_) {
    const LeakInfo& leak = entry.second;
    total_leaks++;
    total_bytes += leak.range.size();

    // reported only advances for dominators, matching the output limit.
    if (leak.scc->dominator && reported++ < limit) {
      // Subtract the dominator's own allocation so the referenced_* fields
      // describe only what it dominates.
      leaked.emplace_back(Leak{leak.range,
          leak.scc->cuumulative_count - 1,
          leak.scc->cuumulative_size - leak.range.size()});
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = total_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = total_bytes;
  }

  return true;
}
+89 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#define LIBMEMUNREACHABLE_LEAK_FOLDING_H_

#include "HeapWalker.h"

// Folds the leaks found by a HeapWalker: leaks that are only reachable from
// other leaks are grouped under a "dominator" leak, so reports show one entry
// per independent leak instead of one per allocation.
class LeakFolding {
 public:
  LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
   : allocator_(allocator), heap_walker_(heap_walker),
     leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}

  // Builds the leak graph from heap_walker_'s allocations and computes
  // dominators. Must be called before Leaked().
  bool FoldLeaks();

  // One reported (dominator) leak.
  struct Leak {
    const Range range;
    size_t referenced_count;  // number of other leaks this one keeps alive
    size_t referenced_size;   // total bytes of those referenced leaks
  };

  // Appends up to `limit` dominator leaks to `leaked`; the out parameters
  // receive totals over all leaks.
  bool Leaked(allocator::vector<Leak>& leaked, size_t limit,
      size_t* num_leaks_out, size_t* leak_bytes_out);

 private:
  DISALLOW_COPY_AND_ASSIGN(LeakFolding);
  Allocator<void> allocator_;
  HeapWalker& heap_walker_;

  // One strongly-connected component of the leak graph, produced by
  // ComputeDAG(). Owned by leak_scc_; LeakInfo::scc holds raw pointers.
  // NOTE(review): "cuumulative" is a typo for "cumulative"; renaming would
  // require touching LeakFolding.cpp in the same change.
  struct SCCInfo {
   public:
    Node<SCCInfo> node;

    size_t count;
    size_t size;

    size_t cuumulative_count;
    size_t cuumulative_size;

    bool dominator;
    SCCInfo* accumulator;

    SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
        count(0), size(0), cuumulative_count(0), cuumulative_size(0),
        dominator(false), accumulator(nullptr) {}
   private:
    // Node stores a pointer back to this object, so it must not move.
    SCCInfo(SCCInfo&&) = delete;
    DISALLOW_COPY_AND_ASSIGN(SCCInfo);
  };

  // One leaked allocation; a node in the (possibly cyclic) leak graph.
  struct LeakInfo {
   public:
    Node<LeakInfo> node;

    const Range range;

    // Component this leak belongs to; set by ComputeDAG(), null before then.
    SCCInfo* scc;

    LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
        : node(this, allocator), range(range),
          scc(nullptr) {}

   private:
    DISALLOW_COPY_AND_ASSIGN(LeakInfo);
  };

  void ComputeDAG();
  void AccumulateLeaks(SCCInfo* dominator);

  allocator::map<LeakInfo, Range, compare_range> leak_map_;
  Graph<LeakInfo> leak_graph_;
  allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
};

#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
Loading