
Commit 1799b892 authored by Edgar Arriaga García, committed by Automerger Merge Worker

Merge "Revert "Resend remaining VMAs on partial compaction due to failure""...

Merge "Revert "Resend remaining VMAs on partial compaction due to failure"" into tm-dev am: 11879837

Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/base/+/18174406



Change-Id: I3283f3d367a775ae68e8378b266e37a9c042ef93
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
parents 6d7bcb46 11879837
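
For context, the reverted change dealt with a kernel-imposed batching constraint: process_madvise(2) accepts at most UIO_MAXIOV iovec entries per call, caps a single call at MAX_RW_COUNT bytes, and may return having advised fewer bytes than requested. The following is a minimal standalone sketch of that chunking, not the code in this file; processMadvise and adviseRanges are illustrative names, and the raw syscall is used in case the local libc lacks a wrapper.

#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
#include <vector>

// Thin wrapper; __NR_process_madvise exists on Linux >= 5.10.
static ssize_t processMadvise(int pidfd, const struct iovec* vec, size_t n, int advice) {
    return syscall(__NR_process_madvise, pidfd, vec, n, advice, 0u);
}

// Advises every range in chunks of at most UIO_MAXIOV entries.
// Returns total bytes advised, or -errno on the first hard failure.
int64_t adviseRanges(int pidfd, const std::vector<struct iovec>& ranges, int advice) {
    int64_t totalBytes = 0;
    for (size_t i = 0; i < ranges.size(); i += UIO_MAXIOV) {
        size_t count = std::min<size_t>(UIO_MAXIOV, ranges.size() - i);
        ssize_t advised = processMadvise(pidfd, &ranges[i], count, advice);
        if (advised == -1) {
            return -errno;
        }
        // A short count here is the "partial compaction" case that the
        // reverted patch tried to recover from by resending the rest.
        totalBytes += advised;
    }
    return totalBytes;
}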
+53 −192

@@ -66,13 +66,13 @@ using android::base::unique_fd;
 // Defines the maximum amount of VMAs we can send per process_madvise syscall.
 // Currently this is set to UIO_MAXIOV which is the maximum segments allowed by
 // iovec implementation used by process_madvise syscall
-#define MAX_VMAS_PER_BATCH UIO_MAXIOV
+#define MAX_VMAS_PER_COMPACTION UIO_MAXIOV
 
 // Maximum bytes that we can send per process_madvise syscall once this limit
 // is reached we split the remaining VMAs into another syscall. The MAX_RW_COUNT
 // limit is imposed by iovec implementation. However, if you want to use a smaller
-// limit, it has to be a page aligned value.
-#define MAX_BYTES_PER_BATCH MAX_RW_COUNT
+// limit, it has to be a page aligned value, otherwise, compaction would fail.
+#define MAX_BYTES_PER_COMPACTION MAX_RW_COUNT
 
 // Selected a high enough number to avoid clashing with linux errno codes
 #define ERROR_COMPACTION_CANCELLED -1000
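
The restored comment in this hunk warns that a custom byte cap below MAX_RW_COUNT must be page aligned or compaction fails. A small illustration of deriving such a cap; pageAlignedLimit is a hypothetical helper, not part of this file:

#include <cstdint>
#include <unistd.h>

// Rounds a proposed byte cap down to a page boundary, satisfying the
// alignment requirement noted above for MAX_BYTES_PER_COMPACTION.
uint64_t pageAlignedLimit(uint64_t proposedLimit) {
    const uint64_t pageSize = static_cast<uint64_t>(sysconf(_SC_PAGESIZE));
    return proposedLimit & ~(pageSize - 1);
}
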
@@ -83,181 +83,6 @@ namespace android {
 // before starting next VMA batch
 static std::atomic<bool> cancelRunningCompaction;
 
-// A VmaBatch represents a set of VMAs that can be processed
-// as VMAs are processed by client code it is expected that the
-// VMAs get consumed which means they are discarded as they are
-// processed so that the first element always is the next element
-// to be sent
-struct VmaBatch {
-    struct iovec* vmas;
-    // total amount of VMAs to reach the end of iovec
-    int totalVmas;
-    // total amount of bytes that are remaining within iovec
-    uint64_t totalBytes;
-};
-
-// Advances the iterator by the specified amount of bytes.
-// This is used to remove already processed or no longer
-// needed parts of the batch.
-// Returns total bytes consumed
-int consumeBytes(VmaBatch& batch, uint64_t bytesToConsume) {
-    int index = 0;
-    if (CC_UNLIKELY(bytesToConsume) < 0) {
-        LOG(ERROR) << "Cannot consume negative bytes for VMA batch !";
-        return 0;
-    }
-
-    if (bytesToConsume > batch.totalBytes) {
-        // Avoid consuming more bytes than available
-        bytesToConsume = batch.totalBytes;
-    }
-
-    uint64_t bytesConsumed = 0;
-    while (bytesConsumed < bytesToConsume) {
-        if (CC_UNLIKELY(index >= batch.totalVmas)) {
-            // reach the end of the batch
-            return bytesConsumed;
-        }
-        if (CC_UNLIKELY(bytesConsumed + batch.vmas[index].iov_len > bytesToConsume)) {
-            // this is the whole VMA that will be consumed
-            break;
-        }
-        bytesConsumed += batch.vmas[index].iov_len;
-        batch.totalBytes -= batch.vmas[index].iov_len;
-        --batch.totalVmas;
-        ++index;
-    }
-
-    // Move pointer to consume all the whole VMAs
-    batch.vmas = batch.vmas + index;
-
-    // Consume the rest of the bytes partially at last VMA in batch
-    uint64_t bytesLeftToConsume = bytesToConsume - bytesConsumed;
-    bytesConsumed += bytesLeftToConsume;
-    if (batch.totalVmas > 0) {
-        batch.vmas[0].iov_base = (void*)((uint64_t)batch.vmas[0].iov_base + bytesLeftToConsume);
-    }
-
-    return bytesConsumed;
-}
-
-// given a source of vmas this class will act as a factory
-// of VmaBatch objects and it will allow generating batches
-// until there are no more left in the source vector.
-// Note: the class does not actually modify the given
-// vmas vector, instead it iterates on it until the end.
-class VmaBatchCreator {
-    const std::vector<Vma>* sourceVmas;
-    // This is the destination array where batched VMAs will be stored
-    // it gets encapsulated into a VmaBatch which is the object
-    // meant to be used by client code.
-    struct iovec* destVmas;
-
-    // Parameters to keep track of the iterator on the source vmas
-    int currentIndex_;
-    uint64_t currentOffset_;
-
-public:
-    VmaBatchCreator(const std::vector<Vma>* vmasToBatch, struct iovec* destVmasVec)
-          : sourceVmas(vmasToBatch), destVmas(destVmasVec), currentIndex_(0), currentOffset_(0) {}
-
-    int currentIndex() { return currentIndex_; }
-    uint64_t currentOffset() { return currentOffset_; }
-
-    // Generates a batch and moves the iterator on the source vmas
-    // past the last VMA in the batch.
-    // Returns true on success, false on failure
-    bool createNextBatch(VmaBatch& batch) {
-        if (currentIndex_ >= MAX_VMAS_PER_BATCH && currentIndex_ >= sourceVmas->size()) {
-            return false;
-        }
-
-        const std::vector<Vma>& vmas = *sourceVmas;
-        batch.vmas = destVmas;
-        uint64_t totalBytesInBatch = 0;
-        int indexInBatch = 0;
-
-        // Add VMAs to the batch up until we consumed all the VMAs or
-        // reached any imposed limit of VMAs per batch.
-        while (indexInBatch < MAX_VMAS_PER_BATCH && currentIndex_ < vmas.size()) {
-            uint64_t vmaStart = vmas[currentIndex_].start + currentOffset_;
-            uint64_t vmaSize = vmas[currentIndex_].end - vmaStart;
-            if (CC_UNLIKELY(vmaSize == 0)) {
-                // No more bytes to batch for this VMA, move to next one
-                // this only happens if a batch partially consumed bytes
-                // and offset landed at exactly the end of a vma
-                continue;
-            }
-            batch.vmas[indexInBatch].iov_base = (void*)vmaStart;
-            uint64_t bytesAvailableInBatch = MAX_BYTES_PER_BATCH - totalBytesInBatch;
-
-            if (vmaSize >= bytesAvailableInBatch) {
-                // VMA would exceed the max available bytes in batch
-                // clamp with available bytes and finish batch.
-                vmaSize = bytesAvailableInBatch;
-                currentOffset_ += bytesAvailableInBatch;
-            }
-
-            batch.vmas[indexInBatch].iov_len = vmaSize;
-            totalBytesInBatch += vmaSize;
-
-            ++indexInBatch;
-            if (totalBytesInBatch >= MAX_BYTES_PER_BATCH) {
-                // Reached max bytes quota so this marks
-                // the end of the batch
-                break;
-            }
-
-            // Fully finished current VMA, move to next one
-            currentOffset_ = 0;
-            ++currentIndex_;
-        }
-        // Vmas where fully filled and we are past the last filled index.
-        batch.totalVmas = indexInBatch;
-        batch.totalBytes = totalBytesInBatch;
-        return true;
-    }
-};
-
-// Madvise a set of VMAs given in a batch for a specific process
-// The total number of bytes successfully madvised will be set on
-// outBytesProcessed.
-// Returns 0 on success and standard linux -errno code returned by
-// process_madvise on failure
-int madviseVmasFromBatch(unique_fd& pidfd, VmaBatch& batch, int madviseType,
-                         uint64_t* outBytesProcessed) {
-    if (batch.totalVmas == 0) {
-        // No VMAs in Batch, skip.
-        *outBytesProcessed = 0;
-        return 0;
-    }
-
-    ATRACE_BEGIN(StringPrintf("Madvise %d: %d VMAs", madviseType, batch.totalVmas).c_str());
-    uint64_t bytesProcessedInSend =
-            process_madvise(pidfd, batch.vmas, batch.totalVmas, madviseType, 0);
-    ATRACE_END();
-
-    if (CC_UNLIKELY(bytesProcessedInSend == -1)) {
-        bytesProcessedInSend = 0;
-        if (errno != EINVAL) {
-            // Forward irrecoverable errors and bail out compaction
-            *outBytesProcessed = 0;
-            return -errno;
-        }
-    }
-
-    if (bytesProcessedInSend < batch.totalBytes) {
-        // Did not process all the bytes requested
-        // skip last page which likely failed
-        bytesProcessedInSend += PAGE_SIZE;
-    }
-
-    bytesProcessedInSend = consumeBytes(batch, bytesProcessedInSend);
-
-    *outBytesProcessed = bytesProcessedInSend;
-    return 0;
-}
-
 // Legacy method for compacting processes, any new code should
 // use compactProcess instead.
 static inline void compactProcessProcfs(int pid, const std::string& compactionType) {
@@ -271,6 +96,8 @@ static inline void compactProcessProcfs(int pid, const std::string& compactionTy
 // If any VMA fails compaction due to -EINVAL it will be skipped and continue.
 // However, if it fails for any other reason, it will bail out and forward the error
 static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseType) {
+    static struct iovec vmasToKernel[MAX_VMAS_PER_COMPACTION];
+
     if (vmas.empty()) {
         return 0;
     }
@@ -281,16 +108,13 @@ static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseT
         return -errno;
     }
 
-    struct iovec destVmas[MAX_VMAS_PER_BATCH];
-
-    VmaBatch batch;
-    VmaBatchCreator batcher(&vmas, destVmas);
-
     int64_t totalBytesProcessed = 0;
-    while (batcher.createNextBatch(batch)) {
-        uint64_t bytesProcessedInSend;
-
-        do {
+
+    int64_t vmaOffset = 0;
+    for (int iVma = 0; iVma < vmas.size();) {
+        uint64_t bytesSentToCompact = 0;
+        int iVec = 0;
+        while (iVec < MAX_VMAS_PER_COMPACTION && iVma < vmas.size()) {
             if (CC_UNLIKELY(cancelRunningCompaction.load())) {
                 // There could be a significant delay between when a compaction
                 // is requested and when it is handled during this time our
@@ -300,13 +124,50 @@ static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseT
                                          StringPrintf("Cancelled compaction for %d", pid).c_str());
                 return ERROR_COMPACTION_CANCELLED;
             }
-            int error = madviseVmasFromBatch(pidfd, batch, madviseType, &bytesProcessedInSend);
-            if (error < 0) {
-                // Returns standard linux errno code
-                return error;
-            }
-            totalBytesProcessed += bytesProcessedInSend;
-        } while (batch.totalBytes > 0);
+
+            uint64_t vmaStart = vmas[iVma].start + vmaOffset;
+            uint64_t vmaSize = vmas[iVma].end - vmaStart;
+            if (vmaSize == 0) {
+                goto next_vma;
+            }
+            vmasToKernel[iVec].iov_base = (void*)vmaStart;
+            if (vmaSize > MAX_BYTES_PER_COMPACTION - bytesSentToCompact) {
+                // Exceeded the max bytes that could be sent, so clamp
+                // the end to avoid exceeding limit and issue compaction
+                vmaSize = MAX_BYTES_PER_COMPACTION - bytesSentToCompact;
+            }
+
+            vmasToKernel[iVec].iov_len = vmaSize;
+            bytesSentToCompact += vmaSize;
+            ++iVec;
+            if (bytesSentToCompact >= MAX_BYTES_PER_COMPACTION) {
+                // Ran out of bytes within iovec, dispatch compaction.
+                vmaOffset += vmaSize;
+                break;
+            }
+
+        next_vma:
+            // Finished current VMA, and have more bytes remaining
+            vmaOffset = 0;
+            ++iVma;
+        }
+
+        ATRACE_BEGIN(StringPrintf("Compact %d VMAs", iVec).c_str());
+        auto bytesProcessed = process_madvise(pidfd, vmasToKernel, iVec, madviseType, 0);
+        ATRACE_END();
+
+        if (CC_UNLIKELY(bytesProcessed == -1)) {
+            if (errno == EINVAL) {
+                // This error is somewhat common due to an unevictable VMA if this is
+                // the case silently skip the bad VMA and continue compacting the rest.
+                continue;
+            } else {
+                // Forward irrecoverable errors and bail out compaction
+                return -errno;
+            }
+        }
+
+        totalBytesProcessed += bytesProcessed;
     }
 
     return totalBytesProcessed;
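
The behavioral difference sits in how a partial return is handled: the restored loop issues one process_madvise per iovec batch, skips EINVAL failures, and moves on, whereas the reverted code trimmed the already-advised prefix off the batch (consumeBytes) and resent the remainder. A condensed sketch of that resend-on-partial pattern, using illustrative names rather than this file's API:

#include <cstdint>
#include <sys/uio.h>

// Drops 'advised' bytes from the front of vec[0..n): whole entries are
// skipped by advancing the pointer, and a partially advised entry has its
// base moved forward and its length shrunk. Returns the remaining count.
size_t consumeAdvisedPrefix(struct iovec*& vec, size_t n, uint64_t advised) {
    while (n > 0 && advised >= vec[0].iov_len) {
        advised -= vec[0].iov_len;
        ++vec;
        --n;
    }
    if (n > 0 && advised > 0) {
        vec[0].iov_base = static_cast<char*>(vec[0].iov_base) + advised;
        vec[0].iov_len -= advised;
    }
    return n;
}

A caller would loop: send the batch, feed the returned byte count through consumeAdvisedPrefix, and retry until the batch is empty or a hard error arrives.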