Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 88c9441d authored by TreeHugger Robot's avatar TreeHugger Robot Committed by Automerger Merge Worker
Browse files

Merge changes from topic "compact-improvements-v2" into tm-dev am: ee628e28 am: 959f2b43

parents c767408a 959f2b43
Loading
Loading
Loading
Loading
+36 −0
Original line number Diff line number Diff line
@@ -219,6 +219,8 @@ final class ActivityManagerShellCommand extends ShellCommand {
                    return runStopService(pw);
                case "broadcast":
                    return runSendBroadcast(pw);
                case "compact":
                    return runCompact(pw);
                case "instrument":
                    getOutPrintWriter().println("Error: must be invoked through 'am instrument'.");
                    return -1;
@@ -966,6 +968,36 @@ final class ActivityManagerShellCommand extends ShellCommand {
        return 0;
    }

    @NeverCompile
    int runCompact(PrintWriter pw) {
        // Shell handler for "am compact <process_name> <uid> <some|full>".
        // Forces a compaction on the given process, bypassing the normal
        // OomAdjuster heuristics ("some" = file pages, "full" = anon + file).
        String processName = getNextArgRequired();
        String uid = getNextArgRequired();
        String op = getNextArgRequired();
        final int uidValue;
        try {
            uidValue = Integer.parseInt(uid);
        } catch (NumberFormatException e) {
            // Report a usage error instead of crashing the shell command.
            getErrPrintWriter().println("Error: invalid uid '" + uid + "'");
            return -1;
        }
        ProcessRecord app;
        synchronized (mInternal.mProcLock) {
            app = mInternal.getProcessRecordLocked(processName, uidValue);
        }
        if (app == null) {
            // No matching process record: fail cleanly rather than NPE on app.mPid.
            getErrPrintWriter().println(
                    "Error: no process found for " + processName + "/" + uid);
            return -1;
        }
        pw.println("Process record found pid: " + app.mPid);
        if (op.equals("full")) {
            pw.println("Executing full compaction for " + app.mPid);
            // Hold mProcLock for the duration of the forced compaction, matching
            // the locking used for the process lookup above.
            synchronized (mInternal.mProcLock) {
                mInternal.mOomAdjuster.mCachedAppOptimizer.compactAppFull(app, true);
            }
            pw.println("Finished full compaction for " + app.mPid);
        } else if (op.equals("some")) {
            pw.println("Executing some compaction for " + app.mPid);
            synchronized (mInternal.mProcLock) {
                mInternal.mOomAdjuster.mCachedAppOptimizer.compactAppSome(app, true);
            }
            pw.println("Finished some compaction for " + app.mPid);
        } else {
            getErrPrintWriter().println("Error: unknown compact command '" + op + "'");
            return -1;
        }

        return 0;
    }

    int runDumpHeap(PrintWriter pw) throws RemoteException {
        final PrintWriter err = getErrPrintWriter();
        boolean managed = true;
@@ -3446,6 +3478,10 @@ final class ActivityManagerShellCommand extends ShellCommand {
            pw.println("      --allow-background-activity-starts: The receiver may start activities");
            pw.println("          even if in the background.");
            pw.println("      --async: Send without waiting for the completion of the receiver.");
            pw.println("  compact <process_name> <Package UID> [some|full]");
            pw.println("      Force process compaction.");
            pw.println("      some: execute file compaction.");
            pw.println("      full: execute anon + file compaction.");
            pw.println("  instrument [-r] [-e <NAME> <VALUE>] [-p <FILE>] [-w]");
            pw.println("          [--user <USER_ID> | current]");
            pw.println("          [--no-hidden-api-checks [--no-test-api-access]]");
+331 −174

File changed.

Preview size limit exceeded, changes collapsed.

+15 −0
Original line number Diff line number Diff line
@@ -56,6 +56,8 @@ final class ProcessCachedOptimizerRecord {
    @GuardedBy("mProcLock")
    private boolean mPendingCompact;

    @GuardedBy("mProcLock") private boolean mForceCompact;

    /**
     * True when the process is frozen.
     */
@@ -132,6 +134,16 @@ final class ProcessCachedOptimizerRecord {
        mPendingCompact = pendingCompact;
    }

    /**
     * Returns whether a forced compaction has been requested for this process.
     * Caller must hold mProcLock (see {@code GuardedBy}).
     */
    @GuardedBy("mProcLock")
    boolean isForceCompact() {
        return mForceCompact;
    }

    /**
     * Marks (or clears) a forced-compaction request for this process.
     * Presumably set when compaction should run regardless of the usual
     * heuristics (e.g. shell-initiated) — confirm against CachedAppOptimizer.
     * Caller must hold mProcLock (see {@code GuardedBy}).
     */
    @GuardedBy("mProcLock")
    void setForceCompact(boolean forceCompact) {
        mForceCompact = forceCompact;
    }

    @GuardedBy("mProcLock")
    boolean isFrozen() {
        return mFrozen;
@@ -205,6 +217,9 @@ final class ProcessCachedOptimizerRecord {
    void dump(PrintWriter pw, String prefix, long nowUptime) {
        pw.print(prefix); pw.print("lastCompactTime="); pw.print(mLastCompactTime);
        pw.print(" lastCompactAction="); pw.println(mLastCompactAction);
        pw.print(prefix);
        pw.print("hasPendingCompaction=");
        pw.print(mPendingCompact);
        pw.print(prefix); pw.print("isFreezeExempt="); pw.print(mFreezeExempt);
        pw.print(" isPendingFreeze="); pw.print(mPendingFreeze);
        pw.print(" " + IS_FROZEN + "="); pw.println(mFrozen);
+226 −68
Original line number Diff line number Diff line
@@ -16,6 +16,8 @@

#define LOG_TAG "CachedAppOptimizer"
//#define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_ACTIVITY_MANAGER
#define ATRACE_COMPACTION_TRACK "Compaction"

#include <android-base/file.h>
#include <android-base/logging.h>
@@ -37,8 +39,10 @@
#include <sys/pidfd.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/types.h>
#include <unistd.h>
#include <utils/Trace.h>

#include <algorithm>

@@ -62,18 +66,197 @@ using android::base::unique_fd;
// Defines the maximum amount of VMAs we can send per process_madvise syscall.
// Currently this is set to UIO_MAXIOV which is the maximum segments allowed by
// iovec implementation used by process_madvise syscall
#define MAX_VMAS_PER_COMPACTION UIO_MAXIOV
#define MAX_VMAS_PER_BATCH UIO_MAXIOV

// Maximum bytes that we can send per process_madvise syscall once this limit
// is reached we split the remaining VMAs into another syscall. The MAX_RW_COUNT
// limit is imposed by iovec implementation. However, if you want to use a smaller
// limit, it has to be a page aligned value, otherwise, compaction would fail.
#define MAX_BYTES_PER_COMPACTION MAX_RW_COUNT
// limit, it has to be a page aligned value.
#define MAX_BYTES_PER_BATCH MAX_RW_COUNT

// Selected a high enough number to avoid clashing with linux errno codes
#define ERROR_COMPACTION_CANCELLED -1000

namespace android {

static bool cancelRunningCompaction;
static bool compactionInProgress;
// Signal happening in separate thread that would bail out compaction
// before starting next VMA batch
static std::atomic<bool> cancelRunningCompaction;

// A VmaBatch represents a set of VMAs that can be processed together.
// As client code processes VMAs, it is expected to consume them: entries
// are discarded from the front so that vmas[0] is always the next
// element to be sent.
struct VmaBatch {
    struct iovec* vmas;
    // Number of VMAs remaining until the end of the iovec array.
    int totalVmas;
    // Total number of bytes remaining within the iovec array.
    uint64_t totalBytes;
};

// Advances the iterator by the specified amount of bytes.
// This is used to remove already processed or no longer
// needed parts of the batch.
// Returns total bytes consumed
int consumeBytes(VmaBatch& batch, uint64_t bytesToConsume) {
    int index = 0;
    if (CC_UNLIKELY(bytesToConsume) < 0) {
        LOG(ERROR) << "Cannot consume negative bytes for VMA batch !";
        return 0;
    }

    if (bytesToConsume > batch.totalBytes) {
        // Avoid consuming more bytes than available
        bytesToConsume = batch.totalBytes;
    }

    uint64_t bytesConsumed = 0;
    while (bytesConsumed < bytesToConsume) {
        if (CC_UNLIKELY(index >= batch.totalVmas)) {
            // reach the end of the batch
            return bytesConsumed;
        }
        if (CC_UNLIKELY(bytesConsumed + batch.vmas[index].iov_len > bytesToConsume)) {
            // this is the whole VMA that will be consumed
            break;
        }
        bytesConsumed += batch.vmas[index].iov_len;
        batch.totalBytes -= batch.vmas[index].iov_len;
        --batch.totalVmas;
        ++index;
    }

    // Move pointer to consume all the whole VMAs
    batch.vmas = batch.vmas + index;

    // Consume the rest of the bytes partially at last VMA in batch
    uint64_t bytesLeftToConsume = bytesToConsume - bytesConsumed;
    bytesConsumed += bytesLeftToConsume;
    if (batch.totalVmas > 0) {
        batch.vmas[0].iov_base = (void*)((uint64_t)batch.vmas[0].iov_base + bytesLeftToConsume);
    }

    return bytesConsumed;
}

// given a source of vmas this class will act as a factory
// of VmaBatch objects and it will allow generating batches
// until there are no more left in the source vector.
// Note: the class does not actually modify the given
// vmas vector, instead it iterates on it until the end.
// Given a source of VMAs, this class acts as a factory of VmaBatch
// objects, generating batches until there are no VMAs left in the
// source vector.
// Note: the class does not modify the given vmas vector; it only
// iterates over it.
class VmaBatchCreator {
    const std::vector<Vma>* sourceVmas;
    // Destination array where batched VMAs will be stored; it gets
    // encapsulated into a VmaBatch, which is the object meant to be
    // used by client code.
    struct iovec* destVmas;

    // Iteration state over the source vmas: index of the next VMA to
    // batch, and the byte offset already dispatched within that VMA.
    int currentIndex_;
    uint64_t currentOffset_;

public:
    VmaBatchCreator(const std::vector<Vma>* vmasToBatch, struct iovec* destVmasVec)
          : sourceVmas(vmasToBatch), destVmas(destVmasVec), currentIndex_(0), currentOffset_(0) {}

    int currentIndex() { return currentIndex_; }
    uint64_t currentOffset() { return currentOffset_; }

    // Generates a batch and moves the iterator on the source vmas past
    // the last (possibly partial) VMA included in the batch.
    // Returns true when a batch was produced, false once the source
    // VMAs are exhausted.
    bool createNextBatch(VmaBatch& batch) {
        // Done once every source VMA has been batched. Returning false
        // here is what terminates the caller's loop; the previous
        // condition (`>= MAX_VMAS_PER_BATCH && >= size()`) never fired
        // for sources smaller than MAX_VMAS_PER_BATCH, handing out
        // empty batches forever.
        if (static_cast<size_t>(currentIndex_) >= sourceVmas->size()) {
            return false;
        }

        const std::vector<Vma>& vmas = *sourceVmas;
        batch.vmas = destVmas;
        uint64_t totalBytesInBatch = 0;
        int indexInBatch = 0;

        // Add VMAs to the batch until we consumed all the VMAs or
        // reached an imposed per-batch limit.
        while (indexInBatch < MAX_VMAS_PER_BATCH &&
               static_cast<size_t>(currentIndex_) < vmas.size()) {
            uint64_t vmaStart = vmas[currentIndex_].start + currentOffset_;
            uint64_t vmaSize = vmas[currentIndex_].end - vmaStart;
            if (CC_UNLIKELY(vmaSize == 0)) {
                // No bytes left in this VMA; this happens when a prior
                // batch's byte quota landed exactly at the end of the
                // VMA. Advance to the next VMA before continuing —
                // a bare `continue` here would spin forever on the
                // same entry.
                currentOffset_ = 0;
                ++currentIndex_;
                continue;
            }
            batch.vmas[indexInBatch].iov_base = (void*)vmaStart;
            uint64_t bytesAvailableInBatch = MAX_BYTES_PER_BATCH - totalBytesInBatch;

            if (vmaSize >= bytesAvailableInBatch) {
                // VMA would exceed the max available bytes in the batch:
                // clamp to what is available and remember the offset so
                // the next batch resumes inside this VMA.
                vmaSize = bytesAvailableInBatch;
                currentOffset_ += bytesAvailableInBatch;
            }

            batch.vmas[indexInBatch].iov_len = vmaSize;
            totalBytesInBatch += vmaSize;

            ++indexInBatch;
            if (totalBytesInBatch >= MAX_BYTES_PER_BATCH) {
                // Reached the max bytes quota; this marks the end of
                // the batch.
                break;
            }

            // Fully finished the current VMA, move to the next one.
            currentOffset_ = 0;
            ++currentIndex_;
        }
        batch.totalVmas = indexInBatch;
        batch.totalBytes = totalBytesInBatch;
        return true;
    }
};

// Madvise a set of VMAs given in a batch for a specific process
// The total number of bytes successfully madvised will be set on
// outBytesProcessed.
// Returns 0 on success and standard linux -errno code returned by
// process_madvise on failure
// Madvise a set of VMAs given in a batch for a specific process.
// The total number of bytes successfully madvised is stored in
// *outBytesProcessed.
// Returns 0 on success and a standard linux -errno code returned by
// process_madvise on failure.
int madviseVmasFromBatch(unique_fd& pidfd, VmaBatch& batch, int madviseType,
                         uint64_t* outBytesProcessed) {
    if (batch.totalVmas == 0) {
        // No VMAs in the batch, skip.
        *outBytesProcessed = 0;
        return 0;
    }

    ATRACE_BEGIN(StringPrintf("Madvise %d: %d VMAs", madviseType, batch.totalVmas).c_str());
    // process_madvise returns a signed byte count or -1; keep it signed
    // so the error check is not performed on a wrapped-around unsigned
    // value.
    ssize_t result = process_madvise(pidfd, batch.vmas, batch.totalVmas, madviseType, 0);
    ATRACE_END();

    uint64_t bytesProcessedInSend;
    if (CC_UNLIKELY(result == -1)) {
        bytesProcessedInSend = 0;
        if (errno != EINVAL) {
            // Forward irrecoverable errors and bail out of the compaction.
            *outBytesProcessed = 0;
            return -errno;
        }
        // EINVAL: presumably an unevictable VMA — fall through so the
        // skip-a-page logic below can step past it.
    } else {
        bytesProcessedInSend = result;
    }

    if (bytesProcessedInSend < batch.totalBytes) {
        // Did not process all the bytes requested: skip one page past
        // the point of failure (the page that most likely caused it) so
        // the next send can make progress instead of retrying it.
        bytesProcessedInSend += PAGE_SIZE;
    }

    bytesProcessedInSend = consumeBytes(batch, bytesProcessedInSend);

    *outBytesProcessed = bytesProcessedInSend;
    return 0;
}

// Legacy method for compacting processes, any new code should
// use compactProcess instead.
@@ -88,8 +271,6 @@ static inline void compactProcessProcfs(int pid, const std::string& compactionTy
// If any VMA fails compaction due to -EINVAL it will be skipped and continue.
// However, if it fails for any other reason, it will bail out and forward the error
static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseType) {
    static struct iovec vmasToKernel[MAX_VMAS_PER_COMPACTION];

    if (vmas.empty()) {
        return 0;
    }
@@ -99,73 +280,34 @@ static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseT
        // Skip compaction if failed to open pidfd with any error
        return -errno;
    }
    compactionInProgress = true;
    cancelRunningCompaction = false;

    struct iovec destVmas[MAX_VMAS_PER_BATCH];

    VmaBatch batch;
    VmaBatchCreator batcher(&vmas, destVmas);

    int64_t totalBytesProcessed = 0;
    while (batcher.createNextBatch(batch)) {
        uint64_t bytesProcessedInSend;

    int64_t vmaOffset = 0;
    for (int iVma = 0; iVma < vmas.size();) {
        uint64_t bytesSentToCompact = 0;
        int iVec = 0;
        while (iVec < MAX_VMAS_PER_COMPACTION && iVma < vmas.size()) {
            if (CC_UNLIKELY(cancelRunningCompaction)) {
        do {
            if (CC_UNLIKELY(cancelRunningCompaction.load())) {
                // There could be a significant delay between when a compaction
                // is requested and when it is handled during this time our
                // OOM adjust could have improved.
                LOG(DEBUG) << "Cancelled running compaction for " << pid;
                break;
            }

            uint64_t vmaStart = vmas[iVma].start + vmaOffset;
            uint64_t vmaSize = vmas[iVma].end - vmaStart;
            if (vmaSize == 0) {
                goto next_vma;
            }
            vmasToKernel[iVec].iov_base = (void*)vmaStart;
            if (vmaSize > MAX_BYTES_PER_COMPACTION - bytesSentToCompact) {
                // Exceeded the max bytes that could be sent, so clamp
                // the end to avoid exceeding limit and issue compaction
                vmaSize = MAX_BYTES_PER_COMPACTION - bytesSentToCompact;
            }

            vmasToKernel[iVec].iov_len = vmaSize;
            bytesSentToCompact += vmaSize;
            ++iVec;
            if (bytesSentToCompact >= MAX_BYTES_PER_COMPACTION) {
                // Ran out of bytes within iovec, dispatch compaction.
                vmaOffset += vmaSize;
                break;
            }

        next_vma:
            // Finished current VMA, and have more bytes remaining
            vmaOffset = 0;
            ++iVma;
        }

        if (cancelRunningCompaction) {
            cancelRunningCompaction = false;
            break;
        }

        auto bytesProcessed = process_madvise(pidfd, vmasToKernel, iVec, madviseType, 0);

        if (CC_UNLIKELY(bytesProcessed == -1)) {
            if (errno == EINVAL) {
                // This error is somewhat common due to an unevictable VMA if this is
                // the case silently skip the bad VMA and continue compacting the rest.
                continue;
            } else {
                // Forward irrecoverable errors and bail out compaction
                compactionInProgress = false;
                return -errno;
                ATRACE_INSTANT_FOR_TRACK(ATRACE_COMPACTION_TRACK,
                                         StringPrintf("Cancelled compaction for %d", pid).c_str());
                return ERROR_COMPACTION_CANCELLED;
            }
            int error = madviseVmasFromBatch(pidfd, batch, madviseType, &bytesProcessedInSend);
            if (error < 0) {
                // Returns standard linux errno code
                return error;
            }

        totalBytesProcessed += bytesProcessed;
            totalBytesProcessed += bytesProcessedInSend;
        } while (batch.totalBytes > 0);
    }
    compactionInProgress = false;

    return totalBytesProcessed;
}
@@ -194,9 +336,12 @@ static int getAnyPageAdvice(const Vma& vma) {
//
// Currently supported behaviors are MADV_COLD and MADV_PAGEOUT.
//
// Returns the total number of bytes compacted or forwards an
// process_madvise error.
// Returns the total number of bytes compacted on success. On error
// returns process_madvise errno code or if compaction was cancelled
// it returns ERROR_COMPACTION_CANCELLED.
static int64_t compactProcess(int pid, VmaToAdviseFunc vmaToAdviseFunc) {
    cancelRunningCompaction.store(false);

    ProcMemInfo meminfo(pid);
    std::vector<Vma> pageoutVmas, coldVmas;
    auto vmaCollectorCb = [&coldVmas,&pageoutVmas,&vmaToAdviseFunc](const Vma& vma) {
@@ -215,12 +360,14 @@ static int64_t compactProcess(int pid, VmaToAdviseFunc vmaToAdviseFunc) {
    int64_t pageoutBytes = compactMemory(pageoutVmas, pid, MADV_PAGEOUT);
    if (pageoutBytes < 0) {
        // Error, just forward it.
        cancelRunningCompaction.store(false);
        return pageoutBytes;
    }

    int64_t coldBytes = compactMemory(coldVmas, pid, MADV_COLD);
    if (coldBytes < 0) {
        // Error, just forward it.
        cancelRunningCompaction.store(false);
        return coldBytes;
    }

@@ -300,9 +447,18 @@ static void com_android_server_am_CachedAppOptimizer_compactSystem(JNIEnv *, job
}

// JNI entry point: requests cancellation of any in-flight compaction.
// (Reconstructed: the scraped diff left the removed pre-merge lines —
// the `compactionInProgress` guard — interleaved here, producing
// unbalanced braces and a reference to a deleted variable.)
static void com_android_server_am_CachedAppOptimizer_cancelCompaction(JNIEnv*, jobject) {
    // Signal the compaction loop (running on another thread) to bail
    // out before its next VMA batch. Setting it unconditionally is
    // safe: compactProcess() resets the flag before starting.
    cancelRunningCompaction.store(true);
    ATRACE_INSTANT_FOR_TRACK(ATRACE_COMPACTION_TRACK, "Cancel compaction");
}

// JNI entry point: returns the fraction of swap space currently free,
// in [0.0, 1.0]. Returns 0 if swap information cannot be read or if
// the device has no swap configured.
static jdouble com_android_server_am_CachedAppOptimizer_getFreeSwapPercent(JNIEnv*, jobject) {
    struct sysinfo memoryInfo;
    if (sysinfo(&memoryInfo) != 0) {
        LOG(ERROR) << "Could not check free swap space";
        return 0;
    }
    if (memoryInfo.totalswap == 0) {
        // No swap configured on this device: report 0% free instead of
        // dividing by zero (which would propagate NaN to the caller).
        return 0;
    }
    return (double)memoryInfo.freeswap / (double)memoryInfo.totalswap;
}

static void com_android_server_am_CachedAppOptimizer_compactProcess(JNIEnv*, jobject, jint pid,
@@ -358,6 +514,8 @@ static const JNINativeMethod sMethods[] = {
        /* name, signature, funcPtr */
        {"cancelCompaction", "()V",
         (void*)com_android_server_am_CachedAppOptimizer_cancelCompaction},
        {"getFreeSwapPercent", "()D",
         (void*)com_android_server_am_CachedAppOptimizer_getFreeSwapPercent},
        {"compactSystem", "()V", (void*)com_android_server_am_CachedAppOptimizer_compactSystem},
        {"compactProcess", "(II)V", (void*)com_android_server_am_CachedAppOptimizer_compactProcess},
        {"freezeBinder", "(IZ)I", (void*)com_android_server_am_CachedAppOptimizer_freezeBinder},
+1 −0
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@ cc_library_shared {
        "libnativehelper",
        "libprocessgroup",
        "libutils",
        "libcutils",
        "android.hardware.graphics.bufferqueue@1.0",
        "android.hardware.graphics.bufferqueue@2.0",
        "android.hardware.graphics.common@1.2",
Loading