Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d20d4fa0 authored by Kweku Adams's avatar Kweku Adams
Browse files

Add logic to persist jobs in separate files.

Make it possible to persist jobs in a separate file for each calling
UID. This makes it easier to persist changes to jobs of individual apps
instead of having to write the entire set of jobs every time something
changes.

Read times decrease by 2-5% when there are many jobs for each app but
increase by 33-35% when there are only a few jobs per app.
Write times increase by 5% (for many jobs) to 64% (for few jobs) for a
full write but decrease by 1% (for few jobs) to 38% (for many jobs) when
there are partial writes.

JobScheduler is a read-once, write-many system, and apart from migration
between the monolithic and split-file architectures, it should mostly be
in the partial-write scenario.

The code is disabled for now and will be turned on separately.

Performance Test Results:

(With monolithic file):

com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_fewJobs_badRTC
	testPersistedJobReading_fewJobs_badRTC_mean (ns): 4632075
	testPersistedJobReading_fewJobs_badRTC_median (ns): 4568177
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_manyJobs
	testPersistedJobWriting_manyJobs_mean (ns): 38133928
	testPersistedJobWriting_manyJobs_median (ns): 37794691
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_manyJobs_goodRTC
	testPersistedJobReading_manyJobs_goodRTC_mean (ns): 46015412
	testPersistedJobReading_manyJobs_goodRTC_median (ns): 45553494
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_fewJobs
	testPersistedJobWriting_fewJobs_mean (ns): 4903462
	testPersistedJobWriting_fewJobs_median (ns): 4914740
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_fewJobs_goodRTC
	testPersistedJobReading_fewJobs_goodRTC_mean (ns): 4432127
	testPersistedJobReading_fewJobs_goodRTC_median (ns): 4395469
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_delta_manyJobs
	testPersistedJobWriting_delta_manyJobs_mean (ns): 42629601
	testPersistedJobWriting_delta_manyJobs_median (ns): 42282061
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_delta_fewJobs
	testPersistedJobWriting_delta_fewJobs_mean (ns): 5070691
	testPersistedJobWriting_delta_fewJobs_median (ns): 4914688
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_manyJobs_badRTC
	testPersistedJobReading_manyJobs_badRTC_mean (ns): 45184576
	testPersistedJobReading_manyJobs_badRTC_median (ns): 44922660

(With split files):

com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_fewJobs_badRTC
	testPersistedJobReading_fewJobs_badRTC_mean (ns): 6350549
	testPersistedJobReading_fewJobs_badRTC_median (ns): 6232605
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_manyJobs
	testPersistedJobWriting_manyJobs_mean (ns): 39963524
	testPersistedJobWriting_manyJobs_median (ns): 39628389
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_manyJobs_goodRTC
	testPersistedJobReading_manyJobs_goodRTC_mean (ns): 43956702
	testPersistedJobReading_manyJobs_goodRTC_median (ns): 43569874
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_fewJobs
	testPersistedJobWriting_fewJobs_mean (ns): 8250152
	testPersistedJobWriting_fewJobs_median (ns): 8090939
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_fewJobs_goodRTC
	testPersistedJobReading_fewJobs_goodRTC_mean (ns): 6006753
	testPersistedJobReading_fewJobs_goodRTC_median (ns): 5869193
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_delta_manyJobs
	testPersistedJobWriting_delta_manyJobs_mean (ns): 26459508
	testPersistedJobWriting_delta_manyJobs_median (ns): 26147607
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobWriting_delta_fewJobs
	testPersistedJobWriting_delta_fewJobs_mean (ns): 4958038
	testPersistedJobWriting_delta_fewJobs_median (ns): 4881588
com.android.frameworks.perftests.job.JobStorePerfTests#testPersistedJobReading_manyJobs_badRTC
	testPersistedJobReading_manyJobs_badRTC_mean (ns): 44476839
	testPersistedJobReading_manyJobs_badRTC_median (ns): 43993285

Bug: 255352252
Test: atest JobSchedulerPerfTests:JobStorePerfTests
Test: atest FrameworksServicesTests:JobStoreTest
Change-Id: Ib7bfa2c974f7663dea79d707c9e1b2bd53ef89b3
parent 2d4746d1
Loading
Loading
Loading
Loading
+21 −1
Original line number Diff line number Diff line
@@ -177,7 +177,7 @@ public class JobSchedulerService extends com.android.server.SystemService
    @EnabledAfter(targetSdkVersion = Build.VERSION_CODES.TIRAMISU)
    private static final long REQUIRE_NETWORK_CONSTRAINT_FOR_NETWORK_JOB_WORK_ITEMS = 241104082L;

    @VisibleForTesting
    @VisibleForTesting(visibility = VisibleForTesting.Visibility.PACKAGE)
    public static Clock sSystemClock = Clock.systemUTC();

    private abstract static class MySimpleClock extends Clock {
@@ -454,6 +454,10 @@ public class JobSchedulerService extends com.android.server.SystemService
                                runtimeUpdated = true;
                            }
                            break;
                        case Constants.KEY_PERSIST_IN_SPLIT_FILES:
                            mConstants.updatePersistingConstantsLocked();
                            mJobs.setUseSplitFiles(mConstants.PERSIST_IN_SPLIT_FILES);
                            break;
                        default:
                            if (name.startsWith(JobConcurrencyManager.CONFIG_KEY_PREFIX_CONCURRENCY)
                                    && !concurrencyUpdated) {
@@ -537,6 +541,8 @@ public class JobSchedulerService extends com.android.server.SystemService
        private static final String KEY_RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS =
                "runtime_min_high_priority_guarantee_ms";

        private static final String KEY_PERSIST_IN_SPLIT_FILES = "persist_in_split_files";

        private static final int DEFAULT_MIN_READY_NON_ACTIVE_JOBS_COUNT = 5;
        private static final long DEFAULT_MAX_NON_ACTIVE_JOB_BATCH_DELAY_MS = 31 * MINUTE_IN_MILLIS;
        private static final float DEFAULT_HEAVY_USE_FACTOR = .9f;
@@ -563,6 +569,7 @@ public class JobSchedulerService extends com.android.server.SystemService
        public static final long DEFAULT_RUNTIME_MIN_EJ_GUARANTEE_MS = 3 * MINUTE_IN_MILLIS;
        @VisibleForTesting
        static final long DEFAULT_RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS = 5 * MINUTE_IN_MILLIS;
        static final boolean DEFAULT_PERSIST_IN_SPLIT_FILES = false;
        private static final boolean DEFAULT_USE_TARE_POLICY = false;

        /**
@@ -677,6 +684,12 @@ public class JobSchedulerService extends com.android.server.SystemService
        public long RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS =
                DEFAULT_RUNTIME_MIN_HIGH_PRIORITY_GUARANTEE_MS;

        /**
         * Whether to persist jobs in split files (by UID). If false, all persisted jobs will be
         * saved in a single file.
         */
        public boolean PERSIST_IN_SPLIT_FILES = DEFAULT_PERSIST_IN_SPLIT_FILES;

        /**
         * If true, use TARE policy for job limiting. If false, use quotas.
         */
@@ -735,6 +748,11 @@ public class JobSchedulerService extends com.android.server.SystemService
                    DEFAULT_CONN_LOW_SIGNAL_STRENGTH_RELAX_FRAC);
        }

        /**
         * Refreshes {@link #PERSIST_IN_SPLIT_FILES} from DeviceConfig, falling back to
         * {@link #DEFAULT_PERSIST_IN_SPLIT_FILES} when the flag is unset.
         * NOTE(review): the "Locked" suffix suggests callers hold the service lock — confirm.
         */
        private void updatePersistingConstantsLocked() {
            PERSIST_IN_SPLIT_FILES = DeviceConfig.getBoolean(DeviceConfig.NAMESPACE_JOB_SCHEDULER,
                    KEY_PERSIST_IN_SPLIT_FILES, DEFAULT_PERSIST_IN_SPLIT_FILES);
        }

        private void updatePrefetchConstantsLocked() {
            PREFETCH_FORCE_BATCH_RELAX_THRESHOLD_MS = DeviceConfig.getLong(
                    DeviceConfig.NAMESPACE_JOB_SCHEDULER,
@@ -835,6 +853,8 @@ public class JobSchedulerService extends com.android.server.SystemService
            pw.print(KEY_RUNTIME_FREE_QUOTA_MAX_LIMIT_MS, RUNTIME_FREE_QUOTA_MAX_LIMIT_MS)
                    .println();

            pw.print(KEY_PERSIST_IN_SPLIT_FILES, PERSIST_IN_SPLIT_FILES).println();

            pw.print(Settings.Global.ENABLE_TARE, USE_TARE_POLICY).println();

            pw.decreaseIndent();
+226 −47
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@ import android.util.AtomicFile;
import android.util.Pair;
import android.util.Slog;
import android.util.SparseArray;
import android.util.SparseBooleanArray;
import android.util.SystemConfigFileCommitEventLogger;
import android.util.Xml;

@@ -89,6 +90,8 @@ public final class JobStore {

    /** Threshold to adjust how often we want to write to the db. */
    private static final long JOB_PERSIST_DELAY = 2000L;
    private static final String JOB_FILE_SPLIT_PREFIX = "jobs_";
    private static final int ALL_UIDS = -1;

    final Object mLock;
    final Object mWriteScheduleLock;    // used solely for invariants around write scheduling
@@ -105,13 +108,20 @@ public final class JobStore {
    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteInProgress;

    @GuardedBy("mWriteScheduleLock")
    private boolean mSplitFileMigrationNeeded;

    private static final Object sSingletonLock = new Object();
    private final SystemConfigFileCommitEventLogger mEventLogger;
    private final AtomicFile mJobsFile;
    private final File mJobFileDirectory;
    private final SparseBooleanArray mPendingJobWriteUids = new SparseBooleanArray();
    /** Handler backed by IoThread for writing to disk. */
    private final Handler mIoHandler = IoThread.getHandler();
    private static JobStore sSingleton;

    private boolean mUseSplitFiles = JobSchedulerService.Constants.DEFAULT_PERSIST_IN_SPLIT_FILES;

    private JobStorePersistStats mPersistInfo = new JobStorePersistStats();

    /** Used by the {@link JobSchedulerService} to instantiate the JobStore. */
@@ -144,10 +154,10 @@ public final class JobStore {
        mContext = context;

        File systemDir = new File(dataDir, "system");
        File jobDir = new File(systemDir, "job");
        jobDir.mkdirs();
        mJobFileDirectory = new File(systemDir, "job");
        mJobFileDirectory.mkdirs();
        mEventLogger = new SystemConfigFileCommitEventLogger("jobs");
        mJobsFile = new AtomicFile(new File(jobDir, "jobs.xml"), mEventLogger);
        mJobsFile = createJobFile(new File(mJobFileDirectory, "jobs.xml"));

        mJobSet = new JobSet();

@@ -162,12 +172,21 @@ public final class JobStore {
        // an incorrect historical timestamp.  That's fine; at worst we'll reboot with
        // a *correct* timestamp, see a bunch of overdue jobs, and run them; then
        // settle into normal operation.
        mXmlTimestamp = mJobsFile.getLastModifiedTime();
        mXmlTimestamp = mJobsFile.exists()
                ? mJobsFile.getLastModifiedTime() : mJobFileDirectory.lastModified();
        mRtcGood = (sSystemClock.millis() > mXmlTimestamp);

        readJobMapFromDisk(mJobSet, mRtcGood);
    }

    /** Returns an AtomicFile for {@code baseName}.xml inside the job file directory. */
    private AtomicFile createJobFile(String baseName) {
        return createJobFile(new File(mJobFileDirectory, baseName + ".xml"));
    }

    /** Wraps {@code file} in an AtomicFile that reports commit events to {@code mEventLogger}. */
    private AtomicFile createJobFile(File file) {
        return new AtomicFile(file, mEventLogger);
    }

    /**
     * Returns true if the RTC looked sane when jobs were inflated from disk (the clock read
     * later than the persisted state's timestamp), so the loaded job times can be trusted.
     */
    public boolean jobTimesInflatedValid() {
        return mRtcGood;
    }
@@ -211,6 +230,7 @@ public final class JobStore {
    public void add(JobStatus jobStatus) {
        mJobSet.add(jobStatus);
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        if (DEBUG) {
@@ -224,6 +244,9 @@ public final class JobStore {
    /**
     * Adds the job to the in-memory set without scheduling a disk write. The job's UID is
     * still marked dirty so a subsequently triggered write will persist it.
     */
    @VisibleForTesting
    public void addForTesting(JobStatus jobStatus) {
        mJobSet.add(jobStatus);
        if (jobStatus.isPersisted()) {
            // Mark the UID dirty; no write is scheduled here to keep tests deterministic.
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    boolean containsJob(JobStatus jobStatus) {
@@ -257,11 +280,23 @@ public final class JobStore {
            return false;
        }
        if (removeFromPersisted && jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        return removed;
    }

    /**
     * Like {@link #remove(JobStatus, boolean)}, but doesn't schedule a disk write.
     * The job's UID is still marked dirty so a later write will persist the removal.
     */
    @VisibleForTesting
    public void removeForTesting(JobStatus jobStatus) {
        mJobSet.remove(jobStatus);
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    /**
     * Remove the jobs of users not specified in the keepUserIds.
     * @param keepUserIds Array of User IDs whose jobs should be kept and not removed.
@@ -273,6 +308,7 @@ public final class JobStore {
    /**
     * Removes every job from the in-memory set, marks all UIDs dirty via the {@code ALL_UIDS}
     * sentinel, and schedules an asynchronous write so the cleared state reaches disk.
     */
    @VisibleForTesting
    public void clear() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        maybeWriteStatusToDiskAsync();
    }

@@ -282,6 +318,36 @@ public final class JobStore {
    /**
     * Removes every job from the in-memory set and marks all UIDs dirty, without scheduling
     * a disk write (tests trigger writes explicitly).
     */
    @VisibleForTesting
    public void clearForTesting() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
    }

    /**
     * Switches between the single-file and per-UID split-file persistence formats. When the
     * value actually changes, a full asynchronous rewrite/migration of the job files is kicked
     * off so on-disk state matches the new format.
     */
    void setUseSplitFiles(boolean useSplitFiles) {
        synchronized (mLock) {
            if (mUseSplitFiles != useSplitFiles) {
                mUseSplitFiles = useSplitFiles;
                migrateJobFilesAsync();
            }
        }
    }

    /**
     * The same as {@link #setUseSplitFiles(boolean)} but does not schedule writing.
     * This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void setUseSplitFilesForTesting(boolean useSplitFiles) {
        final boolean changed;
        synchronized (mLock) {
            changed = mUseSplitFiles != useSplitFiles;
            if (changed) {
                mUseSplitFiles = useSplitFiles;
                // Mark everything dirty so the next explicit write rewrites all jobs.
                mPendingJobWriteUids.put(ALL_UIDS, true);
            }
        }
        if (changed) {
            // Taken separately from mLock; the write runnable checks this flag to know it
            // should clean up files from the previous format after the next write.
            synchronized (mWriteScheduleLock) {
                mSplitFileMigrationNeeded = true;
            }
        }
    }

    /**
@@ -352,6 +418,16 @@ public final class JobStore {
    private static final String XML_TAG_ONEOFF = "one-off";
    private static final String XML_TAG_EXTRAS = "extras";

    /**
     * Schedules an asynchronous rewrite of every persisted job into the current file format
     * (single vs. split). Marks all UIDs dirty, flags that migration cleanup of the old
     * format's files is needed, and queues a write.
     */
    private void migrateJobFilesAsync() {
        synchronized (mLock) {
            mPendingJobWriteUids.put(ALL_UIDS, true);
        }
        synchronized (mWriteScheduleLock) {
            mSplitFileMigrationNeeded = true;
            maybeWriteStatusToDiskAsync();
        }
    }

    /**
     * Every time the state changes we write all the jobs in one swath, instead of trying to
     * track incremental changes.
@@ -449,10 +525,38 @@ public final class JobStore {
     * NOTE: This Runnable locks on mLock
     */
    private final Runnable mWriteRunnable = new Runnable() {
        private final SparseArray<AtomicFile> mJobFiles = new SparseArray<>();
        private final CopyConsumer mPersistedJobCopier = new CopyConsumer();

        /**
         * Snapshots persisted jobs into per-file buckets: keyed by UID when split files are in
         * use, or under the single {@code ALL_UIDS} key otherwise. Jobs are deep-copied so the
         * write can proceed without holding mLock.
         */
        class CopyConsumer implements Consumer<JobStatus> {
            // Bucket key -> cloned jobs destined for that bucket's file.
            private final SparseArray<List<JobStatus>> mJobStoreCopy = new SparseArray<>();
            // True when every persisted job must be copied (single-file mode or ALL_UIDS dirty).
            private boolean mCopyAllJobs;

            // Must be called (under mLock) before iterating the job set.
            private void prepare() {
                mCopyAllJobs = !mUseSplitFiles || mPendingJobWriteUids.get(ALL_UIDS);
            }

            @Override
            public void accept(JobStatus jobStatus) {
                final int uid = mUseSplitFiles ? jobStatus.getUid() : ALL_UIDS;
                // Only copy persisted jobs whose bucket is dirty (or everything, if flagged).
                if (jobStatus.isPersisted() && (mCopyAllJobs || mPendingJobWriteUids.get(uid))) {
                    List<JobStatus> uidJobList = mJobStoreCopy.get(uid);
                    if (uidJobList == null) {
                        uidJobList = new ArrayList<>();
                        mJobStoreCopy.put(uid, uidJobList);
                    }
                    // Clone so later mutations of the live job don't race the disk write.
                    uidJobList.add(new JobStatus(jobStatus));
                }
            }

            // Drop the snapshot after the write completes to free memory.
            private void reset() {
                mJobStoreCopy.clear();
            }
        }

        @Override
        public void run() {
            final long startElapsed = sElapsedRealtimeClock.millis();
            final List<JobStatus> storeCopy = new ArrayList<JobStatus>();
            // Intentionally allow new scheduling of a write operation *before* we clone
            // the job set.  If we reset it to false after cloning, there's a window in
            // which no new write will be scheduled but mLock is not held, i.e. a new
@@ -469,31 +573,73 @@ public final class JobStore {
                }
                mWriteInProgress = true;
            }
            final boolean useSplitFiles;
            synchronized (mLock) {
                // Clone the jobs so we can release the lock before writing.
                mJobSet.forEachJob(null, (job) -> {
                    if (job.isPersisted()) {
                        storeCopy.add(new JobStatus(job));
                useSplitFiles = mUseSplitFiles;
                mPersistedJobCopier.prepare();
                mJobSet.forEachJob(null, mPersistedJobCopier);
                mPendingJobWriteUids.clear();
            }
            mPersistInfo.countAllJobsSaved = 0;
            mPersistInfo.countSystemServerJobsSaved = 0;
            mPersistInfo.countSystemSyncManagerJobsSaved = 0;
            for (int i = mPersistedJobCopier.mJobStoreCopy.size() - 1; i >= 0; --i) {
                AtomicFile file;
                if (useSplitFiles) {
                    final int uid = mPersistedJobCopier.mJobStoreCopy.keyAt(i);
                    file = mJobFiles.get(uid);
                    if (file == null) {
                        file = createJobFile(JOB_FILE_SPLIT_PREFIX + uid);
                        mJobFiles.put(uid, file);
                    }
                });
                } else {
                    file = mJobsFile;
                }
                if (DEBUG) {
                    Slog.d(TAG, "Writing for " + mPersistedJobCopier.mJobStoreCopy.keyAt(i)
                            + " to " + file.getBaseFile().getName() + ": "
                            + mPersistedJobCopier.mJobStoreCopy.valueAt(i).size() + " jobs");
                }
                writeJobsMapImpl(file, mPersistedJobCopier.mJobStoreCopy.valueAt(i));
            }
            writeJobsMapImpl(storeCopy);
            if (DEBUG) {
                Slog.v(TAG, "Finished writing, took " + (sElapsedRealtimeClock.millis()
                        - startElapsed) + "ms");
            }
            mPersistedJobCopier.reset();
            if (!useSplitFiles) {
                mJobFiles.clear();
            }
            // Update the last modified time of the directory to aid in RTC time verification
            // (see the JobStore constructor).
            mJobFileDirectory.setLastModified(sSystemClock.millis());
            synchronized (mWriteScheduleLock) {
                if (mSplitFileMigrationNeeded) {
                    final File[] files = mJobFileDirectory.listFiles();
                    for (File file : files) {
                        if (useSplitFiles) {
                            if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                                // Delete the now unused file so there's no confusion in the future.
                                file.delete();
                            }
                        } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // Delete the now unused file so there's no confusion in the future.
                            file.delete();
                        }
                    }
                }
                mWriteInProgress = false;
                mWriteScheduleLock.notifyAll();
            }
        }

        private void writeJobsMapImpl(List<JobStatus> jobList) {
        private void writeJobsMapImpl(@NonNull AtomicFile file, @NonNull List<JobStatus> jobList) {
            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            mEventLogger.setStartTime(SystemClock.uptimeMillis());
            try (FileOutputStream fos = mJobsFile.startWrite()) {
            try (FileOutputStream fos = file.startWrite()) {
                TypedXmlSerializer out = Xml.resolveSerializer(fos);
                out.startDocument(null, true);
                out.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true);
@@ -523,7 +669,7 @@ public final class JobStore {
                out.endTag(null, "job-info");
                out.endDocument();

                mJobsFile.finishWrite(fos);
                file.finishWrite(fos);
            } catch (IOException e) {
                if (DEBUG) {
                    Slog.v(TAG, "Error writing out job data.", e);
@@ -533,9 +679,9 @@ public final class JobStore {
                    Slog.d(TAG, "Error persisting bundle.", e);
                }
            } finally {
                mPersistInfo.countAllJobsSaved = numJobs;
                mPersistInfo.countSystemServerJobsSaved = numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsSaved = numSyncJobs;
                mPersistInfo.countAllJobsSaved += numJobs;
                mPersistInfo.countSystemServerJobsSaved += numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsSaved += numSyncJobs;
            }
        }

@@ -720,15 +866,35 @@ public final class JobStore {

        @Override
        public void run() {
            if (!mJobFileDirectory.isDirectory()) {
                Slog.wtf(TAG, "jobs directory isn't a directory O.O");
                mJobFileDirectory.mkdirs();
                return;
            }

            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            List<JobStatus> jobs;
            try (FileInputStream fis = mJobsFile.openRead()) {
            final File[] files;
            try {
                files = mJobFileDirectory.listFiles();
            } catch (SecurityException e) {
                Slog.wtf(TAG, "Not allowed to read job file directory", e);
                return;
            }
            if (files == null) {
                Slog.wtfStack(TAG, "Couldn't get job file list");
                return;
            }
            boolean needFileMigration = false;
            long now = sElapsedRealtimeClock.millis();
            for (File file : files) {
                final AtomicFile aFile = createJobFile(file);
                try (FileInputStream fis = aFile.openRead()) {
                    synchronized (mLock) {
                        jobs = readJobMapImpl(fis, rtcGood);
                        if (jobs != null) {
                        long now = sElapsedRealtimeClock.millis();
                            for (int i = 0; i < jobs.size(); i++) {
                                JobStatus js = jobs.get(i);
                                js.prepareLocked();
@@ -746,23 +912,36 @@ public final class JobStore {
                        }
                    }
                } catch (FileNotFoundException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Could not find jobs file, probably there was nothing to load.");
                }
                    // mJobFileDirectory.listFiles() gave us this file...why can't we find it???
                    Slog.e(TAG, "Could not find jobs file: " + file.getName());
                } catch (XmlPullParserException | IOException e) {
                Slog.wtf(TAG, "Error jobstore xml.", e);
                    Slog.wtf(TAG, "Error in " + file.getName(), e);
                } catch (Exception e) {
                    // Crashing at this point would result in a boot loop, so live with a general
                    // Exception for system stability's sake.
                    Slog.wtf(TAG, "Unexpected exception", e);
            } finally {
                }
                if (mUseSplitFiles) {
                    if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                        // We're supposed to be using the split file architecture, but we still have
                        // the old job file around. Fully migrate and remove the old file.
                        needFileMigration = true;
                    }
                } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                    // We're supposed to be using the legacy single file architecture, but we still
                    // have some job split files around. Fully migrate and remove the split files.
                    needFileMigration = true;
                }
            }
            if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
                mPersistInfo.countAllJobsLoaded = numJobs;
                mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
            }
            }
            Slog.i(TAG, "Read " + numJobs + " jobs");
            if (needFileMigration) {
                migrateJobFilesAsync();
            }
        }

        private List<JobStatus> readJobMapImpl(InputStream fis, boolean rtcIsGood)
+16 −3
Original line number Diff line number Diff line
@@ -155,7 +155,18 @@ public class JobStoreTest {
    }

    @Test
    public void testWritingTwoFilesToDisk() throws Exception {
    public void testWritingTwoJobsToDisk_singleFile() throws Exception {
        mTaskStoreUnderTest.setUseSplitFiles(false);
        runWritingTwoJobsToDisk();
    }

    /** Verifies round-tripping two jobs through disk with the per-UID split-file format. */
    @Test
    public void testWritingTwoJobsToDisk_splitFiles() throws Exception {
        mTaskStoreUnderTest.setUseSplitFiles(true);
        runWritingTwoJobsToDisk();
    }

    private void runWritingTwoJobsToDisk() throws Exception {
        final JobInfo task1 = new Builder(8, mComponent)
                .setRequiresDeviceIdle(true)
                .setPeriodic(10000L)
@@ -169,8 +180,10 @@ public class JobStoreTest {
                .setRequiredNetworkType(JobInfo.NETWORK_TYPE_UNMETERED)
                .setPersisted(true)
                .build();
        final JobStatus taskStatus1 = JobStatus.createFromJobInfo(task1, SOME_UID, null, -1, null);
        final JobStatus taskStatus2 = JobStatus.createFromJobInfo(task2, SOME_UID, null, -1, null);
        final int uid1 = SOME_UID;
        final int uid2 = uid1 + 1;
        final JobStatus taskStatus1 = JobStatus.createFromJobInfo(task1, uid1, null, -1, null);
        final JobStatus taskStatus2 = JobStatus.createFromJobInfo(task2, uid2, null, -1, null);
        mTaskStoreUnderTest.add(taskStatus1);
        mTaskStoreUnderTest.add(taskStatus2);
        waitForPendingIo();
+64 −6
Original line number Diff line number Diff line
@@ -15,7 +15,6 @@
 */
package com.android.frameworks.perftests.job;


import android.app.job.JobInfo;
import android.content.ComponentName;
import android.content.Context;
@@ -46,7 +45,8 @@ import java.util.List;
public class JobStorePerfTests {
    private static final String SOURCE_PACKAGE = "com.android.frameworks.perftests.job";
    private static final int SOURCE_USER_ID = 0;
    private static final int CALLING_UID = 10079;
    private static final int BASE_CALLING_UID = 10079;
    private static final int MAX_UID_COUNT = 10;

    private static Context sContext;
    private static File sTestDir;
@@ -65,10 +65,10 @@ public class JobStorePerfTests {
        sJobStore = JobStore.initAndGetForTesting(sContext, sTestDir);

        for (int i = 0; i < 50; i++) {
            sFewJobs.add(createJobStatus("fewJobs", i));
            sFewJobs.add(createJobStatus("fewJobs", i, BASE_CALLING_UID + (i % MAX_UID_COUNT)));
        }
        for (int i = 0; i < 500; i++) {
            sManyJobs.add(createJobStatus("manyJobs", i));
            sManyJobs.add(createJobStatus("manyJobs", i, BASE_CALLING_UID + (i % MAX_UID_COUNT)));
        }
    }

@@ -104,6 +104,64 @@ public class JobStorePerfTests {
        runPersistedJobWriting(sManyJobs);
    }

    /**
     * Benchmarks an incremental (delta) persist: establish a baseline set on disk, apply the
     * given additions/removals, then time only the second write, which should touch just the
     * dirty UIDs' files in split-file mode.
     */
    private void runPersistedJobWriting_delta(List<JobStatus> jobList,
            List<JobStatus> jobAdditions, List<JobStatus> jobRemovals) {
        final ManualBenchmarkState benchmarkState = mPerfManualStatusReporter.getBenchmarkState();

        long elapsedTimeNs = 0;
        while (benchmarkState.keepRunning(elapsedTimeNs)) {
            // Untimed setup: write the full baseline so the delta write starts clean.
            sJobStore.clearForTesting();
            for (JobStatus job : jobList) {
                sJobStore.addForTesting(job);
            }
            sJobStore.writeStatusToDiskForTesting();

            for (JobStatus job : jobAdditions) {
                sJobStore.addForTesting(job);
            }
            for (JobStatus job : jobRemovals) {
                sJobStore.removeForTesting(job);
            }

            // Only the delta write is measured.
            final long startTime = SystemClock.elapsedRealtimeNanos();
            sJobStore.writeStatusToDiskForTesting();
            final long endTime = SystemClock.elapsedRealtimeNanos();
            elapsedTimeNs = endTime - startTime;
        }
    }

    /**
     * Delta-write benchmark over the small job set: modifies a third of the jobs, spread
     * across half the UIDs, alternating additions and removals.
     */
    @Test
    public void testPersistedJobWriting_delta_fewJobs() {
        List<JobStatus> additions = new ArrayList<>();
        List<JobStatus> removals = new ArrayList<>();
        final int numModifiedUids = MAX_UID_COUNT / 2;
        for (int i = 0; i < sFewJobs.size() / 3; ++i) {
            JobStatus job = createJobStatus("fewJobs", i, BASE_CALLING_UID + (i % numModifiedUids));
            if (i % 2 == 0) {
                additions.add(job);
            } else {
                removals.add(job);
            }
        }
        runPersistedJobWriting_delta(sFewJobs, additions, removals);
    }

    /**
     * Delta-write benchmark over the large job set: modifies a third of the jobs, spread
     * across half the UIDs, alternating additions and removals.
     */
    @Test
    public void testPersistedJobWriting_delta_manyJobs() {
        List<JobStatus> additions = new ArrayList<>();
        List<JobStatus> removals = new ArrayList<>();
        final int numModifiedUids = MAX_UID_COUNT / 2;
        for (int i = 0; i < sManyJobs.size() / 3; ++i) {
            // Fixed copy-paste error: the tag was "fewJobs" even though this test exercises
            // the many-jobs set, which made traces/source tags misleading.
            JobStatus job = createJobStatus("manyJobs", i,
                    BASE_CALLING_UID + (i % numModifiedUids));
            if (i % 2 == 0) {
                additions.add(job);
            } else {
                removals.add(job);
            }
        }
        runPersistedJobWriting_delta(sManyJobs, additions, removals);
    }

    private void runPersistedJobReading(List<JobStatus> jobList, boolean rtcIsGood) {
        final ManualBenchmarkState benchmarkState = mPerfManualStatusReporter.getBenchmarkState();

@@ -144,12 +202,12 @@ public class JobStorePerfTests {
        runPersistedJobReading(sManyJobs, false);
    }

    private static JobStatus createJobStatus(String testTag, int jobId) {
    private static JobStatus createJobStatus(String testTag, int jobId, int callingUid) {
        JobInfo jobInfo = new JobInfo.Builder(jobId,
                new ComponentName(sContext, "JobStorePerfTestJobService"))
                .setPersisted(true)
                .build();
        return JobStatus.createFromJobInfo(
                jobInfo, CALLING_UID, SOURCE_PACKAGE, SOURCE_USER_ID, testTag);
                jobInfo, callingUid, SOURCE_PACKAGE, SOURCE_USER_ID, testTag);
    }
}