Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5cb920aa authored by Jackeagle
Browse files

Fix large blob loss: keep extracted files in memory and fix IndexedDB commit



setInDBStore resolved on request.onsuccess which fires before the
IndexedDB transaction commits. For large blobs (e.g. super.img),
the transaction can abort during commit due to storage quota limits,
silently rolling back the write while the code assumes it succeeded.

- Keep extracted blobs in memory (this.blobs) so they are always
  available for the current session regardless of IndexedDB state
- getFile checks in-memory store first, falls back to IndexedDB
  for blobs cached from a previous session
- setInDBStore now waits on transaction.oncomplete instead of
  request.onsuccess and handles transaction.onabort
- IndexedDB write failures are caught and logged as warnings
  without blocking the flash process

Signed-off-by: Jackeagle <jackeagle102@gmail.com>
parent 6acda75b
Loading
Loading
Loading
Loading
+31 −13
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ export class Downloader {
    this.db = null;
    this.stored = {};
    this.localZipFile = null;
    this.blobs = {};
  }

  async init() {
@@ -106,8 +107,15 @@ export class Downloader {
                file.mapping,
              );
              if (filesRequired.includes(filename)) {
                await this.setInDBStore(unzippedEntry.blob, filename);
                this.blobs[filename] = unzippedEntry.blob;
                this.stored[filename] = true;
                try {
                  await this.setInDBStore(unzippedEntry.blob, filename);
                } catch (e) {
                  console.warn(
                    `IndexedDB write failed for ${filename}: ${e.message || e}`,
                  );
                }
                const fileSHA = await this.computeSha256(
                  unzippedEntry.blob,
                  (loaded, total) => {
@@ -119,8 +127,15 @@ export class Downloader {
            }
            await zipReader.close();
          } else {
            await this.setInDBStore(blob, file.name);
            this.blobs[file.name] = blob;
            this.stored[file.name] = true;
            try {
              await this.setInDBStore(blob, file.name);
            } catch (e) {
              console.warn(
                `IndexedDB write failed for ${file.name}: ${e.message || e}`,
              );
            }
          }
        }
      }
@@ -253,10 +268,14 @@ export class Downloader {
   * this function retrieves the promise linked to the fileName
   */
  async getFile(name) {
    const file = this.stored[name];
    if (!file) {
    if (!this.stored[name]) {
      throw new Error(`File ${name} was not previously downloaded`);
    }
    // Prefer in-memory blob (always available in the current session)
    // over IndexedDB (large blobs can silently fail to commit).
    if (this.blobs[name]) {
      return this.blobs[name];
    }
    return await this.getFromDBStore(name);
  }

@@ -305,15 +324,14 @@ export class Downloader {
    return new Promise((resolve, reject) => {
      const transaction = this.db.transaction(DB_NAME, "readwrite");
      const store = transaction.objectStore(DB_NAME);
      const request = store.put(blob, key);

      request.onsuccess = () => {
        resolve();
      };

      request.onerror = (event) => {
        reject(event.target.error);
      };
      store.put(blob, key);

      // Wait for the transaction to fully commit — request.onsuccess fires
      // before commit and can't detect QuotaExceededError on large blobs.
      transaction.oncomplete = () => resolve();
      transaction.onerror = (event) => reject(event.target.error);
      transaction.onabort = () =>
        reject(new Error("IndexedDB transaction aborted (storage quota?)"));
    });
  }