Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ca884f5c authored by John Wu
Browse files

[HostStubGen] Fix off-by-one bug when incrementing index counter

This bug will cause the first element of the zip file to be skipped.
Applying jarjar on the output simply reorders some classes to the first
element, making the bug more apparent.

Also, take the opportunity to fix concurrent logging bugs

Bug: 414920323
Flag: EXEMPT host side change only
Test: atest RavenwoodCoreTest
Change-Id: I43d25eeea10d7a611d0474fd9a270823a48199d7
parent 6b58020b
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -50,6 +50,7 @@ java_defaults {
        strip_mockito: true,
    },
    auto_gen_config: true,
    jarjar_rules: "empty.txt",
}

android_ravenwood_test {
+0 −0

Empty file added.

+4 −4
Original line number Diff line number Diff line
@@ -226,7 +226,9 @@ open class HostStubGenLogger(val options: HostStubGenLoggerOptions) {
        }

        override fun write(cbuf: CharArray, off: Int, len: Int) {
            println(level, String(cbuf, off, len))
            String(cbuf, off, len).lines().forEach {
                println(level, it)
            }
        }
    }
}
@@ -244,9 +246,9 @@ private class BufferedLogger(base: HostStubGenLogger) : HostStubGenLogger(base)
                    it.println(indent, message)
                }
            }
            output.clear()
            it.flush()
        }
        output.clear()
    }

    override fun println(level: LogLevel, message: String) {
@@ -330,8 +332,6 @@ interface LogPrinter {

    fun println(indent: Int, message: String)

    // TODO: This should be removed once MultiplexingWriter starts applying indent, at which point
    // println() should be used instead.
    fun write(cbuf: CharArray, off: Int, len: Int)

    fun flush()
+15 −6
Original line number Diff line number Diff line
@@ -23,7 +23,7 @@ import java.io.InputStream
import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.util.concurrent.CountDownLatch
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
@@ -39,6 +39,8 @@ import org.apache.commons.compress.archivers.zip.ZipArchiveEntry
import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream
import org.apache.commons.compress.archivers.zip.ZipFile

// Enable to debug concurrency issues
const val DISABLE_PARALLELISM = false
const val DEFAULT_SHARD_COUNT = 20
const val MINIMUM_BATCH_SIZE = 100

@@ -148,7 +150,7 @@ class ConcurrentListMapper<T>(val list: MutableList<T?>) {

    inline fun process(mapper: (T) -> T?) {
        while (true) {
            val idx = currentIndex.incrementAndGet()
            val idx = currentIndex.getAndIncrement()
            if (idx < list.size) {
                list[idx]?.let { list[idx] = mapper(it) }
                continue
@@ -163,7 +165,7 @@ class ConcurrentZipFile(
    parallelism: Int,
) {
    val entries: MutableList<ZipEntryData?>
    val executor: ExecutorService
    val executor: Executor
    val shardCount: Int

    init {
@@ -175,10 +177,16 @@ class ConcurrentZipFile(
                .map { ZipEntryData.Entry(it, mappedBytes) }
                .toMutableList()
        }
        if (DISABLE_PARALLELISM) {
            shardCount = 1
            // Directly run on the same thread as the caller
            executor = Executor { r -> r.run() }
        } else {
            val count = min(parallelism, Runtime.getRuntime().availableProcessors())
            shardCount = min(count, entries.size / MINIMUM_BATCH_SIZE + 1)
            executor = Executors.newFixedThreadPool(shardCount)
        }
    }

    inline fun forEach(action: (ZipEntryData) -> Unit) {
        entries.asSequence().filterNotNull().forEach(action)
@@ -206,6 +214,7 @@ class ConcurrentZipFile(
                } catch (e: Throwable) {
                    exception.compareAndSet(null, e)
                } finally {
                    log.flush()
                    latch.countDown()
                }
            }