Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0f707941 authored by David Anderson
Browse files

libsnapshot: Fix inconsistency in how merge ops are counted.

A recent change to libsnapshot caused us to filter out duplicate COW
ops. The merge consistency check relied on the old method of manually
counting ops, causing it to come up with a different number. Fix this by
using the already computed "official" count.

Bug: 193532829
Test: new test case in vts_libsnapshot_test
      manual test with incremental OTA
Change-Id: I68d1e41f5c140af20a04ba80e3db0780a916ecf8
parent 40a0664b
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -381,10 +381,10 @@ bool CowReader::PrepMergeOps() {
    std::set<int, std::greater<int>> other_ops;
    auto seq_ops_set = std::unordered_set<uint32_t>();
    auto block_map = std::make_shared<std::unordered_map<uint32_t, int>>();
    int num_seqs = 0;
    size_t num_seqs = 0;
    size_t read;

    for (int i = 0; i < ops_->size(); i++) {
    for (size_t i = 0; i < ops_->size(); i++) {
        auto& current_op = ops_->data()[i];

        if (current_op.type == kCowSequenceOp) {
@@ -396,7 +396,7 @@ bool CowReader::PrepMergeOps() {
                PLOG(ERROR) << "Failed to read sequence op!";
                return false;
            }
            for (int j = num_seqs; j < num_seqs + seq_len; j++) {
            for (size_t j = num_seqs; j < num_seqs + seq_len; j++) {
                seq_ops_set.insert(merge_op_blocks->data()[j]);
            }
            num_seqs += seq_len;
@@ -413,10 +413,11 @@ bool CowReader::PrepMergeOps() {
        }
        block_map->insert({current_op.new_block, i});
    }
    if (merge_op_blocks->size() > header_.num_merge_ops)
    if (merge_op_blocks->size() > header_.num_merge_ops) {
        num_ordered_ops_to_merge_ = merge_op_blocks->size() - header_.num_merge_ops;
    else
    } else {
        num_ordered_ops_to_merge_ = 0;
    }
    merge_op_blocks->reserve(merge_op_blocks->size() + other_ops.size());
    for (auto block : other_ops) {
        merge_op_blocks->emplace_back(block);
+4 −0
Original line number Diff line number Diff line
@@ -126,6 +126,10 @@ class CowReader : public ICowReader {

    bool GetRawBytes(uint64_t offset, void* buffer, size_t len, size_t* read);

    // Returns the total number of data ops that should be merged. This is the
    // count of the merge sequence before removing already-merged operations.
    // It may be different than the actual data op count, for example, if there
    // are duplicate ops in the stream.
    uint64_t get_num_total_data_ops() { return num_total_data_ops_; }

    uint64_t get_num_ordered_ops_to_merge() { return num_ordered_ops_to_merge_; }
+1 −5
Original line number Diff line number Diff line
@@ -1193,11 +1193,7 @@ MergeFailureCode SnapshotManager::CheckMergeConsistency(LockedFile* lock, const
            return MergeFailureCode::ParseCowConsistencyCheck;
        }

        for (auto iter = reader.GetOpIter(); !iter->Done(); iter->Next()) {
            if (!IsMetadataOp(iter->Get())) {
                num_ops++;
            }
        }
        num_ops = reader.get_num_total_data_ops();
    }

    // Second pass, try as hard as we can to get the actual number of blocks
+47 −0
Original line number Diff line number Diff line
@@ -1184,6 +1184,53 @@ TEST_F(SnapshotUpdateTest, FullUpdateFlow) {
    }
}

// Regression test for the merge-consistency miscount fixed by this change:
// writing the SAME block twice produces duplicate COW ops, which the reader
// filters, so the consistency check must use the reader's official op count
// (get_num_total_data_ops) rather than re-counting ops manually. The test
// passes only if the merge still completes after such a duplicate-op OTA.
TEST_F(SnapshotUpdateTest, DuplicateOps) {
    if (!IsCompressionEnabled()) {
        GTEST_SKIP() << "Compression-only test";
    }

    // OTA client blindly unmaps all partitions that are possibly mapped.
    for (const auto& name : {"sys_b", "vnd_b", "prd_b"}) {
        ASSERT_TRUE(sm->UnmapUpdateSnapshot(name));
    }

    // Execute the update.
    ASSERT_TRUE(sm->BeginUpdate());
    ASSERT_TRUE(sm->CreateUpdateSnapshots(manifest_));

    // Write some data to target partitions.
    for (const auto& name : {"sys_b", "vnd_b", "prd_b"}) {
        ASSERT_TRUE(WriteSnapshotAndHash(name));
    }

    std::vector<PartitionUpdate*> partitions = {sys_, vnd_, prd_};
    for (auto* partition : partitions) {
        AddOperation(partition);

        std::unique_ptr<ISnapshotWriter> writer;
        auto res = MapUpdateSnapshot(partition->partition_name() + "_b", &writer);
        ASSERT_TRUE(res);
        // Write block 0 twice on purpose: the second op is a duplicate of the
        // first, which is exactly the condition that previously desynced the
        // manual op count from the reader's merge-sequence count.
        ASSERT_TRUE(writer->AddZeroBlocks(0, 1));
        ASSERT_TRUE(writer->AddZeroBlocks(0, 1));
        ASSERT_TRUE(writer->Finalize());
    }

    ASSERT_TRUE(sm->FinishedSnapshotWrites(false));

    // Simulate shutting down the device.
    ASSERT_TRUE(UnmapAll());

    // After reboot, init does first stage mount.
    auto init = NewManagerForFirstStageMount("_b");
    ASSERT_NE(init, nullptr);
    ASSERT_TRUE(init->NeedSnapshotsInFirstStageMount());
    ASSERT_TRUE(init->CreateLogicalAndSnapshotPartitions("super", snapshot_timeout_));

    // Initiate the merge and wait for it to be completed. With the fix, the
    // consistency check agrees with the deduplicated op stream and the merge
    // reaches MergeCompleted instead of failing the count comparison.
    ASSERT_TRUE(init->InitiateMerge());
    ASSERT_EQ(UpdateState::MergeCompleted, init->ProcessUpdateState());
}

// Test that shrinking and growing partitions at the same time is handled
// correctly in VABC.
TEST_F(SnapshotUpdateTest, SpaceSwapUpdate) {
+1 −1
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ bool Snapuserd::CommitMerge(int num_merge_ops) {

    int ret = msync(mapped_addr_, BLOCK_SZ, MS_SYNC);
    if (ret < 0) {
        PLOG(ERROR) << "msync header failed: " << ret;
        SNAP_PLOG(ERROR) << "msync header failed: " << ret;
        return false;
    }