Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7e7b1c64 authored by Xusong Wang's avatar Xusong Wang Committed by Automerger Merge Worker
Browse files

Add VTS tests for reusable execution. am: 72e06c28 am: 369fbedc am: 89d07b80

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1954276

Change-Id: I7f0acd4432b25aa7a510c52a442c3001ae03ed61
parents 76d9d1cb 89d07b80
Loading
Loading
Loading
Loading
+261 −190
Original line number Diff line number Diff line
@@ -58,25 +58,52 @@ struct TestConfig {
    bool measureTiming;
    OutputType outputType;
    MemoryType memoryType;
    bool reusable;
    // `reportSkipping` indicates if a test should print an info message in case
    // it is skipped. The field is set to true by default and is set to false in
    // quantization coupling tests to suppress skipping a test
    bool reportSkipping;
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType)
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reusable)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(true) {}
    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
               bool reportSkipping)
               bool reusable, bool reportSkipping)
        : executor(executor),
          measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
          reusable(reusable),
          reportSkipping(reportSkipping) {}
};

// Returns a human-readable name for `type`, used when labeling test
// configurations in SCOPED_TRACE output.
std::string toString(OutputType type) {
    switch (type) {
        case OutputType::FULLY_SPECIFIED:
            return "FULLY_SPECIFIED";
        case OutputType::UNSPECIFIED:
            return "UNSPECIFIED";
        case OutputType::INSUFFICIENT:
            return "INSUFFICIENT";
        case OutputType::MISSED_DEADLINE:
            return "MISSED_DEADLINE";
    }
    // Flowing off the end of a value-returning function is undefined behavior.
    // The switch is intentionally left default-free so -Wswitch still flags any
    // newly added enumerator; this fallback only covers out-of-range values.
    return "UNKNOWN OutputType";
}

// Formats `config` as a brace-initializer style string for test tracing.
// Note: `reportSkipping` is deliberately not part of the printed form.
std::string toString(const TestConfig& config) {
    const auto asBool = [](bool value) { return value ? "true" : "false"; };
    std::string out = "TestConfig{.executor=";
    out += toString(config.executor);
    out += ", .measureTiming=";
    out += asBool(config.measureTiming);
    out += ", .outputType=";
    out += toString(config.outputType);
    out += ", .memoryType=";
    out += toString(config.memoryType);
    out += ", .reusable=";
    out += asBool(config.reusable);
    out += "}";
    return out;
}

enum class IOType { INPUT, OUTPUT };

class DeviceMemoryAllocator {
@@ -558,6 +585,16 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
        loopTimeoutDurationNs = 1 * kMillisecond;
    }

    std::shared_ptr<IExecution> execution;
    if (testConfig.reusable) {
        const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
                                                                loopTimeoutDurationNs, &execution);
        ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
        ASSERT_NE(nullptr, execution.get());
    }

    const auto executeAndCheckResults = [&preparedModel, &execution, &testConfig, &testModel,
                                         &context, &request, loopTimeoutDurationNs, skipped]() {
        ErrorStatus executionStatus;
        std::vector<OutputShape> outputShapes;
        Timing timing = kNoTiming;
@@ -567,9 +604,14 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,

                ExecutionResult executionResult;
                // execute
            const auto ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                ::ndk::ScopedAStatus ret;
                if (testConfig.reusable) {
                    ret = execution->executeSynchronously(kNoDeadline, &executionResult);
                } else {
                    ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                              kNoDeadline, loopTimeoutDurationNs,
                                                              &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (ret.isOk()) {
@@ -607,8 +649,9 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,

                ExecutionResult executionResult;
                // execute
            ret = burst->executeSynchronously(request, slots, testConfig.measureTiming, kNoDeadline,
                                              loopTimeoutDurationNs, &executionResult);
                ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
                                                  kNoDeadline, loopTimeoutDurationNs,
                                                  &executionResult);
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (ret.isOk()) {
@@ -621,8 +664,8 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
                }

            // Mark each slot as unused after the execution. This is unnecessary because the burst
            // is freed after this scope ends, but this is here to test the functionality.
                // Mark each slot as unused after the execution. This is unnecessary because the
                // burst is freed after this scope ends, but this is here to test the functionality.
                for (int64_t slot : slots) {
                    ret = burst->releaseMemoryResource(slot);
                    ASSERT_TRUE(ret.isOk()) << ret.getDescription();
@@ -634,9 +677,14 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                SCOPED_TRACE("fenced");
                ErrorStatus result = ErrorStatus::NONE;
                FencedExecutionResult executionResult;
            auto ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                    kNoDeadline, loopTimeoutDurationNs, kNoDuration,
                                                    &executionResult);
                ::ndk::ScopedAStatus ret;
                if (testConfig.reusable) {
                    ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
                } else {
                    ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                       kNoDeadline, loopTimeoutDurationNs,
                                                       kNoDuration, &executionResult);
                }
                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                        << ret.getDescription();
                if (!ret.isOk()) {
@@ -647,10 +695,11 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                    auto dupFd = dup(executionResult.syncFence.get());
                    ASSERT_NE(dupFd, -1);
                    waitFor.emplace_back(dupFd);
                // If a sync fence is returned, try start another run waiting for the sync fence.
                    // If a sync fence is returned, try start another run waiting for the sync
                    // fence.
                    ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
                                                   kNoDeadline, loopTimeoutDurationNs, kNoDuration,
                                                   &executionResult);
                                                       kNoDeadline, loopTimeoutDurationNs,
                                                       kNoDuration, &executionResult);
                    ASSERT_TRUE(ret.isOk());
                    waitForSyncFence(executionResult.syncFence.get());
                }
@@ -749,7 +798,8 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
        // Go through all outputs, check returned output shapes.
        for (uint32_t i = 0; i < outputShapes.size(); i++) {
            EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto& expect = testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
            const auto& expect =
                    testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
            const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
            ASSERT_TRUE(unsignedActual.has_value());
            const std::vector<uint32_t>& actual = unsignedActual.value();
@@ -761,6 +811,15 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,

        // We want "close-enough" results.
        checkResults(testModel, outputs);
    };

    executeAndCheckResults();

    // For reusable execution tests, run the execution twice.
    if (testConfig.reusable) {
        SCOPED_TRACE("Second execution");
        executeAndCheckResults();
    }
}

void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
@@ -770,6 +829,13 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
    std::vector<bool> measureTimingList;
    std::vector<Executor> executorList;
    std::vector<MemoryType> memoryTypeList;
    std::vector<bool> reusableList = {false};

    int deviceVersion;
    ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
    if (deviceVersion >= kMinAidlLevelForFL8) {
        reusableList.push_back(true);
    }

    switch (testKind) {
        case TestKind::GENERAL: {
@@ -812,13 +878,18 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
        for (const bool measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                for (const MemoryType memoryType : memoryTypeList) {
                    const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
                    for (const bool reusable : reusableList) {
                        if (executor == Executor::BURST && reusable) continue;
                        const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
                                                    reusable);
                        SCOPED_TRACE(toString(testConfig));
                        EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
                    }
                }
            }
        }
    }
}

void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
                                   const std::shared_ptr<IPreparedModel>& preparedModel,
@@ -833,7 +904,7 @@ void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
        for (const bool measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
                                            /*reportSkipping=*/false);
                                            /*reusable=*/false, /*reportSkipping=*/false);
                bool baseSkipped = false;
                EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
                bool coupledSkipped = false;
+11 −0
Original line number Diff line number Diff line
@@ -177,6 +177,17 @@ std::string gtestCompliantName(std::string name) {
    return os << toString(errorStatus);
}

// Returns a human-readable name for `type`, used when labeling test
// configurations in SCOPED_TRACE output.
std::string toString(MemoryType type) {
    switch (type) {
        case MemoryType::ASHMEM:
            return "ASHMEM";
        case MemoryType::BLOB_AHWB:
            return "BLOB_AHWB";
        case MemoryType::DEVICE:
            return "DEVICE";
    }
    // Flowing off the end of a value-returning function is undefined behavior.
    // The switch is intentionally left default-free so -Wswitch still flags any
    // newly added enumerator; this fallback only covers out-of-range values.
    return "UNKNOWN MemoryType";
}

Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
    CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);

+2 −0
Original line number Diff line number Diff line
@@ -111,6 +111,8 @@ class TestBlobAHWB : public TestMemoryBase {

enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };

std::string toString(MemoryType type);

// Manages the lifetime of memory resources used in an execution.
class ExecutionContext {
    DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
+53 −0
Original line number Diff line number Diff line
@@ -36,6 +36,51 @@ using ExecutionMutation = std::function<void(Request*)>;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

// Test request validation with reusable execution: given a request (mutated to
// be invalid by the caller — see validate() below), create a reusable execution
// and verify that either creation itself or every subsequent compute path
// reports ErrorStatus::INVALID_ARGUMENT.
static void validateReusableExecution(const std::shared_ptr<IPreparedModel>& preparedModel,
                                      const std::string& message, const Request& request,
                                      bool measure) {
    // createReusableExecution
    std::shared_ptr<IExecution> execution;
    {
        SCOPED_TRACE(message + " [createReusableExecution]");
        const auto createStatus = preparedModel->createReusableExecution(
                request, measure, kOmittedTimeoutDuration, &execution);
        if (!createStatus.isOk()) {
            // The service may reject the invalid request eagerly at creation
            // time; that must surface as a service-specific INVALID_ARGUMENT
            // with no execution object produced. Nothing further to check.
            ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
                      ErrorStatus::INVALID_ARGUMENT);
            ASSERT_EQ(nullptr, execution);
            return;
        } else {
            // Creation succeeded: the request is validated lazily, so each
            // compute path below must reject it instead.
            ASSERT_NE(nullptr, execution);
        }
    }

    // synchronous
    {
        SCOPED_TRACE(message + " [executeSynchronously]");
        ExecutionResult executionResult;
        const auto executeStatus = execution->executeSynchronously(kNoDeadline, &executionResult);
        // The synchronous path must fail with a service-specific
        // INVALID_ARGUMENT error.
        ASSERT_FALSE(executeStatus.isOk());
        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    }

    // fenced
    {
        SCOPED_TRACE(message + " [executeFenced]");
        FencedExecutionResult executionResult;
        // Empty wait-for list: the execution is dispatched immediately, so the
        // invalid request must be rejected the same way as the sync path.
        const auto executeStatus =
                execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
        ASSERT_FALSE(executeStatus.isOk());
        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    }
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request.
@@ -101,6 +146,14 @@ static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    }

    int32_t aidlVersion;
    ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());

    // validate reusable execution
    if (aidlVersion >= kMinAidlLevelForFL8) {
        validateReusableExecution(preparedModel, message, request, measure);
    }
}

std::shared_ptr<IBurst> createBurst(const std::shared_ptr<IPreparedModel>& preparedModel) {
+2 −0
Original line number Diff line number Diff line
@@ -30,6 +30,8 @@ namespace aidl::android::hardware::neuralnetworks::vts::functional {
using NamedDevice = Named<std::shared_ptr<IDevice>>;
using NeuralNetworksAidlTestParam = NamedDevice;

constexpr int kMinAidlLevelForFL8 = 4;

class NeuralNetworksAidlTest : public testing::TestWithParam<NeuralNetworksAidlTestParam> {
  protected:
    void SetUp() override;