Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b7e8138 authored by Michael Butler's avatar Michael Butler
Browse files

Add Burst tests to NN AIDL HAL VTS

Bug: 180492058
Bug: 177267324
Test: mma
Test: VtsHalNeuralnetworksTargetTest
Change-Id: I1744005cbf750b70b42367b81a2fa6b8f24c1904
parent 5dc72d54
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -39,6 +39,8 @@ class MockPreparedModel final : public BnPreparedModel {
                 bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
                 int64_t duration, FencedExecutionResult* fencedExecutionResult),
                (override));
    MOCK_METHOD(ndk::ScopedAStatus, configureExecutionBurst, (std::shared_ptr<IBurst> * burst),
                (override));
};

inline std::shared_ptr<MockPreparedModel> MockPreparedModel::create() {
+53 −5
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@
#include "GeneratedTestHarness.h"

#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/sync.h>
@@ -568,6 +569,53 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
            }
            break;
        }
        case Executor::BURST: {
            SCOPED_TRACE("burst");

            // create burst
            std::shared_ptr<IBurst> burst;
            auto ret = preparedModel->configureExecutionBurst(&burst);
            ASSERT_TRUE(ret.isOk()) << ret.getDescription();
            ASSERT_NE(nullptr, burst.get());

            // associate a unique slot with each memory pool
            int64_t currentSlot = 0;
            std::vector<int64_t> slots;
            slots.reserve(request.pools.size());
            for (const auto& pool : request.pools) {
                if (pool.getTag() == RequestMemoryPool::Tag::pool) {
                    slots.push_back(currentSlot++);
                } else {
                    EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
                    slots.push_back(-1);
                }
            }

            ExecutionResult executionResult;
            // execute
            ret = burst->executeSynchronously(request, slots, testConfig.measureTiming, kNoDeadline,
                                              loopTimeoutDuration, &executionResult);
            ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                    << ret.getDescription();
            if (ret.isOk()) {
                executionStatus = executionResult.outputSufficientSize
                                          ? ErrorStatus::NONE
                                          : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                outputShapes = std::move(executionResult.outputShapes);
                timing = executionResult.timing;
            } else {
                executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
            }

            // Mark each slot as unused after the execution. This is unnecessary because the burst
            // is freed after this scope ends, but this is here to test the functionality.
            for (int64_t slot : slots) {
                ret = burst->releaseMemoryResource(slot);
                ASSERT_TRUE(ret.isOk()) << ret.getDescription();
            }

            break;
        }
        case Executor::FENCED: {
            SCOPED_TRACE("fenced");
            ErrorStatus result = ErrorStatus::NONE;
@@ -713,19 +761,19 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
        case TestKind::GENERAL: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC};
            executorList = {Executor::SYNC, Executor::BURST};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::DYNAMIC_SHAPE: {
            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::FENCED};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
        case TestKind::MEMORY_DOMAIN: {
            outputTypesList = {OutputType::FULLY_SPECIFIED};
            measureTimingList = {false};
            executorList = {Executor::SYNC, Executor::FENCED};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
        } break;
        case TestKind::FENCED_COMPUTE: {
@@ -741,7 +789,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
        case TestKind::INTINITE_LOOP_TIMEOUT: {
            outputTypesList = {OutputType::MISSED_DEADLINE};
            measureTimingList = {false, true};
            executorList = {Executor::SYNC, Executor::FENCED};
            executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
            memoryTypeList = {MemoryType::ASHMEM};
        } break;
    }
@@ -765,7 +813,7 @@ void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
                                   const TestModel& coupledModel) {
    const std::vector<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
    const std::vector<bool> measureTimingList = {false, true};
    const std::vector<Executor> executorList = {Executor::SYNC, Executor::FENCED};
    const std::vector<Executor> executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};

    for (const OutputType outputType : outputTypesList) {
        for (const bool measureTiming : measureTimingList) {
+37 −1
Original line number Diff line number Diff line
@@ -203,6 +203,10 @@ class InvalidPreparedModel : public BnPreparedModel {
        return ndk::ScopedAStatus::fromServiceSpecificError(
                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
    }
    // Burst configuration is never supported on an invalid prepared model;
    // always report GENERAL_FAILURE without touching the output parameter.
    ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* /*burst*/) override {
        constexpr int32_t kGeneralFailure = static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificError(kGeneralFailure);
    }
};

template <typename... Args>
@@ -866,6 +870,9 @@ class MemoryDomainExecutionTest
            case Executor::SYNC:
                EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
                break;
            case Executor::BURST:
                EXPECT_EQ(executeBurst(preparedModel, request), expectedStatus);
                break;
            case Executor::FENCED:
                EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
                break;
@@ -916,6 +923,35 @@ class MemoryDomainExecutionTest
        return executionStatus;
    }

    // Configures a fresh burst on |preparedModel| and runs |request| through it
    // synchronously. Returns the execution's ErrorStatus, or GENERAL_FAILURE if
    // the burst object could not be created.
    ErrorStatus executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
                             const Request& request) {
        // Create the burst object for this execution.
        std::shared_ptr<IBurst> burst;
        const auto configureStatus = preparedModel->configureExecutionBurst(&burst);
        EXPECT_TRUE(configureStatus.isOk()) << configureStatus.getDescription();
        EXPECT_NE(nullptr, burst.get());
        if (!configureStatus.isOk() || burst.get() == nullptr) {
            return ErrorStatus::GENERAL_FAILURE;
        }

        // No memory is cached across executions here, so every pool gets the
        // "no identifier" token of -1.
        const std::vector<int64_t> memoryIdentifierTokens(request.pools.size(), -1);

        ExecutionResult executionResult;
        const auto executeStatus =
                burst->executeSynchronously(request, memoryIdentifierTokens, false, kNoDeadline,
                                            kOmittedTimeoutDuration, &executionResult);

        if (!executeStatus.isOk()) {
            // Any failure must surface as a service-specific error code.
            EXPECT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
            return static_cast<ErrorStatus>(executeStatus.getServiceSpecificError());
        }
        // Timing was not requested, so the driver must report "no timing".
        EXPECT_EQ(executionResult.timing, kNoTiming);
        return executionResult.outputSufficientSize ? ErrorStatus::NONE
                                                    : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
    }

    const Executor kExecutor = std::get<Executor>(GetParam());
};

@@ -1159,7 +1195,7 @@ TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
                  ErrorStatus::GENERAL_FAILURE);
}

// All executor paths exercised by the memory-domain execution tests. The diff
// view duplicated the pre-change definition; only the post-change line (which
// adds Executor::BURST) is valid — a second definition would not compile.
const auto kExecutorChoices = testing::Values(Executor::SYNC, Executor::BURST, Executor::FENCED);

std::string printMemoryDomainExecutionTest(
        const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
+51 −4
Original line number Diff line number Diff line
@@ -51,6 +51,10 @@ constexpr auto kShortDuration = std::chrono::milliseconds{5};
using Results = std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>;
using MaybeResults = std::optional<Results>;

using ExecutionFunction =
        std::function<MaybeResults(const std::shared_ptr<IPreparedModel>& preparedModel,
                                   const Request& request, int64_t deadline)>;

static int64_t makeDeadline(DeadlineBoundType deadlineBoundType) {
    const auto getNanosecondsSinceEpoch = [](const auto& time) -> int64_t {
        const auto timeSinceEpoch = time.time_since_epoch();
@@ -177,13 +181,53 @@ static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>&
                         std::move(executionResult.outputShapes), executionResult.timing});
}

// Creates a burst from |preparedModel| and synchronously executes |request| on
// it with the given |deadline|. Returns std::nullopt when the burst cannot be
// created or the call fails with a non-service-specific binder error;
// otherwise returns the (status, outputShapes, timing) triple.
static MaybeResults executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
                                 const Request& request, int64_t deadline) {
    SCOPED_TRACE("burst");
    const bool measure = false;

    // Configure the burst object.
    std::shared_ptr<IBurst> burst;
    const auto configureStatus = preparedModel->configureExecutionBurst(&burst);
    EXPECT_TRUE(configureStatus.isOk()) << configureStatus.getDescription();
    EXPECT_NE(nullptr, burst.get());
    if (!configureStatus.isOk() || burst.get() == nullptr) {
        return std::nullopt;
    }

    // No memory is reused across executions, so pass the "no identifier"
    // token of -1 for every pool.
    const std::vector<int64_t> memoryIdentifierTokens(request.pools.size(), -1);

    // Run the execution.
    ExecutionResult executionResult;
    const auto executeStatus =
            burst->executeSynchronously(request, memoryIdentifierTokens, measure, deadline,
                                        kOmittedTimeoutDuration, &executionResult);
    EXPECT_TRUE(executeStatus.isOk() || executeStatus.getExceptionCode() == EX_SERVICE_SPECIFIC)
            << executeStatus.getDescription();
    if (!executeStatus.isOk()) {
        if (executeStatus.getExceptionCode() != EX_SERVICE_SPECIFIC) {
            return std::nullopt;
        }
        return MaybeResults(
                {static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()), {}, kNoTiming});
    }

    // Package up the successful results.
    const ErrorStatus status = executionResult.outputSufficientSize
                                       ? ErrorStatus::NONE
                                       : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
    return MaybeResults({status, std::move(executionResult.outputShapes), executionResult.timing});
}

void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
                      const TestModel& testModel, const Request& request,
                      const ExecutionContext& context, DeadlineBoundType deadlineBound) {
                      const ExecutionContext& context, bool synchronous,
                      DeadlineBoundType deadlineBound) {
    const ExecutionFunction execute = synchronous ? executeSynchronously : executeBurst;
    const auto deadline = makeDeadline(deadlineBound);

    // Perform execution and unpack results.
    const auto results = executeSynchronously(preparedModel, request, deadline);
    const auto results = execute(preparedModel, request, deadline);
    if (!results.has_value()) return;
    const auto& [status, outputShapes, timing] = results.value();

@@ -235,8 +279,11 @@ void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
// Runs the deadline-based execution test over every combination of executor
// path (synchronous and burst) and deadline bound. The diff view kept the
// stale pre-change 5-argument call alongside the new 6-argument one; only the
// call matching the updated runExecutionTest signature (with |synchronous|)
// is retained — the stale call would both double-execute and fail to compile.
void runExecutionTests(const std::shared_ptr<IPreparedModel>& preparedModel,
                       const TestModel& testModel, const Request& request,
                       const ExecutionContext& context) {
    for (bool synchronous : {false, true}) {
        for (auto deadlineBound : deadlineBounds) {
            runExecutionTest(preparedModel, testModel, request, context, synchronous,
                             deadlineBound);
        }
    }
}

+90 −0
Original line number Diff line number Diff line
@@ -16,7 +16,9 @@

#define LOG_TAG "neuralnetworks_aidl_hal_test"

#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android/binder_auto_utils.h>
#include <variant>

#include <chrono>

@@ -77,6 +79,35 @@ static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    }

    // burst
    {
        SCOPED_TRACE(message + " [burst]");

        // create burst
        std::shared_ptr<IBurst> burst;
        auto ret = preparedModel->configureExecutionBurst(&burst);
        ASSERT_TRUE(ret.isOk()) << ret.getDescription();
        ASSERT_NE(nullptr, burst.get());

        // use -1 for all memory identifier tokens
        const std::vector<int64_t> slots(request.pools.size(), -1);

        ExecutionResult executionResult;
        const auto executeStatus = burst->executeSynchronously(
                request, slots, measure, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
        ASSERT_FALSE(executeStatus.isOk());
        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    }
}

// Convenience wrapper: configures an execution burst on |preparedModel| and
// returns it, or nullptr when configuration fails (instead of a status).
std::shared_ptr<IBurst> createBurst(const std::shared_ptr<IPreparedModel>& preparedModel) {
    std::shared_ptr<IBurst> burst;
    const auto status = preparedModel->configureExecutionBurst(&burst);
    return status.isOk() ? burst : nullptr;
}

///////////////////////// REMOVE INPUT ////////////////////////////////////
@@ -110,6 +141,65 @@ void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const
    removeOutputTest(preparedModel, request);
}

// Validates IBurst error handling: every malformed memoryIdentifierTokens
// vector (invalid token value, too few tokens, too many tokens) and an
// invalid releaseMemoryResource slot must be rejected with INVALID_ARGUMENT.
void validateBurst(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request) {
    // Configure the burst used for all validation executions below.
    std::shared_ptr<IBurst> burst;
    const auto configureStatus = preparedModel->configureExecutionBurst(&burst);
    ASSERT_TRUE(configureStatus.isOk()) << configureStatus.getDescription();
    ASSERT_NE(nullptr, burst.get());

    // Executes |request| with the given tokens and checks that the service
    // rejects the call with a service-specific INVALID_ARGUMENT error.
    const auto expectInvalidArgument = [&burst, &request](const std::vector<int64_t>& tokens) {
        ExecutionResult executionResult;
        const auto status = burst->executeSynchronously(request, tokens, /*measure=*/false,
                                                        kNoDeadline, kOmittedTimeoutDuration,
                                                        &executionResult);
        ASSERT_FALSE(status.isOk());
        ASSERT_EQ(status.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_EQ(static_cast<ErrorStatus>(status.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    };

    // Assign a unique slot to each "pool"-tagged memory pool; token-tagged
    // pools get the "no identifier" value of -1.
    std::vector<int64_t> slots;
    slots.reserve(request.pools.size());
    int64_t nextSlot = 0;
    for (const auto& pool : request.pools) {
        slots.push_back(pool.getTag() == RequestMemoryPool::Tag::pool ? nextSlot++ : -1);
    }

    constexpr int64_t invalidSlot = -2;

    // An invalid memory identifier token in any position must be rejected.
    for (size_t i = 0; i < request.pools.size(); ++i) {
        const int64_t savedSlot = slots[i];
        slots[i] = invalidSlot;
        expectInvalidArgument(slots);
        slots[i] = savedSlot;
    }

    // Fewer tokens than pools must be rejected.
    if (request.pools.size() > 0) {
        slots = std::vector<int64_t>(request.pools.size() - 1, -1);
        expectInvalidArgument(slots);
    }

    // More tokens than pools must be rejected.
    slots = std::vector<int64_t>(request.pools.size() + 1, -1);
    expectInvalidArgument(slots);

    // Releasing a memory resource with an invalid slot must also be rejected.
    const auto freeStatus = burst->releaseMemoryResource(invalidSlot);
    ASSERT_FALSE(freeStatus.isOk());
    ASSERT_EQ(freeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    ASSERT_EQ(static_cast<ErrorStatus>(freeStatus.getServiceSpecificError()),
              ErrorStatus::INVALID_ARGUMENT);
}

void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
                            const Request& request) {
    SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
Loading