Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b03498e0 authored by Michael Butler, committed by android-build-merger
Browse files

NNAPI Burst -- HAL VTS tests am: 29471a89

am: 4c942b99

Change-Id: Ibcb8719ac0adc66ced844dd96ccd5bc364bd483f
parents bb513380 4c942b99
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@ cc_library_static {
    defaults: ["VtsHalTargetTestDefaults"],
    export_include_dirs: ["."],
    shared_libs: [
        "libfmq",
        "libnativewindow",
    ],
    static_libs: [
@@ -51,6 +52,7 @@ cc_defaults {
        "VtsHalNeuralnetworks.cpp",
    ],
    shared_libs: [
        "libfmq",
        "libnativewindow",
    ],
    static_libs: [
+87 −46
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
 */

#include "Callbacks.h"
#include "ExecutionBurstController.h"
#include "TestHarness.h"
#include "Utils.h"

@@ -109,14 +110,22 @@ static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& prepar
    }
    return result;
}
// Selects between the asynchronous-callback and synchronous execution paths.
// NOTE(review): this appears to be the removed side of the diff — the commit
// replaces it with the three-way `enum class Executor` below; confirm against
// the full file.
enum class Synchronously { NO, YES };
// Burst execution does not exist at V1_0; asking for a burst controller on a
// V1_0 prepared model is a test-authoring error. Records a non-fatal gtest
// failure and returns nullptr so the caller's ASSERT_NE(nullptr, ...) check
// aborts the test case.
static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<V1_0::IPreparedModel>&) {
    ADD_FAILURE() << "asking for burst execution at V1_0";
    return nullptr;
}
// Creates a burst controller for a V1_2 prepared model.
// /*blocking=*/true: presumably makes the controller block on the fast message
// queue rather than poll — confirm against ExecutionBurstController.h.
static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
        const sp<V1_2::IPreparedModel>& preparedModel) {
    return ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
}
// Which execution path EvaluatePreparedModel exercises: the asynchronous
// callback path, the synchronous path, or the V1_2 burst path.
enum class Executor { ASYNC, SYNC, BURST };
// Default absolute and relative tolerances used when comparing floating-point
// execution outputs against the expected results.
const float kDefaultAtol = 1e-5f;
const float kDefaultRtol = 1e-5f;
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
                           Synchronously sync, MeasureTiming measure, bool testDynamicOutputShape) {
        const std::vector<MixedTypedExample>& examples, bool hasRelaxedFloat32Model, float fpAtol,
        float fpRtol, Executor executor, MeasureTiming measure, bool testDynamicOutputShape) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

@@ -209,18 +218,20 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
        inputMemory->commit();
        outputMemory->commit();

        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};

        ErrorStatus executionStatus;
        hidl_vec<OutputShape> outputShapes;
        Timing timing;
        if (sync == Synchronously::NO) {
        switch (executor) {
            case Executor::ASYNC: {
                SCOPED_TRACE("asynchronous");

                // launch execution
                sp<ExecutionCallback> executionCallback = new ExecutionCallback();
                ASSERT_NE(nullptr, executionCallback.get());
            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
                    preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
                    measure, executionCallback);
                Return<ErrorStatus> executionLaunchStatus =
                        ExecutePreparedModel(preparedModel, request, measure, executionCallback);
                ASSERT_TRUE(executionLaunchStatus.isOk());
                EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

@@ -229,15 +240,40 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
                executionStatus = executionCallback->getStatus();
                outputShapes = executionCallback->getOutputShapes();
                timing = executionCallback->getTiming();
        } else {

                break;
            }
            case Executor::SYNC: {
                SCOPED_TRACE("synchronous");

                // execute
                Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
                    preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
                    measure, &outputShapes, &timing);
                        preparedModel, request, measure, &outputShapes, &timing);
                ASSERT_TRUE(executionReturnStatus.isOk());
                executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

                break;
            }
            case Executor::BURST: {
                SCOPED_TRACE("burst");

                // create burst
                const std::unique_ptr<::android::nn::ExecutionBurstController> controller =
                        CreateBurst(preparedModel);
                ASSERT_NE(nullptr, controller.get());

                // create memory keys
                std::vector<intptr_t> keys(request.pools.size());
                for (size_t i = 0; i < keys.size(); ++i) {
                    keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
                }

                // execute burst
                std::tie(executionStatus, outputShapes, timing) =
                        controller->compute(request, measure, keys);

                break;
            }
        }

        if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
@@ -285,11 +321,10 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
}
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model, Synchronously sync, MeasureTiming measure,
                           bool testDynamicOutputShape) {
        const std::vector<MixedTypedExample>& examples, bool hasRelaxedFloat32Model,
        Executor executor, MeasureTiming measure, bool testDynamicOutputShape) {
    EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
                          kDefaultRtol, sync, measure, testDynamicOutputShape);
            kDefaultRtol, executor, measure, testDynamicOutputShape);
}

static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -345,8 +380,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c

    float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Synchronously::NO,
                          MeasureTiming::NO, /*testDynamicOutputShape=*/false);
            /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Executor::ASYNC, MeasureTiming::NO,
            /*testDynamicOutputShape=*/false);
}

void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -392,7 +427,7 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
    ASSERT_NE(nullptr, preparedModel.get());

    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Synchronously::NO,
            model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Executor::ASYNC,
            MeasureTiming::NO, /*testDynamicOutputShape=*/false);
}

@@ -441,17 +476,23 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
    ASSERT_NE(nullptr, preparedModel.get());

    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
                          MeasureTiming::NO, testDynamicOutputShape);
            model.relaxComputationFloat32toFloat16, Executor::ASYNC, MeasureTiming::NO,
            testDynamicOutputShape);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
            model.relaxComputationFloat32toFloat16, Executor::SYNC, MeasureTiming::NO,
            testDynamicOutputShape);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
            model.relaxComputationFloat32toFloat16, Executor::BURST, MeasureTiming::NO,
            testDynamicOutputShape);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
                          MeasureTiming::NO, testDynamicOutputShape);
            model.relaxComputationFloat32toFloat16, Executor::ASYNC, MeasureTiming::YES,
            testDynamicOutputShape);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
                          MeasureTiming::YES, testDynamicOutputShape);
            model.relaxComputationFloat32toFloat16, Executor::SYNC, MeasureTiming::YES,
            testDynamicOutputShape);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
                          MeasureTiming::YES, testDynamicOutputShape);
            model.relaxComputationFloat32toFloat16, Executor::BURST, MeasureTiming::YES,
            testDynamicOutputShape);
}

}  // namespace generated_tests
+40 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "ExecutionBurstController.h"
#include "TestHarness.h"
#include "Utils.h"

@@ -112,6 +113,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
    };
    MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;

    // asynchronous
    {
        SCOPED_TRACE(message + " [execute_1_2]");

@@ -131,6 +133,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
        ASSERT_TRUE(badTiming(timing));
    }

    // synchronous
    {
        SCOPED_TRACE(message + " [executeSynchronously]");

@@ -144,6 +147,43 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
                });
        ASSERT_TRUE(executeStatus.isOk());
    }

    // burst
    {
        SCOPED_TRACE(message + " [burst]");

        // create burst
        std::unique_ptr<::android::nn::ExecutionBurstController> burst =
                ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
        ASSERT_NE(nullptr, burst.get());

        // create memory keys
        std::vector<intptr_t> keys(request.pools.size());
        for (size_t i = 0; i < keys.size(); ++i) {
            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
        }

        // execute and verify
        ErrorStatus error;
        std::vector<OutputShape> outputShapes;
        Timing timing;
        std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
        EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
        EXPECT_EQ(outputShapes.size(), 0);
        EXPECT_TRUE(badTiming(timing));

        // additional burst testing
        if (request.pools.size() > 0) {
            // valid free
            burst->freeMemory(keys.front());

            // negative test: invalid free of unknown (blank) memory
            burst->freeMemory(intptr_t{});

            // negative test: double free of memory
            burst->freeMemory(keys.front());
        }
    }
}

// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,