Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 455b5b3c authored by Miao Wang's avatar Miao Wang Committed by android-build-merger
Browse files

Refactor NN API VTS tests and add v1.1 tests

am: 4862d612

Change-Id: If8f36f03eec01fe7522154890670faeee58a22db
parents f1912b0d 4862d612
Loading
Loading
Loading
Loading
+31 −4
Original line number Diff line number Diff line
@@ -14,22 +14,49 @@
// limitations under the License.
//

cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
cc_library_static {
    name: "VtsHalNeuralnetworksTest_utils",
    srcs: [
        "Callbacks.cpp",
        "GeneratedTestHarness.cpp",
        "Models.cpp",
        "VtsHalNeuralnetworksV1_0TargetTest.cpp",
        "GeneratedTestHarness.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    export_include_dirs: ["."],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libhidlmemory",
        "libneuralnetworks_utils",
    ],
    header_libs: [
        "libneuralnetworks_headers",
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
}

cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
    srcs: [
        "VtsHalNeuralnetworksV1_0.cpp",
        "VtsHalNeuralnetworksV1_0BasicTest.cpp",
        "VtsHalNeuralnetworksV1_0GeneratedTest.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libhidlmemory",
        "libneuralnetworks_utils",
        "VtsHalNeuralnetworksTest_utils",
    ],
    header_libs: [
        "libneuralnetworks_headers",
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
+110 −57
Original line number Diff line number Diff line
@@ -16,9 +16,15 @@

#include "Callbacks.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <iostream>
@@ -26,11 +32,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {
// allocator helper
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");

namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
@@ -64,54 +65,10 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
             std::function<bool(int)> is_ignored,
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;
    Model model = create_model();

    // see if service can handle model
    ErrorStatus supportedStatus;
    bool fullySupportsModel = false;
    Return<void> supportedCall = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            supportedStatus = status;
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedCall.isOk());
    ASSERT_EQ(ErrorStatus::NONE, supportedStatus);

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    if (fullySupportsModel) {
        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    } else {
        EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
                    prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
    }

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_NE(nullptr, preparedModel.get());

    int example_no = 1;
    for (auto& example : examples) {
@@ -167,8 +124,8 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                          allocateSharedMemory(outputSize)};
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        ASSERT_NE(0ull, pools[INPUT].size());
        ASSERT_NE(0ull, pools[OUTPUT].size());

@@ -221,11 +178,107 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
    }
}

// Top-level driver for a V1_0 model: checks getSupportedOperations, prepares
// the model through the async PreparedModelCallback, then (if preparation
// succeeded) runs every generated example via EvaluatePreparedModel.
void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    V1_0::Model model = create_model();

    // see if service can handle model
    bool fullySupportsModel = false;
    // Initialize to a failure value so the ASSERT_EQ below can never read an
    // indeterminate value if the supported-operations callback is not invoked.
    ErrorStatus supportedStatus = ErrorStatus::GENERAL_FAILURE;
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());

    Return<void> supportedCall = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            supportedStatus = status;
            // NOTE: a fatal ASSERT inside this lambda only returns from the
            // lambda; the outer ASSERTs below are what stop the test.
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedCall.isOk());
    ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());

    // retrieve prepared model: wait() blocks until the service signals the
    // callback with the preparation result.
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    if (fullySupportsModel) {
        // A service claiming full support must succeed at preparation.
        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    } else {
        EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
                    prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
    }

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_NE(nullptr, preparedModel.get());

    EvaluatePreparedModel(preparedModel, is_ignored, examples);
}

// Top-level driver for a V1_1 model: identical flow to the V1_0 overload but
// using the _1_1 variants of getSupportedOperations and prepareModel.
void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    V1_1::Model model = create_model();

    // see if service can handle model
    bool fullySupportsModel = false;
    // Initialize to a failure value so the ASSERT_EQ below can never read an
    // indeterminate value if the supported-operations callback is not invoked.
    ErrorStatus supportedStatus = ErrorStatus::GENERAL_FAILURE;
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());

    Return<void> supportedCall = device->getSupportedOperations_1_1(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            supportedStatus = status;
            // NOTE: a fatal ASSERT inside this lambda only returns from the
            // lambda; the outer ASSERTs below are what stop the test.
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedCall.isOk());
    ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());

    // retrieve prepared model: wait() blocks until the service signals the
    // callback with the preparation result.
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    if (fullySupportsModel) {
        // A service claiming full support must succeed at preparation.
        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    } else {
        EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
                    prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
    }

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_NE(nullptr, preparedModel.get());

    EvaluatePreparedModel(preparedModel, is_ignored, examples);
}

}  // namespace generated_tests

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
+24 −29
Original line number Diff line number Diff line
@@ -17,19 +17,22 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <vector>

using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create a valid model
Model createValidTestModel() {
V1_1::Model createValidTestModel_1_1() {
    const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
    const uint32_t size = operand2Data.size() * sizeof(float);

@@ -103,39 +106,34 @@ Model createValidTestModel() {
}

// create first invalid model
Model createInvalidTestModel1() {
    Model model = createValidTestModel();
// Builds the first invalid V1_1 model: a valid model whose first operation
// type is corrupted to an out-of-range value (0xDEADBEEF).
V1_1::Model createInvalidTestModel1_1_1() {
    // NOTE(review): `Model` and `OperationType` are unqualified; inside
    // namespace V1_0 these resolve to the V1_0 types while the return type is
    // V1_1::Model — confirm the intended qualification against the full file.
    Model model = createValidTestModel_1_1();
    model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */
    return model;
}

// create second invalid model
Model createInvalidTestModel2() {
    Model model = createValidTestModel();
// Builds the second invalid V1_1 model: a valid model whose input indexes
// reference a nonexistent operand (index 4).
V1_1::Model createInvalidTestModel2_1_1() {
    // NOTE(review): `Model` is unqualified while the return type is
    // V1_1::Model — confirm the intended qualification against the full file.
    Model model = createValidTestModel_1_1();
    const uint32_t operand1 = 0;
    const uint32_t operand5 = 4;  // INVALID OPERAND
    model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
    return model;
}

// allocator helper
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
    hidl_memory memory;

    sp<IAllocator> allocator = IAllocator::getService(type);
    if (!allocator.get()) {
        return {};
// Valid model in V1_0 form: build the canonical V1_1 test model, then
// downgrade it through the shared conversion utility.
V1_0::Model createValidTestModel_1_0() {
    return nn::convertToV1_0(createValidTestModel_1_1());
}

    Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        ASSERT_TRUE(success);
        memory = mem;
    });
    if (!ret.isOk()) {
        return {};
// First invalid model in V1_0 form: downgrade the corresponding invalid V1_1
// model through the shared conversion utility.
V1_0::Model createInvalidTestModel1_1_0() {
    return nn::convertToV1_0(createInvalidTestModel1_1_1());
}

    return memory;
// Second invalid model in V1_0 form: downgrade the corresponding invalid V1_1
// model through the shared conversion utility.
V1_0::Model createInvalidTestModel2_1_0() {
    return nn::convertToV1_0(createInvalidTestModel2_1_1());
}

// create a valid request
@@ -154,8 +152,8 @@ Request createValidTestRequest() {
    std::vector<RequestArgument> outputs = {{
        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
    }};
    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                      allocateSharedMemory(outputSize)};
    std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                      nn::allocateSharedMemory(outputSize)};
    if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
        return {};
    }
@@ -199,9 +197,6 @@ Request createInvalidTestRequest2() {
    return request;
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
+13 −14
Original line number Diff line number Diff line
@@ -16,28 +16,27 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include <android/hardware/neuralnetworks/1.1/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create the model
Model createValidTestModel();
Model createInvalidTestModel1();
Model createInvalidTestModel2();
// create V1_1 model
V1_1::Model createValidTestModel_1_1();
V1_1::Model createInvalidTestModel1_1_1();
V1_1::Model createInvalidTestModel2_1_1();

// create V1_0 model
V1_0::Model createValidTestModel_1_0();
V1_0::Model createInvalidTestModel1_1_0();
V1_0::Model createInvalidTestModel2_1_0();

// create the request
Request createValidTestRequest();
Request createInvalidTestRequest1();
Request createInvalidTestRequest2();
V1_0::Request createValidTestRequest();
V1_0::Request createInvalidTestRequest1();
V1_0::Request createInvalidTestRequest2();

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
+73 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0.h"
#include "Utils.h"

#include <android-base/logging.h>

using ::android::hardware::hidl_memory;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// allocator helper
// Thin forwarding wrapper so code in this namespace can allocate shared
// memory without spelling out the nn:: utility namespace at each call site.
hidl_memory allocateSharedMemory(int64_t size) {
    return nn::allocateSharedMemory(size);
}

// A class for test environment setup
// Out-of-line empty constructor/destructor definitions for the VTS test
// environment singleton.
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}

// Returns the process-wide test environment. The object is allocated with
// `new` on purpose: ::testing::AddGlobalTestEnvironment takes ownership and
// frees it during gtest teardown, so a function-local static *object* (which
// the runtime would destroy a second time) cannot be used here.
NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
    static NeuralnetworksHidlEnvironment* const environment =
        new NeuralnetworksHidlEnvironment();
    return environment;
}

// Registers the HAL services this test suite targets so the VTS framework can
// enumerate the service instances to run against.
void NeuralnetworksHidlEnvironment::registerTestServices() {
    registerTestService<V1_0::IDevice>();
}

// The main test class for NEURALNETWORK HIDL HAL.
// Out-of-line destructor definition for the test fixture.
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}

void NeuralnetworksHidlTest::SetUp() {
    // Fetch the V1_0::IDevice instance registered with the VTS environment;
    // every test needs a usable device, so fail fast when it is absent.
    device = ::testing::VtsHalHidlTargetTestBase::getService<V1_0::IDevice>(
        NeuralnetworksHidlEnvironment::getInstance());
    ASSERT_NE(nullptr, device.get());
}

// No per-test cleanup is required.
void NeuralnetworksHidlTest::TearDown() {}

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
Loading