Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64f9eb4b authored by Lev Proleev's avatar Lev Proleev
Browse files

Revert "Modify NNAPI VTS tests to run on version 1.3"

This reverts commit 543606fb.

Reason for revert: the topic breaks git_qt-dev-plus-aosp

Change-Id: I74859bae687d65d344ed7edc617e1a7d086960a1
parent 543606fb
Loading
Loading
Loading
Loading
+1 −18
Original line number Diff line number Diff line
@@ -14,28 +14,12 @@
// limitations under the License.
//

// Static helper library with the V1_2 VTS callback implementations
// (built from Callbacks.cpp), shared by the NNAPI VTS target tests.
cc_library_static {
    name: "VtsHalNeuralNetworksV1_2Callbacks",
    defaults: ["VtsHalTargetTestDefaults"],
    export_include_dirs: ["include"],
    srcs: [
        "Callbacks.cpp",
    ],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
    ],
    // Trailing commas added for consistency with the other list properties
    // in this module (matches bpfmt output).
    header_libs: [
        "libbase_headers",
    ],
}

cc_test {
    name: "VtsHalNeuralnetworksV1_2TargetTest",
    defaults: ["VtsHalTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "Callbacks.cpp",
        "CompilationCachingTests.cpp",
        "GeneratedTestHarness.cpp",
        "TestAssertions.cpp",
@@ -61,7 +45,6 @@ cc_test {
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_utils",
        "VtsHalNeuralNetworksV1_0_utils",
        "VtsHalNeuralNetworksV1_2Callbacks",
    ],
    whole_static_libs: [
        "neuralnetworks_generated_V1_0_example",
+0 −58
Original line number Diff line number Diff line
//
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// VTS test binary for the NNAPI 1.3 HAL. Links the generated-example models
// (V1_0 through V1_3) plus the shared V1_2 callback helper library.
cc_test {
    name: "VtsHalNeuralNetworksV1_3TargetTest",
    defaults: ["VtsHalTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "CompilationCachingTests.cpp",
        "GeneratedTestHarness.cpp",
        "TestAssertions.cpp",
        "ValidateBurst.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    shared_libs: [
        "libfmq",
        "libnativewindow",
    ],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
        "android.hardware.neuralnetworks@1.3",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_utils",
        "VtsHalNeuralNetworksV1_0_utils",
        "VtsHalNeuralNetworksV1_2Callbacks",
    ],
    // Precompiled example models the generated test harness iterates over.
    whole_static_libs: [
        "neuralnetworks_generated_V1_0_example",
        "neuralnetworks_generated_V1_1_example",
        "neuralnetworks_generated_V1_2_example",
        "neuralnetworks_generated_V1_3_example",
    ],
    header_libs: [
        "libneuralnetworks_headers",
    ],
    test_suites: ["general-tests"],
}
+56 −6
Original line number Diff line number Diff line
@@ -18,14 +18,11 @@

#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_3::vts::functional {
namespace android::hardware::neuralnetworks::V1_2::vts::functional {

using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
using V1_0::PerformanceInfo;
using V1_2::Constant;
using V1_2::DeviceType;
using V1_2::Extension;

// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -40,7 +37,7 @@ TEST_P(NeuralnetworksHidlTest, StatusTest) {
// initialization
TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    using OperandPerformance = Capabilities::OperandPerformance;
    Return<void> ret = kDevice->getCapabilities_1_3([](ErrorStatus status,
    Return<void> ret = kDevice->getCapabilities_1_2([](ErrorStatus status,
                                                       const Capabilities& capabilities) {
        EXPECT_EQ(ErrorStatus::NONE, status);

@@ -61,4 +58,57 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    });
    EXPECT_TRUE(ret.isOk());
}
}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional

// device version test
// getVersionString must complete successfully and report a non-empty string.
TEST_P(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
    Return<void> ret =
            kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) {
                EXPECT_EQ(ErrorStatus::NONE, status);
                EXPECT_LT(0, version.size());
            });
    // isOk() checks the HIDL transport itself did not fail.
    EXPECT_TRUE(ret.isOk());
}

// device type test
// getType must succeed and return one of the four defined DeviceType values.
TEST_P(NeuralnetworksHidlTest, GetDeviceTypeTest) {
    Return<void> ret = kDevice->getType([](ErrorStatus status, DeviceType type) {
        EXPECT_EQ(ErrorStatus::NONE, status);
        EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
                    type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
    });
    // isOk() checks the HIDL transport itself did not fail.
    EXPECT_TRUE(ret.isOk());
}

// device supported extensions test
// Each reported extension name must be non-empty, contain only lowercase
// letters, digits, '_' or '.', and contain at least one '.' (reverse-domain
// vendor prefix).
TEST_P(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
    Return<void> ret = kDevice->getSupportedExtensions(
            [](ErrorStatus status, const hidl_vec<Extension>& extensions) {
                EXPECT_EQ(ErrorStatus::NONE, status);
                for (auto& extension : extensions) {
                    std::string extensionName = extension.name;
                    EXPECT_FALSE(extensionName.empty());
                    // Restrict names to the allowed character set.
                    for (char c : extensionName) {
                        EXPECT_TRUE(('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '_' ||
                                    c == '.')
                                << "Extension name contains an illegal character: " << c;
                    }
                    EXPECT_NE(extensionName.find('.'), std::string::npos)
                            << "Extension name must start with the reverse domain name of the "
                               "vendor";
                }
            });
    EXPECT_TRUE(ret.isOk());
}

// getNumberOfCacheFilesNeeded test
// The reported model-cache and data-cache file counts must each stay within
// Constant::MAX_NUMBER_OF_CACHE_FILES.
TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
    Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
            [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
                EXPECT_EQ(ErrorStatus::NONE, status);
                EXPECT_LE(numModelCache,
                          static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
                EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
            });
    EXPECT_TRUE(ret.isOk());
}
}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+143 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Callbacks"

#include "1.2/Callbacks.h"

#include <android-base/logging.h>

#include <limits>

namespace android::hardware::neuralnetworks::V1_2::implementation {

using V1_0::ErrorStatus;

// Sentinel Timing with both fields at uint64_t max; used below as the
// "no timing information" value (see ExecutionCallback::notify).
constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                              .timeInDriver = std::numeric_limits<uint64_t>::max()};

// PreparedModelCallback methods begin here

// Records the prepareModel result and wakes every thread blocked in wait().
// Only the first notification is kept: later calls return without touching
// the stored status/model, so a waiter's observed result can never change.
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    {
        std::lock_guard<std::mutex> hold(mMutex);

        // quick-return if object has already been notified
        if (mNotified) {
            return Void();
        }

        // store results and mark as notified
        mErrorStatus = errorStatus;
        mPreparedModel = preparedModel;
        mNotified = true;
    }

    // Notify outside the critical section so woken waiters do not
    // immediately block again on mMutex.
    mCondition.notify_all();
    return Void();
}

// V1_2 variant of the prepare callback; forwards to the V1_0 notify() path.
// The V1_2 prepared model is stored via its V1_0 interface (implicit
// sp<V1_2::IPreparedModel> -> sp<V1_0::IPreparedModel> conversion).
Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
                                               const sp<V1_2::IPreparedModel>& preparedModel) {
    return notify(errorStatus, preparedModel);
}

// Blocks the caller until a notify()/notify_1_2() has delivered a result.
// Returns immediately if the callback was already notified.
void PreparedModelCallback::wait() const {
    std::unique_lock<std::mutex> guard(mMutex);
    while (!mNotified) {
        mCondition.wait(guard);
    }
}

// Blocks until notified, then returns the stored prepareModel error status.
ErrorStatus PreparedModelCallback::getStatus() const {
    wait();
    return mErrorStatus;
}

// Blocks until notified, then returns the stored prepared model
// (may be null on preparation failure — holds whatever notify() delivered).
sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const {
    wait();
    return mPreparedModel;
}

// ExecutionCallback methods begin here

// V1_0 execution-complete callback: the 1.0 interface carries no output
// shapes or timing, so record the status with an empty shape vector and
// the kNoTiming sentinel.
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
    notifyInternal(errorStatus, {}, kNoTiming);
    return Void();
}

// V1_2 execution-complete callback. Validates outputShapes against the
// error status before recording the result:
//  * OUTPUT_INSUFFICIENT_SIZE requires a non-empty outputShapes vector;
//  * any other non-NONE status requires an empty outputShapes vector.
// A violation of either rule is recorded as GENERAL_FAILURE instead.
Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
                                           const hidl_vec<OutputShape>& outputShapes,
                                           const Timing& timing) {
    const bool hasShapes = outputShapes.size() != 0;
    const bool insufficientSize = errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;

    if (insufficientSize && !hasShapes) {
        // Contract violation: OUTPUT_INSUFFICIENT_SIZE demands shape info.
        LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
        notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
    } else if (!insufficientSize && errorStatus != ErrorStatus::NONE && hasShapes) {
        // Contract violation: shapes only accompany NONE or OUTPUT_INSUFFICIENT_SIZE.
        LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
                      "neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
        notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
    } else {
        notifyInternal(errorStatus, outputShapes, timing);
    }
    return Void();
}

// Blocks the caller until notifyInternal() has published an execution result.
// Returns immediately if the callback was already notified.
void ExecutionCallback::wait() const {
    std::unique_lock<std::mutex> guard(mMutex);
    while (!mNotified) {
        mCondition.wait(guard);
    }
}

// Blocks until notified, then returns the stored execution error status.
ErrorStatus ExecutionCallback::getStatus() const {
    wait();
    return mErrorStatus;
}

// Blocks until notified, then returns a reference to the stored output
// shapes. The reference stays valid for the callback object's lifetime;
// the vector is empty unless notify_1_2 delivered shapes.
const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const {
    wait();
    return mOutputShapes;
}

// Blocks until notified, then returns the stored timing information
// (kNoTiming when the V1_0 notify path was used).
Timing ExecutionCallback::getTiming() const {
    wait();
    return mTiming;
}

// Publishes an execution result and wakes every thread blocked in wait().
// First-notification-wins: subsequent calls leave the stored result intact,
// so the values a waiter observes can never change afterwards.
void ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
                                       const hidl_vec<OutputShape>& outputShapes,
                                       const Timing& timing) {
    {
        std::lock_guard<std::mutex> hold(mMutex);

        // quick-return if object has already been notified
        if (mNotified) {
            return;
        }

        mErrorStatus = errorStatus;
        mOutputShapes = outputShapes;
        mTiming = timing;
        mNotified = true;
    }
    // Notify outside the critical section so woken waiters do not
    // immediately block again on mMutex.
    mCondition.notify_all();
}

}  // namespace android::hardware::neuralnetworks::V1_2::implementation
+5 −8
Original line number Diff line number Diff line
@@ -45,15 +45,12 @@ namespace generated_tests::mobilenet_quantized {
const test_helper::TestModel& get_test_model();
}  // namespace generated_tests::mobilenet_quantized

namespace android::hardware::neuralnetworks::V1_3::vts::functional {
namespace android::hardware::neuralnetworks::V1_2::vts::functional {

using namespace test_helper;
using implementation::PreparedModelCallback;
using V1_0::ErrorStatus;
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::IPreparedModel;
using V1_2::OperationType;
using V1_2::implementation::PreparedModelCallback;

namespace float32_model {

@@ -305,7 +302,7 @@ class CompilationCachingTestBase : public testing::Test {
    // See if the service can handle the model.
    bool isModelFullySupported(const Model& model) {
        bool fullySupportsModel = false;
        Return<void> supportedCall = kDevice->getSupportedOperations_1_3(
        Return<void> supportedCall = kDevice->getSupportedOperations_1_2(
                model,
                [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
                    ASSERT_EQ(ErrorStatus::NONE, status);
@@ -326,7 +323,7 @@ class CompilationCachingTestBase : public testing::Test {
        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
        hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
        Return<ErrorStatus> prepareLaunchStatus =
                kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER,
                kDevice->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER,
                                          modelCache, dataCache, cacheToken, preparedModelCallback);
        ASSERT_TRUE(prepareLaunchStatus.isOk());
        ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
@@ -1374,4 +1371,4 @@ INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
                                         testing::Range(0U, 10U)),
                        printCompilationCachingSecurityTest);

}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
Loading