Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5a080f41 authored by Miao Wang, committed by Gerrit Code Review
Browse files

Merge changes from topics "replace_asymm", "fp16-op-add"

* changes:
  Replace TENSOR_QUANT16_ASYMM with TENSOR_QUANT16_SYMM
  Fix VTS ValidationTest for 1.2 ops.
  Adds float16 support to generated tests.
  Autogenerates VTS ValidationTest tests.
  Fix VTS ValidationTest for 1.2 ops.
  Separates VTS tests by HAL version.
parents 8f7b63db 217c4071
Loading
Loading
Loading
Loading
+21 −6
Original line number Diff line number Diff line
@@ -39,17 +39,14 @@ cc_library_static {
    ],
}

cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
cc_defaults {
    name: "VtsHalNeuralNetworksTargetTestDefaults",
    defaults: ["VtsHalTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
@@ -66,4 +63,22 @@ cc_test {
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
    // error in ld.gold.
    arch: {
        arm: {
            sanitize: {
                never: true,
            },
        },
    },
}

cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
    ],
}
+20 −19
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@ using ::test_helper::for_each;
using ::test_helper::Int32Operands;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::MixedTypedIndex;
using ::test_helper::Quant8Operands;
using ::test_helper::resize_accordingly;

@@ -63,14 +64,16 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr
    copy_back_<int32_t>(dst, ra, src);
    copy_back_<uint8_t>(dst, ra, src);
    copy_back_<int16_t>(dst, ra, src);
    static_assert(4 == std::tuple_size<MixedTyped>::value,
    copy_back_<_Float16>(dst, ra, src);
    static_assert(5 == std::tuple_size<MixedTyped>::value,
                  "Number of types in MixedTyped changed, but copy_back function wasn't updated");
}

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples, float fpAtol = 1e-5f,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                           float fpRtol = 1e-5f) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;
@@ -78,13 +81,20 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
    int example_no = 1;
    for (auto& example : examples) {
        SCOPED_TRACE(example_no++);

        const MixedTyped& inputs = example.operands.first;
        const MixedTyped& golden = example.operands.second;

        const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
            // TODO: Adjust the error limit based on testing.
            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
            fpAtol = 5.0f * 0.0009765625f;
            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
            fpRtol = 5.0f * 0.0009765625f;
        }

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
@@ -228,7 +238,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
    ASSERT_NE(nullptr, preparedModel.get());

    float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
}

void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -272,13 +283,8 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());

    // TODO: Adjust the error limit based on testing.
    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
                                                           : 5.0f * 0.0009765625f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16);
}

// TODO: Reduce code duplication.
@@ -323,13 +329,8 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());

    // TODO: Adjust the error limit based on testing.
    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
                                                           : 5.0f * 0.0009765625f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16);
}

}  // namespace generated_tests
+2 −0
Original line number Diff line number Diff line
@@ -45,6 +45,8 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCa
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"

+0 −200
Original line number Diff line number Diff line
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

using MixedTypedExample = test_helper::MixedTypedExample;

#define FOR_EACH_TEST_MODEL(FN)                          \
    FN(add_broadcast_quant8)                             \
    FN(add)                                              \
    FN(add_quant8)                                       \
    FN(avg_pool_float_1)                                 \
    FN(avg_pool_float_2)                                 \
    FN(avg_pool_float_3)                                 \
    FN(avg_pool_float_4)                                 \
    FN(avg_pool_float_5)                                 \
    FN(avg_pool_quant8_1)                                \
    FN(avg_pool_quant8_2)                                \
    FN(avg_pool_quant8_3)                                \
    FN(avg_pool_quant8_4)                                \
    FN(avg_pool_quant8_5)                                \
    FN(concat_float_1)                                   \
    FN(concat_float_2)                                   \
    FN(concat_float_3)                                   \
    FN(concat_quant8_1)                                  \
    FN(concat_quant8_2)                                  \
    FN(concat_quant8_3)                                  \
    FN(conv_1_h3_w2_SAME)                                \
    FN(conv_1_h3_w2_VALID)                               \
    FN(conv_3_h3_w2_SAME)                                \
    FN(conv_3_h3_w2_VALID)                               \
    FN(conv_float_2)                                     \
    FN(conv_float_channels)                              \
    FN(conv_float_channels_weights_as_inputs)            \
    FN(conv_float_large)                                 \
    FN(conv_float_large_weights_as_inputs)               \
    FN(conv_float)                                       \
    FN(conv_float_weights_as_inputs)                     \
    FN(conv_quant8_2)                                    \
    FN(conv_quant8_channels)                             \
    FN(conv_quant8_channels_weights_as_inputs)           \
    FN(conv_quant8_large)                                \
    FN(conv_quant8_large_weights_as_inputs)              \
    FN(conv_quant8)                                      \
    FN(conv_quant8_overflow)                             \
    FN(conv_quant8_overflow_weights_as_inputs)           \
    FN(conv_quant8_weights_as_inputs)                    \
    FN(depth_to_space_float_1)                           \
    FN(depth_to_space_float_2)                           \
    FN(depth_to_space_float_3)                           \
    FN(depth_to_space_quant8_1)                          \
    FN(depth_to_space_quant8_2)                          \
    FN(depthwise_conv2d_float_2)                         \
    FN(depthwise_conv2d_float_large_2)                   \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large)                     \
    FN(depthwise_conv2d_float_large_weights_as_inputs)   \
    FN(depthwise_conv2d_float)                           \
    FN(depthwise_conv2d_float_weights_as_inputs)         \
    FN(depthwise_conv2d_quant8_2)                        \
    FN(depthwise_conv2d_quant8_large)                    \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs)  \
    FN(depthwise_conv2d_quant8)                          \
    FN(depthwise_conv2d_quant8_weights_as_inputs)        \
    FN(depthwise_conv)                                   \
    FN(dequantize)                                       \
    FN(embedding_lookup)                                 \
    FN(floor)                                            \
    FN(fully_connected_float_2)                          \
    FN(fully_connected_float_large)                      \
    FN(fully_connected_float_large_weights_as_inputs)    \
    FN(fully_connected_float)                            \
    FN(fully_connected_float_weights_as_inputs)          \
    FN(fully_connected_quant8_2)                         \
    FN(fully_connected_quant8_large)                     \
    FN(fully_connected_quant8_large_weights_as_inputs)   \
    FN(fully_connected_quant8)                           \
    FN(fully_connected_quant8_weights_as_inputs)         \
    FN(hashtable_lookup_float)                           \
    FN(hashtable_lookup_quant8)                          \
    FN(l2_normalization_2)                               \
    FN(l2_normalization_large)                           \
    FN(l2_normalization)                                 \
    FN(l2_pool_float_2)                                  \
    FN(l2_pool_float_large)                              \
    FN(l2_pool_float)                                    \
    FN(local_response_norm_float_1)                      \
    FN(local_response_norm_float_2)                      \
    FN(local_response_norm_float_3)                      \
    FN(local_response_norm_float_4)                      \
    FN(logistic_float_1)                                 \
    FN(logistic_float_2)                                 \
    FN(logistic_quant8_1)                                \
    FN(logistic_quant8_2)                                \
    FN(lsh_projection_2)                                 \
    FN(lsh_projection)                                   \
    FN(lsh_projection_weights_as_inputs)                 \
    FN(lstm2)                                            \
    FN(lstm2_state2)                                     \
    FN(lstm2_state)                                      \
    FN(lstm3)                                            \
    FN(lstm3_state2)                                     \
    FN(lstm3_state3)                                     \
    FN(lstm3_state)                                      \
    FN(lstm)                                             \
    FN(lstm_state2)                                      \
    FN(lstm_state)                                       \
    FN(max_pool_float_1)                                 \
    FN(max_pool_float_2)                                 \
    FN(max_pool_float_3)                                 \
    FN(max_pool_float_4)                                 \
    FN(max_pool_quant8_1)                                \
    FN(max_pool_quant8_2)                                \
    FN(max_pool_quant8_3)                                \
    FN(max_pool_quant8_4)                                \
    FN(mobilenet_224_gender_basic_fixed)                 \
    FN(mobilenet_quantized)                              \
    FN(mul_broadcast_quant8)                             \
    FN(mul)                                              \
    FN(mul_quant8)                                       \
    FN(mul_relu)                                         \
    FN(relu1_float_1)                                    \
    FN(relu1_float_2)                                    \
    FN(relu1_quant8_1)                                   \
    FN(relu1_quant8_2)                                   \
    FN(relu6_float_1)                                    \
    FN(relu6_float_2)                                    \
    FN(relu6_quant8_1)                                   \
    FN(relu6_quant8_2)                                   \
    FN(relu_float_1)                                     \
    FN(relu_float_2)                                     \
    FN(relu_quant8_1)                                    \
    FN(relu_quant8_2)                                    \
    FN(reshape)                                          \
    FN(reshape_quant8)                                   \
    FN(reshape_quant8_weights_as_inputs)                 \
    FN(reshape_weights_as_inputs)                        \
    FN(resize_bilinear_2)                                \
    FN(resize_bilinear)                                  \
    FN(rnn)                                              \
    FN(rnn_state)                                        \
    FN(softmax_float_1)                                  \
    FN(softmax_float_2)                                  \
    FN(softmax_quant8_1)                                 \
    FN(softmax_quant8_2)                                 \
    FN(space_to_depth_float_1)                           \
    FN(space_to_depth_float_2)                           \
    FN(space_to_depth_float_3)                           \
    FN(space_to_depth_quant8_1)                          \
    FN(space_to_depth_quant8_2)                          \
    FN(svdf2)                                            \
    FN(svdf)                                             \
    FN(svdf_state)                                       \
    FN(tanh)

#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function {                            \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel();                        \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

#endif  // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
+11 −30
Original line number Diff line number Diff line
@@ -14,40 +14,21 @@
// limitations under the License.
//

// Tests for V1_0 models using the V1_1 HAL.
cc_test {
    name: "VtsHalNeuralnetworksV1_1CompatV1_0TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "GeneratedTestsV1_0.cpp",
    ],
}

// Tests for V1_1 models.
cc_test {
    name: "VtsHalNeuralnetworksV1_1TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_utils",
        "VtsHalNeuralnetworksTest_utils",
    ],
    header_libs: [
        "libneuralnetworks_headers",
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
    // error in ld.gold.
    arch: {
        arm: {
            sanitize: {
                never: true,
            },
        },
    },
}
Loading