Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit af51663e authored by David Gross's avatar David Gross Committed by Michael Butler
Browse files

More tests for graph validation.

- detect cycle (CycleTest)
- detect bad execution order (mutateExecutionOrderTest)
- detect lifetime inconsistent with whether operand is written (mutateOperandLifeTimeTest)
- detect lifetime inconsistent with Model inputIndexes/outputIndexes (mutateOperandInputOutputTest)
- detect incorrect number of consumers (mutateOperandNumberOfConsumersTest)
- detect operand written multiple times (mutateOperandAddWriterTest)
- detect operand never written (mutateOperationRemoveWriteTest)

Bug: 66478689
Test: VtsHalNeuralnetworksV1_*TargetTest

Change-Id: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
parent dbbe40a1
Loading
Loading
Loading
Loading
+136 −0
Original line number Diff line number Diff line
@@ -18,8 +18,12 @@

#include "VtsHalNeuralnetworks.h"

#include "1.0/Callbacks.h"

namespace android::hardware::neuralnetworks::V1_0::vts::functional {

using implementation::PreparedModelCallback;

// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}  // empty body: fixture construction itself is the check

@@ -43,4 +47,136 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    EXPECT_TRUE(ret.isOk());
}

// detect cycle
TEST_P(NeuralnetworksHidlTest, CycleTest) {
    // Build a model whose operation graph contains a cycle:
    //   opnd3 = ADD(opnd0, opnd4, opnd2)   // reads opnd4 ...
    //   opnd4 = ADD(opnd1, opnd3, opnd2)   // ... which is produced from opnd3
    //   opnd5 = ADD(opnd4, opnd0, opnd2)   // model output
    // opnd0 and opnd1 (TENSOR_FLOAT32) and opnd2 (INT32) are model inputs.
    // Because opnd3 and opnd4 each consume the other's output, no valid
    // execution order exists, so the service must reject the model.

    // Constructs a rank-1 TENSOR_FLOAT32 operand with the given consumer
    // count and lifetime; every other field matches the values used for the
    // scalar operand below.
    const auto makeFloatOperand = [](uint32_t numberOfConsumers,
                                     OperandLifeTime lifetime) -> Operand {
        return {
                .type = OperandType::TENSOR_FLOAT32,
                .dimensions = {1},
                .numberOfConsumers = numberOfConsumers,
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = lifetime,
                .location = {.poolIndex = 0, .offset = 0, .length = 0},
        };
    };

    const std::vector<Operand> operands = {
            makeFloatOperand(2, OperandLifeTime::MODEL_INPUT),  // operands[0]
            makeFloatOperand(1, OperandLifeTime::MODEL_INPUT),  // operands[1]
            {
                    // operands[2]: scalar consumed by all three ADD operations
                    .type = OperandType::INT32,
                    .dimensions = {},
                    .numberOfConsumers = 3,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::MODEL_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            makeFloatOperand(1, OperandLifeTime::TEMPORARY_VARIABLE),  // operands[3]
            makeFloatOperand(2, OperandLifeTime::TEMPORARY_VARIABLE),  // operands[4]
            makeFloatOperand(0, OperandLifeTime::MODEL_OUTPUT),        // operands[5]
    };

    const std::vector<Operation> operations = {
            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
    };

    const Model model = {
            .operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1, 2},
            .outputIndexes = {5},
            .operandValues = {},
            .pools = {},
    };

    // The service must detect the invalid model in getSupportedOperations() ...
    ErrorStatus validationStatus = ErrorStatus::GENERAL_FAILURE;
    Return<void> validationReturn = kDevice->getSupportedOperations(
            model, [&model, &validationStatus](ErrorStatus status,
                                               const hidl_vec<bool>& supported) {
                validationStatus = status;
                if (status == ErrorStatus::NONE) {
                    ASSERT_EQ(supported.size(), model.operations.size());
                }
            });
    ASSERT_TRUE(validationReturn.isOk());
    ASSERT_EQ(validationStatus, ErrorStatus::INVALID_ARGUMENT);

    // ... and again in prepareModel().
    sp<PreparedModelCallback> callback = new PreparedModelCallback;
    Return<ErrorStatus> launchStatus = kDevice->prepareModel(model, callback);
    ASSERT_TRUE(launchStatus.isOk());
    // Preparation can fail for reasons other than an invalid model (an invalid
    // model should produce INVALID_ARGUMENT) -- for example, not all
    // operations may be supported, or the device may have hit some capacity
    // limit -- so only require that preparation does not succeed.
    EXPECT_NE(launchStatus, ErrorStatus::NONE);
    EXPECT_NE(callback->getStatus(), ErrorStatus::NONE);
    EXPECT_EQ(callback->getPreparedModel(), nullptr);
}

}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
+43 −0
Original line number Diff line number Diff line
@@ -29,7 +29,11 @@

#include <gtest/gtest.h>
#include <algorithm>
#include <cstring>
#include <functional>
#include <iostream>
#include <map>
#include <numeric>
#include <vector>

namespace android::hardware::neuralnetworks {
@@ -172,6 +176,45 @@ std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& reques
    return outputBuffers;
}

// Returns the size, in bytes, of one element of the given operand type.
// Aborts (via CHECK) on any type not explicitly handled below.
uint32_t sizeOfData(V1_0::OperandType type) {
    if (type == V1_0::OperandType::TENSOR_QUANT8_ASYMM) {
        return 1;
    }
    if (type == V1_0::OperandType::FLOAT32 || type == V1_0::OperandType::INT32 ||
        type == V1_0::OperandType::UINT32 || type == V1_0::OperandType::TENSOR_FLOAT32 ||
        type == V1_0::OperandType::TENSOR_INT32) {
        return 4;
    }
    CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
    return 0;
}

// Returns true for the tensor operand types and false for the scalar ones.
// Aborts (via CHECK) on any type not explicitly handled below.
static bool isTensor(V1_0::OperandType type) {
    if (type == V1_0::OperandType::TENSOR_FLOAT32 ||
        type == V1_0::OperandType::TENSOR_INT32 ||
        type == V1_0::OperandType::TENSOR_QUANT8_ASYMM) {
        return true;
    }
    if (type == V1_0::OperandType::FLOAT32 || type == V1_0::OperandType::INT32 ||
        type == V1_0::OperandType::UINT32) {
        return false;
    }
    CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
    return false;
}

// Returns the number of bytes needed to hold the operand's value: the element
// size multiplied by every dimension. A tensor with no dimensions (unspecified
// rank) yields zero.
uint32_t sizeOfData(const V1_0::Operand& operand) {
    if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
    uint32_t size = sizeOfData(operand.type);
    for (uint32_t dimension : operand.dimensions) {
        size *= dimension;
    }
    return size;
}

std::string gtestCompliantName(std::string name) {
    // gtest test names must only contain alphanumeric characters
    std::replace_if(
+468 −21

File changed.

Preview size limit exceeded, changes collapsed.

+22 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware_buffer.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <iosfwd>
#include <string>
@@ -108,6 +109,15 @@ inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    vec->resize(vec->size() - 1);
}

// Removes the occurrence of |val| from |*vec|, failing the current test if it
// is not present. Assumes there is exactly one instance of the value in the
// vector.
template <typename Type>
inline void hidl_vec_remove(hidl_vec<Type>* vec, const Type& val) {
    CHECK(vec != nullptr);
    const auto first = vec->begin();
    const auto last = vec->end();
    const auto pos = std::find(first, last, val);
    ASSERT_NE(pos, last);
    hidl_vec_removeAt(vec, pos - first);
}

template <typename Type>
inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    CHECK(vec != nullptr);
@@ -117,6 +127,18 @@ inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    return index;
}

// Returns the amount of space needed to store a value of the specified type.
//
// Aborts if the specified type is an extension type or OEM type.
uint32_t sizeOfData(V1_0::OperandType type);

// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a non-extension, non-OEM tensor with unspecified
// rank or at least one unspecified dimension, returns zero.
//
// Aborts if the specified type is an extension type or OEM type.
uint32_t sizeOfData(const V1_0::Operand& operand);

template <typename Type>
using Named = std::pair<std::string, Type>;

+139 −0
Original line number Diff line number Diff line
@@ -18,10 +18,16 @@

#include "VtsHalNeuralnetworks.h"

#include "1.0/Callbacks.h"

namespace android::hardware::neuralnetworks::V1_1::vts::functional {

using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
using V1_0::Operand;
using V1_0::OperandLifeTime;
using V1_0::OperandType;
using V1_0::implementation::PreparedModelCallback;

// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}  // empty body: fixture construction itself is the check
@@ -48,4 +54,137 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    EXPECT_TRUE(ret.isOk());
}

// detect cycle
TEST_P(NeuralnetworksHidlTest, CycleTest) {
    // Build a model whose operation graph contains a cycle:
    //   opnd3 = ADD(opnd0, opnd4, opnd2)   // reads opnd4 ...
    //   opnd4 = ADD(opnd1, opnd3, opnd2)   // ... which is produced from opnd3
    //   opnd5 = ADD(opnd4, opnd0, opnd2)   // model output
    // opnd0 and opnd1 (TENSOR_FLOAT32) and opnd2 (INT32) are model inputs.
    // Because opnd3 and opnd4 each consume the other's output, no valid
    // execution order exists, so the service must reject the model.

    // Constructs a rank-1 TENSOR_FLOAT32 operand with the given consumer
    // count and lifetime; every other field matches the values used for the
    // scalar operand below.
    const auto makeFloatOperand = [](uint32_t numberOfConsumers,
                                     OperandLifeTime lifetime) -> Operand {
        return {
                .type = OperandType::TENSOR_FLOAT32,
                .dimensions = {1},
                .numberOfConsumers = numberOfConsumers,
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = lifetime,
                .location = {.poolIndex = 0, .offset = 0, .length = 0},
        };
    };

    const std::vector<Operand> operands = {
            makeFloatOperand(2, OperandLifeTime::MODEL_INPUT),  // operands[0]
            makeFloatOperand(1, OperandLifeTime::MODEL_INPUT),  // operands[1]
            {
                    // operands[2]: scalar consumed by all three ADD operations
                    .type = OperandType::INT32,
                    .dimensions = {},
                    .numberOfConsumers = 3,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::MODEL_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            makeFloatOperand(1, OperandLifeTime::TEMPORARY_VARIABLE),  // operands[3]
            makeFloatOperand(2, OperandLifeTime::TEMPORARY_VARIABLE),  // operands[4]
            makeFloatOperand(0, OperandLifeTime::MODEL_OUTPUT),        // operands[5]
    };

    const std::vector<Operation> operations = {
            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
    };

    const Model model = {
            .operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1, 2},
            .outputIndexes = {5},
            .operandValues = {},
            .pools = {},
    };

    // The service must detect the invalid model in
    // getSupportedOperations_1_1() ...
    ErrorStatus validationStatus = ErrorStatus::GENERAL_FAILURE;
    Return<void> validationReturn = kDevice->getSupportedOperations_1_1(
            model, [&model, &validationStatus](ErrorStatus status,
                                               const hidl_vec<bool>& supported) {
                validationStatus = status;
                if (status == ErrorStatus::NONE) {
                    ASSERT_EQ(supported.size(), model.operations.size());
                }
            });
    ASSERT_TRUE(validationReturn.isOk());
    ASSERT_EQ(validationStatus, ErrorStatus::INVALID_ARGUMENT);

    // ... and again in prepareModel_1_1().
    sp<PreparedModelCallback> callback = new PreparedModelCallback;
    Return<ErrorStatus> launchStatus = kDevice->prepareModel_1_1(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, callback);
    ASSERT_TRUE(launchStatus.isOk());
    // Preparation can fail for reasons other than an invalid model (an invalid
    // model should produce INVALID_ARGUMENT) -- for example, not all
    // operations may be supported, or the device may have hit some capacity
    // limit -- so only require that preparation does not succeed.
    EXPECT_NE(launchStatus, ErrorStatus::NONE);
    EXPECT_NE(callback->getStatus(), ErrorStatus::NONE);
    EXPECT_EQ(callback->getPreparedModel(), nullptr);
}

}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
Loading