Loading neuralnetworks/1.0/vts/functional/BasicTests.cpp +136 −0 Original line number Diff line number Diff line Loading @@ -18,8 +18,12 @@ #include "VtsHalNeuralnetworks.h" #include "1.0/Callbacks.h" namespace android::hardware::neuralnetworks::V1_0::vts::functional { using implementation::PreparedModelCallback; // create device test TEST_P(NeuralnetworksHidlTest, CreateDevice) {} Loading @@ -43,4 +47,136 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { EXPECT_TRUE(ret.isOk()); } // detect cycle TEST_P(NeuralnetworksHidlTest, CycleTest) { // opnd0 = TENSOR_FLOAT32 // model input // opnd1 = TENSOR_FLOAT32 // model input // opnd2 = INT32 // model input // opnd3 = ADD(opnd0, opnd4, opnd2) // opnd4 = ADD(opnd1, opnd3, opnd2) // opnd5 = ADD(opnd4, opnd0, opnd2) // model output // // +-----+ // | | // v | // 3 = ADD(0, 4, 2) | // | | // +----------+ | // | | // v | // 4 = ADD(1, 3, 2) | // | | // +----------------+ // | // | // +-------+ // | // v // 5 = ADD(4, 0, 2) const std::vector<Operand> operands = { { // operands[0] .type = OperandType::TENSOR_FLOAT32, .dimensions = {1}, .numberOfConsumers = 2, .scale = 0.0f, .zeroPoint = 0, .lifetime = OperandLifeTime::MODEL_INPUT, .location = {.poolIndex = 0, .offset = 0, .length = 0}, }, { // operands[1] .type = OperandType::TENSOR_FLOAT32, .dimensions = {1}, .numberOfConsumers = 1, .scale = 0.0f, .zeroPoint = 0, .lifetime = OperandLifeTime::MODEL_INPUT, .location = {.poolIndex = 0, .offset = 0, .length = 0}, }, { // operands[2] .type = OperandType::INT32, .dimensions = {}, .numberOfConsumers = 3, .scale = 0.0f, .zeroPoint = 0, .lifetime = OperandLifeTime::MODEL_INPUT, .location = {.poolIndex = 0, .offset = 0, .length = 0}, }, { // operands[3] .type = OperandType::TENSOR_FLOAT32, .dimensions = {1}, .numberOfConsumers = 1, .scale = 0.0f, .zeroPoint = 0, .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, .location = {.poolIndex = 0, .offset = 0, .length = 0}, }, { // operands[4] .type = OperandType::TENSOR_FLOAT32, 
.dimensions = {1}, .numberOfConsumers = 2, .scale = 0.0f, .zeroPoint = 0, .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, .location = {.poolIndex = 0, .offset = 0, .length = 0}, }, { // operands[5] .type = OperandType::TENSOR_FLOAT32, .dimensions = {1}, .numberOfConsumers = 0, .scale = 0.0f, .zeroPoint = 0, .lifetime = OperandLifeTime::MODEL_OUTPUT, .location = {.poolIndex = 0, .offset = 0, .length = 0}, }, }; const std::vector<Operation> operations = { {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}}, {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}}, {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}}, }; const Model model = { .operands = operands, .operations = operations, .inputIndexes = {0, 1, 2}, .outputIndexes = {5}, .operandValues = {}, .pools = {}, }; // ensure that getSupportedOperations() checks model validity ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE; Return<void> supportedOpsReturn = kDevice->getSupportedOperations( model, [&model, &supportedOpsErrorStatus](ErrorStatus status, const hidl_vec<bool>& supported) { supportedOpsErrorStatus = status; if (status == ErrorStatus::NONE) { ASSERT_EQ(supported.size(), model.operations.size()); } }); ASSERT_TRUE(supportedOpsReturn.isOk()); ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT); // ensure that prepareModel() checks model validity sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback; Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel(model, preparedModelCallback); ASSERT_TRUE(prepareLaunchReturn.isOk()); // Note that preparation can fail for reasons other than an // invalid model (invalid model should result in // INVALID_ARGUMENT) -- for example, perhaps not all // operations are supported, or perhaps the device hit some // kind of capacity limit. 
EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE); EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE); EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr); } } // namespace android::hardware::neuralnetworks::V1_0::vts::functional neuralnetworks/1.0/vts/functional/Utils.cpp +43 −0 Original line number Diff line number Diff line Loading @@ -29,7 +29,11 @@ #include <gtest/gtest.h> #include <algorithm> #include <cstring> #include <functional> #include <iostream> #include <map> #include <numeric> #include <vector> namespace android::hardware::neuralnetworks { Loading Loading @@ -172,6 +176,45 @@ std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& reques return outputBuffers; } uint32_t sizeOfData(V1_0::OperandType type) { switch (type) { case V1_0::OperandType::FLOAT32: case V1_0::OperandType::INT32: case V1_0::OperandType::UINT32: case V1_0::OperandType::TENSOR_FLOAT32: case V1_0::OperandType::TENSOR_INT32: return 4; case V1_0::OperandType::TENSOR_QUANT8_ASYMM: return 1; default: CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); return 0; } } static bool isTensor(V1_0::OperandType type) { switch (type) { case V1_0::OperandType::FLOAT32: case V1_0::OperandType::INT32: case V1_0::OperandType::UINT32: return false; case V1_0::OperandType::TENSOR_FLOAT32: case V1_0::OperandType::TENSOR_INT32: case V1_0::OperandType::TENSOR_QUANT8_ASYMM: return true; default: CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); return false; } } uint32_t sizeOfData(const V1_0::Operand& operand) { const uint32_t dataSize = sizeOfData(operand.type); if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0; return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize, std::multiplies<>{}); } std::string gtestCompliantName(std::string name) { // gtest test names must only contain alphanumeric characters std::replace_if( Loading Loading
// NOTE(review): this region was a verbatim duplicate of the
// BasicTests.cpp / Utils.cpp excerpt above -- an artifact of the diff-view
// page extraction, not real source. Keeping it would register the same
// gtest TEST_P names twice and define sizeOfData()/isTensor() twice,
// violating the One Definition Rule, so the duplicate copy is removed.