neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp  (+44 −24)

@@ -51,14 +51,13 @@
 using ::test_helper::for_each;
 using ::test_helper::Int32Operands;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
-using ::test_helper::MixedTypedIndex;
 using ::test_helper::Quant8Operands;
 using ::test_helper::resize_accordingly;

 template <typename T>
-void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
-    MixedTyped& test = *dst;
-    for_each<T>(test, [&ra, src](int index, std::vector<T>& m) {
+void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
+                char* src) {
+    for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
         ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
         char* begin = src + ra[index].location.offset;
         memcpy(m.data(), begin, ra[index].location.length);
@@ -66,14 +65,14 @@
 }

 void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
-    copy_back_<float>(dst, ra, src);
-    copy_back_<int32_t>(dst, ra, src);
-    copy_back_<uint8_t>(dst, ra, src);
-    copy_back_<int16_t>(dst, ra, src);
-    copy_back_<_Float16>(dst, ra, src);
-    copy_back_<bool8>(dst, ra, src);
-    copy_back_<int8_t>(dst, ra, src);
-    static_assert(7 == std::tuple_size<MixedTyped>::value,
+    copy_back_(&dst->float32Operands, ra, src);
+    copy_back_(&dst->int32Operands, ra, src);
+    copy_back_(&dst->quant8Operands, ra, src);
+    copy_back_(&dst->quant16Operands, ra, src);
+    copy_back_(&dst->float16Operands, ra, src);
+    copy_back_(&dst->bool8Operands, ra, src);
+    copy_back_(&dst->quant8ChannelOperands, ra, src);
+    static_assert(7 == MixedTyped::kNumTypes,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
@@ -115,7 +114,8 @@
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
                            bool hasRelaxedFloat32Model = false, float fpAtol = kDefaultAtol,
-                           float fpRtol = kDefaultRtol, Synchronously sync = Synchronously::NO) {
+                           float fpRtol = kDefaultRtol, Synchronously sync = Synchronously::NO,
+                           bool testDynamicOutputShape = false) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
@@ -125,7 +125,7 @@
         const MixedTyped& inputs = example.operands.first;
         const MixedTyped& golden = example.operands.second;

-        const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
+        const bool hasFloat16Inputs = !inputs.float16Operands.empty();
         if (hasRelaxedFloat32Model || hasFloat16Inputs) {
             // TODO: Adjust the error limit based on testing.
             // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
@@ -237,10 +237,24 @@
             executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
         }

+        if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
+            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                         "execute model that it does not support.";
+            std::cout << "[          ] Early termination of test because vendor service cannot "
+                         "execute model that it does not support."
+                      << std::endl;
+            return;
+        }
         ASSERT_EQ(ErrorStatus::NONE, executionStatus);
-        // TODO(xusongw): Check if the returned output shapes match with expectation once the
-        //                sample driver implementation of dynamic output shape is finished.
-        ASSERT_EQ(outputShapes.size(), 0);
+
+        // Go through all outputs, overwrite output dimensions with returned output shapes
+        if (testDynamicOutputShape) {
+            ASSERT_NE(outputShapes.size(), 0);
+            for_each<uint32_t>(test.operandDimensions,
+                               [&outputShapes](int idx, std::vector<uint32_t>& dim) {
+                                   dim = outputShapes[idx].dimensions;
+                               });
+        }

         // validate results
         outputMemory->read();
@@ -261,9 +275,10 @@
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, Synchronously sync) {
+                           bool hasRelaxedFloat32Model, Synchronously sync,
+                           bool testDynamicOutputShape) {
     EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
-                          kDefaultRtol, sync);
+                          kDefaultRtol, sync, testDynamicOutputShape);
 }

 static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -319,7 +334,8 @@
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
+                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Synchronously::NO,
+                          /*testDynamicOutputShape=*/false);
 }

 void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -365,12 +381,14 @@
     ASSERT_NE(nullptr, preparedModel.get());

     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16);
+                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Synchronously::NO,
+                          /*testDynamicOutputShape=*/false);
 }

 // TODO: Reduce code duplication.
 void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
-             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
+             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+             bool testDynamicOutputShape) {
     V1_2::Model model = create_model();

     // see if service can handle model
@@ -412,9 +430,11 @@
     ASSERT_NE(nullptr, preparedModel.get());

     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::NO);
+                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
+                          testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::YES);
+                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
+                          testDynamicOutputShape);
 }

 }  // namespace generated_tests
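The core of the copy_back change is that MixedTyped is no longer a std::tuple addressed through MixedTypedIndex but a struct with named per-type operand maps, so copy_back_ can take the concrete std::map<int, std::vector<T>> and let template argument deduction pick T. Below is a minimal standalone sketch of that pattern, assuming a simplified two-type MixedTyped; the field names mirror the diff, while the surrounding types (DataLocation, RequestArgument) are illustrative stand-ins, not the real HIDL definitions.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <map>
    #include <vector>

    // Simplified stand-in for test_helper::MixedTyped, trimmed to two
    // operand types for brevity.
    struct MixedTyped {
        static constexpr size_t kNumTypes = 2;
        std::map<int, std::vector<float>> float32Operands;
        std::map<int, std::vector<int32_t>> int32Operands;
    };

    struct DataLocation {
        uint32_t offset;
        uint32_t length;
    };
    struct RequestArgument {
        DataLocation location;
    };

    // T is deduced from the concrete map the caller hands in, so a single
    // template covers every operand type with no explicit <T> at the call site.
    template <typename T>
    void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
                    const char* src) {
        for (auto& [index, m] : *dst) {
            const char* begin = src + ra[index].location.offset;
            std::memcpy(m.data(), begin, ra[index].location.length);
        }
    }

    void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, const char* src) {
        copy_back_(&dst->float32Operands, ra, src);
        copy_back_(&dst->int32Operands, ra, src);
        static_assert(2 == MixedTyped::kNumTypes,
                      "Number of types in MixedTyped changed, but copy_back wasn't updated");
    }

The static_assert plays the same role as in the patch: if someone adds an operand type to MixedTyped without adding a matching copy_back_ call, the build fails instead of silently skipping outputs.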
neuralnetworks/1.2/vts/functional/Android.bp  (+9 −0)

@@ -21,6 +21,9 @@ cc_test {
     srcs: [
         "GeneratedTestsV1_0.cpp",
     ],
+    cflags: [
+        "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
+    ],
     test_suites: ["general-tests"],
 }

@@ -31,6 +34,9 @@ cc_test {
     srcs: [
         "GeneratedTestsV1_1.cpp",
     ],
+    cflags: [
+        "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
+    ],
     test_suites: ["general-tests"],
 }

@@ -42,5 +48,8 @@ cc_test {
         "BasicTests.cpp",
         "GeneratedTests.cpp",
     ],
+    cflags: [
+        "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
+    ],
     test_suites: ["general-tests"],
 }
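Each of the three cc_test modules gains the -DNN_TEST_DYNAMIC_OUTPUT_SHAPE cflag; presumably the test sources consult this macro to decide whether to pass testDynamicOutputShape = true into Execute. A hedged sketch of how a caller could translate the build flag into the new argument follows; the macro name is real (it is set in Android.bp above), but the constant and the commented call site are illustrative, not from the patch.

    // Fold the compile-time build flag into a single constant so every
    // generated test forwards the same value.
    #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
    constexpr bool kTestDynamicOutputShape = true;
    #else
    constexpr bool kTestDynamicOutputShape = false;
    #endif

    // ...inside a generated test body, hypothetically:
    // Execute(device, createModel, isIgnored, examples, kTestDynamicOutputShape);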
neuralnetworks/1.2/vts/functional/GeneratedTests.cpp  (+2 −1)

@@ -33,7 +33,8 @@
 namespace neuralnetworks {
 namespace generated_tests {
 using ::test_helper::MixedTypedExample;
 extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
-                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+                    std::function<bool(int)>, const std::vector<MixedTypedExample>&,
+                    bool testDynamicOutputShape = false);
 }  // namespace generated_tests

 namespace V1_2 {
neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp  (+2 −1)

@@ -33,7 +33,8 @@
 namespace neuralnetworks {
 namespace generated_tests {
 using ::test_helper::MixedTypedExample;
 extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
-                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+                    std::function<bool(int)>, const std::vector<MixedTypedExample>&,
+                    bool testDynamicOutputShape = false);
 }  // namespace generated_tests

 namespace V1_2 {
neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp  (+2 −1)

@@ -33,7 +33,8 @@
 namespace neuralnetworks {
 namespace generated_tests {
 using ::test_helper::MixedTypedExample;
 extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
-                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+                    std::function<bool(int)>, const std::vector<MixedTypedExample>&,
+                    bool testDynamicOutputShape = false);
 }  // namespace generated_tests

 namespace V1_2 {
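All three translation units above redeclare Execute with a defaulted trailing bool, which is what lets the pre-existing generated tests keep calling the four-argument form unchanged while new dynamic-output-shape tests opt in explicitly. A minimal standalone sketch of that default-argument pattern; Execute here is a simplified stand-in with the same shape as the real declaration, not the actual VTS function.

    #include <iostream>
    #include <vector>

    struct Example {};

    // Mirrors the declarations above: the trailing parameter defaults to false,
    // so callers written against the old signature still compile.
    void Execute(const std::vector<Example>& examples, bool testDynamicOutputShape = false) {
        std::cout << "testDynamicOutputShape = " << std::boolalpha
                  << testDynamicOutputShape << '\n';
    }

    int main() {
        std::vector<Example> examples;
        Execute(examples);        // legacy call sites: flag defaults to false
        Execute(examples, true);  // dynamic-output-shape runs opt in explicitly
    }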