neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp  +17 −5

@@ -33,6 +33,7 @@ hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem
 namespace generated_tests {
 using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::filter;
 using ::generated_tests::for_all;
 using ::generated_tests::for_each;
 using ::generated_tests::resize_accordingly;

@@ -44,6 +45,7 @@ using ::generated_tests::Quant8Operands;
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
 void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+             std::function<bool(int)> is_ignored,
              const std::vector<MixedTypedExampleType>& examples) {
     Model model = create_model();
     sp<IPreparedModel> preparedModel;

@@ -166,18 +168,28 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
         COPY_BACK(uint8_t);
 #undef COPY_BACK
         outputMemory->commit();
+        // Filter out don't cares
+        MixedTyped filtered_golden;
+        MixedTyped filtered_test;
+        filter<float>(golden, &filtered_golden, is_ignored);
+        filter<float>(test, &filtered_test, is_ignored);
+        filter<int32_t>(golden, &filtered_golden, is_ignored);
+        filter<int32_t>(test, &filtered_test, is_ignored);
+        filter<uint8_t>(golden, &filtered_golden, is_ignored);
+        filter<uint8_t>(test, &filtered_test, is_ignored);
         // We want "close-enough" results for float
-        for_each<float>(golden, [&test](int index, auto& golden_float) {
-            auto& test_float_operands = std::get<Float32Operands>(test);
+        for_each<float>(filtered_golden, [&filtered_test](int index, auto& golden_float) {
+            auto& test_float_operands = std::get<Float32Operands>(filtered_test);
             auto& test_float = test_float_operands[index];
             for (unsigned int i = 0; i < golden_float.size(); i++) {
                 SCOPED_TRACE(i);
                 EXPECT_NEAR(golden_float[i], test_float[i], 1.e-5);
             }
         });
-        EXPECT_EQ(std::get<Int32Operands>(golden), std::get<Int32Operands>(test));
-        EXPECT_EQ(std::get<Quant8Operands>(golden), std::get<Quant8Operands>(test));
+        EXPECT_EQ(std::get<Int32Operands>(filtered_golden), std::get<Int32Operands>(filtered_test));
+        EXPECT_EQ(std::get<Quant8Operands>(filtered_golden), std::get<Quant8Operands>(filtered_test));
     }
 }
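The substance of this change is that both the golden and the actual outputs are passed through filter<T>() with the new is_ignored predicate before any comparison, so outputs the test spec marks as "don't care" no longer feed into EXPECT_NEAR or EXPECT_EQ. The real filter helper lives in the shared generated-test utilities; the block below is only a minimal sketch of the semantics implied by the call sites above, and the operand typedefs (index-keyed maps of flattened values) are assumptions modeled on the names in the diff, not copied from the actual headers.

// Sketch only: illustrates what a filter<T> helper with this call signature
// could do. The MixedTyped layout below is an assumption for illustration.
#include <cstdint>
#include <functional>
#include <map>
#include <tuple>
#include <vector>

using Float32Operands = std::map<int, std::vector<float>>;    // assumed layout
using Int32Operands   = std::map<int, std::vector<int32_t>>;  // assumed layout
using Quant8Operands  = std::map<int, std::vector<uint8_t>>;  // assumed layout
using MixedTyped = std::tuple<Float32Operands, Int32Operands, Quant8Operands>;

// Copy only the outputs the test cares about: entries whose index the
// is_ignored predicate flags are simply not propagated into *filtered.
template <typename T>
void filter(const MixedTyped& from, MixedTyped* filtered,
            std::function<bool(int)> is_ignored) {
    const auto& src = std::get<std::map<int, std::vector<T>>>(from);
    auto& dst = std::get<std::map<int, std::vector<T>>>(*filtered);
    for (const auto& [index, values] : src) {
        if (!is_ignored(index)) {
            dst[index] = values;
        }
    }
}

Under that reading, filtered_golden and filtered_test simply never contain the ignored output indices, so the existing float, int32, and quant8 comparisons work unchanged on the remaining entries.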
neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp  +1 −1

@@ -34,7 +34,7 @@ namespace functional {
 using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
 using ::generated_tests::MixedTypedExampleType;
 namespace generated_tests {
-extern void Execute(const sp<IDevice>&, std::function<Model(void)>,
+extern void Execute(const sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
                     const std::vector<MixedTypedExampleType>&);
 }
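For callers, the only visible difference is the extra std::function<bool(int)> argument between the model factory and the example data. The following is a self-contained, purely illustrative stand-in (none of these names exist in the VTS sources) for the calling convention a generated test would follow: the predicate receives an output index and returns true when that output should be skipped.

// Hypothetical stand-in demonstrating the new parameter's role; ExecuteLike
// and the index list are invented for illustration, not real VTS symbols.
#include <functional>
#include <iostream>
#include <vector>

// Stand-in for the harness entry point, with the skip-predicate in place.
void ExecuteLike(std::function<bool(int)> is_ignored,
                 const std::vector<int>& output_indices) {
    for (int index : output_indices) {
        if (is_ignored(index)) {
            std::cout << "skipping don't-care output " << index << "\n";
            continue;
        }
        std::cout << "comparing output " << index << "\n";
    }
}

int main() {
    // A generated test would derive this from its spec; here output 1 is
    // treated as "don't care" purely for illustration.
    auto is_ignored = [](int index) { return index == 1; };
    ExecuteLike(is_ignored, {0, 1, 2});
    return 0;
}

A test whose spec ignores nothing can pass a predicate that always returns false, which reproduces the old behavior of comparing every output.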