neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp  +52 −14

@@ -74,7 +74,7 @@ namespace {
 enum class Executor { ASYNC, SYNC, BURST, FENCED };
-enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };
 enum class MemoryType { SHARED, DEVICE };

@@ -495,16 +495,18 @@ static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, cons
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
+                                                const OptionalTimeoutDuration& loopTimeoutDuration,
                                                 sp<ExecutionCallback>& callback) {
-    return preparedModel->execute_1_3(request, measure, {}, {}, callback);
+    return preparedModel->execute_1_3(request, measure, {}, loopTimeoutDuration, callback);
 }

 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
+                                                const OptionalTimeoutDuration& loopTimeoutDuration,
                                                 hidl_vec<OutputShape>* outputShapes,
                                                 Timing* timing) {
     ErrorStatus result;
     Return<void> ret = preparedModel->executeSynchronously_1_3(
-            request, measure, {}, {},
+            request, measure, {}, loopTimeoutDuration,
             [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                             const Timing& time) {
                 result = error;

@@ -545,6 +547,17 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         makeOutputInsufficientSize(/*outputIndex=*/0, &request);
     }

+    OptionalTimeoutDuration loopTimeoutDuration;
+    // OutputType::MISSED_DEADLINE is only used by
+    // TestKind::INTINITE_LOOP_TIMEOUT tests to verify that an infinite loop is
+    // aborted after a timeout.
+    if (testConfig.outputType == OutputType::MISSED_DEADLINE) {
+        // Override the default loop timeout duration with a small value to
+        // speed up test execution.
+        constexpr uint64_t kMillisecond = 1'000'000;
+        loopTimeoutDuration.nanoseconds(1 * kMillisecond);
+    }
+
     ErrorStatus executionStatus;
     hidl_vec<OutputShape> outputShapes;
     Timing timing;

@@ -554,8 +567,9 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             // launch execution
             sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
-                    preparedModel, request, testConfig.measureTiming, executionCallback);
+            Return<ErrorStatus> executionLaunchStatus =
+                    ExecutePreparedModel(preparedModel, request, testConfig.measureTiming,
+                                         loopTimeoutDuration, executionCallback);
             ASSERT_TRUE(executionLaunchStatus.isOk());
             EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

@@ -571,8 +585,9 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             SCOPED_TRACE("synchronous");

             // execute
-            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
-                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
+            Return<ErrorStatus> executionReturnStatus =
+                    ExecutePreparedModel(preparedModel, request, testConfig.measureTiming,
+                                         loopTimeoutDuration, &outputShapes, &timing);
             ASSERT_TRUE(executionReturnStatus.isOk());
             executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

@@ -612,7 +627,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             hidl_handle syncFenceHandle;
             sp<IFencedExecutionCallback> fencedCallback;
             Return<void> ret = preparedModel->executeFenced(
-                    request, {}, testConfig.measureTiming, {}, {}, {},
+                    request, {}, testConfig.measureTiming, {}, loopTimeoutDuration, {},
                     [&result, &syncFenceHandle, &fencedCallback](
                             ErrorStatus error, const hidl_handle& handle,
                             const sp<IFencedExecutionCallback>& callback) {

@@ -686,6 +701,11 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             ASSERT_FALSE(outputShapes[0].isSufficient);
             return;
+        case OutputType::MISSED_DEADLINE:
+            ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                        executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
+                    << "executionStatus = " << executionStatus;
+            return;
     }

     // Go through all outputs, check returned output shapes.

@@ -736,6 +756,12 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
             return;
         } break;
+        case TestKind::INTINITE_LOOP_TIMEOUT: {
+            outputTypesList = {OutputType::MISSED_DEADLINE};
+            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+            // Burst does not support V1_3 loop timeout.
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
+        } break;
     }

     for (const OutputType outputType : outputTypesList) {

@@ -794,7 +820,8 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
         case TestKind::GENERAL:
         case TestKind::DYNAMIC_SHAPE:
         case TestKind::MEMORY_DOMAIN:
-        case TestKind::FENCED_COMPUTE: {
+        case TestKind::FENCED_COMPUTE:
+        case TestKind::INTINITE_LOOP_TIMEOUT: {
             createPreparedModel(device, model, &preparedModel);
             if (preparedModel == nullptr) return;
             EvaluatePreparedModel(device, preparedModel, testModel, testKind);

@@ -863,24 +890,31 @@ class FencedComputeTest : public GeneratedTest {};
 // Tag for the dynamic output shape tests
 class QuantizationCouplingTest : public GeneratedTest {};

+// Tag for the loop timeout tests
+class InfiniteLoopTimeoutTest : public GeneratedTest {};
+
 TEST_P(GeneratedTest, Test) {
-    Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
+    Execute(kDevice, kTestModel, TestKind::GENERAL);
 }

 TEST_P(DynamicOutputShapeTest, Test) {
-    Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
+    Execute(kDevice, kTestModel, TestKind::DYNAMIC_SHAPE);
 }

 TEST_P(MemoryDomainTest, Test) {
-    Execute(kDevice, kTestModel, /*testKind=*/TestKind::MEMORY_DOMAIN);
+    Execute(kDevice, kTestModel, TestKind::MEMORY_DOMAIN);
 }

 TEST_P(FencedComputeTest, Test) {
-    Execute(kDevice, kTestModel, /*testKind=*/TestKind::FENCED_COMPUTE);
+    Execute(kDevice, kTestModel, TestKind::FENCED_COMPUTE);
 }

 TEST_P(QuantizationCouplingTest, Test) {
-    Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
+    Execute(kDevice, kTestModel, TestKind::QUANTIZATION_COUPLING);
 }

+TEST_P(InfiniteLoopTimeoutTest, Test) {
+    Execute(kDevice, kTestModel, TestKind::INTINITE_LOOP_TIMEOUT);
+}
+
 INSTANTIATE_GENERATED_TEST(GeneratedTest,

@@ -900,4 +934,8 @@ INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testMod
     return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
 });

+INSTANTIATE_GENERATED_TEST(InfiniteLoopTimeoutTest, [](const TestModel& testModel) {
+    return testModel.isInfiniteLoopTimeoutTest();
+});
+
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional


neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h  +3 −1

@@ -70,7 +70,9 @@ enum class TestKind {
     // Tests if quantized model with TENSOR_QUANT8_ASYMM produces the same result
     // (OK/SKIPPED/FAILED) as the model with all such tensors converted to
     // TENSOR_QUANT8_ASYMM_SIGNED.
-    QUANTIZATION_COUPLING
+    QUANTIZATION_COUPLING,
+    // Runs a test model and verifies that MISSED_DEADLINE_* is returned.
+    INTINITE_LOOP_TIMEOUT
 };

 void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
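Context for the MISSED_DEADLINE expectation: the loop timeout set above only has an effect if the driver checks it while executing WHILE control-flow operations and aborts once the timeout elapses. The sketch below is illustrative only, not the HAL implementation and not part of this change; runWhileLoop, evaluateCondition, and runBody are hypothetical stand-ins for the WHILE operation's condition and body models.

// Illustrative sketch of driver-side loop-timeout enforcement (hypothetical
// names, plain std::chrono instead of the HAL types).
#include <chrono>
#include <functional>

enum class SketchStatus { NONE, MISSED_DEADLINE_TRANSIENT };

// Runs a WHILE-style loop and aborts once loopTimeoutDuration has elapsed.
SketchStatus runWhileLoop(std::chrono::nanoseconds loopTimeoutDuration,
                          const std::function<bool()>& evaluateCondition,
                          const std::function<void()>& runBody) {
    const auto deadline = std::chrono::steady_clock::now() + loopTimeoutDuration;
    while (evaluateCondition()) {
        if (std::chrono::steady_clock::now() > deadline) {
            // An infinite loop never exits through its condition, so this
            // timeout check is the only way out; the test accepts either
            // MISSED_DEADLINE_TRANSIENT or MISSED_DEADLINE_PERSISTENT.
            return SketchStatus::MISSED_DEADLINE_TRANSIENT;
        }
        runBody();
    }
    return SketchStatus::NONE;
}

With the 1 ms override in EvaluatePreparedModel, a model whose loop condition never becomes false reaches this abort path almost immediately, which keeps the test fast.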
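The 1 ms override is written as a raw nanosecond count (kMillisecond = 1'000'000) because OptionalTimeoutDuration carries the timeout as a nanosecond value via the nanoseconds(...) setter used above. As a sketch only, not a suggested change to the test, the same value can be derived through std::chrono so the unit conversion is explicit:

#include <chrono>
#include <cstdint>

// Same value as `constexpr uint64_t kMillisecond = 1'000'000;` in the test,
// but computed from std::chrono durations instead of a literal.
constexpr uint64_t kLoopTimeoutNanos = static_cast<uint64_t>(
        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds(1))
                .count());
static_assert(kLoopTimeoutNanos == 1'000'000, "1 ms expressed in nanoseconds");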