neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp  +37 −17

@@ -626,21 +626,28 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             ErrorStatus result;
             hidl_handle syncFenceHandle;
             sp<IFencedExecutionCallback> fencedCallback;
-            Return<void> ret = preparedModel->executeFenced(
-                    request, {}, testConfig.measureTiming, {}, loopTimeoutDuration, {},
-                    [&result, &syncFenceHandle, &fencedCallback](
+            auto callbackFunc = [&result, &syncFenceHandle, &fencedCallback](
                             ErrorStatus error, const hidl_handle& handle,
                             const sp<IFencedExecutionCallback>& callback) {
                 result = error;
                 syncFenceHandle = handle;
                 fencedCallback = callback;
-            });
+            };
+            Return<void> ret =
+                    preparedModel->executeFenced(request, {}, testConfig.measureTiming, {},
+                                                 loopTimeoutDuration, {}, callbackFunc);
             ASSERT_TRUE(ret.isOk());
             if (result != ErrorStatus::NONE) {
                 ASSERT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
                 ASSERT_EQ(fencedCallback, nullptr);
-                executionStatus = ErrorStatus::GENERAL_FAILURE;
+                executionStatus = result;
             } else if (syncFenceHandle.getNativeHandle()) {
+                // If a sync fence is returned, try start another run waiting for the sync fence.
+                ret = preparedModel->executeFenced(request, {syncFenceHandle},
+                                                   testConfig.measureTiming, {},
+                                                   loopTimeoutDuration, {}, callbackFunc);
+                ASSERT_TRUE(ret.isOk());
+                ASSERT_EQ(result, ErrorStatus::NONE);
                 waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
             }
             if (result == ErrorStatus::NONE) {

@@ -656,9 +663,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         }
     }
-    // The driver is allowed to reject executeFenced, and if they do, we should skip.
-    if ((testConfig.outputType != OutputType::FULLY_SPECIFIED ||
-         testConfig.executor == Executor::FENCED) &&
+    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
         executionStatus == ErrorStatus::GENERAL_FAILURE) {
         if (skipped != nullptr) {
             *skipped = true;

@@ -691,12 +696,22 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
                         outputShapes.size() == testModel.main.outputIndexes.size());
             break;
         case OutputType::UNSPECIFIED:
+            if (testConfig.executor == Executor::FENCED) {
+                // For Executor::FENCED, the output shape must be fully specified.
+                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                return;
+            }
             // If the model output operands are not fully specified, outputShapes must have
             // the same number of elements as the number of outputs.
             ASSERT_EQ(ErrorStatus::NONE, executionStatus);
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             break;
         case OutputType::INSUFFICIENT:
+            if (testConfig.executor == Executor::FENCED) {
+                // For Executor::FENCED, the output shape must be fully specified.
+                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                return;
+            }
             ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             ASSERT_FALSE(outputShapes[0].isSufficient);

@@ -739,12 +754,12 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         case TestKind::DYNAMIC_SHAPE: {
             outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
-            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
         } break;
         case TestKind::MEMORY_DOMAIN: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {MeasureTiming::NO};
-            executorList = {Executor::ASYNC, Executor::SYNC};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
             memoryType = MemoryType::DEVICE;
         } break;
         case TestKind::FENCED_COMPUTE: {

@@ -921,8 +936,13 @@ INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel
 INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
-INSTANTIATE_GENERATED_TEST(FencedComputeTest,
-                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
+    return !testModel.expectFailure &&
+           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
+                       [&testModel](uint32_t index) {
+                           return testModel.main.operands[index].data.size() > 0;
+                       });
+});
 
 INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
     return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
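For context, the fenced path these hunks exercise chains two executeFenced calls: the first run returns a sync fence, the second run lists that fence as a wait-for dependency, and the test then blocks on whatever fence the reused callback last stored before checking results. Below is a minimal sketch of that flow, assuming the surrounding VTS fixture from GeneratedTestHarness.cpp (preparedModel, request, testConfig, loopTimeoutDuration, and the waitForSyncFence helper are all provided there); it illustrates the pattern and is not a drop-in replacement for the test code.

// Sketch only: reuses the names visible in the diff above; error handling trimmed.
ErrorStatus result;
hidl_handle syncFenceHandle;
sp<IFencedExecutionCallback> fencedCallback;
auto callbackFunc = [&](ErrorStatus error, const hidl_handle& handle,
                        const sp<IFencedExecutionCallback>& callback) {
    result = error;            // driver status for launching the fenced run
    syncFenceHandle = handle;  // sync fence signaled when the run completes (may be empty)
    fencedCallback = callback; // callback object for querying the run afterwards
};

// First run: no wait-for fences.
Return<void> ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming, {},
                                                loopTimeoutDuration, {}, callbackFunc);
ASSERT_TRUE(ret.isOk());

if (result == ErrorStatus::NONE && syncFenceHandle.getNativeHandle() != nullptr) {
    // Second run: pass the first run's fence as a wait-for dependency.
    ret = preparedModel->executeFenced(request, {syncFenceHandle}, testConfig.measureTiming, {},
                                       loopTimeoutDuration, {}, callbackFunc);
    ASSERT_TRUE(ret.isOk());
    ASSERT_EQ(result, ErrorStatus::NONE);
    // callbackFunc overwrote syncFenceHandle with the second run's fence; block on it,
    // mirroring the waitForSyncFence call in the diff.
    waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
}

The reuse of callbackFunc is what lets the test propagate the driver's real error status (executionStatus = result) instead of collapsing every failure to GENERAL_FAILURE, and it is why the FencedComputeTest instantiation now filters out models whose outputs lack reference data.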