Loading neuralnetworks/1.0/vts/functional/Callbacks.cpp +8 −1 Original line number Diff line number Diff line Loading @@ -139,8 +139,10 @@ Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) { return Void(); } Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) { Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes) { mErrorStatus = errorStatus; mOutputShapes = outputShapes; CallbackBase::notify(); return Void(); } Loading @@ -150,6 +152,11 @@ ErrorStatus ExecutionCallback::getStatus() { return mErrorStatus; } const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() { wait(); return mOutputShapes; } } // namespace implementation } // namespace V1_2 } // namespace neuralnetworks Loading neuralnetworks/1.0/vts/functional/Callbacks.h +58 −11 Original line number Diff line number Diff line Loading @@ -275,8 +275,9 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback { * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must * be called exactly once on a given ExecutionCallback object. * * @param status Error status returned from asynchronously preparing the * model; will be: * @param status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). Must be: * - NONE if the asynchronous execution was successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error Loading @@ -285,27 +286,73 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback { * - INVALID_ARGUMENT if the input request is invalid */ Return<void> notify(ErrorStatus status) override; Return<void> notify_1_2(ErrorStatus status) override; /** * Similar to IExecutionCallback::notify, but for V1_2::IPreparedModel to * also notify output shapes along with error status. 
* * @param status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). Must be: * - NONE if the asynchronous execution was successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if the asynchronous task resulted in an * unspecified error * - OUTPUT_INSUFFICIENT_SIZE if at least one output * operand buffer is not large enough to store the * corresponding output * - INVALID_ARGUMENT if one of the input arguments to * prepareModel is invalid * @param outputShapes A list of shape information of model output operands. * The index into "outputShapes" corresponds to the index * of the output operand in the Request outputs vector. * outputShapes must be empty unless the status is either * NONE or OUTPUT_INSUFFICIENT_SIZE. */ Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes) override; /** * Retrieves the error status returned from the asynchronous task launched * by IPreparedModel::execute. If IPreparedModel::execute has not finished * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished * asynchronously executing, this call will block until the asynchronous task * notifies the object. * * @return status Error status returned from asynchronously preparing the * model; will be: * @return status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). 
Must be: * - NONE if the asynchronous execution was successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is * not large enough to store the resultant values * - INVALID_ARGUMENT if the input request is invalid * - GENERAL_FAILURE if the asynchronous task resulted in an * unspecified error * - OUTPUT_INSUFFICIENT_SIZE if at least one output * operand buffer is not large enough to store the * corresponding output * - INVALID_ARGUMENT if one of the input arguments to * prepareModel is invalid */ ErrorStatus getStatus(); /** * Retrieves the output shapes returned from the asynchronous task launched * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished * asynchronously executing, this call will block until the asynchronous task * notifies the object. * * If the asynchronous task was launched by IPreparedModel::execute, an empty vector * will be returned. * * @return outputShapes A list of shape information of model output operands. * The index into "outputShapes" corresponds to the index * of the output operand in the Request outputs vector. * outputShapes must be empty unless the status is either * NONE or OUTPUT_INSUFFICIENT_SIZE. 
*/ const std::vector<OutputShape>& getOutputShapes(); private: ErrorStatus mErrorStatus; std::vector<OutputShape> mOutputShapes; }; Loading neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +28 −9 Original line number Diff line number Diff line Loading @@ -89,13 +89,24 @@ static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& prepar sp<ExecutionCallback>& callback) { return preparedModel->execute_1_2(request, callback); } static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&) { static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&, hidl_vec<OutputShape>*) { ADD_FAILURE() << "asking for synchronous execution at V1_0"; return ErrorStatus::GENERAL_FAILURE; } static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel, const Request& request) { return preparedModel->executeSynchronously(request); const Request& request, hidl_vec<OutputShape>* outputShapes) { ErrorStatus result; Return<void> ret = preparedModel->executeSynchronously( request, [&result, &outputShapes](ErrorStatus error, const hidl_vec<OutputShape>& shapes) { result = error; *outputShapes = shapes; }); if (!ret.isOk()) { return ErrorStatus::GENERAL_FAILURE; } return result; } enum class Synchronously { NO, YES }; const float kDefaultAtol = 1e-5f; Loading Loading @@ -197,6 +208,8 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo inputMemory->commit(); outputMemory->commit(); ErrorStatus executionStatus; hidl_vec<OutputShape> outputShapes; if (sync == Synchronously::NO) { SCOPED_TRACE("asynchronous"); Loading @@ -211,18 +224,24 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo // retrieve execution status executionCallback->wait(); ErrorStatus executionReturnStatus = executionCallback->getStatus(); EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus); executionStatus = executionCallback->getStatus(); outputShapes = 
executionCallback->getOutputShapes(); } else { SCOPED_TRACE("synchronous"); // execute Return<ErrorStatus> executionStatus = ExecutePreparedModel( preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}); ASSERT_TRUE(executionStatus.isOk()); EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionStatus)); Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel( preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, &outputShapes); ASSERT_TRUE(executionReturnStatus.isOk()); executionStatus = static_cast<ErrorStatus>(executionReturnStatus); } ASSERT_EQ(ErrorStatus::NONE, executionStatus); // TODO(xusongw): Check if the returned output shapes match with expectation once the // sample driver implementation of dynamic output shape is finished. ASSERT_EQ(outputShapes.size(), 0); // validate results outputMemory->read(); copy_back(&test, outputs_info, outputPtr); Loading neuralnetworks/1.2/Android.bp +1 −0 Original line number Diff line number Diff line Loading @@ -27,6 +27,7 @@ hidl_interface { "Operation", "OperationType", "OperationTypeRange", "OutputShape", ], gen_java: false, } Loading neuralnetworks/1.2/IExecutionCallback.hal +10 −3 Original line number Diff line number Diff line Loading @@ -18,6 +18,7 @@ package android.hardware.neuralnetworks@1.2; import @1.0::ErrorStatus; import @1.0::IExecutionCallback; import OutputShape; /** * IExecutionCallback must be used to return the error status result from an Loading @@ -39,10 +40,16 @@ interface IExecutionCallback extends @1.0::IExecutionCallback { * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if the asynchronous task resulted in an * unspecified error * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is * not large enough to store the resultant values * - OUTPUT_INSUFFICIENT_SIZE if at least one output * operand buffer is not large enough to store the * corresponding output * - INVALID_ARGUMENT if one of the input 
arguments to * prepareModel is invalid * @param outputShapes A list of shape information of model output operands. * The index into "outputShapes" corresponds to the index * of the output operand in the Request outputs vector. * outputShapes must be empty unless the status is either * NONE or OUTPUT_INSUFFICIENT_SIZE. */ oneway notify_1_2(ErrorStatus status); oneway notify_1_2(ErrorStatus status, vec<OutputShape> outputShapes); }; Loading
neuralnetworks/1.0/vts/functional/Callbacks.cpp +8 −1 Original line number Diff line number Diff line Loading @@ -139,8 +139,10 @@ Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) { return Void(); } Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) { Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes) { mErrorStatus = errorStatus; mOutputShapes = outputShapes; CallbackBase::notify(); return Void(); } Loading @@ -150,6 +152,11 @@ ErrorStatus ExecutionCallback::getStatus() { return mErrorStatus; } const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() { wait(); return mOutputShapes; } } // namespace implementation } // namespace V1_2 } // namespace neuralnetworks Loading
neuralnetworks/1.0/vts/functional/Callbacks.h +58 −11 Original line number Diff line number Diff line Loading @@ -275,8 +275,9 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback { * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must * be called exactly once on a given ExecutionCallback object. * * @param status Error status returned from asynchronously preparing the * model; will be: * @param status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). Must be: * - NONE if the asynchronous execution was successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error Loading @@ -285,27 +286,73 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback { * - INVALID_ARGUMENT if the input request is invalid */ Return<void> notify(ErrorStatus status) override; Return<void> notify_1_2(ErrorStatus status) override; /** * Similar to IExecutionCallback::notify, but for V1_2::IPreparedModel to * also notify output shapes along with error status. * * @param status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). Must be: * - NONE if the asynchronous execution was successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if the asynchronous task resulted in an * unspecified error * - OUTPUT_INSUFFICIENT_SIZE if at least one output * operand buffer is not large enough to store the * corresponding output * - INVALID_ARGUMENT if one of the input arguments to * prepareModel is invalid * @param outputShapes A list of shape information of model output operands. * The index into "outputShapes" corresponds to the index * of the output operand in the Request outputs vector. 
* outputShapes must be empty unless the status is either * NONE or OUTPUT_INSUFFICIENT_SIZE. */ Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes) override; /** * Retrieves the error status returned from the asynchronous task launched * by IPreparedModel::execute. If IPreparedModel::execute has not finished * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished * asynchronously executing, this call will block until the asynchronous task * notifies the object. * * @return status Error status returned from asynchronously preparing the * model; will be: * @return status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). Must be: * - NONE if the asynchronous execution was successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is * not large enough to store the resultant values * - INVALID_ARGUMENT if the input request is invalid * - GENERAL_FAILURE if the asynchronous task resulted in an * unspecified error * - OUTPUT_INSUFFICIENT_SIZE if at least one output * operand buffer is not large enough to store the * corresponding output * - INVALID_ARGUMENT if one of the input arguments to * prepareModel is invalid */ ErrorStatus getStatus(); /** * Retrieves the output shapes returned from the asynchronous task launched * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished * asynchronously executing, this call will block until the asynchronous task * notifies the object. * * If the asynchronous task was launched by IPreparedModel::execute, an empty vector * will be returned. * * @return outputShapes A list of shape information of model output operands. 
* The index into "outputShapes" corresponds to the index * of the output operand in the Request outputs vector. * outputShapes must be empty unless the status is either * NONE or OUTPUT_INSUFFICIENT_SIZE. */ const std::vector<OutputShape>& getOutputShapes(); private: ErrorStatus mErrorStatus; std::vector<OutputShape> mOutputShapes; }; Loading
neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +28 −9 Original line number Diff line number Diff line Loading @@ -89,13 +89,24 @@ static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& prepar sp<ExecutionCallback>& callback) { return preparedModel->execute_1_2(request, callback); } static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&) { static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&, hidl_vec<OutputShape>*) { ADD_FAILURE() << "asking for synchronous execution at V1_0"; return ErrorStatus::GENERAL_FAILURE; } static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel, const Request& request) { return preparedModel->executeSynchronously(request); const Request& request, hidl_vec<OutputShape>* outputShapes) { ErrorStatus result; Return<void> ret = preparedModel->executeSynchronously( request, [&result, &outputShapes](ErrorStatus error, const hidl_vec<OutputShape>& shapes) { result = error; *outputShapes = shapes; }); if (!ret.isOk()) { return ErrorStatus::GENERAL_FAILURE; } return result; } enum class Synchronously { NO, YES }; const float kDefaultAtol = 1e-5f; Loading Loading @@ -197,6 +208,8 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo inputMemory->commit(); outputMemory->commit(); ErrorStatus executionStatus; hidl_vec<OutputShape> outputShapes; if (sync == Synchronously::NO) { SCOPED_TRACE("asynchronous"); Loading @@ -211,18 +224,24 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo // retrieve execution status executionCallback->wait(); ErrorStatus executionReturnStatus = executionCallback->getStatus(); EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus); executionStatus = executionCallback->getStatus(); outputShapes = executionCallback->getOutputShapes(); } else { SCOPED_TRACE("synchronous"); // execute Return<ErrorStatus> executionStatus = 
ExecutePreparedModel( preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}); ASSERT_TRUE(executionStatus.isOk()); EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionStatus)); Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel( preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, &outputShapes); ASSERT_TRUE(executionReturnStatus.isOk()); executionStatus = static_cast<ErrorStatus>(executionReturnStatus); } ASSERT_EQ(ErrorStatus::NONE, executionStatus); // TODO(xusongw): Check if the returned output shapes match with expectation once the // sample driver implementation of dynamic output shape is finished. ASSERT_EQ(outputShapes.size(), 0); // validate results outputMemory->read(); copy_back(&test, outputs_info, outputPtr); Loading
neuralnetworks/1.2/Android.bp +1 −0 Original line number Diff line number Diff line Loading @@ -27,6 +27,7 @@ hidl_interface { "Operation", "OperationType", "OperationTypeRange", "OutputShape", ], gen_java: false, } Loading
neuralnetworks/1.2/IExecutionCallback.hal +10 −3 Original line number Diff line number Diff line Loading @@ -18,6 +18,7 @@ package android.hardware.neuralnetworks@1.2; import @1.0::ErrorStatus; import @1.0::IExecutionCallback; import OutputShape; /** * IExecutionCallback must be used to return the error status result from an Loading @@ -39,10 +40,16 @@ interface IExecutionCallback extends @1.0::IExecutionCallback { * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if the asynchronous task resulted in an * unspecified error * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is * not large enough to store the resultant values * - OUTPUT_INSUFFICIENT_SIZE if at least one output * operand buffer is not large enough to store the * corresponding output * - INVALID_ARGUMENT if one of the input arguments to * prepareModel is invalid * @param outputShapes A list of shape information of model output operands. * The index into "outputShapes" corresponds to the index * of the output operand in the Request outputs vector. * outputShapes must be empty unless the status is either * NONE or OUTPUT_INSUFFICIENT_SIZE. */ oneway notify_1_2(ErrorStatus status); oneway notify_1_2(ErrorStatus status, vec<OutputShape> outputShapes); };