neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h  +21 −4

@@ -32,6 +32,26 @@
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
+// this function returns with the supported operations as indicated by a driver. On failure, this
+// function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations);
+
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_OC_MR1. On failure, this function returns with the appropriate
+// nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with an empty output shape vector and no timing information. On failure, this function
+// returns with the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -44,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback,
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -60,8 +79,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
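Both callback classes hand their result from the HIDL binder thread to the caller through hal::utils::TransferValue<Data>, whose definition is not part of this change. As a rough mental model only, assuming one-shot put()/take() semantics like a promise/future pair, a minimal sketch could look like this (the real type may differ):

#include <condition_variable>
#include <mutex>
#include <optional>
#include <utility>

// Hypothetical sketch of a single-slot value transfer, assuming semantics
// similar to hal::utils::TransferValue: the binder thread calls put() once,
// and the caller blocks in take() until the value arrives.
template <typename Type>
class TransferValue final {
  public:
    void put(Type object) {
        {
            std::lock_guard guard(mMutex);
            // Keep the first value; a late dead-object notification racing a
            // real result would otherwise overwrite it.
            if (!mObject.has_value()) mObject.emplace(std::move(object));
        }
        mCondition.notify_all();
    }

    [[nodiscard]] Type take() {
        std::unique_lock lock(mMutex);
        mCondition.wait(lock, [this] { return mObject.has_value(); });
        Type object = std::move(*mObject);
        mObject.reset();
        return object;
    }

  private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::optional<Type> mObject;
};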
neuralnetworks/1.0/utils/src/Callbacks.cpp  +20 −30

@@ -27,6 +27,7 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
 #include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
@@ -36,63 +37,52 @@
 // lifetimes across processes and for protecting asynchronous calls across HIDL.
 namespace android::hardware::neuralnetworks::V1_0::utils {
-namespace {
 
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+    return supportedOperations;
 }
 
-}  // namespace
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status) {
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    return {};
+}
 
 Return<void> PreparedModelCallback::notify(ErrorStatus status,
                                            const sp<IPreparedModel>& preparedModel) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(ErrorStatus status) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(executionCallback(status));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
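The rewritten adapter functions lean on HANDLE_HAL_STATUS from the newly included nnapi/hal/HandleError.h, whose definition is not shown in this diff. Judging by the call sites, assuming it falls through on ErrorStatus::NONE and otherwise returns an NN_ERROR built from the canonical status (with the caller's streamed message appended), it plausibly expands to something like:

// Hypothetical expansion, not the actual macro from HandleError.h. The
// dangling-else shape lets the caller's trailing "<< ..." attach to the
// returned NN_ERROR stream on the failure path only.
#define HANDLE_HAL_STATUS(status)                                             \
    if ((status) == ErrorStatus::NONE) {                                      \
    } else                                                                    \
        return NN_ERROR(                                                      \
                nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE))

This keeps the error-mapping step (the nn::convert(status).value_or(GENERAL_FAILURE) dance that each call site previously repeated) in one place.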
neuralnetworks/1.0/utils/src/Device.cpp  +14 −34

@@ -31,6 +31,7 @@
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 #include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
 
 #include <functional>
 #include <memory>
@@ -44,24 +45,21 @@
 namespace android::hardware::neuralnetworks::V1_0::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_0::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
@@ -77,7 +75,7 @@ nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name
                << "V1_0::utils::Device::create must have non-null device";
     }
 
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
 
     return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
@@ -134,27 +132,12 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -173,10 +156,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
     const auto ret = kDevice->prepareModel(hidlModel, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
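Device.cpp replaces the hand-written result-capturing lambdas with hal::utils::CallbackValue wrapping the new adapter functions. Its definition lives in nnapi/hal/TransferValue.h and is not part of this diff; a plausible sketch, reusing the TransferValue sketch above and assuming an implicit conversion to the HIDL callback type plus a deduction guide so CallbackValue(capabilitiesCallback) deduces its arguments from a function pointer:

#include <functional>
#include <utility>

// Hypothetical sketch of the CallbackValue pattern: a callable that adapts
// the HIDL callback arguments with `callback` and parks the result in a
// TransferValue slot for the caller to take() after the HIDL call returns.
template <typename ReturnType, typename... ArgTypes>
class CallbackValue final {
  public:
    explicit CallbackValue(std::function<ReturnType(ArgTypes...)> callback)
        : mCallback(std::move(callback)) {}

    // Implicit conversion to the HIDL callback type. The lambda captures
    // `this`, so the (synchronous) HIDL call stores its adapted result in
    // this object rather than in a copy of it.
    /*implicit*/ operator std::function<void(ArgTypes...)>() {
        return [this](ArgTypes... args) { mValue.put(mCallback(args...)); };
    }

    // Retrieves the adapted result after the HIDL call returns.
    [[nodiscard]] ReturnType take() { return mValue.take(); }

  private:
    std::function<ReturnType(ArgTypes...)> mCallback;
    TransferValue<ReturnType> mValue;
};

// Deduction guide so `CallbackValue(capabilitiesCallback)` deduces the
// template arguments straight from a plain function pointer.
template <typename ReturnType, typename... ArgTypes>
CallbackValue(ReturnType (*)(ArgTypes...)) -> CallbackValue<ReturnType, ArgTypes...>;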
neuralnetworks/1.0/utils/src/PreparedModel.cpp  +2 −6

@@ -42,8 +42,7 @@ namespace android::hardware::neuralnetworks::V1_0::utils {
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
         sp<V1_0::IPreparedModel> preparedModel) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
@@ -71,10 +70,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
     const auto ret = kPreparedModel->execute(hidlRequest, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "execute failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
 
     auto result = NN_TRY(cb->get());
     NN_TRY(hal::utils::makeExecutionFailure(

neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h  +4 −0

@@ -51,6 +51,10 @@
 nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
 nn::GeneralResult<Model> convert(const nn::Model& model);
 nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
+
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
 
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_CONVERSIONS_H
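The three new V1_1 declarations cover V1_0 types, presumably so V1_1 code can convert canonical types without reaching into the V1_0 utils namespace directly. The corresponding definitions are not part of this diff; assuming they simply forward to the existing V1_0 conversions, they could look like:

// Hypothetical forwarding definitions (the actual Conversions.cpp is not in
// this diff); assumes V1_0::utils already provides these conversions.
nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
    return V1_0::utils::convert(deviceStatus);
}

nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
    return V1_0::utils::convert(request);
}

nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
    return V1_0::utils::convert(status);
}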