Replace nn::NativeHandle with nn::SharedHandle throughout the NN HAL 1.0 and 1.1 utils: prepareModel and prepareModelFromCache now take vectors of nn::SharedHandle for the model and data caches, and the nn::Memory to hidl_memory conversion builds its handle through hal::utils::hidlHandleFromSharedHandle instead of copying a hidl_memory to force the native_handle_t to be cloned.
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h (+4 −4)

@@ -59,13 +59,13 @@ class Device final : public nn::IDevice {
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;

     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;

     nn::GeneralResult<nn::SharedBuffer> allocate(
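The change is confined to the canonical type layer: a SharedHandle reference-counts an owning handle wrapper, so a copied Model shares its cache handles instead of duplicating raw native_handle_t pointers of unclear ownership. A minimal sketch of the shapes involved, assuming the canonical Handle owns its file descriptors through base::unique_fd (the real definitions live in nnapi/Types.h and may differ in detail):

#include <android-base/unique_fd.h>
#include <memory>
#include <vector>

namespace nn {

// Sketch: an owning handle. The fds are closed exactly once, when the last
// SharedHandle referencing this Handle goes away.
struct Handle {
    std::vector<::android::base::unique_fd> fds;  // owned file descriptors
    std::vector<int> ints;                        // opaque integer payload
};

using SharedHandle = std::shared_ptr<const Handle>;

}  // namespace nn

Because the pointee is const, a SharedHandle can be copied freely across threads without synchronizing any mutation of the underlying handle.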
neuralnetworks/1.0/utils/src/Conversions.cpp (+2 −4)

@@ -290,10 +290,8 @@ nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& ope
 }

 nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
-    const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
-    // Copy memory to force the native_handle_t to be copied.
-    auto copiedMemory = hidlMemory;
-    return copiedMemory;
+    return hidl_memory(memory.name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory.handle)),
+                       memory.size);
 }

 nn::GeneralResult<Model> convert(const nn::Model& model) {
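The deleted lines built a hidl_memory that aliased the caller's native_handle_t and leaned on a copy of the whole hidl_memory to force the handle to be cloned; the replacement asks a helper for a freshly owned hidl_handle up front, and NN_TRY short-circuits out of convert with the helper's error if that fails. A hedged sketch of what such a SharedHandle to hidl_handle helper plausibly does; the real hal::utils::hidlHandleFromSharedHandle may differ:

#include <android-base/unique_fd.h>
#include <cutils/native_handle.h>
#include <hidl/HidlSupport.h>
#include <unistd.h>

#include <algorithm>
#include <vector>

using ::android::hardware::hidl_handle;

// Sketch only: duplicate the fds owned by the SharedHandle into a fresh
// native_handle_t, then hand ownership of that handle to the hidl_handle.
nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
    if (handle == nullptr) {
        return {};  // a null SharedHandle maps to an empty hidl_handle
    }

    std::vector<::android::base::unique_fd> fds;
    fds.reserve(handle->fds.size());
    for (const auto& fd : handle->fds) {
        const int dupFd = dup(fd);
        if (dupFd == -1) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    native_handle_t* nativeHandle = native_handle_create(fds.size(), handle->ints.size());
    if (nativeHandle == nullptr) {
        // The unique_fds close the duplicates on this early return.
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
    }
    for (size_t i = 0; i < fds.size(); ++i) {
        nativeHandle->data[i] = fds[i].release();
    }
    std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[fds.size()]);

    hidl_handle hidlHandle;
    hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
    return hidlHandle;
}

With shouldOwn=true the returned hidl_handle closes the duplicated fds in its destructor, so the canonical Handle and the HIDL object each own an independent set of descriptors and neither can double-close the other's.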
neuralnetworks/1.0/utils/src/Device.cpp (+4 −4)

@@ -157,8 +157,8 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =

@@ -181,8 +181,8 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
 }

 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IDevice::prepareModelFromCache not supported on 1.0 HAL service";
 }
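On a 1.0 service both cache vectors are ignored and prepareModelFromCache unconditionally fails, so callers only need the new vector type at the call site. A hypothetical usage sketch; device and model are assumed to come from elsewhere, and the DEFAULT enum aliases are assumptions about the canonical types:

#include <android-base/logging.h>

// Assumed in scope: a prepared nn::SharedDevice `device` and a canonical
// nn::Model `model`.
const std::vector<nn::SharedHandle> modelCache;  // ignored by a 1.0 driver
const std::vector<nn::SharedHandle> dataCache;   // ignored by a 1.0 driver
const nn::CacheToken token = {};

const auto prepared = device->prepareModel(model, nn::ExecutionPreference::DEFAULT,
                                           nn::Priority::DEFAULT, /*deadline=*/{}, modelCache,
                                           dataCache, token);
if (!prepared.has_value()) {
    LOG(ERROR) << "prepareModel failed: " << prepared.error().message;
}

// Always an error on 1.0; the text streamed through NN_ERROR above is what
// shows up in error().message.
const auto fromCache = device->prepareModelFromCache(/*deadline=*/{}, modelCache, dataCache, token);
CHECK(!fromCache.has_value());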
neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h (+4 −4)

@@ -59,13 +59,13 @@ class Device final : public nn::IDevice {
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;

     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;

     nn::GeneralResult<nn::SharedBuffer> allocate(
neuralnetworks/1.1/utils/src/Device.cpp (+4 −4)

@@ -159,8 +159,8 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =

@@ -184,8 +184,8 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
 }

 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IDevice::prepareModelFromCache not supported on 1.1 HAL service";
 }
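Both version-specific stubs rely on the same error-building idiom, which is worth spelling out once. A minimal sketch of the NN_ERROR behavior assumed above, with the macro itself taken on faith from nnapi/Result.h:

#include <android-base/logging.h>

// NN_ERROR(code) << ... builds the unexpected branch of a GeneralResult by
// streaming the message parts; nothing throws and no model is prepared.
const nn::GeneralResult<nn::SharedPreparedModel> result =
        NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
        << "IDevice::prepareModelFromCache not supported on 1.1 HAL service";

CHECK(!result.has_value());
CHECK(result.error().code == nn::ErrorStatus::GENERAL_FAILURE);
// result.error().message holds the streamed text shown above.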