Loading neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h +4 −0 Original line number Diff line number Diff line Loading @@ -48,6 +48,10 @@ class Burst final : public nn::IBurst { const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalDuration& loopTimeoutDuration) const override; private: const nn::SharedPreparedModel kPreparedModel; }; Loading neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h 0 → 100644 +64 −0 Original line number Diff line number Diff line /* * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h> #include <nnapi/IExecution.h> #include <nnapi/Result.h> #include <nnapi/Types.h> #include <nnapi/hal/CommonUtils.h> #include <nnapi/hal/ProtectCallback.h> #include "PreparedModel.h" #include <memory> #include <utility> #include <vector> // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface // lifetimes across processes and for protecting asynchronous calls across HIDL. 
namespace android::hardware::neuralnetworks::V1_0::utils { class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> { struct PrivateConstructorTag {}; public: static nn::GeneralResult<std::shared_ptr<const Execution>> create( std::shared_ptr<const PreparedModel> preparedModel, Request request, hal::utils::RequestRelocation relocation); Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel, Request request, hal::utils::RequestRelocation relocation); nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute( const nn::OptionalTimePoint& deadline) const override; nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced( const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& timeoutDurationAfterFence) const override; private: const std::shared_ptr<const PreparedModel> kPreparedModel; const Request kRequest; const hal::utils::RequestRelocation kRelocation; }; } // namespace android::hardware::neuralnetworks::V1_0::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h +7 −0 Original line number Diff line number Diff line Loading @@ -57,10 +57,17 @@ class PreparedModel final : public nn::IPreparedModel, const nn::OptionalDuration& loopTimeoutDuration, const nn::OptionalDuration& timeoutDurationAfterFence) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override; std::any getUnderlyingResource() const override; nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal( const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const; private: 
const sp<V1_0::IPreparedModel> kPreparedModel; const hal::utils::DeathHandler kDeathHandler; Loading neuralnetworks/1.0/utils/src/Burst.cpp +6 −0 Original line number Diff line number Diff line Loading @@ -55,4 +55,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst:: return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); } nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalDuration& loopTimeoutDuration) const { return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration); } } // namespace android::hardware::neuralnetworks::V1_0::utils neuralnetworks/1.0/utils/src/Execution.cpp 0 → 100644 +72 −0 Original line number Diff line number Diff line /* * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "Execution.h" #include "Callbacks.h" #include "Conversions.h" #include "Utils.h" #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h> #include <android/hardware/neuralnetworks/1.0/types.h> #include <nnapi/IExecution.h> #include <nnapi/Result.h> #include <nnapi/TypeUtils.h> #include <nnapi/Types.h> #include <nnapi/hal/CommonUtils.h> #include <nnapi/hal/HandleError.h> #include <nnapi/hal/ProtectCallback.h> #include <memory> #include <utility> #include <vector> // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface // lifetimes across processes and for protecting asynchronous calls across HIDL. namespace android::hardware::neuralnetworks::V1_0::utils { nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create( std::shared_ptr<const PreparedModel> preparedModel, Request request, hal::utils::RequestRelocation relocation) { if (preparedModel == nullptr) { return NN_ERROR() << "V1_0::utils::Execution::create must have non-null preparedModel"; } return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel), std::move(request), std::move(relocation)); } Execution::Execution(PrivateConstructorTag /*tag*/, std::shared_ptr<const PreparedModel> preparedModel, Request request, hal::utils::RequestRelocation relocation) : kPreparedModel(std::move(preparedModel)), kRequest(std::move(request)), kRelocation(std::move(relocation)) {} nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute( const nn::OptionalTimePoint& /*deadline*/) const { return kPreparedModel->executeInternal(kRequest, kRelocation); } nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced( const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/, const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IExecution::computeFenced is not 
supported on 1.0 HAL service"; } } // namespace android::hardware::neuralnetworks::V1_0::utils Loading
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h +4 −0 Original line number Diff line number Diff line Loading @@ -48,6 +48,10 @@ class Burst final : public nn::IBurst { const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalDuration& loopTimeoutDuration) const override; private: const nn::SharedPreparedModel kPreparedModel; }; Loading
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h 0 → 100644 +64 −0 Original line number Diff line number Diff line /* * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h> #include <nnapi/IExecution.h> #include <nnapi/Result.h> #include <nnapi/Types.h> #include <nnapi/hal/CommonUtils.h> #include <nnapi/hal/ProtectCallback.h> #include "PreparedModel.h" #include <memory> #include <utility> #include <vector> // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface // lifetimes across processes and for protecting asynchronous calls across HIDL. 
namespace android::hardware::neuralnetworks::V1_0::utils {

// A reusable execution of a fixed request on a V1_0 prepared model.
// Created via Execution::create (the tag constructor prevents direct
// construction while still permitting std::make_shared).
// NOTE(review): presumably an adapter implementing nn::IExecution over the
// 1.0 HAL's single-shot execute path — confirm against PreparedModel.
class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
    struct PrivateConstructorTag {};

  public:
    // Validates preparedModel (must be non-null) and returns an immutable
    // Execution holding the request and its memory relocation by value.
    static nn::GeneralResult<std::shared_ptr<const Execution>> create(
            std::shared_ptr<const PreparedModel> preparedModel, Request request,
            hal::utils::RequestRelocation relocation);

    Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
              Request request, hal::utils::RequestRelocation relocation);

    // Runs the stored request; returns output shapes and timing on success.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
            const nn::OptionalTimePoint& deadline) const override;

    // Fenced execution entry point required by nn::IExecution.
    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
            const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

  private:
    const std::shared_ptr<const PreparedModel> kPreparedModel;
    const Request kRequest;
    const hal::utils::RequestRelocation kRelocation;
};

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h +7 −0 Original line number Diff line number Diff line Loading @@ -57,10 +57,17 @@ class PreparedModel final : public nn::IPreparedModel, const nn::OptionalDuration& loopTimeoutDuration, const nn::OptionalDuration& timeoutDurationAfterFence) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override; std::any getUnderlyingResource() const override; nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal( const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const; private: const sp<V1_0::IPreparedModel> kPreparedModel; const hal::utils::DeathHandler kDeathHandler; Loading
neuralnetworks/1.0/utils/src/Burst.cpp +6 −0 Original line number Diff line number Diff line Loading @@ -55,4 +55,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst:: return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); } nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalDuration& loopTimeoutDuration) const { return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration); } } // namespace android::hardware::neuralnetworks::V1_0::utils
neuralnetworks/1.0/utils/src/Execution.cpp 0 → 100644 +72 −0 Original line number Diff line number Diff line /* * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "Execution.h" #include "Callbacks.h" #include "Conversions.h" #include "Utils.h" #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h> #include <android/hardware/neuralnetworks/1.0/types.h> #include <nnapi/IExecution.h> #include <nnapi/Result.h> #include <nnapi/TypeUtils.h> #include <nnapi/Types.h> #include <nnapi/hal/CommonUtils.h> #include <nnapi/hal/HandleError.h> #include <nnapi/hal/ProtectCallback.h> #include <memory> #include <utility> #include <vector> // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface // lifetimes across processes and for protecting asynchronous calls across HIDL. 
namespace android::hardware::neuralnetworks::V1_0::utils {

// Builds an immutable Execution over a validated prepared model; the request
// and its relocation are captured by value and reused on every compute().
nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
        std::shared_ptr<const PreparedModel> preparedModel, Request request,
        hal::utils::RequestRelocation relocation) {
    // Reject a null model up front so the execution methods can forward to it
    // unconditionally.
    if (!preparedModel) {
        return NN_ERROR() << "V1_0::utils::Execution::create must have non-null preparedModel";
    }

    auto execution = std::make_shared<const Execution>(
            PrivateConstructorTag{}, std::move(preparedModel), std::move(request),
            std::move(relocation));
    return execution;
}

Execution::Execution(PrivateConstructorTag /*tag*/,
                     std::shared_ptr<const PreparedModel> preparedModel, Request request,
                     hal::utils::RequestRelocation relocation)
    : kPreparedModel(std::move(preparedModel)),
      kRequest(std::move(request)),
      kRelocation(std::move(relocation)) {}

// Delegates to the prepared model's internal execute path using the stored
// request. The deadline is unused: the 1.0 HAL has no deadline concept.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
        const nn::OptionalTimePoint& /*deadline*/) const {
    return kPreparedModel->executeInternal(kRequest, kRelocation);
}

// Fenced execution is not part of the 1.0 HAL, so this always fails.
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
        const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
        const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IExecution::computeFenced is not supported on 1.0 HAL service";
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils