
Commit fe548fea authored by Michael Butler, committed by Automerger Merge Worker

Implement partial canonical Burst in NN util code am: 95331510

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1554337

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: Ifdded08bad42f3f0a61ccfe5cccfb21cdc72c633
parents 54c9f546 95331510
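For orientation, here is a minimal sketch of how a client of the canonical NN utils might exercise the path this change enables: create a burst from an already-prepared model via configureExecutionBurst() and run a request through it. The helper name executeViaBurst, the nn namespace alias, and the error handling are illustrative assumptions; only the interface calls (configureExecutionBurst, IBurst::execute, nn::MeasureTiming) come from the diffs below. V1_0 has no burst HAL of its own, so the new adapter simply forwards to the prepared model.

#include <nnapi/IBurst.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <optional>
#include <utility>
#include <vector>

namespace nn = ::android::nn;  // assumed alias, matching how the utils code refers to canonical types

// Hypothetical helper, for illustration only.
std::optional<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeViaBurst(
        const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
    // After this change, V1_0::utils::PreparedModel::configureExecutionBurst()
    // returns a Burst adapter instead of a "Not yet implemented" error.
    const nn::GeneralResult<nn::SharedBurst> burst = preparedModel->configureExecutionBurst();
    if (!burst.has_value()) {
        return std::nullopt;
    }
    // The adapter forwards straight to IPreparedModel::execute (see Burst::execute below).
    auto result = (*burst)->execute(request, nn::MeasureTiming::NO);
    if (!result.has_value()) {
        return std::nullopt;
    }
    return std::move(result).value();
}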
Burst.h (new file, V1_0 NN utils): +55 −0
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_BURST_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_BURST_H

#include <nnapi/IBurst.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <memory>
#include <optional>
#include <utility>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {

// Class that adapts nn::IPreparedModel to nn::IBurst.
class Burst final : public nn::IBurst {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const Burst>> create(
            nn::SharedPreparedModel preparedModel);

    Burst(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel);

    OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure) const override;

  private:
    const nn::SharedPreparedModel kPreparedModel;
};

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_BURST_H
PreparedModel.h (V1_0 NN utils): +2 −1
@@ -35,7 +35,8 @@
namespace android::hardware::neuralnetworks::V1_0::utils {

// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
-class PreparedModel final : public nn::IPreparedModel {
+class PreparedModel final : public nn::IPreparedModel,
+                            public std::enable_shared_from_this<PreparedModel> {
    struct PrivateConstructorTag {};

  public:
Burst.cpp (new file, V1_0 NN utils): +55 −0
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Burst.h"

#include <android-base/logging.h>
#include <nnapi/IBurst.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <memory>
#include <optional>
#include <utility>

namespace android::hardware::neuralnetworks::V1_0::utils {

nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
        nn::SharedPreparedModel preparedModel) {
    if (preparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "V1_0::utils::Burst::create must have non-null preparedModel";
    }

    return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(preparedModel));
}

Burst::Burst(PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel)
    : kPreparedModel(std::move(preparedModel)) {
    CHECK(kPreparedModel != nullptr);
}

Burst::OptionalCacheHold Burst::cacheMemory(const nn::Memory& /*memory*/) const {
    return nullptr;
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
        const nn::Request& request, nn::MeasureTiming measure) const {
    return kPreparedModel->execute(request, measure, {}, {});
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
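A side note on the construction pattern visible above: both Burst and the PreparedModel classes expose a static create() factory and take a PrivateConstructorTag in their constructor. The tag type is private, so outside code cannot call the constructor directly and all construction funnels through create() (where the null check lives), while std::make_shared still works because the constructor itself is public. A generic sketch of the idiom, using a made-up Widget type:

#include <memory>

class Widget {
    // Private tag: code outside Widget cannot name this type, so the public
    // constructor below is effectively only callable via Widget::create().
    struct PrivateConstructorTag {};

  public:
    static std::shared_ptr<const Widget> create(int value) {
        // Validation happens here; std::make_shared still works because the
        // constructor itself is public.
        return std::make_shared<const Widget>(PrivateConstructorTag{}, value);
    }

    Widget(PrivateConstructorTag /*tag*/, int value) : kValue(value) {}

  private:
    const int kValue;
};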
PreparedModel.cpp (V1_0 NN utils): +2 −1
@@ -16,6 +16,7 @@

#include "PreparedModel.h"

#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
#include "Utils.h"
@@ -91,7 +92,7 @@ PreparedModel::executeFenced(const nn::Request& /*request*/,
}

nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Not yet implemented";
+    return Burst::create(shared_from_this());
}

std::any PreparedModel::getUnderlyingResource() const {
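The shared_from_this() call above is why both PreparedModel headers in this change gain a std::enable_shared_from_this base: Burst::create needs an nn::SharedPreparedModel (a shared pointer to a const prepared model) that co-owns the current instance, and configureExecutionBurst() is a const member, so the const overload of shared_from_this() provides exactly that. This only works because the objects are always created through their shared_ptr factory. A minimal sketch with a made-up Node type:

#include <memory>

struct Node : std::enable_shared_from_this<Node> {
    std::shared_ptr<const Node> self() const {
        // Const overload returns shared_ptr<const Node>, analogous to the
        // const configureExecutionBurst() handing itself to Burst::create.
        return shared_from_this();
    }
};

int main() {
    const auto owned = std::make_shared<Node>();
    const auto alias = owned->self();  // OK: shares ownership with `owned`

    Node stackNode;
    // stackNode.self();  // would throw std::bad_weak_ptr (since C++17): the
    //                    // object is not managed by any shared_ptr
    return 0;
}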
PreparedModel.h (V1_2 NN utils): +2 −1
@@ -36,7 +36,8 @@
namespace android::hardware::neuralnetworks::V1_2::utils {

// Class that adapts V1_2::IPreparedModel to nn::IPreparedModel.
-class PreparedModel final : public nn::IPreparedModel {
+class PreparedModel final : public nn::IPreparedModel,
+                            public std::enable_shared_from_this<PreparedModel> {
    struct PrivateConstructorTag {};

  public: