Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8f583928 authored by Michael Butler; committed by Gerrit Code Review
Browse files

Merge changes from topic "nnapi-timings"

* changes:
  Remove 'blocking' param from NN ResilientPreparedModel and *Buffer
  Cleanup NN callback error handling
  Add HIDL lifetime and protecting callback info to NN README
  Change NN canonical timings to nanoseconds -- hal
parents 85959e16 bf59946c
Loading
Loading
Loading
Loading
+24 −4
Original line number Diff line number Diff line
@@ -27,8 +27,31 @@
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {

// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
// this function returns with the supported operations as indicated by a driver. On failure, this
// function returns with the appropriate nn::GeneralError.
nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
        ErrorStatus status, const hidl_vec<bool>& supportedOperations);

// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
// function returns with a non-null nn::SharedPreparedModel with a feature level of
// nn::Version::ANDROID_OC_MR1. On failure, this function returns with the appropriate
// nn::GeneralError.
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        ErrorStatus status, const sp<IPreparedModel>& preparedModel);

// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
// returns with an empty output shape vector and no timing information. On failure, this function
// returns with the appropriate nn::ExecutionError.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
        ErrorStatus status);

// A HIDL callback class to receive the results of IDevice::prepareModel asynchronously.
class PreparedModelCallback final : public IPreparedModelCallback,
                                    public hal::utils::IProtectedCallback {
  public:
@@ -41,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback,
    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

// A HIDL callback class to receive the results of IDevice::execute asynchronously.
class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
  public:
    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -57,8 +79,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP
    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

+4 −0
Original line number Diff line number Diff line
@@ -32,8 +32,12 @@
#include <string>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {

// Class that adapts V1_0::IDevice to nn::IDevice.
class Device final : public nn::IDevice {
    struct PrivateConstructorTag {};

+7 −3
Original line number Diff line number Diff line
@@ -29,8 +29,12 @@
#include <utility>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {

// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel {
    struct PrivateConstructorTag {};

@@ -44,13 +48,13 @@ class PreparedModel final : public nn::IPreparedModel {
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

    std::any getUnderlyingResource() const override;

+23 −30
Original line number Diff line number Diff line
@@ -27,69 +27,62 @@
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

#include <utility>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {
namespace {

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<IPreparedModel>& preparedModel) {
    return NN_TRY(utils::PreparedModel::create(preparedModel));
// Converts the results of IDevice::getSupportedOperations to the NN canonical format. On success,
// returns the driver-reported supported-operations vector unchanged. On failure (status != NONE),
// HANDLE_HAL_STATUS returns early from this function with the appropriate nn::GeneralError,
// appending the streamed message to the error description.
nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
    return supportedOperations;
}

// Converts the results of IDevice::prepareModel to the NN canonical format. On success, wraps the
// HIDL prepared model in a canonical PreparedModel (NN_TRY propagates any creation failure). On
// failure (status != NONE), HANDLE_HAL_STATUS returns early with the appropriate nn::GeneralError.
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
    // NOTE(review): unlike the old notify() path, a non-null check on preparedModel is not done
    // here — PreparedModel::create presumably rejects nullptr; confirm.
    return NN_TRY(PreparedModel::create(preparedModel));
}

}  // namespace
// Converts the results of IDevice::execute to the NN canonical format. On success, returns an
// empty output-shape vector and default (absent) timing, since V1_0 reports neither. On failure
// (status != NONE), HANDLE_HAL_STATUS returns early with the appropriate nn::ExecutionError.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
        ErrorStatus status) {
    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
    return {};
}

Return<void> PreparedModelCallback::notify(ErrorStatus status,
                                           const sp<IPreparedModel>& preparedModel) {
    if (status != ErrorStatus::NONE) {
        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}

void PreparedModelCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

// Retrieves the prepare-model result delivered via notify()/notifyAsDeadObject().
// mData.take() presumably blocks until a value has been put — confirm against
// hal::utils::TransferValue's contract.
PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}

// Stores the (success or error) result for later retrieval by get().
void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
    mData.put(std::move(result));
}

// ExecutionCallback methods begin here

Return<void> ExecutionCallback::notify(ErrorStatus status) {
    if (status != ErrorStatus::NONE) {
        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal({});
    }
    mData.put(executionCallback(status));
    return Void();
}

void ExecutionCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

// Retrieves the execution result delivered via notify()/notifyAsDeadObject().
// mData.take() presumably blocks until a value has been put — confirm against
// hal::utils::TransferValue's contract.
ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}

// Stores the (success or error) result for later retrieval by get().
void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
    mData.put(std::move(result));
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
+17 −34
Original line number Diff line number Diff line
@@ -31,6 +31,7 @@
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

#include <functional>
#include <memory>
@@ -38,27 +39,27 @@
#include <string>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {
namespace {

nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
// Converts the results of IDevice::getCapabilities to the NN canonical format. On success,
// converts the HIDL Capabilities struct to nn::Capabilities. On failure (status != NONE),
// HANDLE_HAL_STATUS returns early with the appropriate nn::GeneralError.
nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
                                                         const Capabilities& capabilities) {
    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
    return nn::convert(capabilities);
}

nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_0::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                 << "uninitialized";
    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
        if (status != ErrorStatus::NONE) {
            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status);
        } else {
            result = nn::convert(capabilities);
        }
    };
    auto cb = hal::utils::CallbackValue(capabilitiesCallback);

    const auto ret = device->getCapabilities(cb);
    HANDLE_TRANSPORT_FAILURE(ret);

    return result;
    return cb.take();
}

}  // namespace
@@ -74,7 +75,7 @@ nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name
               << "V1_0::utils::Device::create must have non-null device";
    }

    auto capabilities = NN_TRY(initCapabilities(device.get()));
    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
    return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
@@ -131,27 +132,12 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo

    const auto hidlModel = NN_TRY(convert(modelInShared));

    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                  << "uninitialized";
    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
        if (status != ErrorStatus::NONE) {
            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "getSupportedOperations failed with " << toString(status);
        } else if (supportedOperations.size() != model.main.operations.size()) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "getSupportedOperations returned vector of size "
                     << supportedOperations.size() << " but expected "
                     << model.main.operations.size();
        } else {
            result = supportedOperations;
        }
    };
    auto cb = hal::utils::CallbackValue(supportedOperationsCallback);

    const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
    HANDLE_TRANSPORT_FAILURE(ret);

    return result;
    return cb.take();
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -170,10 +156,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(

    const auto ret = kDevice->prepareModel(hidlModel, cb);
    const auto status = HANDLE_TRANSPORT_FAILURE(ret);
    if (status != ErrorStatus::NONE) {
        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
    }
    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);

    return cb->get();
}
Loading