
Commit 9479bd15 authored by Xusong Wang, committed by android-build-merger

Merge "Add 1.2 NN HAL: IPreparedModel & callbacks." am: ba400a29

am: 67480880

Change-Id: Ib67e0a61ed2a9b1f8fb3936253ab91e8aa193663
parents 45791a34 67480880
Changed file: +34 −4
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Callbacks.h"
#include "Callbacks.h"
#include <android-base/logging.h>
#include <android-base/logging.h>


namespace android {
namespace android {
namespace hardware {
namespace hardware {
namespace neuralnetworks {
namespace neuralnetworks {
namespace V1_0 {
namespace V1_2 {
namespace implementation {
namespace implementation {


CallbackBase::CallbackBase() : mNotified(false) {}
CallbackBase::CallbackBase() : mNotified(false) {}
@@ -88,7 +104,15 @@ PreparedModelCallback::PreparedModelCallback() :
PreparedModelCallback::~PreparedModelCallback() {}
PreparedModelCallback::~PreparedModelCallback() {}


Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
                                           const sp<IPreparedModel>& preparedModel) {
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    mErrorStatus = errorStatus;
    mPreparedModel = preparedModel;
    CallbackBase::notify();
    return Void();
}

Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
                                               const sp<V1_2::IPreparedModel>& preparedModel) {
    mErrorStatus = errorStatus;
    mErrorStatus = errorStatus;
    mPreparedModel = preparedModel;
    mPreparedModel = preparedModel;
    CallbackBase::notify();
    CallbackBase::notify();
@@ -100,7 +124,7 @@ ErrorStatus PreparedModelCallback::getStatus() {
    return mErrorStatus;
    return mErrorStatus;
}
}


sp<IPreparedModel> PreparedModelCallback::getPreparedModel() {
sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
    wait();
    wait();
    return mPreparedModel;
    return mPreparedModel;
}
}
@@ -115,13 +139,19 @@ Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
    return Void();
    return Void();
}
}


Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) {
    mErrorStatus = errorStatus;
    CallbackBase::notify();
    return Void();
}

ErrorStatus ExecutionCallback::getStatus() {
ErrorStatus ExecutionCallback::getStatus() {
    wait();
    wait();
    return mErrorStatus;
    return mErrorStatus;
}
}


}  // namespace implementation
}  // namespace implementation
}  // namespace V1_0
}  // namespace V1_2
}  // namespace neuralnetworks
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace hardware
}  // namespace android
}  // namespace android
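
For context, a minimal driver-side sketch of the contract these callbacks implement: whichever asynchronous preparation path receives the callback must invoke exactly one of notify or notify_1_2, and must pass nullptr for the prepared model on failure. The reportPreparation helper below is hypothetical and not part of this change; only the IPreparedModelCallback API it calls comes from the diff above.

    // Hypothetical helper (illustration only): the "exactly one notify* call"
    // contract, seen from the driver's side.
    #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
    #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
    #include <android-base/logging.h>

    using ::android::sp;
    namespace nn = ::android::hardware::neuralnetworks;

    void reportPreparation(const sp<nn::V1_2::IPreparedModelCallback>& callback,
                           nn::V1_0::ErrorStatus status,
                           const sp<nn::V1_2::IPreparedModel>& preparedModel) {
        // A 1.2 caller prefers notify_1_2 so the receiver gets the V1_2
        // interface directly instead of having to castFrom a V1_0 handle.
        sp<nn::V1_2::IPreparedModel> result;
        if (status == nn::V1_0::ErrorStatus::NONE) {
            result = preparedModel;
        }
        auto ret = callback->notify_1_2(status, result);
        if (!ret.isOk()) {
            LOG(ERROR) << "notify_1_2 transport error: " << ret.description();
        }
    }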
Changed file: +53 −30
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
#include <functional>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
#include <mutex>
#include <thread>

namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
namespace implementation {

+using V1_0::ErrorStatus;
+
/**
 * The CallbackBase class is used internally by the NeuralNetworks runtime to
 * synchronize between different threads. An asynchronous task is launched
@@ -156,11 +176,11 @@ class CallbackBase {
 * asynchronously with respect to the runtime. If a calling thread calls wait*
 * or get* on a PreparedModelCallback object and the corresponding asynchronous
 * task has not finished preparing the model, the calling thread will block
- * until the asynchronous task has called notify. For more information on the
- * synchronization behavior, refer to the CallbackBase class.
+ * until the asynchronous task has either called notify or notify_1_2. For more
+ * information on the synchronization behavior, refer to the CallbackBase class.
 *
 * This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
 * IPreparedModelCallback. This callback object is passed as an argument to
 * IDevice::prepareModel.
 */
@@ -170,15 +190,15 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
    ~PreparedModelCallback() override;

    /**
-     * IPreparedModelCallback::notify marks the callback object with the return
-     * status of the asynchronous model preparation along with the prepared
-     * model, and calls CallbackBase::notify, enabling all prior and future
-     * wait* calls on the PreparedModelCallback object to proceed. For more
-     * information on the synchronization behavior, refer to the CallbackBase
-     * class.
+     * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
+     * mark the callback object with the return status of the asynchronous
+     * model preparation along with the prepared model, and call
+     * CallbackBase::notify, enabling all prior and future wait* calls on the
+     * PreparedModelCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
     *
-     * IPreparedModelCallback::notify must be called exactly once on a given
-     * PreparedModelCallback object.
+     * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
+     * must be called exactly once on a given PreparedModelCallback object.
     *
     * @param status Error status returned from asynchronously preparing the
     *               model; will be:
@@ -189,7 +209,9 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
     * @param preparedModel Returned model that has been prepared for execution,
     *                      nullptr if the model was unable to be prepared.
     */
-    Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+    Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;

    /**
     * Retrieves the error status returned from the asynchronous task launched
@@ -217,11 +239,11 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
     *                       execution, nullptr if the model was unable to be
     *                       prepared.
     */
-    sp<IPreparedModel> getPreparedModel();
+    sp<V1_0::IPreparedModel> getPreparedModel();

   private:
    ErrorStatus        mErrorStatus;
-    sp<IPreparedModel> mPreparedModel;
+    sp<V1_0::IPreparedModel> mPreparedModel;
};

/**
@@ -229,12 +251,12 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
 * execution from a task executing asynchronously with respect to the runtime.
 * If a calling thread calls wait* or get* on a PreparedModelCallback object and
 * the corresponding asynchronous task has not finished the execution, the
- * calling thread will block until the asynchronous task has called notify. For
- * more information on the synchronization behavior, refer to the CallbackBase
- * class.
+ * calling thread will block until the asynchronous task has either called notify
+ * or notify_1_2. For more information on the synchronization behavior, refer to
+ * the CallbackBase class.
 *
 * This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
 * IExecutionCallback. This callback object is passed as an argument to
 * IPreparedModel::execute.
 */
@@ -244,14 +266,14 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
    ~ExecutionCallback() override;

    /**
-     * IExecutionCallback::notify marks the callback object with the return
-     * status of the asynchronous execution that held this callback and enables
-     * all prior and future wait* calls on the ExecutionCallback object to
-     * proceed. For more information on the synchronization behavior, refer to
-     * the CallbackBase class.
+     * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
+     * callback object with the return status of the asynchronous execution that
+     * held this callback and enable all prior and future wait* calls on the
+     * ExecutionCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
     *
-     * IExecutionCallback::notify must be called exactly once on a given
-     * ExecutionCallback object.
+     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+     * be called exactly once on a given ExecutionCallback object.
     *
     * @param status Error status returned from asynchronously preparing the
     *               model; will be:
@@ -263,6 +285,7 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
     *               - INVALID_ARGUMENT if the input request is invalid
     */
    Return<void> notify(ErrorStatus status) override;
+    Return<void> notify_1_2(ErrorStatus status) override;

    /**
     * Retrieves the error status returned from the asynchronous task launched
@@ -299,7 +322,7 @@ std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep,Period>& t
}

}  // namespace implementation
-}  // namespace V1_0
+}  // namespace V1_2
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
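
A minimal consumer-side sketch of the wait*/get* pattern the header documents, assuming the callback object has already been handed to a driver through some IDevice::prepareModel* call; the waitForPreparedModel helper is illustrative, not part of the change.

    // Illustration only: block until the driver has called notify or
    // notify_1_2, then return the prepared model (nullptr on failure).
    #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>

    #include "Callbacks.h"

    using ::android::sp;
    namespace nn = ::android::hardware::neuralnetworks;
    using nn::V1_2::implementation::PreparedModelCallback;

    sp<nn::V1_0::IPreparedModel> waitForPreparedModel(
            const sp<PreparedModelCallback>& callback) {
        callback->wait();  // released by either notify or notify_1_2
        if (callback->getStatus() != nn::V1_0::ErrorStatus::NONE) {
            return nullptr;  // preparation failed
        }
        // getPreparedModel() still returns the V1_0 interface; a 1.2 consumer
        // can castFrom() the result when the driver reported via notify_1_2.
        return callback->getPreparedModel();
    }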
Changed file: +38 −8
@@ -24,6 +24,11 @@
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
@@ -34,8 +39,8 @@ namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::test_helper::bool8;
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
@@ -73,7 +78,18 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
+                                                const Request& request,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute(request, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+                                                const Request& request,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute_1_2(request, callback);
+}
+template <typename T_IPreparedModel>
+void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                           float fpRtol = 1e-5f) {
@@ -172,8 +188,9 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
        // launch execution
        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
        ASSERT_NE(nullptr, executionCallback.get());
-        Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(
-            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionCallback);
+        Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+            preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+            executionCallback);
        ASSERT_TRUE(executionLaunchStatus.isOk());
        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

@@ -199,6 +216,16 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
    }
}

+static void getPreparedModel(sp<PreparedModelCallback> callback,
+                             sp<V1_0::IPreparedModel>* preparedModel) {
+    *preparedModel = callback->getPreparedModel();
+}
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+                             sp<V1_2::IPreparedModel>* preparedModel) {
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+    *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
    V1_0::Model model = create_model();
@@ -224,7 +251,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_0::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -270,7 +298,8 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_0::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -316,7 +345,8 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    sp<V1_2::IPreparedModel> preparedModel;
+    getPreparedModel(preparedModelCallback, &preparedModel);

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
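
The overloaded ExecutePreparedModel and getPreparedModel helpers above let a single templated test body drive either interface generation; the key step is the castFrom upgrade, shown in isolation below. tryUpgradeToV1_2 is an illustrative name, not part of the change.

    // Illustration only: try to view a V1_0 prepared model through the V1_2
    // interface; returns nullptr if the remote object is V1_0-only or if the
    // cast fails at the transport level.
    #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
    #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>

    using ::android::sp;
    namespace nn = ::android::hardware::neuralnetworks;

    sp<nn::V1_2::IPreparedModel> tryUpgradeToV1_2(
            const sp<nn::V1_0::IPreparedModel>& model) {
        if (model == nullptr) {
            return nullptr;
        }
        // castFrom checks the remote interfaceChain; withDefault collapses a
        // transport error into nullptr, matching getPreparedModel above.
        return nn::V1_2::IPreparedModel::castFrom(model).withDefault(nullptr);
    }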
Changed file: +2 −2
@@ -40,8 +40,8 @@ namespace V1_0 {
namespace vts {
namespace functional {

-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

Changed file: +2 −2
@@ -27,8 +27,8 @@ namespace V1_0 {
namespace vts {
namespace functional {

-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;

///////////////////////// UTILITY FUNCTIONS /////////////////////////
