Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 782bb2c0 authored by David Gross's avatar David Gross Committed by Android (Google) Code Review
Browse files

Merge "Add @1.2::IPreparedModel::executeSynchronously() and corresponding VTS tests."

parents 3a405c59 49e41678
Loading
Loading
Loading
Loading
+48 −16
Original line number Diff line number Diff line
@@ -89,11 +89,22 @@ static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& prepar
                                                sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_2(request, callback);
}
// Overload-resolution dead end for the synchronous path: the V1_0 interface
// has no executeSynchronously(), so any test that lands here has selected an
// execution mode the model version cannot support. Record a test failure and
// report a generic error to the caller.
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&) {
    // Mark the running test as failed without aborting it.
    ADD_FAILURE() << "asking for synchronous execution at V1_0";
    return ErrorStatus::GENERAL_FAILURE;
}
// Synchronous-path dispatch helper: a V1_2 prepared model supports
// executeSynchronously(), so simply forward the request and hand the
// transport-wrapped status back to the caller.
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& model,
                                                const Request& req) {
    return model->executeSynchronously(req);
}
// Selects which execution path EvaluatePreparedModel exercises:
// NO  -> asynchronous execute/execute_1_2 with an ExecutionCallback,
// YES -> blocking executeSynchronously (V1_2 only).
enum class Synchronously { NO, YES };
// Default absolute and relative floating-point tolerances used when comparing
// computed outputs against the golden results (overridable per call).
const float kDefaultAtol = 1e-5f;
const float kDefaultRtol = 1e-5f;
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                           float fpRtol = 1e-5f) {
                           bool hasRelaxedFloat32Model = false, float fpAtol = kDefaultAtol,
                           float fpRtol = kDefaultRtol, Synchronously sync = Synchronously::NO) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

@@ -186,6 +197,9 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
        inputMemory->commit();
        outputMemory->commit();

        if (sync == Synchronously::NO) {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            ASSERT_NE(nullptr, executionCallback.get());
@@ -199,6 +213,15 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
            executionCallback->wait();
            ErrorStatus executionReturnStatus = executionCallback->getStatus();
            EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
        } else {
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionStatus = ExecutePreparedModel(
                preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
            ASSERT_TRUE(executionStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionStatus));
        }

        // validate results
        outputMemory->read();
@@ -216,6 +239,13 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
        }
    }
}
// Convenience overload: runs the full EvaluatePreparedModel with the default
// numeric tolerances (kDefaultAtol/kDefaultRtol), so callers only choose the
// execution path (asynchronous vs. synchronous) via `sync`.
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model, Synchronously sync) {
    EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
                          kDefaultRtol, sync);
}

static void getPreparedModel(sp<PreparedModelCallback> callback,
                             sp<V1_0::IPreparedModel>* preparedModel) {
@@ -363,7 +393,9 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
    ASSERT_NE(nullptr, preparedModel.get());

    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16);
                          model.relaxComputationFloat32toFloat16, Synchronously::NO);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16, Synchronously::YES);
}

}  // namespace generated_tests
+38 −2
Original line number Diff line number Diff line
@@ -51,8 +51,9 @@ interface IPreparedModel extends @1.0::IPreparedModel {
     * and complete successfully (ErrorStatus::NONE). There must be
     * no failure unless the device itself is in a bad state.
     *
     * Multiple threads can call the execute_1_2 function on the same IPreparedModel
     * object concurrently with different requests.
     * Any number of calls to the execute, execute_1_2, and executeSynchronously
     * functions, in any combination, may be made concurrently, even on the same
     * IPreparedModel object.
     *
     * @param request The input and output information on which the prepared
     *                model is to be executed.
@@ -71,4 +72,39 @@ interface IPreparedModel extends @1.0::IPreparedModel {
     */
    execute_1_2(Request request, IExecutionCallback callback)
        generates (ErrorStatus status);

    /**
     * Performs a synchronous execution on a prepared model.
     *
     * The execution is performed synchronously with respect to the caller.
     * executeSynchronously must verify the inputs to the function are
     * correct. If there is an error, executeSynchronously must immediately
     * return with the appropriate ErrorStatus value. If the inputs to the
     * function are valid and there is no error, executeSynchronously must
     * perform the execution, and must not return until the execution is
     * complete.
     *
     * If the prepared model was prepared from a model wherein all tensor
     * operands have fully specified dimensions, and the inputs to the function
     * are valid, then the execution should complete successfully
     * (ErrorStatus::NONE). There must be no failure unless the device itself is
     * in a bad state.
     *
     * Any number of calls to the execute, execute_1_2, and executeSynchronously
     * functions, in any combination, may be made concurrently, even on the same
     * IPreparedModel object.
     *
     * @param request The input and output information on which the prepared
     *                model is to be executed.
     * @return status Error status of the execution, must be:
     *                - NONE if execution is performed successfully
     *                - DEVICE_UNAVAILABLE if driver is offline or busy
     *                - GENERAL_FAILURE if there is an unspecified error
     *                - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
     *                  not large enough to store the resultant values
     *                - INVALID_ARGUMENT if one of the input arguments is
     *                  invalid
     */
    executeSynchronously(Request request)
        generates (ErrorStatus status);
};
+23 −12
Original line number Diff line number Diff line
@@ -97,7 +97,9 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
                     Request request, const std::function<void(Request*)>& mutation) {
    mutation(&request);
    SCOPED_TRACE(message + " [execute]");

    {
        SCOPED_TRACE(message + " [execute_1_2]");

        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
        ASSERT_NE(nullptr, executionCallback.get());
@@ -111,6 +113,15 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
    }

    {
        SCOPED_TRACE(message + " [executeSynchronously]");

        Return<ErrorStatus> executeStatus = preparedModel->executeSynchronously(request);
        ASSERT_TRUE(executeStatus.isOk());
        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
    }
}

// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.