Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 81663d4b authored by Miao Wang, committed by Android (Google) Code Review
Browse files

Merge "HAL interface for compilation and execution hints"

parents 11f7f7ef 0e671f3e
Loading
Loading
Loading
Loading
+6 −3
Original line number Diff line number Diff line
@@ -45,12 +45,15 @@ class Burst final : public nn::IBurst {

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration) const override;
            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const override;
            const nn::OptionalDuration& loopTimeoutDuration,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

  private:
    const nn::SharedPreparedModel kPreparedModel;
+3 −2
Original line number Diff line number Diff line
@@ -65,8 +65,9 @@ class Device final : public nn::IDevice {
    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
            const std::vector<nn::SharedHandle>& dataCache,
            const nn::CacheToken& token) const override;
            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+9 −4
Original line number Diff line number Diff line
@@ -49,18 +49,23 @@ class PreparedModel final : public nn::IPreparedModel,

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration) const override;
            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
            const nn::OptionalDuration& timeoutDurationAfterFence,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const override;
            const nn::OptionalDuration& loopTimeoutDuration,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;

+10 −5
Original line number Diff line number Diff line
@@ -50,15 +50,20 @@ Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& /*memory*/)

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalTimePoint& deadline,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
        const std::vector<nn::TokenValuePair>& hints,
        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
    return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, hints,
                                   extensionNameToPrefix);
}

nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalDuration& loopTimeoutDuration) const {
    return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
        const nn::OptionalDuration& loopTimeoutDuration,
        const std::vector<nn::TokenValuePair>& hints,
        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
    return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, hints,
                                                   extensionNameToPrefix);
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
+3 −1
Original line number Diff line number Diff line
@@ -143,7 +143,9 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
        const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
        const std::vector<nn::TokenValuePair>& /*hints*/,
        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
Loading