Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 49b5e4eb authored by Michael Butler's avatar Michael Butler
Browse files

Add dynamic interface casting to NN utility code

Prior to this CL, the NN utility code would always use the type of
IPreparedModel provided by IPreparedModelCallback::notify*. This means
that an IPreparedModel returned as a dynamic type of V1_X but static
type of V1_Y would be used by the utility code as V1_Y. This CL adds
dynamic casting, such that an IPreparedModel returned as a dynamic type
of V1_X but static type V1_Y will be dynamically cast to V1_X and used
as a V1_X::IPreparedModel.

This CL also adds the utility functions
V1_[0123]::convertFromNonCanonical to convert from a non-canonical type
to another non-canonical type by using canonical types as an
intermediate conversion "hop."

Bug: 178180472
Test: mma
Change-Id: I709b2a8944af2cc78b089aade55df1e2ab7b40cc
parent 1f417b1a
Loading
Loading
Loading
Loading
+6 −0
Original line number Original line Diff line number Diff line
@@ -44,6 +44,12 @@ bool valid(const Type& halObject) {
    return result.has_value();
    return result.has_value();
}
}


// Converts a non-canonical (HAL) object of another interface version to this
// version's HAL type, using the canonical NN type as an intermediate "hop":
// nn::convert(...) lifts the input to the canonical representation, and this
// namespace's convert(...) lowers it to the V1_0 HAL type. NN_TRY unwraps the
// intermediate GeneralResult, propagating any conversion failure to the
// caller. The trailing return type mirrors the two-step conversion so this
// overload only participates when both conversion hops exist for Type.
template <typename Type>
auto convertFromNonCanonical(const Type& nonCanonicalObject)
        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
}  // namespace android::hardware::neuralnetworks::V1_0::utils


#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_H
#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_H
+6 −0
Original line number Original line Diff line number Diff line
@@ -47,6 +47,12 @@ bool valid(const Type& halObject) {
    return result.has_value();
    return result.has_value();
}
}


// Converts a non-canonical (HAL) object of another interface version to this
// version's HAL type, using the canonical NN type as an intermediate "hop":
// nn::convert(...) lifts the input to the canonical representation, and this
// namespace's convert(...) lowers it to the V1_1 HAL type. NN_TRY unwraps the
// intermediate GeneralResult, propagating any conversion failure to the
// caller. The trailing return type mirrors the two-step conversion so this
// overload only participates when both conversion hops exist for Type.
template <typename Type>
auto convertFromNonCanonical(const Type& nonCanonicalObject)
        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
}

}  // namespace android::hardware::neuralnetworks::V1_1::utils
}  // namespace android::hardware::neuralnetworks::V1_1::utils


#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_H
#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_H
+6 −0
Original line number Original line Diff line number Diff line
@@ -54,6 +54,12 @@ bool valid(const Type& halObject) {
    return result.has_value();
    return result.has_value();
}
}


// Converts a non-canonical (HAL) object of another interface version to this
// version's HAL type, using the canonical NN type as an intermediate "hop":
// nn::convert(...) lifts the input to the canonical representation, and this
// namespace's convert(...) lowers it to the V1_2 HAL type. NN_TRY unwraps the
// intermediate GeneralResult, propagating any conversion failure to the
// caller. The trailing return type mirrors the two-step conversion so this
// overload only participates when both conversion hops exist for Type.
template <typename Type>
auto convertFromNonCanonical(const Type& nonCanonicalObject)
        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
}

}  // namespace android::hardware::neuralnetworks::V1_2::utils
}  // namespace android::hardware::neuralnetworks::V1_2::utils


#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_H
#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_H
+10 −1
Original line number Original line Diff line number Diff line
@@ -43,6 +43,15 @@
namespace android::hardware::neuralnetworks::V1_2::utils {
namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {
namespace {


// Handles a prepared-model callback result, dispatching on the DYNAMIC type
// of the returned IPreparedModel rather than its static (V1_0) type.
// castFrom performs a runtime HIDL interface downcast; withDefault(nullptr)
// yields nullptr when the object does not actually implement
// V1_2::IPreparedModel (or the cast's transport call failed), in which case
// we fall back to treating the model as plain V1_0.
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
    // Prefer the most-derived interface version the service actually returned.
    if (const auto dynamicPreparedModel =
                V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
        return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
    }
    return V1_0::utils::prepareModelCallback(status, preparedModel);
}

nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                     const Timing& timing) {
                                     const Timing& timing) {
@@ -72,7 +81,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executi


Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
    return Void();
}
}


+6 −0
Original line number Original line Diff line number Diff line
@@ -49,6 +49,12 @@ bool valid(const Type& halObject) {
    return result.has_value();
    return result.has_value();
}
}


// Converts a non-canonical (HAL) object of another interface version to this
// version's HAL type, using the canonical NN type as an intermediate "hop":
// nn::convert(...) lifts the input to the canonical representation, and this
// namespace's convert(...) lowers it to the V1_3 HAL type. NN_TRY unwraps the
// intermediate GeneralResult, propagating any conversion failure to the
// caller. The trailing return type mirrors the two-step conversion so this
// overload only participates when both conversion hops exist for Type.
template <typename Type>
auto convertFromNonCanonical(const Type& nonCanonicalObject)
        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils
}  // namespace android::hardware::neuralnetworks::V1_3::utils


#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_H
#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_H
Loading