Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 475a7ba7 authored by Michael Butler's avatar Michael Butler Committed by Automerger Merge Worker
Browse files

Merge changes Ifeffea05,I966f65a1 am: 104192c2 am: ca40766d am: c0bceee0 am: d038448a

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1944552

Change-Id: If3fb99d21670277bcf99f54d7f064d792e4ee0cc
parents 5a4bcb34 d038448a
Loading
Loading
Loading
Loading
+11 −1
Original line number Diff line number Diff line
@@ -112,11 +112,15 @@ GeneralResult<Priority> convert(const aidl_hal::Priority& priority);
GeneralResult<Request> convert(const aidl_hal::Request& request);
GeneralResult<Timing> convert(const aidl_hal::Timing& timing);
GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle);
GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc);

GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
GeneralResult<std::vector<OutputShape>> convert(
        const std::vector<aidl_hal::OutputShape>& outputShapes);
GeneralResult<std::vector<SharedHandle>> convert(
        const std::vector<ndk::ScopedFileDescriptor>& handles);
GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles);

GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec);

@@ -129,6 +133,7 @@ namespace nn = ::android::nn;
nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> unvalidatedConvert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRole);
nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType);
nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory);
nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
@@ -154,14 +159,16 @@ nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request);
nn::GeneralResult<RequestArgument> unvalidatedConvert(const nn::Request::Argument& requestArgument);
nn::GeneralResult<RequestMemoryPool> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool);
nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalTimePoint& optionalTimePoint);
nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SyncFence& syncFence);
nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SharedHandle& handle);
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities);
nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);

nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory);
nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
@@ -172,6 +179,8 @@ nn::GeneralResult<Request> convert(const nn::Request& request);
nn::GeneralResult<Timing> convert(const nn::Timing& timing);
nn::GeneralResult<int64_t> convert(const nn::OptionalDuration& optionalDuration);
nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& optionalTimePoint);
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
nn::GeneralResult<Extension> convert(const nn::Extension& extension);

nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
nn::GeneralResult<std::vector<OutputShape>> convert(
@@ -180,6 +189,7 @@ nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
        const std::vector<nn::SharedHandle>& handles);
nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
        const std::vector<nn::SyncFence>& syncFences);
nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions);

nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);

+128 −45
Original line number Diff line number Diff line
@@ -551,6 +551,10 @@ GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle) {
    return validatedConvert(handle);
}

// Converts an AIDL BufferDesc to its canonical counterpart, routing through
// validatedConvert so version-compliance checking is applied.
GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc) {
    return validatedConvert(bufferDesc);
}

// Converts a vector of AIDL Extensions to canonical form with validation;
// fails on the first element that cannot be converted.
GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension) {
    return validatedConvert(extension);
}
@@ -564,6 +568,15 @@ GeneralResult<std::vector<OutputShape>> convert(
    return validatedConvert(outputShapes);
}

// Converts a vector of NDK file descriptors to canonical SharedHandles with
// validation; fails on the first element that cannot be converted.
GeneralResult<std::vector<SharedHandle>> convert(
        const std::vector<ndk::ScopedFileDescriptor>& handles) {
    return validatedConvert(handles);
}

// Converts a vector of AIDL BufferRoles to canonical form with validation.
GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles) {
    return validatedConvert(roles);
}

GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec) {
    if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) {
        return NN_ERROR() << "Negative value passed to conversion from signed to unsigned";
@@ -576,42 +589,7 @@ GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec)
namespace aidl::android::hardware::neuralnetworks::utils {
namespace {

// Deduces the payload type produced by unvalidatedConvert() for a given input
// type, i.e. the T inside the nn::GeneralResult<T> that the overload returns.
template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

// Applies unvalidatedConvert element-wise over a vector, propagating the
// first conversion failure via NN_TRY.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> converted;
    converted.reserve(arguments.size());
    for (const auto& element : arguments) {
        converted.push_back(NN_TRY(unvalidatedConvert(element)));
    }
    return converted;
}

// Vector overload of unvalidatedConvert; thin forwarder so callers can use a
// uniform name for scalar and vector conversions.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    return unvalidatedConvertVec(arguments);
}

// Converts a single canonical object after first checking (compliantVersion)
// that it is representable in the targeted version of the AIDL HAL.
template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return utils::unvalidatedConvert(canonical);
}

// Validates and converts each element of a vector of canonical objects,
// failing on the first element that is non-compliant or unconvertible.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> converted(arguments.size());
    for (size_t index = 0; index < arguments.size(); ++index) {
        converted[index] = NN_TRY(validatedConvert(arguments[index]));
    }
    return converted;
}
using utils::unvalidatedConvert;

// Helper template for std::visit
template <class... Ts>
@@ -721,6 +699,74 @@ nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Unknown& /*memory
            operator nn::GeneralResult<Memory>();
}

// Converts canonical performance info to the AIDL PerformanceInfo parcelable.
// Pure field-by-field copy; cannot fail.
nn::GeneralResult<PerformanceInfo> unvalidatedConvert(
        const nn::Capabilities::PerformanceInfo& info) {
    PerformanceInfo performanceInfo;
    performanceInfo.execTime = info.execTime;
    performanceInfo.powerUsage = info.powerUsage;
    return performanceInfo;
}

// Converts one canonical OperandPerformance entry to AIDL, converting the
// operand type first and the performance info second (same order as before).
nn::GeneralResult<OperandPerformance> unvalidatedConvert(
        const nn::Capabilities::OperandPerformance& operandPerformance) {
    const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
    const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
    return OperandPerformance{.type = type, .info = info};
}

// Flattens a canonical OperandPerformanceTable into the AIDL vector
// representation, converting each entry in table order.
nn::GeneralResult<std::vector<OperandPerformance>> unvalidatedConvert(
        const nn::Capabilities::OperandPerformanceTable& table) {
    const auto& entries = table.asVector();
    std::vector<OperandPerformance> converted;
    converted.reserve(entries.size());
    for (const auto& entry : entries) {
        converted.push_back(NN_TRY(unvalidatedConvert(entry)));
    }
    return converted;
}

// Converts canonical extension operand-type information to the AIDL
// parcelable.
// NOTE(review): byteSize is narrowed to int32_t with a bare static_cast and no
// range check — values above INT32_MAX would wrap negative. Presumably
// upstream validation bounds byteSize; confirm before relying on it.
nn::GeneralResult<ExtensionOperandTypeInformation> unvalidatedConvert(
        const nn::Extension::OperandTypeInformation& info) {
    return ExtensionOperandTypeInformation{.type = info.type,
                                           .isTensor = info.isTensor,
                                           .byteSize = static_cast<int32_t>(info.byteSize)};
}

// Converts a canonical duration to the AIDL int64_t nanosecond count.
// Negative durations are rejected; counts above int64_t's maximum saturate
// instead of overflowing.
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
    if (duration < nn::Duration::zero()) {
        return NN_ERROR() << "Unable to convert invalid (negative) duration";
    }
    constexpr std::chrono::nanoseconds::rep kMaxRepresentable =
            std::numeric_limits<int64_t>::max();
    return static_cast<int64_t>(std::min(duration.count(), kMaxRepresentable));
}

// Deduces the payload type produced by unvalidatedConvert() for a given input
// type, i.e. the T inside the nn::GeneralResult<T> that the overload returns.
template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

// Applies unvalidatedConvert element-wise over a vector, propagating the
// first conversion failure via NN_TRY.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> converted;
    converted.reserve(arguments.size());
    for (const auto& element : arguments) {
        converted.push_back(NN_TRY(unvalidatedConvert(element)));
    }
    return converted;
}

// Converts a single canonical object after first checking (compliantVersion)
// that it is representable in the targeted version of the AIDL HAL.
template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return utils::unvalidatedConvert(canonical);
}

// Validates and converts each element of a vector of canonical objects,
// failing on the first element that is non-compliant or unconvertible.
template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> converted(arguments.size());
    for (size_t index = 0; index < arguments.size(); ++index) {
        converted[index] = NN_TRY(validatedConvert(arguments[index]));
    }
    return converted;
}

}  // namespace

nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken) {
@@ -743,6 +789,19 @@ nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRol
    };
}

// Converts a canonical DeviceType to the AIDL enum. The AIDL enum has no
// UNKNOWN value, so UNKNOWN (and any out-of-range value) is reported as an
// error; the remaining enumerators map by numeric value.
nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType) {
    switch (deviceType) {
        case nn::DeviceType::OTHER:
        case nn::DeviceType::CPU:
        case nn::DeviceType::GPU:
        case nn::DeviceType::ACCELERATOR:
            return static_cast<DeviceType>(deviceType);
        case nn::DeviceType::UNKNOWN:
            break;
    }
    return NN_ERROR() << "Invalid DeviceType " << deviceType;
}

// AIDL represents MeasureTiming as a plain bool: true iff timing is measured.
nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming) {
    return measureTiming == nn::MeasureTiming::YES;
}
@@ -956,15 +1015,6 @@ nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
    };
}

// Converts a canonical duration to the AIDL int64_t nanosecond count.
// Negative durations are invalid; counts above int64_t's maximum saturate.
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
    if (duration < nn::Duration::zero()) {
        return NN_ERROR() << "Unable to convert invalid (negative) duration";
    }
    // Saturate rather than overflow when the count exceeds int64_t's range.
    constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
    const auto count = duration.count();
    return static_cast<int64_t>(std::min(count, kIntMax));
}

nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration) {
    if (!optionalDuration.has_value()) {
        return kNoTiming;
@@ -989,6 +1039,23 @@ nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::Shared
    return ndk::ScopedFileDescriptor(duplicatedFd.release());
}

// Converts canonical Capabilities to the AIDL parcelable. Each nested
// performance structure is converted in declaration order via NN_TRY, so the
// first failing nested conversion aborts the whole conversion.
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
    return Capabilities{
            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
            .operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)),
            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
    };
}

// Converts a canonical vendor Extension description to the AIDL parcelable;
// the name copies directly, the operand-type table is converted element-wise.
nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
    const auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes));
    return Extension{.name = extension.name, .operandTypes = operandTypes};
}

// Converts a canonical cache token to AIDL bytes, with validation.
nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
    return validatedConvert(cacheToken);
}
@@ -997,6 +1064,10 @@ nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
    return validatedConvert(bufferDesc);
}

// Converts a canonical DeviceType to AIDL, with validation.
nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
    return validatedConvert(deviceType);
}

// Converts a canonical MeasureTiming to the AIDL bool form, with validation.
nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming) {
    return validatedConvert(measureTiming);
}
@@ -1037,6 +1108,14 @@ nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& outputShapes) {
    return validatedConvert(outputShapes);
}

// Converts canonical Capabilities to AIDL, with validation.
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

// Converts a canonical Extension to AIDL, with validation.
nn::GeneralResult<Extension> convert(const nn::Extension& extension) {
    return validatedConvert(extension);
}

// Converts a vector of canonical BufferRoles to AIDL, with validation;
// fails on the first element that cannot be converted.
nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
    return validatedConvert(bufferRoles);
}
@@ -1056,6 +1135,10 @@ nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
    return validatedConvert(syncFences);
}

// Converts a vector of canonical Extensions to AIDL, with validation.
nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions) {
    return validatedConvert(extensions);
}

nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
    if (!std::all_of(vec.begin(), vec.end(),
                     [](uint32_t v) { return v <= std::numeric_limits<int32_t>::max(); })) {
+42 −0
Original line number Diff line number Diff line
//
// Copyright (C) 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package {
    // See: http://go/android-license-faq
    // A large-scale-change added 'default_applicable_licenses' to import
    // all of the 'license_kinds' from "hardware_interfaces_license"
    // to get the below license kinds:
    //   SPDX-license-identifier-Apache-2.0
    default_applicable_licenses: ["hardware_interfaces_license"],
}

// Static library adapting canonical NNAPI interface objects to the AIDL NN
// HAL (see the adapter headers under include/nnapi/hal/aidl/).
cc_library_static {
    name: "neuralnetworks_utils_hal_adapter_aidl",
    defaults: [
        "neuralnetworks_use_latest_utils_hal_aidl",
        "neuralnetworks_utils_defaults",
    ],
    srcs: ["src/*"],
    local_include_dirs: ["include/nnapi/hal/aidl/"],
    export_include_dirs: ["include"],
    static_libs: [
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
    ],
    shared_libs: [
        // NDK binder runtime, required by the generated AIDL NDK backend.
        "libbinder_ndk",
    ],
}
+73 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H

#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

/**
 * A self-contained unit of work to be executed.
 */
using Task = std::function<void()>;

/**
 * A type-erased executor which executes a task asynchronously.
 *
 * This executor is also provided an optional deadline, which is the caller's expected upper
 * bound on the amount of time needed to complete the task. If needed, the Executor can retrieve
 * the Application ID (Android User ID) by calling AIBinder_getCallingUid in android/binder_ibinder.h.
 */
using Executor = std::function<void(Task, ::android::nn::OptionalTimePoint)>;

/**
 * Adapt an NNAPI canonical interface object to a AIDL NN HAL interface object.
 *
 * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
 * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
 *
 * @param device NNAPI canonical IDevice interface object to be adapted.
 * @param executor Type-erased executor to handle executing tasks asynchronously.
 * @return AIDL NN HAL IDevice interface object.
 */
std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor);

/**
 * Adapt an NNAPI canonical interface object to a AIDL NN HAL interface object.
 *
 * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
 * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
 *
 * This function uses a default executor, which will execute tasks from a detached thread.
 *
 * @param device NNAPI canonical IDevice interface object to be adapted.
 * @return AIDL NN HAL IDevice interface object.
 */
std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device);

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
+47 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/Memory.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBuffer.h>

#include <memory>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

// Class that adapts nn::IBuffer to BnBuffer.
class Buffer : public BnBuffer {
  public:
    // Wraps the provided canonical buffer; all AIDL calls are forwarded to it.
    // NOTE(review): presumably `buffer` must be non-null — confirm in the
    // implementation file.
    explicit Buffer(::android::nn::SharedBuffer buffer);

    // AIDL-facing copy operations; `dimensions` optionally reinterprets the
    // shape of `src` (semantics defined by the underlying nn::IBuffer).
    ndk::ScopedAStatus copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) override;
    ndk::ScopedAStatus copyTo(const Memory& dst) override;

  private:
    // Held const: the adapted buffer never changes for the adapter's lifetime.
    const ::android::nn::SharedBuffer kBuffer;
};

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
Loading