Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ece0b71c authored by Michael Butler's avatar Michael Butler Committed by Automerger Merge Worker
Browse files

Merge changes from topic "nnapi-canonical-ahwb" am: 8548f574

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1593092

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: I14deb62e82c7629de5cb62c61bf043ada37ab95c
parents 243afb03 8548f574
Loading
Loading
Loading
Loading
+2 −4
Original line number | Diff line number | Diff line
@@ -154,7 +154,7 @@ GeneralResult<Model::OperandValues> unvalidatedConvert(const hidl_vec<uint8_t>&
}

GeneralResult<SharedMemory> unvalidatedConvert(const hidl_memory& memory) {
    return createSharedMemoryFromHidlMemory(memory);
    return hal::utils::createSharedMemoryFromHidlMemory(memory);
}

GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model) {
@@ -347,9 +347,7 @@ nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
}

nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
    CHECK(memory != nullptr);
    return hidl_memory(memory->name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory->handle)),
                       memory->size);
    return hal::utils::createHidlMemoryFromSharedMemory(memory);
}

nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
+9 −2
Original line number | Diff line number | Diff line
@@ -304,7 +304,11 @@ GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
}

GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& hidlHandle) {
    return hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle());
    if (hidlHandle.getNativeHandle() == nullptr) {
        return nullptr;
    }
    auto handle = NN_TRY(hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle()));
    return std::make_shared<const Handle>(std::move(handle));
}

GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
@@ -588,7 +592,10 @@ nn::GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
}

nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
    return hal::utils::hidlHandleFromSharedHandle(handle);
    if (handle == nullptr) {
        return {};
    }
    return hal::utils::hidlHandleFromSharedHandle(*handle);
}

nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
+1 −1
Original line number | Diff line number | Diff line
@@ -261,7 +261,7 @@ GeneralResult<Request::MemoryPool> unvalidatedConvert(
    using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
    switch (memoryPool.getDiscriminator()) {
        case Discriminator::hidlMemory:
            return createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
            return hal::utils::createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
        case Discriminator::token:
            return static_cast<Request::MemoryDomainToken>(memoryPool.token());
    }
+3 −1
Original line number | Diff line number | Diff line
@@ -21,12 +21,14 @@ cc_library_static {
    local_include_dirs: ["include/nnapi/hal/aidl/"],
    export_include_dirs: ["include"],
    static_libs: [
        "libarect",
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
    ],
    shared_libs: [
        "libhidlbase",
        "android.hardware.neuralnetworks-V1-ndk_platform",
        "libbinder_ndk",
        "libhidlbase",
        "libnativewindow",
    ],
}
+172 −25
Original line number | Diff line number | Diff line
@@ -18,6 +18,8 @@

#include <aidl/android/hardware/common/NativeHandle.h>
#include <android-base/logging.h>
#include <android/hardware_buffer.h>
#include <cutils/native_handle.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
@@ -27,6 +29,7 @@
#include <nnapi/Validation.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <vndk/hardware_buffer.h>

#include <algorithm>
#include <chrono>
@@ -127,6 +130,61 @@ GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
    return canonical;
}

GeneralResult<Handle> unvalidatedConvertHelper(const NativeHandle& aidlNativeHandle) {
    std::vector<base::unique_fd> fds;
    fds.reserve(aidlNativeHandle.fds.size());
    for (const auto& fd : aidlNativeHandle.fds) {
        const int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
            return NN_ERROR() << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    return Handle{.fds = std::move(fds), .ints = aidlNativeHandle.ints};
}

struct NativeHandleDeleter {
    void operator()(native_handle_t* handle) const {
        if (handle) {
            native_handle_close(handle);
            native_handle_delete(handle);
        }
    }
};

using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;

static nn::GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(
        const NativeHandle& handle) {
    std::vector<base::unique_fd> fds;
    fds.reserve(handle.fds.size());
    for (const auto& fd : handle.fds) {
        const int dupFd = dup(fd.get());
        if (dupFd == -1) {
            return NN_ERROR() << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    constexpr size_t kIntMax = std::numeric_limits<int>::max();
    CHECK_LE(handle.fds.size(), kIntMax);
    CHECK_LE(handle.ints.size(), kIntMax);
    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
                                                         static_cast<int>(handle.ints.size()));
    if (nativeHandle == nullptr) {
        return NN_ERROR() << "Failed to create native_handle";
    }
    for (size_t i = 0; i < fds.size(); ++i) {
        nativeHandle->data[i] = fds[i].release();
    }
    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);

    return UniqueNativeHandle(nativeHandle);
}

}  // anonymous namespace

GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
@@ -318,10 +376,64 @@ GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming) {
    return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
}

static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
    return (value + multiple - 1) / multiple * multiple;
}

GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
    VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
    if (memory.size > std::numeric_limits<uint32_t>::max()) {
        return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
    }

    if (memory.name != "hardware_buffer_blob") {
        return std::make_shared<const Memory>(Memory{
            .handle = NN_TRY(unvalidatedConvert(memory.handle)),
                .handle = NN_TRY(unvalidatedConvertHelper(memory.handle)),
                .size = static_cast<uint32_t>(memory.size),
                .name = memory.name,
        });
    }

    const auto size = static_cast<uint32_t>(memory.size);
    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
    const uint32_t width = size;
    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.

    const UniqueNativeHandle handle = NN_TRY(nativeHandleFromAidlHandle(memory.handle));
    const native_handle_t* nativeHandle = handle.get();

    // AHardwareBuffer_createFromHandle() might fail because an allocator
    // expects a specific stride value. In that case, we try to guess it by
    // aligning the width to small powers of 2.
    // TODO(b/174120849): Avoid stride assumptions.
    AHardwareBuffer* hardwareBuffer = nullptr;
    status_t status = UNKNOWN_ERROR;
    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
        const uint32_t stride = roundUpToMultiple(width, alignment);
        AHardwareBuffer_Desc desc{
                .width = width,
                .height = height,
                .layers = layers,
                .format = format,
                .usage = usage,
                .stride = stride,
        };
        status = AHardwareBuffer_createFromHandle(&desc, nativeHandle,
                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                                  &hardwareBuffer);
        if (status == NO_ERROR) {
            break;
        }
    }
    if (status != NO_ERROR) {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "Can't create AHardwareBuffer from handle. Error: " << status;
    }

    return std::make_shared<const Memory>(Memory{
            .handle = HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
            .size = static_cast<uint32_t>(memory.size),
            .name = memory.name,
    });
@@ -400,22 +512,7 @@ GeneralResult<ExecutionPreference> unvalidatedConvert(
}

GeneralResult<SharedHandle> unvalidatedConvert(const NativeHandle& aidlNativeHandle) {
    std::vector<base::unique_fd> fds;
    fds.reserve(aidlNativeHandle.fds.size());
    for (const auto& fd : aidlNativeHandle.fds) {
        int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
            return NN_ERROR() << "Failed to dup the fd";
        }
        fds.emplace_back(dupFd);
    }

    return std::make_shared<const Handle>(Handle{
            .fds = std::move(fds),
            .ints = aidlNativeHandle.ints,
    });
    return std::make_shared<const Handle>(NN_TRY(unvalidatedConvertHelper(aidlNativeHandle)));
}

GeneralResult<ExecutionPreference> convert(
@@ -508,13 +605,11 @@ nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
    return halObject;
}

}  // namespace

nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::Handle& handle) {
    common::NativeHandle aidlNativeHandle;
    aidlNativeHandle.fds.reserve(sharedHandle->fds.size());
    for (const auto& fd : sharedHandle->fds) {
        int dupFd = dup(fd.get());
    aidlNativeHandle.fds.reserve(handle.fds.size());
    for (const auto& fd : handle.fds) {
        const int dupFd = dup(fd.get());
        if (dupFd == -1) {
            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
            // here?
@@ -522,17 +617,69 @@ nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandl
        }
        aidlNativeHandle.fds.emplace_back(dupFd);
    }
    aidlNativeHandle.ints = sharedHandle->ints;
    aidlNativeHandle.ints = handle.ints;
    return aidlNativeHandle;
}

static nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
        const native_handle_t& handle) {
    common::NativeHandle aidlNativeHandle;

    aidlNativeHandle.fds.reserve(handle.numFds);
    for (int i = 0; i < handle.numFds; ++i) {
        const int dupFd = dup(handle.data[i]);
        if (dupFd == -1) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
        }
        aidlNativeHandle.fds.emplace_back(dupFd);
    }

    aidlNativeHandle.ints = std::vector<int>(&handle.data[handle.numFds],
                                             &handle.data[handle.numFds + handle.numInts]);

    return aidlNativeHandle;
}

}  // namespace

nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
    CHECK(sharedHandle != nullptr);
    return unvalidatedConvert(*sharedHandle);
}

nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory) {
    CHECK(memory != nullptr);
    if (memory->size > std::numeric_limits<int64_t>::max()) {
        return NN_ERROR() << "Memory size doesn't fit into int64_t.";
    }
    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
        return Memory{
                .handle = NN_TRY(unvalidatedConvert(*handle)),
                .size = static_cast<int64_t>(memory->size),
                .name = memory->name,
        };
    }

    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
    AHardwareBuffer_Desc bufferDesc;
    AHardwareBuffer_describe(ahwb, &bufferDesc);

    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
        CHECK_EQ(memory->size, bufferDesc.width);
        CHECK_EQ(memory->name, "hardware_buffer_blob");
    } else {
        CHECK_EQ(memory->size, 0u);
        CHECK_EQ(memory->name, "hardware_buffer");
    }

    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
    if (nativeHandle == nullptr) {
        return NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
                             "returned nullptr";
    }

    return Memory{
            .handle = NN_TRY(unvalidatedConvert(memory->handle)),
            .handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle)),
            .size = static_cast<int64_t>(memory->size),
            .name = memory->name,
    };
Loading