Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8fa53834 authored by Michael Butler's avatar Michael Butler
Browse files

Move Aidl utility code to aidl/utils -- hal 1/2

This change is part of an effort to remove HAL types from
libneuralnetworks_common*. This change moves the following files to
hardware/interfaces/neuralnetworks/aidl/utils:
* AidlBufferTracker.h
* AidlHalInterfaces.h
* AidlHalUtils.h
* AidlValidateHal.h

Bug: 191442336
Test: mma
Change-Id: I799d8ba761c004af963fc6bc044125d8828f2053
parent be7f81f5
Loading
Loading
Loading
Loading
+119 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H

#include <android-base/macros.h>
#include <android-base/thread_annotations.h>

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <stack>
#include <utility>
#include <vector>

#include "AidlHalInterfaces.h"
#include "AidlValidateHal.h"

namespace android::nn {

// This class manages a CPU buffer allocated on heap and provides validation methods.
// The buffer size, allowed roles, and operand metadata are fixed at creation time;
// only the dimensions and the "initialized" flag may change afterwards, and those
// are guarded by an internal mutex so the class is safe to share across threads.
class AidlManagedBuffer {
  public:
    // Creates a managed buffer of "size" bytes for the given roles and operand.
    // Returns nullptr if the heap allocation fails or if "operand" is an
    // extension operand type (extensions are unsupported by this class).
    static std::shared_ptr<AidlManagedBuffer> create(uint32_t size,
                                                     std::set<AidlHalPreparedModelRole> roles,
                                                     const Operand& operand);

    // Prefer AidlManagedBuffer::create.
    AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
                      std::set<AidlHalPreparedModelRole> roles, const Operand& operand);

    // Raw pointer to the underlying storage; valid for the lifetime of this object.
    uint8_t* getPointer() const { return kBuffer.get(); }
    // Size of the underlying storage in bytes.
    uint32_t getSize() const { return kSize; }

    // "poolIndex" is the index of this buffer in the request.pools.
    // Checks that every request argument backed by this pool matches a role
    // that was declared when the buffer was allocated.
    ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
                                const aidl_hal::IPreparedModel* preparedModel) const;

    // "size" is the byte size of the Memory provided to the copyFrom or copyTo method.
    ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
    ErrorStatus validateCopyTo(uint32_t size) const;

    // Merges "dimensions" with the initial dimensions and stores the result;
    // returns false if the two are incompatible.
    bool updateDimensions(const std::vector<uint32_t>& dimensions);
    // Marks whether the buffer currently holds valid data (readable as an
    // input or as a copyTo source).
    void setInitialized(bool initialized);

  private:
    mutable std::mutex mMutex;
    // Immutable after construction (k-prefixed members).
    const std::unique_ptr<uint8_t[]> kBuffer;
    const uint32_t kSize;
    const std::set<AidlHalPreparedModelRole> kRoles;
    const OperandType kOperandType;
    const std::vector<uint32_t> kInitialDimensions;
    // Mutable state, protected by mMutex.
    std::vector<uint32_t> mUpdatedDimensions GUARDED_BY(mMutex);
    bool mInitialized GUARDED_BY(mMutex) = false;
};

// Keep track of all AidlManagedBuffers and assign each with a unique token.
class AidlBufferTracker : public std::enable_shared_from_this<AidlBufferTracker> {
    DISALLOW_COPY_AND_ASSIGN(AidlBufferTracker);

  public:
    // A RAII class to help manage the lifetime of the token.
    // It is only supposed to be constructed in AidlBufferTracker::add.
    // Destroying the Token returns its token id to the tracker for reuse.
    class Token {
        DISALLOW_COPY_AND_ASSIGN(Token);

      public:
        // The shared_ptr keeps the tracker alive for as long as this Token exists.
        Token(uint32_t token, std::shared_ptr<AidlBufferTracker> tracker)
            : kToken(token), kBufferTracker(std::move(tracker)) {}
        // Releases the token (and the buffer slot it maps to) back to the tracker.
        ~Token() { kBufferTracker->free(kToken); }
        uint32_t get() const { return kToken; }

      private:
        const uint32_t kToken;
        const std::shared_ptr<AidlBufferTracker> kBufferTracker;
    };

    // The factory of AidlBufferTracker. This ensures that the AidlBufferTracker is always managed
    // by a shared_ptr.
    static std::shared_ptr<AidlBufferTracker> create() {
        return std::make_shared<AidlBufferTracker>();
    }

    // Prefer AidlBufferTracker::create.
    // Seeds the map with one null entry so that token 0 is never handed out.
    AidlBufferTracker() : mTokenToBuffers(1) {}

    // Registers "buffer" and returns a Token owning its unique id, or nullptr
    // if "buffer" is null.
    std::unique_ptr<Token> add(std::shared_ptr<AidlManagedBuffer> buffer);
    // Returns the buffer registered under "token", or nullptr for an unknown token.
    std::shared_ptr<AidlManagedBuffer> get(uint32_t token) const;

  private:
    // Invoked from Token::~Token; marks the slot empty and recycles the token.
    void free(uint32_t token);

    mutable std::mutex mMutex;
    // Tokens released by free(), available for reuse by a subsequent add().
    std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens GUARDED_BY(mMutex);

    // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping.
    // The index of the vector is the token. When the token gets freed, the corresponding entry is
    // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token.
    std::vector<std::shared_ptr<AidlManagedBuffer>> mTokenToBuffers GUARDED_BY(mMutex);
};

}  // namespace android::nn

#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H
+72 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
#include <aidl/android/hardware/neuralnetworks/DataLocation.h>
#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
#include <aidl/android/hardware/neuralnetworks/Extension.h>
#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h>
#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h>
#include <aidl/android/hardware/neuralnetworks/FusedActivationFunc.h>
#include <aidl/android/hardware/neuralnetworks/IBuffer.h>
#include <aidl/android/hardware/neuralnetworks/IDevice.h>
#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
#include <aidl/android/hardware/neuralnetworks/Memory.h>
#include <aidl/android/hardware/neuralnetworks/Model.h>
#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
#include <aidl/android/hardware/neuralnetworks/Operand.h>
#include <aidl/android/hardware/neuralnetworks/OperandExtraParams.h>
#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h>
#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h>
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
#include <aidl/android/hardware/neuralnetworks/Operation.h>
#include <aidl/android/hardware/neuralnetworks/OperationType.h>
#include <aidl/android/hardware/neuralnetworks/OutputShape.h>
#include <aidl/android/hardware/neuralnetworks/PerformanceInfo.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <aidl/android/hardware/neuralnetworks/RequestArgument.h>
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <aidl/android/hardware/neuralnetworks/Subgraph.h>
#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
#include <aidl/android/hardware/neuralnetworks/Timing.h>

namespace android::nn {

// Convenience alias for the AIDL-generated NN HAL namespace, so utility code
// can write aidl_hal::Model, aidl_hal::Operand, etc.
namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;

// Default execution priority for the AIDL HAL.
// NOTE(review): presumably mirrors the canonical default priority used by the
// runtime — confirm against the canonical nnapi types.
inline constexpr aidl_hal::Priority kDefaultPriorityAidl = aidl_hal::Priority::MEDIUM;

}  // namespace android::nn

#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H
+52 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H

#include <vector>

#include "AidlHalInterfaces.h"

namespace android {
namespace nn {

// Return a vector with one entry for each non-extension OperandType except
// SUBGRAPH, set to the specified PerformanceInfo value.  The vector will be
// sorted by OperandType.
//
// Control flow (OperandType::SUBGRAPH) operation performance is specified
// separately using Capabilities::ifPerformance and
// Capabilities::whilePerformance.
std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
        aidl_hal::PerformanceInfo perf);

// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value.  The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
            aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf);

// Returns true if an operand type is an extension type.
bool isExtensionOperandType(aidl_hal::OperandType type);

// Returns true if an operand type is a scalar type.
// NOTE(review): the name suggests extension types are excluded — verify
// against the implementation.
bool isNonExtensionScalar(aidl_hal::OperandType type);

}  // namespace nn
}  // namespace android

#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H
+46 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H

#include <cstdint>
#include <functional>
#include <memory>
#include <set>
#include <tuple>
#include <vector>

#include "AidlHalInterfaces.h"
#include "nnapi/TypeUtils.h"
#include "nnapi/Validation.h"

namespace android {
namespace nn {

// One role a device memory plays for a prepared model: the model, whether the
// memory backs an input or an output, and the io index within that model.
using AidlHalPreparedModelRole = std::tuple<const aidl_hal::IPreparedModel*, IOType, uint32_t>;

// Validates a buffer descriptor and its input/output roles against the given
// prepared models. "getModel" maps a prepared model to its Model (behavior on
// a null return should be confirmed with the implementation). On success,
// "preparedModelRoles" receives the set of validated roles and
// "combinedOperand" receives the operand metadata combined across all roles.
bool validateMemoryDesc(
        const aidl_hal::BufferDesc& desc,
        const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
        const std::vector<aidl_hal::BufferRole>& inputRoles,
        const std::vector<aidl_hal::BufferRole>& outputRoles,
        std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
                getModel,
        std::set<AidlHalPreparedModelRole>* preparedModelRoles, aidl_hal::Operand* combinedOperand);

}  // namespace nn
}  // namespace android

#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H
+227 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "AidlBufferTracker.h"

#include <android-base/macros.h>

#include <memory>
#include <mutex>
#include <set>
#include <stack>
#include <utility>
#include <vector>

#include "AidlHalInterfaces.h"
#include "nnapi/TypeUtils.h"

namespace android::nn {

// Factory for AidlManagedBuffer. Returns nullptr if "operand" is an extension
// operand type (unsupported) or if the heap allocation fails.
std::shared_ptr<AidlManagedBuffer> AidlManagedBuffer::create(
        uint32_t size, std::set<AidlHalPreparedModelRole> roles, const Operand& operand) {
    // Reject extension operands before paying for the allocation: the check is
    // cheap and "size" may be large.
    if (isExtension(operand.type)) {
        LOG(ERROR) << "AidlManagedBuffer cannot handle extension operands.";
        return nullptr;
    }
    // Use the nothrow form so an allocation failure surfaces as nullptr rather
    // than an exception, matching this API's null-on-error convention.
    std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]);
    if (buffer == nullptr) {
        return nullptr;
    }
    return std::make_shared<AidlManagedBuffer>(std::move(buffer), size, std::move(roles), operand);
}

// Takes ownership of "buffer". The updated dimensions start out equal to the
// operand's dimensions and may later be refined via updateDimensions().
AidlManagedBuffer::AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
                                     std::set<AidlHalPreparedModelRole> roles,
                                     const Operand& operand)
    : kBuffer(std::move(buffer)),
      kSize(size),
      kRoles(std::move(roles)),
      kOperandType(operand.type),
      kInitialDimensions(operand.dimensions),
      mUpdatedDimensions(operand.dimensions) {
    // create() already rejects extension operands; hitting this CHECK means the
    // constructor was called directly with an unsupported operand.
    CHECK(!isExtension(kOperandType));
}

// Validates that every request argument backed by pool "poolIndex" (which must
// be a MemoryDomainToken pool) matches a role declared when this buffer was
// allocated, and that the buffer/argument dimensions are compatible.
ErrorStatus AidlManagedBuffer::validateRequest(
        uint32_t poolIndex, const Request& request,
        const aidl_hal::IPreparedModel* preparedModel) const {
    CHECK_LT(poolIndex, request.pools.size());
    CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex]));
    // Hold the lock for the whole validation so mInitialized and
    // mUpdatedDimensions are read consistently.
    std::lock_guard<std::mutex> guard(mMutex);

    // Track whether this pool has already been bound to an argument. Multiple
    // inputs may share the buffer, but an output may not share it with any
    // other input or output (checked in the second loop).
    bool usedAsInput = false, usedAsOutput = false;
    for (uint32_t i = 0; i < request.inputs.size(); i++) {
        if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
        if (request.inputs[i].location.poolIndex != poolIndex) continue;
        // Validate if the input role is specified during allocation.
        if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // The buffer must hold valid data before it can be read as an input.
        if (!mInitialized) {
            LOG(ERROR)
                    << "AidlManagedBuffer::validateRequest -- using uninitialized buffer as input "
                       "request.";
            return ErrorStatus::GENERAL_FAILURE;
        }
        // Inputs are checked against the most recently updated dimensions.
        auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions);
        if (!combined.has_value()) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
                       << toString(mUpdatedDimensions) << " vs "
                       << toString(request.inputs[i].dimensions) << ")";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        usedAsInput = true;
    }
    for (uint32_t i = 0; i < request.outputs.size(); i++) {
        if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
        if (request.outputs[i].location.poolIndex != poolIndex) continue;
        // An output must be the buffer's only binding in this request.
        if (usedAsInput || usedAsOutput) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- using the same device memory for "
                          "input/output or multiple outputs";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // Validate if the output role is specified during allocation.
        if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // Outputs are checked against the initial (allocation-time) dimensions,
        // not the updated ones.
        auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions);
        if (!combined.has_value()) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
                       << toString(kInitialDimensions) << " vs "
                       << toString(request.outputs[i].dimensions) << ")";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        usedAsOutput = true;
    }
    // No role or dimension conflicts found.
    return ErrorStatus::NONE;
}

// Validates a copyFrom request: "size" must exactly match the buffer size, and
// "dimensions" must fully specify a shape compatible with the operand.
// Only immutable (k-prefixed) members are read, so no lock is taken here.
ErrorStatus AidlManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions,
                                                uint32_t size) const {
    if (size != kSize) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize
                   << " vs " << size;
        return ErrorStatus::INVALID_ARGUMENT;
    }

    // Scalars have no dimensions; any dimension update is invalid.
    if (isNonExtensionScalar(kOperandType)) {
        if (!dimensions.empty()) {
            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid dimensions for scalar "
                          "operand: "
                       << toString(dimensions);
            return ErrorStatus::INVALID_ARGUMENT;
        }
        return ErrorStatus::NONE;
    }

    // For tensors, the effective shape (the update if provided, otherwise the
    // initial dimensions) must be fully specified.
    if (dimensions.empty()) {
        if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) {
            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the initial dimensions are not "
                          "fully "
                          "specified and no dimension update is provided: "
                       << toString(kInitialDimensions);
            return ErrorStatus::INVALID_ARGUMENT;
        }
    } else {
        if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) {
            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the updated dimensions are not "
                          "fully "
                          "specified: "
                       << toString(dimensions);
            return ErrorStatus::INVALID_ARGUMENT;
        }
    }

    // Finally, the update must be compatible with the allocation-time shape.
    const auto combined = combineDimensions(kInitialDimensions, dimensions);
    if (!combined.has_value()) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- incompatible dimensions ("
                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
        return ErrorStatus::INVALID_ARGUMENT;
    }
    return ErrorStatus::NONE;
}

// Validates a copyTo request: the destination must match the buffer size
// byte-for-byte, and the buffer must already hold valid data.
ErrorStatus AidlManagedBuffer::validateCopyTo(uint32_t size) const {
    if (size != kSize) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- invalid memory size: " << kSize
                   << " vs " << size;
        return ErrorStatus::INVALID_ARGUMENT;
    }
    std::lock_guard<std::mutex> lock(mMutex);
    if (mInitialized) {
        return ErrorStatus::NONE;
    }
    LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- using uninitialized buffer as source.";
    return ErrorStatus::GENERAL_FAILURE;
}

// Merges "dimensions" with the allocation-time dimensions; the update is
// applied only when the two sets are compatible.
bool AidlManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) {
    auto merged = combineDimensions(kInitialDimensions, dimensions);
    if (merged.has_value()) {
        std::lock_guard<std::mutex> lock(mMutex);
        mUpdatedDimensions = std::move(merged).value();
        return true;
    }
    LOG(ERROR) << "AidlManagedBuffer::updateDimensions -- incompatible dimensions ("
               << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
    return false;
}

// Records whether the buffer now holds valid data, under the mutex that
// guards mInitialized.
void AidlManagedBuffer::setInitialized(bool initialized) {
    const std::lock_guard<std::mutex> lock(mMutex);
    mInitialized = initialized;
}

// Registers "buffer" under a unique token and returns a RAII Token for it.
// Returns nullptr when passed a null buffer.
std::unique_ptr<AidlBufferTracker::Token> AidlBufferTracker::add(
        std::shared_ptr<AidlManagedBuffer> buffer) {
    if (buffer == nullptr) {
        return nullptr;
    }
    std::lock_guard<std::mutex> lock(mMutex);
    uint32_t token = 0;
    if (!mFreeTokens.empty()) {
        // Recycle a previously released token and reoccupy its slot.
        token = mFreeTokens.top();
        mFreeTokens.pop();
        mTokenToBuffers[token] = std::move(buffer);
    } else {
        // No free slot: grow the table by one entry; its index is the token.
        token = mTokenToBuffers.size();
        mTokenToBuffers.push_back(std::move(buffer));
    }
    VLOG(MEMORY) << "AidlBufferTracker::add -- new token = " << token;
    return std::make_unique<Token>(token, shared_from_this());
}

// Looks up the buffer registered under "token"; a token is valid only when it
// indexes a live (non-null) entry of the table.
std::shared_ptr<AidlManagedBuffer> AidlBufferTracker::get(uint32_t token) const {
    std::lock_guard<std::mutex> lock(mMutex);
    if (token < mTokenToBuffers.size() && mTokenToBuffers[token] != nullptr) {
        return mTokenToBuffers[token];
    }
    LOG(ERROR) << "AidlBufferTracker::get -- unknown token " << token;
    return nullptr;
}

// Releases a token previously handed out by add(): clears the slot and makes
// the token available for reuse. Called from Token's destructor.
void AidlBufferTracker::free(uint32_t token) {
    std::lock_guard<std::mutex> lock(mMutex);
    CHECK_LT(token, mTokenToBuffers.size());
    CHECK(mTokenToBuffers[token] != nullptr);
    VLOG(MEMORY) << "AidlBufferTracker::free -- release token = " << token;
    mFreeTokens.push(token);
    mTokenToBuffers[token] = nullptr;
}

}  // namespace android::nn
Loading