Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d3a6efc2 authored by Michael Butler's avatar Michael Butler
Browse files

Enable NN VTS and utility code to use lazy services

This CL enables VtsHalNeuralnetworksTargetTest to use lazy services by
changing from AServiceManager_getService (which will return nullptr for
the service if the service is not able to be loaded within a short
amount of time) to AServiceManager_waitForService (which will wait for a
longer time, allowing lazy services to start up).

Similarly, the utility code is changed from using
AServiceManager_getService to AServiceManager_waitForService where
possible.

This CL also introduces an "InvalidDevice" utility class to the
nnapi/hal/aidl utility code. InvalidDevices are minimal devices that
support no functionality but are still able to pass VTS tests.

Bug: 170696939
Test: mma
Test: VtsHalNeuralnetworksTargetTest
Change-Id: I4f806b104ef6af863ec55c2c3f2a2dd1f72b9633
parent 6594b5f1
Loading
Loading
Loading
Loading
+70 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <android/binder_auto_utils.h>

#include <memory>
#include <string>
#include <vector>

namespace aidl::android::hardware::neuralnetworks {

// Minimal IDevice implementation that advertises no capabilities and fails
// every request, yet is well-formed enough to pass VTS validation tests.
// All metadata reported by the getters is fixed at construction time.
class InvalidDevice : public BnDevice {
  public:
    // Convenience factory: builds an InvalidDevice with worst-case performance
    // info, no cache files, no extensions, DeviceType::OTHER, and the version
    // string "invalid" (see InvalidDevice::create in the .cpp).
    static std::shared_ptr<InvalidDevice> create();

    // Stores the supplied metadata verbatim; the object performs no I/O.
    InvalidDevice(Capabilities capabilities, const NumberOfCacheFiles& numberOfCacheFiles,
                  std::vector<Extension> extensions, DeviceType deviceType,
                  std::string versionString);

    // Always fails with GENERAL_FAILURE (this device supports no functionality).
    ndk::ScopedAStatus allocate(const BufferDesc& desc,
                                const std::vector<IPreparedModelParcel>& preparedModels,
                                const std::vector<BufferRole>& inputRoles,
                                const std::vector<BufferRole>& outputRoles,
                                DeviceBuffer* deviceBuffer) override;
    // Getters below return the constructor-provided values unchanged.
    ndk::ScopedAStatus getCapabilities(Capabilities* capabilities) override;
    ndk::ScopedAStatus getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) override;
    ndk::ScopedAStatus getSupportedExtensions(std::vector<Extension>* extensions) override;
    // Validates the model, then reports every operation as unsupported.
    ndk::ScopedAStatus getSupportedOperations(const Model& model,
                                              std::vector<bool>* supportedOperations) override;
    ndk::ScopedAStatus getType(DeviceType* deviceType) override;
    ndk::ScopedAStatus getVersionString(std::string* versionString) override;
    // Validates all arguments (reporting INVALID_ARGUMENT via the callback on
    // failure), then notifies GENERAL_FAILURE -- preparation never succeeds.
    ndk::ScopedAStatus prepareModel(
            const Model& model, ExecutionPreference preference, Priority priority, int64_t deadline,
            const std::vector<ndk::ScopedFileDescriptor>& modelCache,
            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
            const std::vector<uint8_t>& token,
            const std::shared_ptr<IPreparedModelCallback>& callback) override;
    // Always fails with GENERAL_FAILURE.
    ndk::ScopedAStatus prepareModelFromCache(
            int64_t deadline, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
            const std::vector<uint8_t>& token,
            const std::shared_ptr<IPreparedModelCallback>& callback) override;

  private:
    // Immutable metadata reported by the getters above ("k" prefix = constant).
    const Capabilities kCapabilities;
    const NumberOfCacheFiles kNumberOfCacheFiles;
    const std::vector<Extension> kExtensions;
    const DeviceType kDeviceType;
    const std::string kVersionString;
};

}  // namespace aidl::android::hardware::neuralnetworks
+179 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "InvalidDevice"

#include "InvalidDevice.h"

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <android/binder_auto_utils.h>

#include "Conversions.h"
#include "Utils.h"

#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace aidl::android::hardware::neuralnetworks {
namespace {

// Converts an NN ErrorStatus (+ message) into a binder status object.
// ErrorStatus::NONE maps to OK; anything else becomes a service-specific
// error carrying the numeric status and the message.
ndk::ScopedAStatus toAStatus(ErrorStatus errorStatus, const std::string& errorMessage) {
    return errorStatus == ErrorStatus::NONE
                   ? ndk::ScopedAStatus::ok()
                   : ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                             static_cast<int32_t>(errorStatus), errorMessage.c_str());
}

}  // namespace

std::shared_ptr<InvalidDevice> InvalidDevice::create() {
    constexpr auto perf = PerformanceInfo{
            .execTime = std::numeric_limits<float>::max(),
            .powerUsage = std::numeric_limits<float>::max(),
    };
    auto capabilities = Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = perf,
            .relaxedFloat32toFloat16PerformanceTensor = perf,
            .operandPerformance = {},
            .ifPerformance = perf,
            .whilePerformance = perf,
    };
    constexpr auto numberOfCacheFiles = NumberOfCacheFiles{
            .numModelCache = 0,
            .numDataCache = 0,
    };
    std::vector<Extension> extensions{};
    constexpr auto deviceType = DeviceType::OTHER;
    std::string versionString = "invalid";

    return ndk::SharedRefBase::make<InvalidDevice>(std::move(capabilities), numberOfCacheFiles,
                                                   std::move(extensions), deviceType,
                                                   std::move(versionString));
}

// Stores the fixed metadata this device will report through its getters.
// Heavier arguments (capabilities, extensions, version string) are taken by
// value and moved into the const members; the rest are copied.
InvalidDevice::InvalidDevice(Capabilities capabilities,
                             const NumberOfCacheFiles& numberOfCacheFiles,
                             std::vector<Extension> extensions, DeviceType deviceType,
                             std::string versionString)
    : kCapabilities(std::move(capabilities)),
      kNumberOfCacheFiles(numberOfCacheFiles),
      kExtensions(std::move(extensions)),
      kDeviceType(deviceType),
      kVersionString(std::move(versionString)) {}

// Buffer allocation is unsupported on this device; every call fails with
// GENERAL_FAILURE and none of the arguments are inspected.
ndk::ScopedAStatus InvalidDevice::allocate(
        const BufferDesc& /*desc*/, const std::vector<IPreparedModelParcel>& /*preparedModels*/,
        const std::vector<BufferRole>& /*inputRoles*/,
        const std::vector<BufferRole>& /*outputRoles*/, DeviceBuffer* /*deviceBuffer*/) {
    return toAStatus(ErrorStatus::GENERAL_FAILURE, "InvalidDevice");
}

// Reports the fixed capabilities supplied at construction.
ndk::ScopedAStatus InvalidDevice::getCapabilities(Capabilities* capabilities) {
    *capabilities = kCapabilities;
    return ndk::ScopedAStatus::ok();
}

// Reports the fixed cache-file counts supplied at construction.
ndk::ScopedAStatus InvalidDevice::getNumberOfCacheFilesNeeded(
        NumberOfCacheFiles* numberOfCacheFiles) {
    *numberOfCacheFiles = kNumberOfCacheFiles;
    return ndk::ScopedAStatus::ok();
}

// Reports the fixed extension list supplied at construction.
ndk::ScopedAStatus InvalidDevice::getSupportedExtensions(std::vector<Extension>* extensions) {
    *extensions = kExtensions;
    return ndk::ScopedAStatus::ok();
}

// Validates the model, then declares every one of its main-graph operations
// unsupported (this device can run nothing).
ndk::ScopedAStatus InvalidDevice::getSupportedOperations(const Model& model,
                                                         std::vector<bool>* supportedOperations) {
    const auto validationResult = utils::validate(model);
    if (!validationResult.ok()) {
        return toAStatus(ErrorStatus::INVALID_ARGUMENT, validationResult.error());
    }
    supportedOperations->assign(model.main.operations.size(), false);
    return ndk::ScopedAStatus::ok();
}

// Reports the fixed device type supplied at construction.
ndk::ScopedAStatus InvalidDevice::getType(DeviceType* deviceType) {
    *deviceType = kDeviceType;
    return ndk::ScopedAStatus::ok();
}

// Reports the fixed version string supplied at construction.
ndk::ScopedAStatus InvalidDevice::getVersionString(std::string* versionString) {
    *versionString = kVersionString;
    return ndk::ScopedAStatus::ok();
}

// Fully validates every argument (so this device passes VTS validation
// tests), reporting INVALID_ARGUMENT both via the callback and the returned
// status on any failure. When all arguments are valid, preparation still
// fails: the callback is notified with GENERAL_FAILURE and OK is returned
// (the binder call itself succeeded; the failure travels via the callback).
ndk::ScopedAStatus InvalidDevice::prepareModel(
        const Model& model, ExecutionPreference preference, Priority priority, int64_t deadline,
        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    // Without a callback there is no channel to report errors on, so only the
    // returned status carries the failure.
    if (callback.get() == nullptr) {
        return toAStatus(ErrorStatus::INVALID_ARGUMENT,
                         "invalid callback passed to InvalidDevice::prepareModel");
    }
    if (const auto result = utils::validate(model); !result.ok()) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        return toAStatus(ErrorStatus::INVALID_ARGUMENT, result.error());
    }
    if (const auto result = utils::validate(preference); !result.ok()) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        return toAStatus(ErrorStatus::INVALID_ARGUMENT, result.error());
    }
    if (const auto result = utils::validate(priority); !result.ok()) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        return toAStatus(ErrorStatus::INVALID_ARGUMENT, result.error());
    }
    // -1 is the sentinel for "no deadline"; any other negative value is invalid.
    if (deadline < -1) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        return toAStatus(ErrorStatus::INVALID_ARGUMENT,
                         "Invalid deadline " + std::to_string(deadline));
    }
    if (modelCache.size() != static_cast<size_t>(kNumberOfCacheFiles.numModelCache)) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        return toAStatus(ErrorStatus::INVALID_ARGUMENT,
                         "Invalid modelCache, size = " + std::to_string(modelCache.size()));
    }
    if (dataCache.size() != static_cast<size_t>(kNumberOfCacheFiles.numDataCache)) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        // Fixed copy-paste bug: this message previously said "modelCache".
        return toAStatus(ErrorStatus::INVALID_ARGUMENT,
                         "Invalid dataCache, size = " + std::to_string(dataCache.size()));
    }
    if (token.size() != IDevice::BYTE_SIZE_OF_CACHE_TOKEN) {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        // Report the actual (offending) token size, not the expected constant.
        return toAStatus(ErrorStatus::INVALID_ARGUMENT,
                         "Invalid cache token, size = " + std::to_string(token.size()));
    }
    callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
    return ndk::ScopedAStatus::ok();
}

// Cache-based preparation is unsupported; the callback is notified with
// GENERAL_FAILURE and the same status is returned. The callback is
// null-checked first (matching prepareModel) so a malformed binder call
// cannot crash the service with a null dereference.
ndk::ScopedAStatus InvalidDevice::prepareModelFromCache(
        int64_t /*deadline*/, const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/,
        const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/,
        const std::vector<uint8_t>& /*token*/,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return toAStatus(ErrorStatus::INVALID_ARGUMENT,
                         "invalid callback passed to InvalidDevice::prepareModelFromCache");
    }
    callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
    return toAStatus(ErrorStatus::GENERAL_FAILURE, "InvalidDevice");
}

}  // namespace aidl::android::hardware::neuralnetworks
+16 −5
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@

#include "Service.h"

#include <AndroidVersionUtil.h>
#include <android/binder_auto_utils.h>
#include <android/binder_manager.h>
#include <android/binder_process.h>
@@ -35,11 +36,21 @@ nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& instanceName) {
    hal::utils::ResilientDevice::Factory makeDevice =
            [instanceName,
             name = std::move(fullName)](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
        const auto& getService =
                blocking ? AServiceManager_getService : AServiceManager_checkService;
        std::add_pointer_t<AIBinder*(const char*)> getService;
        if (blocking) {
            if (__builtin_available(android __NNAPI_AIDL_MIN_ANDROID_API__, *)) {
                getService = AServiceManager_waitForService;
            } else {
                getService = AServiceManager_getService;
            }
        } else {
            getService = AServiceManager_checkService;
        }

        auto service = IDevice::fromBinder(ndk::SpAIBinder(getService(name.c_str())));
        if (service == nullptr) {
            return NN_ERROR() << (blocking ? "AServiceManager_getService"
            return NN_ERROR()
                   << (blocking ? "AServiceManager_waitForService (or AServiceManager_getService)"
                                : "AServiceManager_checkService")
                   << " returned nullptr";
        }
+1 −1
Original line number Diff line number Diff line
@@ -94,7 +94,7 @@ void NeuralNetworksAidlTest::SetUp() {
}

static NamedDevice makeNamedDevice(const std::string& name) {
    ndk::SpAIBinder binder(AServiceManager_getService(name.c_str()));
    ndk::SpAIBinder binder(AServiceManager_waitForService(name.c_str()));
    return {name, IDevice::fromBinder(binder)};
}