Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e84f442c authored by Michael Butler's avatar Michael Butler Committed by Android (Google) Code Review
Browse files

Merge "Follow up CL to "Add validation tests for NNAPI Burst serialized format"" into qt-dev

parents 881e261f 0a1ad962
Loading
Loading
Loading
Loading
+1 −56
Original line number Diff line number Diff line
@@ -34,7 +34,6 @@ namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -42,53 +41,6 @@ using test_helper::MixedTypedExample;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

// Synchronously prepares `model` on `device`, storing the result in
// `*preparedModel` (left null when the vendor service legitimately cannot
// prepare an unsupported model). Uses getSupportedOperations() to decide
// whether a preparation failure is a test failure or an acceptable
// "model not supported" outcome. Runs inside gtest: any ASSERT_* failure
// aborts the calling test via ASSERT_NO_FATAL_FAILURE at the call site.
static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);  // output parameter must be non-null

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());  // transport-level success only

    // launch prepare model (asynchronous; completion reported via callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the service invokes the callback)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation,
    // it must return false. So here, if a driver isn't sure if it can support
    // an operation, but reports that it successfully prepared the model, the
    // test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;  // not a failure: caller checks for a null prepared model
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
@@ -237,15 +189,8 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
    return requests;
}

void ValidationTest::validateRequests(const V1_0::Model& model,
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
+63 −2
Original line number Diff line number Diff line
@@ -18,6 +18,10 @@

#include "VtsHalNeuralnetworks.h"

#include <android-base/logging.h>

#include "Callbacks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -25,6 +29,55 @@ namespace V1_0 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;

// Synchronously prepares `model` on `device`, storing the result in
// `*preparedModel` (left null when the vendor service legitimately cannot
// prepare an unsupported model). Uses getSupportedOperations() to decide
// whether a preparation failure is a test failure or an acceptable
// "model not supported" outcome. Runs inside gtest: any ASSERT_* failure
// aborts the calling test via ASSERT_NO_FATAL_FAILURE at the call site.
static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);  // output parameter must be non-null

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                ASSERT_EQ(ErrorStatus::NONE, status);
                ASSERT_NE(0ul, supported.size());
                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                 [](bool valid) { return valid; });
            });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());  // transport-level success only

    // launch prepare model (asynchronous; completion reported via callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the service invokes the callback)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation,
    // it must return false. So here, if a driver isn't sure if it can support
    // an operation, but reports that it successfully prepared the model, the
    // test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;  // not a failure: caller checks for a null prepared model
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

@@ -68,9 +121,17 @@ void NeuralnetworksHidlTest::TearDown() {
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

// NOTE(review): this span is a raw diff excerpt with no +/- markers — the
// first signature line and the first validateRequests() call are the OLD
// (removed) lines; the second signature and the prepared-model creation are
// the NEW (added) lines. As plain C++ the duplicate signatures would be a
// redefinition; this text must not be compiled as-is.
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
    validateModel(model);
    validateRequests(model, request);

    // create IPreparedModel
    // NEW flow: prepare the model once here, then validate each request
    // against the resulting IPreparedModel.
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;  // model legitimately unsupported by the vendor service
    }

    validateRequests(preparedModel, requests);
}

}  // namespace functional
+2 −1
Original line number Diff line number Diff line
@@ -67,7 +67,8 @@ class ValidationTest : public NeuralnetworksHidlTest {

   private:
     void validateModel(const Model& model);
     void validateRequests(const Model& model, const std::vector<Request>& request);
     void validateRequests(const sp<IPreparedModel>& preparedModel,
                           const std::vector<Request>& requests);
};

// Tag for the generated tests
+1 −57
Original line number Diff line number Diff line
@@ -34,7 +34,6 @@ namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -42,54 +41,6 @@ using test_helper::MixedTypedExample;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

// Synchronously prepares the V1_1 `model` on `device`, storing the result in
// `*preparedModel` (left null when the vendor service legitimately cannot
// prepare an unsupported model). Uses getSupportedOperations_1_1() to decide
// whether a preparation failure is a test failure or an acceptable
// "model not supported" outcome. Runs inside gtest: any ASSERT_* failure
// aborts the calling test via ASSERT_NO_FATAL_FAILURE at the call site.
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);  // output parameter must be non-null

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());  // transport-level success only

    // launch prepare model (asynchronous; completion reported via callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the service invokes the callback)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that
    // are guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation,
    // it must return false. So here, if a driver isn't sure if it can support
    // an operation, but reports that it successfully prepared the model, the
    // test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;  // not a failure: caller checks for a null prepared model
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
@@ -238,15 +189,8 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
    return requests;
}

void ValidationTest::validateRequests(const V1_1::Model& model,
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
+64 −2
Original line number Diff line number Diff line
@@ -18,6 +18,10 @@

#include "VtsHalNeuralnetworks.h"

#include <android-base/logging.h>

#include "Callbacks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -25,6 +29,56 @@ namespace V1_1 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;

// Synchronously prepares the V1_1 `model` on `device`, storing the result in
// `*preparedModel` (left null when the vendor service legitimately cannot
// prepare an unsupported model). Uses getSupportedOperations_1_1() to decide
// whether a preparation failure is a test failure or an acceptable
// "model not supported" outcome. Runs inside gtest: any ASSERT_* failure
// aborts the calling test via ASSERT_NO_FATAL_FAILURE at the call site.
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);  // output parameter must be non-null

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                ASSERT_EQ(ErrorStatus::NONE, status);
                ASSERT_NE(0ul, supported.size());
                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                 [](bool valid) { return valid; });
            });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());  // transport-level success only

    // launch prepare model (asynchronous; completion reported via callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the service invokes the callback)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that
    // are guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation,
    // it must return false. So here, if a driver isn't sure if it can support
    // an operation, but reports that it successfully prepared the model, the
    // test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;  // not a failure: caller checks for a null prepared model
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

@@ -68,9 +122,17 @@ void NeuralnetworksHidlTest::TearDown() {
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

// NOTE(review): this span is a raw diff excerpt with no +/- markers — the
// first signature line and the first validateRequests() call are the OLD
// (removed) lines; the second signature and the prepared-model creation are
// the NEW (added) lines. As plain C++ the duplicate signatures would be a
// redefinition; this text must not be compiled as-is.
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
    validateModel(model);
    validateRequests(model, request);

    // create IPreparedModel
    // NEW flow: prepare the model once here, then validate each request
    // against the resulting IPreparedModel.
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;  // model legitimately unsupported by the vendor service
    }

    validateRequests(preparedModel, requests);
}

}  // namespace functional
Loading