Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e16af0a4 authored by Michael Butler's avatar Michael Butler
Browse files

Consolidate NNAPI VTS utility code

This CL does additional NNAPI VTS test cleanup, including consolidating
duplicate functionality. Specifically, this CL:
* consolidates the createPreparedModel function, removing the duplicate
* consolidates the std::ostream ErrorStatus and DeviceStatus code into Utils
* changes non-null constant pointers to constant references
* removes redundant leading namespace specifiers (V1_0::, ::testing, etc.)
* makes the Validation tests free functions
* renames device to kDevice and mTestModel to kTestModel

Bug: N/A
Test: mma
Test: VtsHalNeuralnetworksV1_*TargetTest (with sample-all)
Change-Id: Ic401bb1f1760cc10384ac0d30c0c93409b63a9c7
parent 8bc15b59
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@ TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test: a freshly-obtained device must report itself AVAILABLE.
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    // Pre-refactor duplicate line (using the removed `device` member and
    // redeclaring `status`) dropped; only the kDevice form remains.
    Return<DeviceStatus> status = kDevice->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
@@ -33,7 +33,7 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) {
// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    Return<void> ret =
            device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
            kDevice->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
                EXPECT_EQ(ErrorStatus::NONE, status);
                EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
                EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+9 −46
Original line number Diff line number Diff line
@@ -122,9 +122,15 @@ Model createModel(const TestModel& testModel) {

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
    const Model model = createModel(testModel);
    const Request request = createRequest(testModel);

    // Create IPreparedModel.
    sp<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel);
    if (preparedModel == nullptr) return;

    // Launch execution.
    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
@@ -143,53 +149,10 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}

// Tag for the generated tests. The pre-refactor class body (which duplicated
// model preparation via its own Execute() member and referenced the removed
// `device` / `mTestModel` members) was left in as diff residue alongside the
// new definition, producing a class redefinition; only the post-refactor
// empty tag class is kept. All preparation/execution logic now lives in the
// free Execute(device, testModel) function.
class GeneratedTest : public GeneratedTestBase {};

// Runs the free Execute() driver for each generated (name, model) parameter.
TEST_P(GeneratedTest, Test) {
    // Stale pre-refactor call `Execute(*mTestModel)` removed — mTestModel no
    // longer exists; the free function takes the device and model explicitly.
    Execute(kDevice, kTestModel);
}

INSTANTIATE_GENERATED_TEST(GeneratedTest,
+7 −19
Original line number Diff line number Diff line
@@ -25,32 +25,20 @@ namespace android::hardware::neuralnetworks::V1_0::vts::functional {

class GeneratedTestBase
    : public NeuralnetworksHidlTest,
      public ::testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
      public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
  protected:
    void SetUp() override {
        NeuralnetworksHidlTest::SetUp();
        ASSERT_NE(mTestModel, nullptr);
    }

    const test_helper::TestModel* mTestModel = GetParam().second;
    const test_helper::TestModel& kTestModel = *GetParam().second;
};

#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                        \
    INSTANTIATE_TEST_SUITE_P(                                                                \
            TestGenerated, TestSuite,                                                        \
            ::testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
            testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
            [](const auto& info) { return info.param.first; })

// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
// TODO: Clean up the hierarchy for ValidationTest.
// The pre-refactor class body (validateEverything/validateModel/
// validateRequest members, now free functions) was diff residue that
// redefined the class; only the post-refactor empty tag class is kept.
class ValidationTest : public GeneratedTestBase {};

Model createModel(const ::test_helper::TestModel& testModel);

+13 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@
#include <hidlmemory/mapping.h>

#include <algorithm>
#include <iostream>
#include <vector>

namespace android::hardware::neuralnetworks {
@@ -117,3 +118,15 @@ std::vector<TestBuffer> getOutputBuffers(const Request& request) {
}

}  // namespace android::hardware::neuralnetworks

namespace android::hardware::neuralnetworks::V1_0 {

// Stream inserters so gtest failure messages print symbolic enum names
// (via the HIDL-generated toString) instead of raw integers.
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
    os << toString(errorStatus);
    return os;
}

::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
    os << toString(deviceStatus);
    return os;
}

}  // namespace android::hardware::neuralnetworks::V1_0
+21 −23
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ using implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////

static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
                                           const V1_0::Model& model) {
                                           const Model& model) {
    SCOPED_TRACE(message + " [getSupportedOperations]");

    Return<void> ret =
@@ -38,7 +38,7 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
}

static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
                                 const V1_0::Model& model) {
                                 const Model& model) {
    SCOPED_TRACE(message + " [prepareModel]");

    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
@@ -57,7 +57,7 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
// mutation to it to invalidate the model, then pass it to interface calls that
// use the model. Note that the model here is passed by value, and any mutation
// to the model does not leave this function.
static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model,
static void validate(const sp<IDevice>& device, const std::string& message, Model model,
                     const std::function<void(Model*)>& mutation) {
    mutation(&model);
    validateGetSupportedOperations(device, message, model);
@@ -113,7 +113,7 @@ static const int32_t invalidOperandTypes[] = {
        static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
};

static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        for (int32_t invalidOperandType : invalidOperandTypes) {
            const std::string message = "mutateOperandTypeTest: operand " +
@@ -143,7 +143,7 @@ static uint32_t getInvalidRank(OperandType type) {
    }
}

static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
@@ -172,7 +172,7 @@ static float getInvalidScale(OperandType type) {
    }
}

static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const float invalidScale = getInvalidScale(model.operands[operand].type);
        const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
@@ -200,7 +200,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
    }
}

static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::vector<int32_t> invalidZeroPoints =
                getInvalidZeroPoints(model.operands[operand].type);
@@ -257,7 +257,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
    *operand = newOperand;
}

static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) {
static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) {
    // LSH_PROJECTION's second argument is allowed to have any type. This is the
    // only operation that currently has a type that can be anything independent
    // from any other type. Changing the operand type to any other type will
@@ -271,7 +271,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& mo
    return false;
}

static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        if (mutateOperationOperandTypeSkip(operand, model)) {
            continue;
@@ -302,7 +302,7 @@ static const int32_t invalidOperationTypes[] = {
        static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
};

static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (int32_t invalidOperationType : invalidOperationTypes) {
            const std::string message = "mutateOperationTypeTest: operation " +
@@ -318,8 +318,7 @@ static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model

///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////

static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
                                                 const V1_0::Model& model) {
static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
@@ -335,8 +334,7 @@ static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,

///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////

static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
                                                  const V1_0::Model& model) {
static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
@@ -374,7 +372,7 @@ static void removeOperand(Model* model, uint32_t index) {
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}

static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
        validate(device, message, model,
@@ -391,7 +389,7 @@ static void removeOperation(Model* model, uint32_t index) {
    hidl_vec_removeAt(&model->operations, index);
}

static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
        validate(device, message, model,
@@ -401,14 +399,14 @@ static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& mo

///////////////////////// REMOVE OPERATION INPUT /////////////////////////

static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const V1_0::Operation& op = model.operations[operation];
            const Operation& op = model.operations[operation];
            // CONCATENATION has at least 2 inputs, with the last element being
            // INT32. Skip this test if removing one of CONCATENATION's
            // inputs still produces a valid model.
            if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 &&
            if (op.type == OperationType::CONCATENATION && op.inputs.size() > 2 &&
                input != op.inputs.size() - 1) {
                continue;
            }
@@ -426,7 +424,7 @@ static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Mode

///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////

static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "removeOperationOutputTest: operation " +
@@ -447,7 +445,7 @@ static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Mod

///////////////////////// ADD OPERATION INPUT /////////////////////////

static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
@@ -460,7 +458,7 @@ static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model&

///////////////////////// ADD OPERATION OUTPUT /////////////////////////

static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message =
                "addOperationOutputTest: operation " + std::to_string(operation);
@@ -474,7 +472,7 @@ static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model&

////////////////////////// ENTRY POINT //////////////////////////////

void ValidationTest::validateModel(const V1_0::Model& model) {
void validateModel(const sp<IDevice>& device, const Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
Loading