Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee390679 authored by Michael Butler's avatar Michael Butler Committed by Android (Google) Code Review
Browse files

Merge "Add negative tests to the Neuralnetworks HAL" into oc-mr1-dev

parents 61e7b96a 2f499a9b
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ cc_test {
    srcs: [
        "Event.cpp",
        "GeneratedTestHarness.cpp",
        "Models.cpp",
        "VtsHalNeuralnetworksV1_0TargetTest.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
+210 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <vector>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create a valid model
// Builds a minimal well-formed model: output = ADD(input, constant, NONE),
// where input/constant/output are all 1x2x2x1 FLOAT32 tensors and the
// constant data lives in the flat operandValues buffer.
Model createValidTestModel() {
    const std::vector<float> addendData = {5.0f, 6.0f, 7.0f, 8.0f};
    const uint32_t addendSize = addendData.size() * sizeof(float);

    // operand indexes within the model
    const uint32_t kInput = 0;
    const uint32_t kAddend = 1;
    const uint32_t kActivation = 2;
    const uint32_t kOutput = 3;

    const std::vector<Operand> operands = {
        // kInput: tensor supplied by the caller at execution time
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        // kAddend: constant tensor copied from operandValues
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = 0, .length = addendSize},
        },
        // kActivation: scalar fused-activation code, stored after the addend
        {
            .type = OperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = addendSize, .length = sizeof(int32_t)},
        },
        // kOutput: tensor produced by the ADD operation
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
    };

    const std::vector<Operation> operations = {{
        .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
        .inputs = {kInput, kAddend, kActivation},
        .outputs = {kOutput},
    }};

    // Pack the constant operands (addend floats, then the activation code)
    // into the flat operandValues byte buffer, matching the .location fields
    // declared above.
    const int32_t activation = static_cast<int32_t>(FusedActivationFunc::NONE);
    const uint8_t* addendBytes = reinterpret_cast<const uint8_t*>(addendData.data());
    const uint8_t* activationBytes = reinterpret_cast<const uint8_t*>(&activation);
    std::vector<uint8_t> operandValues(addendBytes, addendBytes + addendSize);
    operandValues.insert(operandValues.end(), activationBytes,
                         activationBytes + sizeof(activation));

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = {kInput},
        .outputIndexes = {kOutput},
        .operandValues = operandValues,
        .pools = {},
    };
}

// create first invalid model
// Takes the known-good model and corrupts its only operation with an
// out-of-range operation type; a conformant driver must reject this model.
Model createInvalidTestModel1() {
    const OperationType kBogusOperation = static_cast<OperationType>(0xDEADBEEF);
    Model model = createValidTestModel();
    model.operations[0].opTuple = {kBogusOperation /* INVALID */, OperandType::TENSOR_FLOAT32};
    return model;
}

// create second invalid model
// Takes the known-good model and makes its input list reference operand
// index 4, which does not exist in the four-operand valid model.
Model createInvalidTestModel2() {
    Model model = createValidTestModel();
    model.inputIndexes = std::vector<uint32_t>({0, 4 /* INVALID OPERAND */});
    return model;
}

// allocator helper
// Allocates `size` bytes from the named IAllocator service (ashmem by
// default). Returns a default-constructed (zero-size) hidl_memory if the
// service is unavailable or the transport fails.
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
    sp<IAllocator> allocator = IAllocator::getService(type);
    if (allocator.get() == nullptr) {
        return {};
    }

    hidl_memory memory;
    const Return<void> ret =
        allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
            ASSERT_TRUE(success);
            memory = mem;
        });

    if (!ret.isOk()) {
        return {};
    }
    return memory;
}

// create a valid request
// Builds a Request matching the valid test model: one input argument and one
// output argument, each backed by its own freshly allocated shared-memory
// pool. The input pool is pre-loaded with {1, 2, 3, 4} and the output pool
// with -1 sentinels. Returns a default-constructed (empty) Request if any
// allocation or mapping step fails.
Request createValidTestRequest() {
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    // prepare inputs
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
    std::vector<RequestArgument> inputs = {{
        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
    }};
    std::vector<RequestArgument> outputs = {{
        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
    }};
    // one dedicated shared-memory pool per argument
    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                      allocateSharedMemory(outputSize)};
    if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
        return {};
    }

    // load data
    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
    if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
        return {};
    }
    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
    if (inputPtr == nullptr || outputPtr == nullptr) {
        return {};
    }
    // update()/commit() bracket the CPU writes into the mapped regions
    inputMemory->update();
    outputMemory->update();
    std::copy(inputData.begin(), inputData.end(), inputPtr);
    std::copy(outputData.begin(), outputData.end(), outputPtr);
    inputMemory->commit();
    outputMemory->commit();

    return {.inputs = inputs, .outputs = outputs, .pools = pools};
}

// create first invalid request
// Returns a copy of the valid request whose input argument references pool
// index 2; the valid request only carries pools 0 and 1, so execution must
// fail with INVALID_ARGUMENT. Returns an empty Request if the underlying
// valid request could not be built.
Request createInvalidTestRequest1() {
    Request request = createValidTestRequest();
    if (request.inputs.size() == 0) {
        // createValidTestRequest failed (allocation or mapping error);
        // indexing inputs[0] below would be out of bounds.
        return {};
    }
    const uint32_t INVALID = 2;
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    request.inputs[0].location = {
        .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
    return request;
}

// create second invalid request
// Returns a copy of the valid request whose input argument carries a rank-8
// dimension override that cannot match the model's 1x2x2x1 input, so
// execution must fail with INVALID_ARGUMENT. Returns an empty Request if the
// underlying valid request could not be built.
Request createInvalidTestRequest2() {
    Request request = createValidTestRequest();
    if (request.inputs.size() == 0) {
        // createValidTestRequest failed (allocation or mapping error);
        // indexing inputs[0] below would be out of bounds.
        return {};
    }
    request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
    return request;
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
+43 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0TargetTest.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create the model
// Model factories (defined alongside in Models.cpp): one valid ADD model
// plus two deliberately malformed variants used by the negative tests.
Model createValidTestModel();
Model createInvalidTestModel1();  // invalid: out-of-range operation type
Model createInvalidTestModel2();  // invalid: input references a nonexistent operand

// create the request
// Request factories (defined alongside in Models.cpp): one valid request
// plus two deliberately malformed variants used by the negative tests.
Request createValidTestRequest();
Request createInvalidTestRequest1();  // invalid: references a nonexistent pool
Request createInvalidTestRequest2();  // invalid: mismatched input dimensions

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
+126 −147
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@

#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include "Event.h"
#include "Models.h"
#include "TestHarness.h"

#include <android-base/logging.h>
@@ -65,6 +66,32 @@ void NeuralnetworksHidlTest::SetUp() {

void NeuralnetworksHidlTest::TearDown() {}

// Prepares `model` on the device and returns the resulting IPreparedModel,
// or nullptr if any step fails: event creation, the transport call, the
// reported preparation status, or waiting on the preparation event.
sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut(const Model& model) {
    sp<IPreparedModel> preparedModel;
    ErrorStatus prepareStatus;  // set by the prepareModel callback below
    sp<Event> preparationEvent = new Event();
    if (preparationEvent.get() == nullptr) {
        return nullptr;
    }

    // The callback delivers the status and the prepared-model handle; the
    // event is waited on afterwards before the model is considered usable.
    Return<void> prepareRet = device->prepareModel(
        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
            prepareStatus = status;
            preparedModel = prepared;
        });

    // NOTE(review): prepareStatus is read uninitialized if the transport
    // succeeded but the callback was never invoked — presumed impossible for
    // a conformant HIDL server; confirm.
    if (!prepareRet.isOk() || prepareStatus != ErrorStatus::NONE ||
        preparedModel.get() == nullptr) {
        return nullptr;
    }
    // Block until the driver signals the preparation event.
    Event::Status eventStatus = preparationEvent->wait();
    if (eventStatus != Event::Status::SUCCESS) {
        return nullptr;
    }

    return preparedModel;
}

// create device test
// Intentionally empty body: passes as long as the fixture's SetUp()
// (service lookup) succeeded.
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

@@ -91,185 +118,137 @@ TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    EXPECT_TRUE(ret.isOk());
}

namespace {
// create the model
Model createTestModel() {
    const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
    const uint32_t size = operand2Data.size() * sizeof(float);

    const uint32_t operand1 = 0;
    const uint32_t operand2 = 1;
    const uint32_t operand3 = 2;
    const uint32_t operand4 = 3;

    const std::vector<Operand> operands = {
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = 0, .length = size},
        },
        {
            .type = OperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
        },
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
    };

    const std::vector<Operation> operations = {{
        .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
        .inputs = {operand1, operand2, operand3},
        .outputs = {operand4},
    }};

    const std::vector<uint32_t> inputIndexes = {operand1};
    const std::vector<uint32_t> outputIndexes = {operand4};
    std::vector<uint8_t> operandValues(
        reinterpret_cast<const uint8_t*>(operand2Data.data()),
        reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
    int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
    operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
                         reinterpret_cast<const uint8_t*>(&activation[1]));

    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}
}  // anonymous namespace

// allocator helper
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
    hidl_memory memory;

    sp<IAllocator> allocator = IAllocator::getService(type);
    if (!allocator.get()) {
        return {};
    }

    Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        ASSERT_TRUE(success);
        memory = mem;
// supported operations positive test
TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
    Model model = createValidTestModel();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_EQ(model.operations.size(), supported.size());
        });
    if (!ret.isOk()) {
        return {};
    EXPECT_TRUE(ret.isOk());
}

    return memory;
// supported operations negative test 1
// getSupportedOperations must report INVALID_ARGUMENT for a model whose
// operation type is out of range (createInvalidTestModel1), while the
// transport call itself still succeeds.
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
    Model model = createInvalidTestModel1();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;  // contents are meaningless on failure
        });
    EXPECT_TRUE(ret.isOk());
}

// supported subgraph test
TEST_F(NeuralnetworksHidlTest, SupportedOperationsTest) {
    Model model = createTestModel();
// supported operations negative test 2
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
    Model model = createInvalidTestModel2();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_EQ(model.operations.size(), supported.size());
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}

// execute simple graph
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    // prepare request
    Model model = createTestModel();
    sp<IPreparedModel> preparedModel;
// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
    Model model = createValidTestModel();
    sp<Event> preparationEvent = new Event();
    ASSERT_NE(nullptr, preparationEvent.get());
    Return<void> prepareRet = device->prepareModel(
        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            preparedModel = prepared;
            (void)prepared;
        });
    ASSERT_TRUE(prepareRet.isOk());
}

// prepare simple model negative test 1
// prepareModel must report INVALID_ARGUMENT for a model with an out-of-range
// operation type (createInvalidTestModel1); the transport call itself must
// still succeed.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
    Model model = createInvalidTestModel1();
    sp<Event> preparationEvent = new Event();
    ASSERT_NE(nullptr, preparationEvent.get());
    Return<void> prepareRet = device->prepareModel(
        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)prepared;  // must not be used when preparation fails
        });
    ASSERT_TRUE(prepareRet.isOk());
}

// prepare simple model negative test 2
// prepareModel must report INVALID_ARGUMENT for a model whose input list
// references a nonexistent operand (createInvalidTestModel2); the transport
// call itself must still succeed.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
    Model model = createInvalidTestModel2();
    sp<Event> preparationEvent = new Event();
    ASSERT_NE(nullptr, preparationEvent.get());
    Return<void> prepareRet = device->prepareModel(
        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)prepared;  // must not be used when preparation fails
        });
    ASSERT_TRUE(prepareRet.isOk());
}

// execute simple graph positive test
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
    Model model = createValidTestModel();
    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
    ASSERT_NE(nullptr, preparedModel.get());
    Event::Status preparationStatus = preparationEvent->wait();
    EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);

    // prepare inputs
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
    std::vector<RequestArgument> inputs = {{
        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
    }};
    std::vector<RequestArgument> outputs = {{
        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
    }};
    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                      allocateSharedMemory(outputSize)};
    ASSERT_NE(0ull, pools[INPUT].size());
    ASSERT_NE(0ull, pools[OUTPUT].size());

    // load data
    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
    ASSERT_NE(nullptr, inputMemory.get());
    ASSERT_NE(nullptr, outputMemory.get());
    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
    ASSERT_NE(nullptr, inputPtr);
    ASSERT_NE(nullptr, outputPtr);
    inputMemory->update();
    outputMemory->update();
    std::copy(inputData.begin(), inputData.end(), inputPtr);
    std::copy(outputData.begin(), outputData.end(), outputPtr);
    inputMemory->commit();
    outputMemory->commit();
    Request request = createValidTestRequest();

    // execute request
    sp<Event> executionEvent = new Event();
    ASSERT_NE(nullptr, executionEvent.get());
    Return<ErrorStatus> executeStatus = preparedModel->execute(
        {.inputs = inputs, .outputs = outputs, .pools = pools}, executionEvent);
    Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
    ASSERT_TRUE(executeStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
    Event::Status eventStatus = executionEvent->wait();
    EXPECT_EQ(Event::Status::SUCCESS, eventStatus);

    // validate results { 1+5, 2+6, 3+7, 4+8 }
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    const uint32_t OUTPUT = 1;

    sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
    ASSERT_NE(nullptr, outputMemory.get());
    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
    ASSERT_NE(nullptr, outputPtr);
    outputMemory->read();
    std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
    outputMemory->commit();
    EXPECT_EQ(expectedData, outputData);
}

// execute simple graph negative test 1
// Executing a valid prepared model with a request whose input argument points
// at a nonexistent memory pool (createInvalidTestRequest1) must fail with
// INVALID_ARGUMENT.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
    Model model = createValidTestModel();
    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
    ASSERT_NE(nullptr, preparedModel.get());
    Request request = createInvalidTestRequest1();

    sp<Event> executionEvent = new Event();
    ASSERT_NE(nullptr, executionEvent.get());
    Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
    ASSERT_TRUE(executeStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
    executionEvent->wait();  // drain the event; its status is not checked on failure
}

// execute simple graph negative test 2
// Executing a valid prepared model with a request whose input dimensions
// cannot match the model's input shape (createInvalidTestRequest2) must fail
// with INVALID_ARGUMENT.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
    Model model = createValidTestModel();
    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
    ASSERT_NE(nullptr, preparedModel.get());
    Request request = createInvalidTestRequest2();

    sp<Event> executionEvent = new Event();
    ASSERT_NE(nullptr, executionEvent.get());
    Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
    ASSERT_TRUE(executeStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
    executionEvent->wait();  // drain the event; its status is not checked on failure
}

// Mixed-typed examples
typedef MixedTypedExampleType MixedTypedExample;

+2 −0
Original line number Diff line number Diff line
@@ -72,6 +72,8 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
    void SetUp() override;
    void TearDown() override;

    sp<IPreparedModel> doPrepareModelShortcut(const Model& model);

    sp<IDevice> device;
};