Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 76f5263d authored by Slava Shklyaev, committed by Gerrit Code Review
Browse files

Merge changes from topic "quant_coupling"

* changes:
  Add QUANT8_ASYMM_SIGNED support to SELECT op
  Add quantization coupling test
  Combine test parameters into TestConfig structure
parents ddad94ab 3683c785
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -578,7 +578,7 @@ f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardwar
9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
71c0f7127335e5b74d1615d5e7f129831b43ffbae5318ad0924d7d8d8910a859 android.hardware.neuralnetworks@1.2::types
72de91c3feba4b19c159cd1c413cbea596b78240caa43e31194e20e6f5b05c49 android.hardware.neuralnetworks@1.2::types
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -597,7 +597,7 @@ adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardwar
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
c511b1427b1c3f76af90967bbddaaf250db983a8d3abb9ff189fb5a807cf3d4d android.hardware.neuralnetworks@1.3::types
554baa3b317e077b850afcbaac99daeef56861b1786540e56275a4fcad1f43e3 android.hardware.neuralnetworks@1.3::types
274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication
+6 −5
Original line number Diff line number Diff line
@@ -2448,15 +2448,17 @@ enum OperationType : int32_t {
     *       then clipping is disabled.
     *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
     *       this scalar must be of the type {@link OperandType::FLOAT32},
     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
     *       this scalar must be of type {@link OperandType::FLOAT16}.
     *       otherwise if all the input tensors have the type
     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
     *       of type {@link OperandType::FLOAT16}.
     * * 50: The clipping threshold for the output from the
     *       projection layer, such that values are bound within
     *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
     *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
     *       this scalar must be of the type {@link OperandType::FLOAT32},
     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
     *       this scalar must be of type {@link OperandType::FLOAT16}.
     *       otherwise if all the input tensors have the type
     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
     *       of type {@link OperandType::FLOAT16}.
     * * 51: merge_outputs
     *       An {@link OperandType::BOOL} scalar specifying if the outputs
     *       from forward and backward cells should be merged.
@@ -4124,7 +4126,6 @@ enum OperationType : int32_t {
     * * 0: A tensor of the same type and shape as input1 and input2.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
     *
     */
    SELECT = 84,

+46 −49
Original line number Diff line number Diff line
@@ -58,8 +58,20 @@ using V1_0::Request;
using V1_1::ExecutionPreference;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

namespace {

enum class Executor { ASYNC, SYNC, BURST };

enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };

struct TestConfig {
    Executor executor;
    MeasureTiming measureTiming;
    OutputType outputType;
};

}  // namespace

Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
@@ -194,31 +206,31 @@ static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
    return android::nn::ExecutionBurstController::create(preparedModel,
                                                         std::chrono::microseconds{0});
}
enum class Executor { ASYNC, SYNC, BURST };

void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           Executor executor, MeasureTiming measure, OutputType outputType) {
                           const TestConfig& testConfig) {
    // If output0 does not have size larger than one byte, we can not test with insufficient buffer.
    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
    if (testConfig.outputType == OutputType::INSUFFICIENT &&
        !isOutputSizeGreaterThanOne(testModel, 0)) {
        return;
    }

    Request request = createRequest(testModel);
    if (outputType == OutputType::INSUFFICIENT) {
    if (testConfig.outputType == OutputType::INSUFFICIENT) {
        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
    }

    ErrorStatus executionStatus;
    hidl_vec<OutputShape> outputShapes;
    Timing timing;
    switch (executor) {
    switch (testConfig.executor) {
        case Executor::ASYNC: {
            SCOPED_TRACE("asynchronous");

            // launch execution
            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
            Return<ErrorStatus> executionLaunchStatus =
                    ExecutePreparedModel(preparedModel, request, measure, executionCallback);
            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, executionCallback);
            ASSERT_TRUE(executionLaunchStatus.isOk());
            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

@@ -234,8 +246,8 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
            SCOPED_TRACE("synchronous");

            // execute
            Return<ErrorStatus> executionReturnStatus =
                    ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
                    preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
            ASSERT_TRUE(executionReturnStatus.isOk());
            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);

@@ -258,14 +270,14 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
            // execute burst
            int n;
            std::tie(n, outputShapes, timing, std::ignore) =
                    controller->compute(request, measure, keys);
                    controller->compute(request, testConfig.measureTiming, keys);
            executionStatus = nn::convertResultCodeToErrorStatus(n);

            break;
        }
    }

    if (outputType != OutputType::FULLY_SPECIFIED &&
    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
        executionStatus == ErrorStatus::GENERAL_FAILURE) {
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "execute model that it does not support.";
@@ -274,7 +286,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
                  << std::endl;
        GTEST_SKIP();
    }
    if (measure == MeasureTiming::NO) {
    if (testConfig.measureTiming == MeasureTiming::NO) {
        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
    } else {
@@ -283,7 +295,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
        }
    }

    switch (outputType) {
    switch (testConfig.outputType) {
        case OutputType::FULLY_SPECIFIED:
            // If the model output operands are fully specified, outputShapes must be
            // either empty, or have the same number of elements as the number of outputs.
@@ -321,44 +333,29 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo

void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                           bool testDynamicOutputShape) {
    std::initializer_list<OutputType> outputTypesList;
    std::initializer_list<MeasureTiming> measureTimingList;
    std::initializer_list<Executor> executorList;

    if (testDynamicOutputShape) {
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
                              OutputType::UNSPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
                              OutputType::INSUFFICIENT);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
                              OutputType::INSUFFICIENT);
        outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
        measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
        executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
    } else {
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
                              OutputType::FULLY_SPECIFIED);
        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
                              OutputType::FULLY_SPECIFIED);
        outputTypesList = {OutputType::FULLY_SPECIFIED};
        measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
        executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
    }

    for (const OutputType outputType : outputTypesList) {
        for (const MeasureTiming measureTiming : measureTimingList) {
            for (const Executor executor : executorList) {
                const TestConfig testConfig = {.executor = executor,
                                               .measureTiming = measureTiming,
                                               .outputType = outputType};
                EvaluatePreparedModel(preparedModel, testModel, testConfig);
            }
        }
    }
}

+9 −6
Original line number Diff line number Diff line
@@ -2375,15 +2375,17 @@ enum OperationType : int32_t {
     *       then clipping is disabled.
     *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
     *       this scalar must be of the type {@link OperandType::FLOAT32},
     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
     *       this scalar must be of type {@link OperandType::FLOAT16}.
     *       otherwise if all the input tensors have the type
     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
     *       of type {@link OperandType::FLOAT16}.
     * * 50: The clipping threshold for the output from the
     *       projection layer, such that values are bound within
     *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
     *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
     *       this scalar must be of the type {@link OperandType::FLOAT32},
     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
     *       this scalar must be of type {@link OperandType::FLOAT16}.
     *       otherwise if all the input tensors have the type
     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
     *       of type {@link OperandType::FLOAT16}.
     * * 51: merge_outputs
     *       An {@link OperandType::BOOL} scalar specifying if the outputs
     *       from forward and backward cells should be merged.
@@ -4034,6 +4036,7 @@ enum OperationType : int32_t {
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_INT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
     *
     * Supported tensor rank: from 1
     *
@@ -4044,14 +4047,14 @@ enum OperationType : int32_t {
     *      true) or input2 (if false).
     * * 1: An input tensor of the same shape as input0.
     * * 2: An input tensor of the same shape and type as input1.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM}
     *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
     *
     * Outputs:
     * * 0: A tensor of the same type and shape as input1 and input2.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
     *
     */
    SELECT = @1.2::OperationType:SELECT,

+14 −28
Original line number Diff line number Diff line
@@ -456,8 +456,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
    }

    // Execute and verify results.
    EvaluatePreparedModel(preparedModel, testModel,
                          /*testDynamicOutputShape=*/false);
    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}

TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
@@ -519,8 +518,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
    }

    // Execute and verify results.
    EvaluatePreparedModel(preparedModel, testModel,
                          /*testDynamicOutputShape=*/false);
    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}

TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
@@ -541,8 +539,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -566,8 +563,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -590,8 +586,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -615,8 +610,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -727,8 +721,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -752,8 +745,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -776,8 +768,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -801,8 +792,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -914,8 +904,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -937,8 +926,7 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
        EvaluatePreparedModel(preparedModel, testModel,
                              /*testDynamicOutputShape=*/false);
        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
        ErrorStatus status;
@@ -1082,8 +1070,7 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
                ASSERT_EQ(preparedModel, nullptr);
            } else {
                ASSERT_NE(preparedModel, nullptr);
                EvaluatePreparedModel(preparedModel, testModelAdd,
                                      /*testDynamicOutputShape=*/false);
                EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
            }
        }
    }
@@ -1144,8 +1131,7 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
                ASSERT_EQ(preparedModel, nullptr);
            } else {
                ASSERT_NE(preparedModel, nullptr);
                EvaluatePreparedModel(preparedModel, testModelAdd,
                                      /*testDynamicOutputShape=*/false);
                EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
            }
        }
    }
Loading