Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9148ddf3 authored by Slava Shklyaev's avatar Slava Shklyaev Committed by android-build-merger
Browse files

Ensure that drivers return an error if STRIDED_SLICE parameters are invalid...

Ensure that drivers return an error if STRIDED_SLICE parameters are invalid am: 95a59789 am: 87dff647
am: a29dd239

Change-Id: I0fee3a90355e97f3387290453c8a45fbb6abc8f1
parents badb2128 a29dd239
Loading
Loading
Loading
Loading
+15 −0
Original line number Diff line number Diff line
@@ -44,6 +44,21 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_2_vts_tests.cpp"

// Generated from spec/strided_slice_invalid_output_dims.mod.py.
// TODO(b/132155416): Make this part of all_generated_V1_2_vts_tests.cpp.
namespace strided_slice_invalid_output_dims {
#include "generated/strided_slice_invalid_output_dims.example.cpp"
#include "generated/strided_slice_invalid_output_dims.model.cpp"
}  // namespace strided_slice_invalid_output_dims

// TODO(b/132155416): Make this part of all_generated_V1_2_vts_tests.cpp.
TEST_F(ValidationTest, strided_slice_invalid_output_dims) {
    // Build the model and requests from the generated spec, then verify that
    // the driver rejects execution rather than silently accepting the
    // inconsistent STRIDED_SLICE output dimensions.
    namespace spec = strided_slice_invalid_output_dims;
    const Model model = spec::createTestModel();
    validateFailure(model, createRequests(spec::get_examples()));
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_2
+16 −0
Original line number Diff line number Diff line
@@ -274,6 +274,22 @@ void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
    }
}

void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
                                            const std::vector<Request>& requests) {
    for (const Request& request : requests) {
        SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
        Return<void> executeStatus = preparedModel->executeSynchronously(
                request, MeasureTiming::NO,
                [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                   const Timing& timing) {
                    ASSERT_NE(ErrorStatus::NONE, error);
                    EXPECT_EQ(outputShapes.size(), 0);
                    EXPECT_TRUE(badTiming(timing));
                });
        ASSERT_TRUE(executeStatus.isOk());
    }
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_2
+14 −0
Original line number Diff line number Diff line
@@ -140,6 +140,20 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<Re
    validateBurst(preparedModel, requests);
}

// End-to-end check for a deliberately invalid model/request pair: the model is
// run through the usual model-validation pass, and if the driver nevertheless
// prepares it, every execution request must fail.
void ValidationTest::validateFailure(const Model& model, const std::vector<Request>& requests) {
    // TODO: Should this always succeed?
    //       What if the invalid input is part of the model (i.e., a parameter).
    validateModel(model);

    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    // A driver may legitimately refuse to prepare the model; only exercise the
    // execution path when preparation produced a model object.
    if (preparedModel != nullptr) {
        validateRequestFailure(preparedModel, requests);
    }
}

sp<IPreparedModel> getPreparedModel_1_2(
    const sp<V1_2::implementation::PreparedModelCallback>& callback) {
    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+3 −0
Original line number Diff line number Diff line
@@ -73,11 +73,14 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
// Test fixture for NNAPI validation tests. Public-facing entry points take a
// model plus a set of requests; the private helpers exercise the individual
// validation paths (model, request, request-failure, burst).
class ValidationTest : public NeuralnetworksHidlTest {
   protected:
     // Runs the full validation suite (model, requests, burst) on the inputs.
     void validateEverything(const Model& model, const std::vector<Request>& requests);
     // Validates the model, then — if the driver prepares it anyway — checks
     // that every request fails to execute (used for intentionally invalid specs).
     void validateFailure(const Model& model, const std::vector<Request>& requests);

   private:
     void validateModel(const Model& model);
     void validateRequests(const sp<IPreparedModel>& preparedModel,
                           const std::vector<Request>& requests);
     // Asserts that every request is rejected by the prepared model.
     void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
                                 const std::vector<Request>& requests);
     void validateBurst(const sp<IPreparedModel>& preparedModel,
                        const std::vector<Request>& requests);
};
+116 −0
Original line number Diff line number Diff line
// clang-format off
// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
// Returns the example set for the strided_slice_invalid_output_dims test: one
// example whose declared output dimensions are inconsistent with the
// STRIDED_SLICE parameters, so drivers are expected to reject it.
// NOTE(review): generated code — removed the stray ';' after the function body
// (flagged by -Wextra-semi); ideally the generator should be fixed upstream.
std::vector<MixedTypedExample>& get_examples() {
static std::vector<MixedTypedExample> examples = {
// Begin of an example
{
.operands = {
//Input(s)
{ // See tools/test_generator/include/TestHarness.h:MixedTyped
  // int -> Dimensions map
  .operandDimensions = {{0, {2, 3}}},
  // int -> FLOAT32 map
  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
  // int -> INT32 map
  .int32Operands = {},
  // int -> QUANT8_ASYMM map
  .quant8AsymmOperands = {},
  // int -> QUANT16_SYMM map
  .quant16SymmOperands = {},
  // int -> FLOAT16 map
  .float16Operands = {},
  // int -> BOOL8 map
  .bool8Operands = {},
  // int -> QUANT8_SYMM_PER_CHANNEL map
  .quant8ChannelOperands = {},
  // int -> QUANT16_ASYMM map
  .quant16AsymmOperands = {},
  // int -> QUANT8_SYMM map
  .quant8SymmOperands = {},
},
//Output(s)
{ // See tools/test_generator/include/TestHarness.h:MixedTyped
  // int -> Dimensions map
  .operandDimensions = {{0, {3}}},
  // int -> FLOAT32 map
  .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
  // int -> INT32 map
  .int32Operands = {},
  // int -> QUANT8_ASYMM map
  .quant8AsymmOperands = {},
  // int -> QUANT16_SYMM map
  .quant16SymmOperands = {},
  // int -> FLOAT16 map
  .float16Operands = {},
  // int -> BOOL8 map
  .bool8Operands = {},
  // int -> QUANT8_SYMM_PER_CHANNEL map
  .quant8ChannelOperands = {},
  // int -> QUANT16_ASYMM map
  .quant16AsymmOperands = {},
  // int -> QUANT8_SYMM map
  .quant8SymmOperands = {},
}
},
}, // End of an example
};
return examples;
}

// Dynamic-output-shape variant of the example set above; same invalid
// STRIDED_SLICE output dimensions, expected to be rejected by drivers.
// NOTE(review): generated code — removed the stray ';' after the function body
// (flagged by -Wextra-semi); ideally the generator should be fixed upstream.
std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
// Begin of an example
{
.operands = {
//Input(s)
{ // See tools/test_generator/include/TestHarness.h:MixedTyped
  // int -> Dimensions map
  .operandDimensions = {{0, {2, 3}}},
  // int -> FLOAT32 map
  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
  // int -> INT32 map
  .int32Operands = {},
  // int -> QUANT8_ASYMM map
  .quant8AsymmOperands = {},
  // int -> QUANT16_SYMM map
  .quant16SymmOperands = {},
  // int -> FLOAT16 map
  .float16Operands = {},
  // int -> BOOL8 map
  .bool8Operands = {},
  // int -> QUANT8_SYMM_PER_CHANNEL map
  .quant8ChannelOperands = {},
  // int -> QUANT16_ASYMM map
  .quant16AsymmOperands = {},
  // int -> QUANT8_SYMM map
  .quant8SymmOperands = {},
},
//Output(s)
{ // See tools/test_generator/include/TestHarness.h:MixedTyped
  // int -> Dimensions map
  .operandDimensions = {{0, {3}}},
  // int -> FLOAT32 map
  .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
  // int -> INT32 map
  .int32Operands = {},
  // int -> QUANT8_ASYMM map
  .quant8AsymmOperands = {},
  // int -> QUANT16_SYMM map
  .quant16SymmOperands = {},
  // int -> FLOAT16 map
  .float16Operands = {},
  // int -> BOOL8 map
  .bool8Operands = {},
  // int -> QUANT8_SYMM_PER_CHANNEL map
  .quant8ChannelOperands = {},
  // int -> QUANT16_ASYMM map
  .quant16AsymmOperands = {},
  // int -> QUANT8_SYMM map
  .quant8SymmOperands = {},
}
},
}, // End of an example
};
return examples_dynamic_output_shape;
}
Loading