neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +4 −6

@@ -45,13 +45,10 @@ using ::test_helper::bool8;
 using ::test_helper::compare;
 using ::test_helper::expectMultinomialDistributionWithinTolerance;
 using ::test_helper::filter;
-using ::test_helper::Float32Operands;
 using ::test_helper::for_all;
 using ::test_helper::for_each;
-using ::test_helper::Int32Operands;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
-using ::test_helper::Quant8Operands;
 using ::test_helper::resize_accordingly;

 template <typename T>
@@ -67,12 +64,13 @@ void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArg
 void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
     copy_back_(&dst->float32Operands, ra, src);
     copy_back_(&dst->int32Operands, ra, src);
-    copy_back_(&dst->quant8Operands, ra, src);
-    copy_back_(&dst->quant16Operands, ra, src);
+    copy_back_(&dst->quant8AsymmOperands, ra, src);
+    copy_back_(&dst->quant16SymmOperands, ra, src);
     copy_back_(&dst->float16Operands, ra, src);
     copy_back_(&dst->bool8Operands, ra, src);
     copy_back_(&dst->quant8ChannelOperands, ra, src);
-    static_assert(7 == MixedTyped::kNumTypes,
+    copy_back_(&dst->quant16AsymmOperands, ra, src);
+    static_assert(8 == MixedTyped::kNumTypes,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
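
The noteworthy part of this change is the compile-time tripwire at the end of copy_back: the function must fan out over every typed map in MixedTyped, so the struct carries a kNumTypes count and the static_assert is bumped from 7 to 8 in lockstep with the new quant16AsymmOperands field. A minimal sketch of the pattern, using simplified stand-in types rather than the real test_helper definitions:

#include <cstdint>
#include <map>
#include <vector>

// Stand-in for test_helper::MixedTyped; the real struct has eight maps.
struct MixedTypedSketch {
    static constexpr int kNumTypes = 3;  // bump whenever a map is added below
    std::map<int, std::vector<float>> float32Operands;
    std::map<int, std::vector<int32_t>> int32Operands;
    std::map<int, std::vector<uint16_t>> quant16AsymmOperands;
};

// Placeholder for the typed copy helper (the real copy_back_ copies output
// bytes from the request's memory pool into the per-type map).
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* /*dst*/) {}

void copy_back(MixedTypedSketch* dst) {
    copy_back_(&dst->float32Operands);
    copy_back_(&dst->int32Operands);
    copy_back_(&dst->quant16AsymmOperands);
    // Adding a fourth map (and bumping kNumTypes) breaks the build here until
    // a matching copy_back_ call is added above.
    static_assert(3 == MixedTypedSketch::kNumTypes,
                  "Number of types changed, but copy_back wasn't updated");
}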
neuralnetworks/1.2/types.hal +13 −1

@@ -76,6 +76,18 @@ enum OperandType : @1.0::OperandType {
      * where C is an index in the Channel dimension.
      */
     TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
+    /**
+     * A tensor of 16 bit unsigned integers that represent real numbers.
+     *
+     * Attached to this tensor are two numbers that can be used to convert the
+     * 16 bit integer to the real value and vice versa. These two numbers are:
+     * - scale: a 32 bit floating point value greater than zero.
+     * - zeroPoint: a 32 bit integer, in range [0, 65535].
+     *
+     * The formula is:
+     * real_value = (integer_value - zeroPoint) * scale.
+     */
+    TENSOR_QUANT16_ASYMM = 12,
     /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
      * OperandTypeRange::OPERAND_FUNDAMENTAL_MAX.
      */
@@ -89,7 +101,7 @@ enum OperandType : @1.0::OperandType {
  */
 enum OperandTypeRange : uint32_t {
     OPERAND_FUNDAMENTAL_MIN = 0,
-    OPERAND_FUNDAMENTAL_MAX = 11,
+    OPERAND_FUNDAMENTAL_MAX = 12,
     OPERAND_OEM_MIN = 10000,
     OPERAND_OEM_MAX = 10001,
 };
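
The conversion the new comment block documents is the same affine scheme TENSOR_QUANT8_ASYMM uses, widened to 16 bits. A small sketch of both directions, taken directly from the formula above (the round-and-saturate policy in quantize16 is illustrative, not mandated by the spec text):

#include <algorithm>
#include <cmath>
#include <cstdint>

// real_value = (integer_value - zeroPoint) * scale, per the comment above.
float dequantize16(uint16_t q, float scale, int32_t zeroPoint) {
    return (static_cast<int32_t>(q) - zeroPoint) * scale;
}

// Inverse mapping: round to nearest, then clamp into the uint16 range.
uint16_t quantize16(float real, float scale, int32_t zeroPoint) {
    const int q = static_cast<int>(std::lround(real / scale)) + zeroPoint;
    return static_cast<uint16_t>(std::clamp(q, 0, 65535));
}

For example, with scale = 1.0f / 65535 and zeroPoint = 0, quantize16(0.5f, scale, 0) lands on 32768, and dequantize16 maps it back to roughly 0.50001.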
neuralnetworks/1.2/vts/functional/ValidateModel.cpp +5 −0

@@ -161,6 +161,7 @@ static uint32_t getInvalidRank(OperandType type) {
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
             return 0;
@@ -199,6 +200,7 @@ static float getInvalidScale(OperandType type) {
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             return 0.0f;
         default:
@@ -233,6 +235,8 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
+        case OperandType::TENSOR_QUANT16_ASYMM:
+            return {-1, 65536};
         case OperandType::TENSOR_QUANT16_SYMM:
             return {-32769, -1, 1, 32768};
         default:
@@ -288,6 +292,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             newOperand.dimensions =
                     operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
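
Each getInvalid* helper hands the validation test exactly the values a conforming driver must reject, so the new TENSOR_QUANT16_ASYMM cases probe the boundaries documented in types.hal: zero points of -1 and 65536 sit one step outside the legal [0, 65535] range. The check those two values exercise amounts to the following (the function name is hypothetical; the real validation lives in the framework and driver, not in this test file):

#include <cstdint>

// Reject any TENSOR_QUANT16_ASYMM zeroPoint outside the documented range.
bool isValidQuant16AsymmZeroPoint(int32_t zeroPoint) {
    return zeroPoint >= 0 && zeroPoint <= 65535;
}

The VTS test mutates an otherwise valid model with each invalid value in turn and expects model preparation to fail.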