neuralnetworks/1.2/vts/functional/ValidateModel.cpp  +7 −0

@@ -326,6 +326,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
         //   TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
         // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
         // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+        // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
         switch (operation.type) {
             case OperationType::LSH_PROJECTION: {
                 if (operand == operation.inputs[1]) {
@@ -345,6 +346,12 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
                     return true;
                 }
             } break;
+            case OperationType::CONV_2D: {
+                if (operand == 1 && (type == OperandType::TENSOR_QUANT8_ASYMM ||
+                                     type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
+                    return true;
+                }
+            } break;
             default:
                 break;
         }
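For context, here is a minimal, hypothetical sketch (not the VTS source) of how a skip predicate like the one patched above is typically consulted: the mutation test rewrites one operand's type and expects model validation to fail, so any (operation, operand, type) combination that is still legal after the rewrite has to be skipped. In the CONV_2D case, the filter operand (arg 1) is valid as either TENSOR_QUANT8_ASYMM or TENSOR_QUANT8_SYMM_PER_CHANNEL, so mutating between those two types does not produce an invalid model. The function and enum definitions below (e.g. skipOperandTypeMutation) are invented for illustration and only mirror the logic shown in the diff.

// Hypothetical, self-contained sketch; only the CONV_2D skip logic is taken
// from the diff above, everything else is illustrative scaffolding.
#include <cstddef>
#include <iostream>

enum class OperandType {
    TENSOR_FLOAT32,
    TENSOR_QUANT8_ASYMM,
    TENSOR_QUANT8_SYMM_PER_CHANNEL,
};

enum class OperationType {
    CONV_2D,
    LSH_PROJECTION,
};

struct Operation {
    OperationType type;
};

// Mirrors the added case: a CONV_2D filter (arg 1) may be either QUANT8_ASYMM
// or QUANT8_SYMM_PER_CHANNEL, so mutating between those two types still yields
// a valid model and must not be treated as an expected validation failure.
static bool skipOperandTypeMutation(const Operation& operation, size_t operand,
                                    OperandType mutatedType) {
    switch (operation.type) {
        case OperationType::CONV_2D: {
            if (operand == 1 && (mutatedType == OperandType::TENSOR_QUANT8_ASYMM ||
                                 mutatedType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
                return true;
            }
        } break;
        default:
            break;
    }
    return false;
}

int main() {
    const Operation conv{OperationType::CONV_2D};
    // Both quantized filter types remain valid, so that mutation is skipped;
    // mutating the filter to a float type is still exercised by the test.
    std::cout << skipOperandTypeMutation(conv, 1, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)
              << skipOperandTypeMutation(conv, 1, OperandType::TENSOR_FLOAT32) << "\n";  // prints "10"
}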