From ae643ae705144d1eef60671f19d2999218e37b61 Mon Sep 17 00:00:00 2001
From: Lev Proleev
Date: Thu, 5 Dec 2019 16:57:30 +0000
Subject: [PATCH] Add TENSOR_QUANT8_ASYMM_SIGNED support for DEQUANTIZE

Add TENSOR_QUANT8_ASYMM_SIGNED to the list of exceptions when mutating
DEQUANTIZE for validation.

Bug: 143934768
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I1b3b0a362d3949d4e31708388100d4794846ca3a
Merged-In: I1b3b0a362d3949d4e31708388100d4794846ca3a
(cherry picked from commit 2bd0b3339cc278ed283dc10aa0daf438b5540ad9)
---
 neuralnetworks/1.3/vts/functional/ValidateModel.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 6fd54071f9..65880b7cef 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -323,8 +323,8 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
     // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
     // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
     // - DEQUANTIZE input can be any of
-    // TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can
-    // be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+    // TENSOR_(QUANT8_ASYMM|QUANT8_ASYMM_SIGNED|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL),
+    // output can be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
     // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32
     // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
     // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
@@ -365,6 +365,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
         case OperationType::DEQUANTIZE: {
             if (operand == operation.inputs[0] &&
                 (type == OperandType::TENSOR_QUANT8_ASYMM ||
+                 type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                  type == OperandType::TENSOR_QUANT8_SYMM ||
                  type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
                 return true;
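
Note for readers unfamiliar with this test: mutateOperationOperandTypeSkip() is
consulted by the negative validation pass, which rewrites each operand's type in
turn and expects the service to reject the mutated model; returning true skips
(operation, operand, type) combinations that remain valid, which is why
DEQUANTIZE with a TENSOR_QUANT8_ASYMM_SIGNED input must now be skipped. The
following is a minimal standalone sketch of that skip check, not the actual VTS
harness; the enum and struct definitions are simplified stand-ins for the
generated V1_3 HAL types, and isDequantizeMutationSkipped is a hypothetical
name for illustration.

#include <cstddef>
#include <vector>

// Stand-ins for the NNAPI 1.3 HAL enums; the real definitions live in the
// generated android::hardware::neuralnetworks::V1_3 types.
enum class OperandType {
    TENSOR_FLOAT16,
    TENSOR_FLOAT32,
    TENSOR_QUANT8_ASYMM,
    TENSOR_QUANT8_ASYMM_SIGNED,
    TENSOR_QUANT8_SYMM,
    TENSOR_QUANT8_SYMM_PER_CHANNEL,
};

enum class OperationType {
    DEQUANTIZE,
    // other operations elided
};

struct Operation {
    OperationType type;
    std::vector<size_t> inputs;
};

// Returns true when mutating `operand` to `type` still yields a valid model,
// so the negative test must not expect the driver to reject it. With this
// patch, DEQUANTIZE's input may be any of the four quantized types below.
static bool isDequantizeMutationSkipped(size_t operand, OperandType type,
                                        const Operation& operation) {
    return operation.type == OperationType::DEQUANTIZE &&
           operand == operation.inputs[0] &&
           (type == OperandType::TENSOR_QUANT8_ASYMM ||
            type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||  // added by this patch
            type == OperandType::TENSOR_QUANT8_SYMM ||
            type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL);
}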