From 68c8c174678f0881e4e031711397b6451479e36d Mon Sep 17 00:00:00 2001
From: Lev Proleev
Date: Mon, 1 Oct 2018 11:18:31 +0100
Subject: [PATCH] Add TENSOR_QUANT16_ASYMM to operand types

Add new OperandType::TENSOR_QUANT16_ASYMM.
Add VTS validation for the new type.

Bug: 113561892
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
Merged-In: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
(cherry picked from commit 5d7c99527e7bad07d6ab5413bcfd14cec5df5f31)
---
 neuralnetworks/1.2/types.hal                        | 12 ++++++++++++
 neuralnetworks/1.2/vts/functional/ValidateModel.cpp | 12 ++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 95e97c4b6b..0aa7cc204b 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -30,6 +30,18 @@ enum OperandType : @1.0::OperandType {
      * represents false; any other value represents true.
      */
     BOOL = 6,
+    /**
+     * A tensor of 16 bit signed integers that represent real numbers.
+     *
+     * Attached to this tensor are two numbers that are used to convert the 16
+     * bit integer to the real value and vice versa. These two numbers are:
+     * - scale: a 32 bit floating point value greater than zero.
+     * - zeroPoint: a 32 bit integer, in range [-32768, 32767].
+     *
+     * The formula is:
+     * realValue = (integerValue - zeroPoint) * scale.
+     */
+    TENSOR_QUANT16_ASYMM = 7,
 };
 
 /**
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 5a8b8c59c0..9af6258917 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -129,10 +129,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 
 static const int32_t invalidOperandTypes[] = {
-    static_cast<int32_t>(OperandType::FLOAT32) - 1,         // lower bound fundamental
-    static_cast<int32_t>(OperandType::BOOL) + 1,            // upper bound fundamental
-    static_cast<int32_t>(OperandType::OEM) - 1,             // lower bound OEM
-    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM) + 1, // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
 };
 
 static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
@@ -160,6 +160,7 @@ static uint32_t getInvalidRank(OperandType type) {
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0;
         default:
             return 0;
@@ -190,6 +191,7 @@ static float getInvalidScale(OperandType type) {
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0.0f;
         default:
             return 0.0f;
@@ -219,6 +221,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
         case OperandType::TENSOR_INT32:
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return {-1, 256};
         default:
             return {};
@@ -271,6 +274,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             newOperand.dimensions =
                 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
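
For reference, the conversion described in the new TENSOR_QUANT16_ASYMM comment in types.hal can be sketched as two small standalone helpers. This is illustrative only and not part of the patch; the names quantize16 and dequantize16 are hypothetical, and the sketch assumes the 16 bit signed representation and the zeroPoint range [-32768, 32767] stated in that comment.

    // Illustrative sketch (assumption, not from the patched sources):
    // realValue = (integerValue - zeroPoint) * scale, per the types.hal comment.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical helper: quantize a real value to 16 bit asymmetric form,
    // clamping to the representable range of int16_t.
    inline int16_t quantize16(float realValue, float scale, int32_t zeroPoint) {
        const int32_t q = static_cast<int32_t>(std::round(realValue / scale)) + zeroPoint;
        return static_cast<int16_t>(std::clamp<int32_t>(q, -32768, 32767));
    }

    // Hypothetical helper: recover the real value from its quantized form.
    inline float dequantize16(int16_t integerValue, float scale, int32_t zeroPoint) {
        return static_cast<float>(static_cast<int32_t>(integerValue) - zeroPoint) * scale;
    }

For example, with scale = 0.5f and zeroPoint = 0, quantize16(1.0f, 0.5f, 0) yields 2 and dequantize16(2, 0.5f, 0) returns 1.0f, round-tripping the real value exactly.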