Add TENSOR_QUANT16_ASYMM to operand types

Add new OperandType::TENSOR_QUANT16_ASYMM.
Add VTS validation for the new type.

Bug: 113561892
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
Merged-In: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
(cherry picked from commit 5d7c99527e)
Lev Proleev, 2018-10-01 11:18:31 +01:00 (committed by Przemyslaw Szczepaniak)
parent 08662c6f66
commit 68c8c17467
2 changed files with 20 additions and 4 deletions


@@ -30,6 +30,18 @@ enum OperandType : @1.0::OperandType {
      * represents false; any other value represents true.
      */
     BOOL = 6,
+    /**
+     * A tensor of 16 bit signed integers that represent real numbers.
+     *
+     * Attached to this tensor are two numbers that are used to convert the 16
+     * bit integer to the real value and vice versa. These two numbers are:
+     * - scale: a 32 bit floating point value greater than zero.
+     * - zeroPoint: a 32 bit integer, in range [-32768, 32767].
+     *
+     * The formula is:
+     * realValue = (integerValue - zeroPoint) * scale.
+     */
+    TENSOR_QUANT16_ASYMM = 7,
 };
 /**

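The new doc comment pins down the asymmetric quantization scheme: realValue = (integerValue - zeroPoint) * scale, with signed 16-bit storage and zeroPoint in [-32768, 32767]. As an illustration only (not part of this change; the helper names are made up), a minimal C++ sketch of that mapping:

// Illustrative only (not code from this commit): hypothetical helpers showing how a
// TENSOR_QUANT16_ASYMM value, as documented above, maps between its stored 16-bit
// integer and the real number it represents.
#include <algorithm>
#include <cmath>
#include <cstdint>

// realValue = (integerValue - zeroPoint) * scale
float dequantizeQuant16Asymm(int16_t integerValue, float scale, int32_t zeroPoint) {
    return (static_cast<int32_t>(integerValue) - zeroPoint) * scale;
}

// Inverse mapping, rounded and clamped to the representable 16-bit range.
int16_t quantizeQuant16Asymm(float realValue, float scale, int32_t zeroPoint) {
    int32_t q = static_cast<int32_t>(std::round(realValue / scale)) + zeroPoint;
    q = std::min(32767, std::max(-32768, q));
    return static_cast<int16_t>(q);
}

With scale = 0.5f and zeroPoint = 0, for instance, a stored value of 100 dequantizes to 50.0f.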

@@ -129,10 +129,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 static const int32_t invalidOperandTypes[] = {
-    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
-    static_cast<int32_t>(OperandType::BOOL) + 1,                  // upper bound fundamental
-    static_cast<int32_t>(OperandType::OEM) - 1,                   // lower bound OEM
-    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,       // upper bound OEM
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM) + 1, // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
 };
 static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
@@ -160,6 +160,7 @@ static uint32_t getInvalidRank(OperandType type) {
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0;
         default:
             return 0;
@@ -190,6 +191,7 @@ static float getInvalidScale(OperandType type) {
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0.0f;
         default:
             return 0.0f;
@@ -219,6 +221,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
         case OperandType::TENSOR_INT32:
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return {-1, 256};
         default:
             return {};
@@ -271,6 +274,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             newOperand.dimensions =
                 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
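
The mutation helpers in this file drive the negative VTS cases: for each operand they substitute a rank, scale, or zero point that is illegal for the operand's type and expect model validation to reject the result; the hunks above simply fold TENSOR_QUANT16_ASYMM into the same code paths as TENSOR_QUANT8_ASYMM. As a standalone, hedged sketch (not code from this commit; the function name is illustrative), the parameter constraints documented for the new type reduce to:

#include <cstdint>

// Illustrative only: the constraints documented for TENSOR_QUANT16_ASYMM in this
// change, namely a strictly positive scale and a zero point within the signed
// 16-bit range.
bool isValidQuant16AsymmParams(float scale, int32_t zeroPoint) {
    const bool scaleOk = scale > 0.0f;
    const bool zeroPointOk = zeroPoint >= -32768 && zeroPoint <= 32767;
    return scaleOk && zeroPointOk;
}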