diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal index a1a1bad577..4ab0c24ed8 100644 --- a/neuralnetworks/1.2/types.hal +++ b/neuralnetworks/1.2/types.hal @@ -40,7 +40,7 @@ enum OperandType : @1.0::OperandType { * scale is a 32 bit floating point with value greater then zero. */ TENSOR_QUANT16_SYMM = 7, - /** A tensor of 16 bit floating point values. */ + /** A tensor of IEEE 754 16 bit floating point values. */ TENSOR_FLOAT16 = 8, /** * A tensor of 8 bit boolean values. @@ -49,16 +49,22 @@ enum OperandType : @1.0::OperandType { * represents false; any other value represents true. */ TENSOR_BOOL8 = 9, + /** An IEEE 754 16 bit floating point scalar value. */ + FLOAT16 = 10, + /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF + * OperandTypeRange::OPERAND_FUNDAMENTAL_MAX. + */ + /* ADDING A NEW OEM TYPE REQUIRES UPDATING THE VALUE OF + * OperandTypeRange::OPERAND_OEM_MAX. + */ }; /** - * The range of values in the OperandType enum. - * - * THE MAX VALUES MUST BE UPDATED WHEN ADDING NEW TYPES to the OperandType enum. + * The range of operand values in the OperandType enum. */ enum OperandTypeRange : uint32_t { OPERAND_FUNDAMENTAL_MIN = 0, - OPERAND_FUNDAMENTAL_MAX = 9, + OPERAND_FUNDAMENTAL_MAX = 10, OPERAND_OEM_MIN = 10000, OPERAND_OEM_MAX = 10001, }; @@ -122,16 +128,22 @@ enum OperationType : @1.1::OperationType { ROTATED_BBOX_TRANSFORM = 87, ABS = 88, ROI_POOLING = 89, + EQUAL = 90, + NOT_EQUAL = 91, + /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF + * OperationTypeRange::OPERATION_FUNDAMENTAL_MAX. + */ + /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF + * OperationTypeRange::OPERATION_OEM_MAX. + */ }; /** * The range of values in the OperationType enum. - * - * THE MAX VALUES MUST BE UPDATED WHEN ADDING NEW TYPES to the OperationType enum. 
*/ enum OperationTypeRange : uint32_t { OPERATION_FUNDAMENTAL_MIN = 0, - OPERATION_FUNDAMENTAL_MAX = 87, + OPERATION_FUNDAMENTAL_MAX = 91, OPERATION_OEM_MIN = 10000, OPERATION_OEM_MAX = 10000, }; diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp index 802499287a..8f6d54f7f9 100644 --- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp @@ -151,6 +151,7 @@ static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) static uint32_t getInvalidRank(OperandType type) { switch (type) { + case OperandType::FLOAT16: case OperandType::FLOAT32: case OperandType::INT32: case OperandType::UINT32: @@ -182,6 +183,7 @@ static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) static float getInvalidScale(OperandType type) { switch (type) { + case OperandType::FLOAT16: case OperandType::FLOAT32: case OperandType::INT32: case OperandType::UINT32: @@ -214,6 +216,7 @@ static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model static std::vector<int32_t> getInvalidZeroPoints(OperandType type) { switch (type) { + case OperandType::FLOAT16: case OperandType::FLOAT32: case OperandType::INT32: case OperandType::UINT32: @@ -257,6 +260,7 @@ static void mutateOperand(Operand* operand, OperandType type) { Operand newOperand = *operand; newOperand.type = type; switch (type) { + case OperandType::FLOAT16: case OperandType::FLOAT32: case OperandType::INT32: case OperandType::UINT32: @@ -300,8 +304,9 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con for (const Operation& operation : model.operations) { // Skip mutateOperationOperandTypeTest for the following operations. // - LSH_PROJECTION's second argument is allowed to have any type. - // - ARGMIN and ARGMAX's first argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM). - // - CAST's argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM). 
+ // - ARGMIN and ARGMAX's first argument can be any of + // TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM). + // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM). switch (operation.type) { case OperationType::LSH_PROJECTION: { if (operand == operation.inputs[1]) { @@ -311,8 +316,8 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con case OperationType::CAST: case OperationType::ARGMAX: case OperationType::ARGMIN: { - if (type == OperandType::TENSOR_FLOAT32 || type == OperandType::TENSOR_INT32 || - type == OperandType::TENSOR_QUANT8_ASYMM) { + if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 || + type == OperandType::TENSOR_INT32 || type == OperandType::TENSOR_QUANT8_ASYMM) { return true; } } break;