diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index a1a1bad577..f196792f8b 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -49,12 +49,16 @@ enum OperandType : @1.0::OperandType {
      * represents false; any other value represents true.
      */
     TENSOR_BOOL8 = 9,
+    /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
+     * OperandTypeRange::OPERAND_FUNDAMENTAL_MAX.
+     */
+    /* ADDING A NEW OEM TYPE REQUIRES UPDATING THE VALUE OF
+     * OperandTypeRange::OPERAND_OEM_MAX.
+     */
 };
 
 /**
- * The range of values in the OperandType enum.
- *
- * THE MAX VALUES MUST BE UPDATED WHEN ADDING NEW TYPES to the OperandType enum.
+ * The range of operand values in the OperandType enum.
  */
 enum OperandTypeRange : uint32_t {
     OPERAND_FUNDAMENTAL_MIN = 0,
@@ -122,16 +126,20 @@ enum OperationType : @1.1::OperationType {
     ROTATED_BBOX_TRANSFORM = 87,
     ABS = 88,
     ROI_POOLING = 89,
+    /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+     * OperationTypeRange::OPERATION_FUNDAMENTAL_MAX.
+     */
+    /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+     * OperationTypeRange::OPERATION_OEM_MAX.
+     */
 };
 
 /**
  * The range of values in the OperationType enum.
- *
- * THE MAX VALUES MUST BE UPDATED WHEN ADDING NEW TYPES to the OperationType enum.
  */
 enum OperationTypeRange : uint32_t {
     OPERATION_FUNDAMENTAL_MIN = 0,
-    OPERATION_FUNDAMENTAL_MAX = 87,
+    OPERATION_FUNDAMENTAL_MAX = 89,
     OPERATION_OEM_MIN = 10000,
     OPERATION_OEM_MAX = 10000,
 };
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 802499287a..7a9edf42a1 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -300,8 +300,9 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
     for (const Operation& operation : model.operations) {
         // Skip mutateOperationOperandTypeTest for the following operations.
         // - LSH_PROJECTION's second argument is allowed to have any type.
-        // - ARGMIN and ARGMAX's first argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM).
-        // - CAST's argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM).
+        // - ARGMIN and ARGMAX's first argument can be any of
+        //   TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
+        // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
         switch (operation.type) {
             case OperationType::LSH_PROJECTION: {
                 if (operand == operation.inputs[1]) {
@@ -311,8 +312,8 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
         case OperationType::CAST:
         case OperationType::ARGMAX:
         case OperationType::ARGMIN: {
-            if (type == OperandType::TENSOR_FLOAT32 || type == OperandType::TENSOR_INT32 ||
-                type == OperandType::TENSOR_QUANT8_ASYMM) {
+            if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
+                type == OperandType::TENSOR_INT32 || type == OperandType::TENSOR_QUANT8_ASYMM) {
                 return true;
             }
         } break;