Merge "Add FILL and RANK ops"

This commit is contained in:
Lev Proleev
2020-01-22 21:42:58 +00:00
committed by Android (Google) Code Review
4 changed files with 70 additions and 4 deletions

View File

@@ -655,7 +655,7 @@ d1f382d14e1384b907d5bb5780df7f01934650d556fedbed2f15a90773c657d6 android.hardwar
4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
7d23020248194abbee8091cc624f39a5a6d7ccba338b172d5d2d3df0cceffbee android.hardware.neuralnetworks@1.3::IPreparedModel
0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
26c643aedf4e28b8d82e517d9cd70601b37f881e1ea94f09808d9e233517e400 android.hardware.neuralnetworks@1.3::types
5f1a4e0c29fc686ed476f9f04eed35e4405d21288cb2746b978d6891de5cc37d android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
c67aaf26a7a40d14ea61e70e20afacbd0bb906df1704d585ac8599fbb69dd44b android.hardware.wifi.hostapd@1.2::IHostapd
11f6448d15336361180391c8ebcdfd2d7cf77b3782d577e594d583aadc9c2877 android.hardware.wifi.hostapd@1.2::types

View File

@@ -5036,6 +5036,56 @@ enum OperationType : int32_t {
*/
HARD_SWISH = 99,
/**
* Creates a tensor filled with a scalar value.
*
* Supported output tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
*
* Inputs:
* * 0: A 1-D tensor, specifying the desired output tensor shape.
     * * 1: A scalar, specifying the value to fill the output tensor with.
* For output tensor of {@link OperandType::TENSOR_FLOAT16},
* the scalar must be of {@link OperandType::FLOAT16}.
* For output tensor of {@link OperandType::TENSOR_FLOAT32},
* the scalar must be of {@link OperandType::FLOAT32}.
* For output tensor of {@link OperandType::TENSOR_INT32},
* the scalar must be of {@link OperandType::INT32}.
*
* Outputs:
* * 0: The output tensor.
*/
FILL = 100,
/**
* Returns the rank of a tensor.
*
* The rank of a tensor is the number of dimensions in it. Also known as
* "order", "degree", "ndims".
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
* * {@link OperandType::TENSOR_QUANT16_SYMM}
* * {@link OperandType::TENSOR_BOOL8}
* * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
* * {@link OperandType::TENSOR_QUANT16_ASYMM}
* * {@link OperandType::TENSOR_QUANT8_SYMM}
* * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
*
* Inputs:
* * 0: The input tensor.
*
* Outputs:
* * 0: A scalar of {@link OperandType::INT32}, specifying the rank
* of the input tensor.
*/
RANK = 101,
/**
* DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
* OEM operation and data types.
@@ -5058,7 +5108,7 @@ enum OperationType : int32_t {
enum OperationTypeRange : uint32_t {
BASE_MIN = 0,
FUNDAMENTAL_MIN = 0,
FUNDAMENTAL_MAX = 99,
FUNDAMENTAL_MAX = 101,
OEM_MIN = 10000,
OEM_MAX = 10000,
BASE_MAX = 0xFFFF,

View File

@@ -793,8 +793,9 @@ TEST_P(QuantizationCouplingTest, Test) {
INSTANTIATE_GENERATED_TEST(GeneratedTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
return !testModel.expectFailure && !testModel.hasScalarOutputs();
});
INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });

View File

@@ -337,6 +337,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
// - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
// - AXIS_ALIGNED_BBOX_TRANSFORM bounding boxes (arg 1) can be of
// TENSOR_QUANT8_ASYMM or TENSOR_QUANT8_ASYMM_SIGNED.
// - RANK's input can have any TENSOR_* type.
switch (operation.type) {
case OperationType::LSH_PROJECTION: {
if (operand == operation.inputs[1]) {
@@ -399,6 +400,20 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
return true;
}
} break;
case OperationType::RANK: {
if (operand == operation.inputs[0] &&
(type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
type == OperandType::TENSOR_INT32 ||
type == OperandType::TENSOR_QUANT8_ASYMM ||
type == OperandType::TENSOR_QUANT16_SYMM ||
type == OperandType::TENSOR_BOOL8 ||
type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
type == OperandType::TENSOR_QUANT16_ASYMM ||
type == OperandType::TENSOR_QUANT8_SYMM ||
type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) {
return true;
}
} break;
default:
break;
}