Sync NNAPI Operand and Operation documentation fixes

This CL adds the typo fixes, bug fixes, and missing descriptions that were
added to the NNAPI but not to the NN HAL. It also adds a description of
implicit padding, which was present but not documented in 1.0.

Bug: 77541934
Bug: 75459529
Test: mma
Change-Id: I7baa9e515057c43e759849284b3bb0d420a71f1b
Michael Butler
2018-04-03 13:03:50 -07:00
parent eb684d5bed
commit 25f3ad328b
4 changed files with 705 additions and 408 deletions


@@ -241,11 +241,11 @@ a432d6d9200248dc2126827bcd6cdea31dd65eff39b939f64585d27d915a5857 android.hardwar
86ba9c03978b79a742e990420bc5ced0673d25a939f82572996bef92621e2014 android.hardware.cas@1.0::IMediaCasService
503da837d1a67cbdb7c08a033e927e5430ae1b159d98bf72c6336b4dcc5e76f5 android.hardware.cas.native@1.0::types
619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler
246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
93eb3757ceaf21590fa4cd1d4a7dfe3b3794af5396100a6d25630879352abce9 android.hardware.neuralnetworks@1.0::IDevice
f66f9a38541bf92001d3adcce678cd7e3da2262124befb460b1c9aea9492813b android.hardware.neuralnetworks@1.0::IExecutionCallback
953607822954435874f4b81686440a604e2a88cdd2d9164c6293f3d5772510d7 android.hardware.neuralnetworks@1.0::IPreparedModel
73e03573494ba96f0e711ab7f1956c5b2d54c3da690cd7ecf4d6d0f287447730 android.hardware.neuralnetworks@1.0::IPreparedModelCallback
246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal
c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback
@@ -259,7 +259,8 @@ fb92e2b40f8e9d494e8fd3b4ac18499a3216342e7cff160714c3bbf3660b6e79 android.hardwar
251594ea9b27447bfa005ebd806e58fb0ae4aad84a69938129c9800ec0c64eda android.hardware.gnss@1.0::IGnssMeasurementCallback
4e7169919d24fbe5573e5bcd683d0bd7abf553a4e6c34c41f9dfc1e12050db07 android.hardware.gnss@1.0::IGnssNavigationMessageCallback
5804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice
6721fc5b64d997f3eda15b762a0dd9f3fa414926219dbca58312972d565b4bee android.hardware.neuralnetworks@1.0::types
12e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback
702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types
d4840db8efabdf1e4b344fc981cd36e5fe81a39aff6e199f6d06c1c8da413efd android.hardware.radio@1.0::types
b280c4704dfcc548a9bf127b59b7c3578f460c50cce70a06b66fe0df8b27cff0 android.hardware.wifi@1.0::types
@@ -338,7 +339,7 @@ b8c7ed58aa8740361e63d0ce9e7c94227572a629f356958840b34809d2393a7c android.hardwar
4a2c0dc82780e6c90731725a103feab8ab6ecf85a64e049b9cbd2b2c61620fe1 android.hardware.media.bufferpool@1.0::IConnection
6aef1218e5949f867b0104752ac536c1b707222a403341720de90141df129e3e android.hardware.media.bufferpool@1.0::types
3e4d8e0085ebe8549efb8ad4b8b400a141a3fa3f47ae23696b3e05a1612eb003 android.hardware.neuralnetworks@1.1::IDevice
e808a6f61cd7b47887c599d8843e67a2dcbf4ec5aadd5d22fdce93020070ef1b android.hardware.neuralnetworks@1.1::types
50db076b03a6760557fc60ef433ba9dd2ff983cf3305eeb504b0fff3eaa604ff android.hardware.neuralnetworks@1.1::types
8d3d86da0bfa4bf070970d8303c659f67f35d670c287d45a3f542e4fedadd578 android.hardware.nfc@1.1::INfc
e85f566698d2a2c28100e264fcf2c691a066756ddf8dd341d009ff50cfe10614 android.hardware.nfc@1.1::INfcClientCallback
5e278fcaa3287d397d8eebe1c22aaa28150f5caae1cf9381cd6dc32cb37899c5 android.hardware.nfc@1.1::types


@@ -28,7 +28,7 @@ interface IExecutionCallback {
* ErrorStatus resulting from the execution. If the asynchronous task
* is not launched, notify must be invoked with the appropriate error.
*
* @return param Error status returned from launching the asynchronous task
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
* (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful

File diff suppressed because it is too large.


@@ -27,25 +27,24 @@ import @1.0::PerformanceInfo;
*/
enum OperationType : @1.0::OperationType {
/**
* BatchToSpace for N-D tensors.
* BatchToSpace for N-dimensional tensors.
*
* This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape
* This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape
* block_shape + [batch], interleaves these blocks back into the grid defined by the
* spatial dimensions [1, ..., M], to obtain a result with the same rank as the input.
* The spatial dimensions of this intermediate result are then optionally cropped
* according to the amount to crop to produce the output.
*
* This is the reverse of SpaceToBatch.
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor rank: up to 4
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 0: An n-D tensor, specifying the tensor to be reshaped
* 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
* input tensor. All values must be >= 1.
* 2: A 1-D Tensor of type TENSOR_INT32, the amount to crop for each spatial dimension of the
* input tensor. All values must be >= 0.
*
* Outputs:
* 0: A tensor of the same type as input0.
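
For reference only, a small numpy sketch of the BATCH_TO_SPACE_ND semantics described above, assuming a rank-4 NHWC input and two spatial dimensions (the helper name and layout are illustrative, not taken from the HAL):

    import numpy as np

    def batch_to_space_nd(x, block, crops):
        # x: [batch, H, W, C]; block: [bh, bw]; crops: [[top, bottom], [left, right]]
        b, h, w, c = x.shape
        bh, bw = block
        # Reshape the batch dimension into block_shape + [batch'].
        y = x.reshape(bh, bw, b // (bh * bw), h, w, c)
        # Interleave the blocks back into the spatial grid.
        y = y.transpose(2, 3, 0, 4, 1, 5).reshape(b // (bh * bw), h * bh, w * bw, c)
        # Optionally crop the spatial dimensions.
        return y[:, crops[0][0]:h * bh - crops[0][1],
                    crops[1][0]:w * bw - crops[1][1], :]

For example, with block = [2, 2] and no cropping, a [4, 1, 1, 1] input becomes a [1, 2, 2, 1] output.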
@@ -53,9 +52,9 @@ enum OperationType : @1.0::OperationType {
BATCH_TO_SPACE_ND = 29,
/**
* Divides the second tensor from the first tensor, element-wise.
* Element-wise division of two tensors.
*
* Takes two input tensors of identical OperandType and compatible dimensions. The output
* Takes two input tensors of identical type and compatible dimensions. The output
* is the result of dividing the first input tensor by the second, optionally
* modified by an activation function.
*
@@ -71,7 +70,9 @@ enum OperationType : @1.0::OperationType {
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
*
* Supported tensor rank: up to 4
*
* Inputs:
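
As a rough illustration of the division semantics above (not the HAL implementation; numpy broadcasting stands in for the compatible-dimensions rule, and only one fused activation is sketched):

    import numpy as np

    def div(input1, input2, activation=None):
        # Element-wise division; numpy broadcasts compatible dimensions,
        # e.g. {5, 4, 3, 2} / {5, 4, 3, 1} -> {5, 4, 3, 2}.
        out = input1 / input2
        if activation == "RELU":  # illustrative subset of the fused activations
            out = np.maximum(out, 0.0)
        return out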
@@ -88,15 +89,17 @@ enum OperationType : @1.0::OperationType {
/**
* Computes the mean of elements across dimensions of a tensor.
*
* Reduces input tensor along the dimensions given in axis. Unless keep_dims is true,
* the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is
* true, the reduced dimensions are retained with length 1.
* Reduces the input tensor along the given dimensions to reduce. Unless keep_dims
* is true, the rank of the tensor is reduced by 1 for each entry in axis.
* If keep_dims is true, the reduced dimensions are retained with length 1.
*
* If axis has no entries, all dimensions are reduced, and a tensor with a single
* element is returned.
* If dimensions to reduce have no entries, all dimensions are reduced, and a tensor with
* a single element is returned.
*
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor rank: up to 4
*
* Inputs:
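
A minimal numpy sketch of the MEAN behaviour described above (helper name assumed; an empty axis list reduces every dimension):

    import numpy as np

    def mean(x, axes, keep_dims):
        # No entries in axes means all dimensions are reduced.
        axes = tuple(axes) if len(axes) else tuple(range(x.ndim))
        return np.mean(x, axis=axes, keepdims=bool(keep_dims))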
@@ -115,14 +118,18 @@ enum OperationType : @1.0::OperationType {
*
* This operation pads a tensor according to the specified paddings.
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 1: A 2-D Tensor of type TENSOR_INT32. The paddings, before and after for each spatial dimension
* of the input tensor.
* 0: An n-D tensor, specifying the tensor to be padded.
* 1: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
* input tensor. The shape of the tensor must be {rank(input0), 2}.
* padding[i, 0] specifies the number of elements to be padded in the front of dimension i.
* padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
*
* Outputs:
* 0: A tensor of the same type as input0.
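
A minimal numpy sketch of the PAD semantics above, assuming zero padding (the helper name is illustrative, not part of the HAL):

    import numpy as np

    def pad(x, paddings):
        # paddings has shape {rank(x), 2}: paddings[i, 0] elements are added
        # before dimension i and paddings[i, 1] elements after it.
        return np.pad(x, [(int(before), int(after)) for before, after in paddings],
                      mode="constant", constant_values=0)

For example, pad(np.ones((2, 3)), [[1, 1], [0, 2]]) yields a 4 x 5 tensor.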
@@ -130,7 +137,7 @@ enum OperationType : @1.0::OperationType {
PAD = 32,
/**
* SpaceToBatch for N-D tensors.
* SpaceToBatch for N-dimensional tensors.
*
* This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
* of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
@@ -139,16 +146,20 @@ enum OperationType : @1.0::OperationType {
* batch position. Prior to division into blocks, the spatial dimensions of the input are
* optionally zero padded according to paddings.
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor rank: up to 4
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
* input tensor. All values must be >= 1.
* 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
* input tensor. All values must be >= 0.
* input tensor. All values must be >= 0. The shape of the tensor must be {rank(input0), 2}.
* padding[i, 0] specifies the number of elements to be padded in the front of dimension i.
* padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
*
* Outputs:
* 0: A tensor of the same type as input0.
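
For reference, a numpy sketch of the SPACE_TO_BATCH_ND semantics above for a rank-4 NHWC input (illustrative only); it mirrors the BATCH_TO_SPACE_ND sketch earlier:

    import numpy as np

    def space_to_batch_nd(x, block, paddings):
        # Zero-pad the spatial dimensions, then move block-sized patches of
        # the spatial grid into the batch dimension.
        x = np.pad(x, [(0, 0), tuple(paddings[0]), tuple(paddings[1]), (0, 0)])
        b, h, w, c = x.shape
        bh, bw = block
        y = x.reshape(b, h // bh, bh, w // bw, bw, c)
        return y.transpose(2, 4, 0, 1, 3, 5).reshape(b * bh * bw, h // bh, w // bw, c)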
@@ -160,17 +171,20 @@ enum OperationType : @1.0::OperationType {
*
* Given a tensor input, this operation returns a tensor of the same type with all
* dimensions of size 1 removed. If you don't want to remove all size 1 dimensions,
* you can remove specific size 1 dimensions by specifying axis.
* you can remove specific size 1 dimensions by specifying the axes (input1).
*
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor rank: up to 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 1: An 1-D Tensor of type TENSOR_INT32. The dimensions to squeeze. If None (the default),
* squeezes all dimensions. If specified, only squeezes the dimensions listed. The dimension
* index starts at 0. It is an error to squeeze a dimension that is not 1.
* 0: An n-D tensor, the tensor to be squeezed.
* 1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified,
* only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
* The dimension index starts at 0. An error must be reported if squeezing a dimension that
* is not 1.
*
* Outputs:
* 0: A tensor of the same type as input0. Contains the same data as input, but has one or more
@@ -181,23 +195,25 @@ enum OperationType : @1.0::OperationType {
/**
* Extracts a strided slice of a tensor.
*
* This op extracts a slice of size (end-begin)/stride from the given input tensor.
* Starting at the location specified by begin the slice continues by adding
* Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given
* input tensor. Starting at the location specified by begin the slice continues by adding
* stride to the index until all dimensions are not less than end. Note that a stride can
* be negative, which causes a reverse slice.
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 0: An n-D tensor, specifying the tensor to be sliced.
* 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input
* tensor to be sliced.
* tensor to be sliced. The length must be equal to rank(input0).
* 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input
* tensor to be sliced.
* tensor to be sliced. The length must be equal to rank(input0).
* 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
* tensor to be sliced.
* tensor to be sliced. The length must be equal to rank(input0).
*
* Outputs:
* 0: A tensor of the same type as input0.
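
A rough numpy sketch of the slicing rule above (the begin_mask, end_mask, and shrink_axis_mask inputs of the full operation are omitted; the helper name is assumed):

    import numpy as np

    def strided_slice(x, begin, end, strides):
        # Each dimension i is sliced as x[begin[i]:end[i]:strides[i]];
        # a negative stride walks that dimension in reverse.
        return x[tuple(slice(int(b), int(e), int(s))
                       for b, e, s in zip(begin, end, strides))]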
@@ -205,7 +221,7 @@ enum OperationType : @1.0::OperationType {
STRIDED_SLICE = 35,
/**
* Subtracts the second tensor from the first tensor, element-wise.
* Element-wise subtraction of two tensors.
*
* Takes two input tensors of identical type and compatible dimensions. The output
* is the result of subtracting the second input tensor from the first one, optionally
@@ -223,7 +239,9 @@ enum OperationType : @1.0::OperationType {
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
*
* Supported tensor rank: up to 4
*
* Inputs:
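
A small numpy example of the broadcasting behaviour described above, using the example dimensions from this description (illustrative only):

    import numpy as np

    input1 = np.ones((5, 4, 3, 2), dtype=np.float32)
    input2 = np.ones((5, 4, 3, 1), dtype=np.float32)
    output = input1 - input2   # input2 is broadcast along the last dimension
    assert output.shape == (5, 4, 3, 2)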
@@ -240,18 +258,20 @@ enum OperationType : @1.0::OperationType {
/**
* Transposes the input tensor, permuting the dimensions according to the perm tensor.
*
* The returned tensor's dimension i must correspond to the input dimension perm[i].
* The returned tensor's dimension i corresponds to the input dimension perm[i].
* If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor.
* Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors.
*
* Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* Supported tensor types:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4
*
* Inputs:
* 0: An n-D tensor, specifying the input.
* 1: A 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the input
* tensor.
* 0: An n-D tensor, specifying the tensor to be transposed.
* 1: An optional 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the
* input tensor.
*
* Outputs:
* 0: A tensor of the same type as input0.
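
A minimal numpy sketch of the TRANSPOSE semantics above (helper name assumed; np.transpose with axes=None matches the default perm of (n-1, ..., 0)):

    import numpy as np

    def transpose(x, perm=None):
        # Output dimension i takes its size and data from input dimension
        # perm[i]; omitting perm reverses the dimensions, i.e. a regular
        # matrix transpose for 2-D inputs.
        return np.transpose(x, axes=perm)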