From d23380f0a374c94c566216ccedfafa4a4bd82ddf Mon Sep 17 00:00:00 2001
From: Slava Shklyaev
Date: Thu, 9 Jul 2020 15:28:18 +0100
Subject: [PATCH 1/2] Fix stale NNAPI documentation

The scale and zeroPoint fields are applicable to other types since 1.2.

Also makes some whitespace changes due to the generated documentation
getting out of sync with the template in frameworks/ml/nn.

Fix: 160406237
Test: generate_api.sh
Test: m
Change-Id: Icf594d40c73ff8c05044c320ac9eb6a9c5a89754
Merged-In: Icf594d40c73ff8c05044c320ac9eb6a9c5a89754
(cherry picked from commit 0d6cefe90b949c9e2e90471f7aa959a0714eeb45)
---
 current.txt                  |  2 ++
 neuralnetworks/1.2/types.hal |  9 ++++++---
 neuralnetworks/1.2/types.t   |  9 ++++++---
 neuralnetworks/1.3/types.hal | 23 +++++++++++++----------
 neuralnetworks/1.3/types.t   |  9 ++++++---
 5 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/current.txt b/current.txt
index bdbad8a3e0..82eca812ae 100644
--- a/current.txt
+++ b/current.txt
@@ -652,6 +652,8 @@ b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardwar
 
 # ABI preserving changes to HALs during Android S
 cd84ab19c590e0e73dd2307b591a3093ee18147ef95e6d5418644463a6620076 android.hardware.neuralnetworks@1.2::IDevice
+9625e85f56515ad2cf87b6a1847906db669f746ea4ab02cd3d4ca25abc9b0109 android.hardware.neuralnetworks@1.2::types
+745295adfd826de650eedaf8cc6979f52a1cf30b04ea7a089a132d0089475e95 android.hardware.neuralnetworks@1.3::types
 38d65fb20c60a5b823298560fc0825457ecdc49603a4b4e94bf81511790737da android.hardware.radio@1.4::types
 954c334efd80e8869b66d1ce5fe2755712d96ba4b3c38d415739c330af5fb4cb android.hardware.radio@1.5::types
 
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 92cf2aa5e4..7441a54dba 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -4853,15 +4853,18 @@ struct Operand {
     /**
      * Quantized scale of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
-     * TENSOR_INT32.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     float scale;
 
     /**
      * Quantized zero-point offset of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     int32_t zeroPoint;
 
diff --git a/neuralnetworks/1.2/types.t b/neuralnetworks/1.2/types.t
index d197f6b541..21d88acf8f 100644
--- a/neuralnetworks/1.2/types.t
+++ b/neuralnetworks/1.2/types.t
@@ -251,15 +251,18 @@ struct Operand {
     /**
      * Quantized scale of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
-     * TENSOR_INT32.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     float scale;
 
     /**
      * Quantized zero-point offset of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     int32_t zeroPoint;
 
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 3b2b14c98c..7ec60648c5 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -5103,8 +5103,8 @@ enum OperationType : int32_t {
      * signature of this operation. That is, if the operation has (3 + n) inputs
      * and m outputs, both subgraphs must have n inputs and m outputs with the same
      * types, ranks, dimensions, scales,
-     * zeroPoints, and extraParams as the corresponding operation inputs and
-     * outputs.
+     * zeroPoints, and extraParams as the corresponding operation
+     * inputs and outputs.
      * All of the operands mentioned must have fully specified dimensions.
      *
      * Inputs:
@@ -5170,15 +5170,15 @@ enum OperationType : int32_t {
      * * 0: A {@link OperandType::SUBGRAPH} reference to the condition
      *      subgraph. The subgraph must have (m + k + n) inputs with
      *      the same types, ranks, dimensions,
-     *      scales, zeroPoints, and extraParams as the corresponding inputs of
-     *      the WHILE operation and exactly one output of
-     *      {@link OperandType::TENSOR_BOOL8} and shape [1].
+     *      scales, zeroPoints, and extraParams as the
+     *      corresponding inputs of the WHILE operation and exactly one output
+     *      of {@link OperandType::TENSOR_BOOL8} and shape [1].
      *      All of the operands mentioned must have fully specified dimensions.
      * * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
      *      The subgraph must have (m + k + n) inputs and (m + k) outputs with
      *      the same types, ranks, dimensions,
-     *      scales, zeroPoints, and extraParams as the corresponding inputs and
-     *      outputs of the WHILE operation.
+     *      scales, zeroPoints, and extraParams as the
+     *      corresponding inputs and outputs of the WHILE operation.
      *      All of the operands mentioned must have fully specified dimensions.
      * * (m inputs): Initial values for input-output operands.
      * * (k inputs): Initial values for state-only operands.
@@ -5538,15 +5538,18 @@ struct Operand {
     /**
      * Quantized scale of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
-     * TENSOR_INT32.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     float scale;
 
     /**
      * Quantized zero-point offset of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     int32_t zeroPoint;
 
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index 7220e372a7..9cffc7a32c 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -303,15 +303,18 @@ struct Operand {
     /**
      * Quantized scale of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
-     * TENSOR_INT32.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     float scale;
 
     /**
      * Quantized zero-point offset of the operand.
      *
-     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+     * Must be 0 when not applicable to an operand type.
+     *
+     * See {@link OperandType}.
      */
     int32_t zeroPoint;
 
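Note on the patch above (an editor's illustration, not part of either patch): since HAL 1.2, scale and zeroPoint are meaningful for more operand types than TENSOR_QUANT8_ASYMM and TENSOR_INT32, which is why the per-field type lists are replaced with a pointer to the per-type documentation in OperandType, plus the rule that both fields must be 0 where they do not apply. As a reminder of what the two fields describe, here is a minimal C++ sketch of the affine mapping used by asymmetric 8-bit quantization; the dequantize helper is hypothetical and not a HAL or NDK API.

    #include <cstdint>

    // For a TENSOR_QUANT8_ASYMM operand, the real value represented by a
    // stored 8-bit value q is (q - zeroPoint) * scale.
    float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
        return static_cast<float>(static_cast<int32_t>(q) - zeroPoint) * scale;
    }

Where quantization does not apply to an operand type, the updated wording requires both fields to be left at 0.
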
From a2fbeefaa8cb20813bf99b8570e21d000b9527e3 Mon Sep 17 00:00:00 2001
From: Slava Shklyaev
Date: Mon, 13 Jul 2020 14:18:27 +0100
Subject: [PATCH 2/2] Fix typo in NNAPI documentation

Fix: 160960007
Test: m
Change-Id: I6c1af92f640fde833d2d93f7db4576c92a6f9719
Merged-In: I6c1af92f640fde833d2d93f7db4576c92a6f9719
(cherry picked from commit 3bbc2aedd0c02afe9eeb3c4460ee1b6d8ddc1796)
---
 current.txt                  | 2 +-
 neuralnetworks/1.3/types.hal | 8 ++++----
 neuralnetworks/1.3/types.t   | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/current.txt b/current.txt
index 82eca812ae..d774dba0cc 100644
--- a/current.txt
+++ b/current.txt
@@ -653,7 +653,7 @@ b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardwar
 # ABI preserving changes to HALs during Android S
 cd84ab19c590e0e73dd2307b591a3093ee18147ef95e6d5418644463a6620076 android.hardware.neuralnetworks@1.2::IDevice
 9625e85f56515ad2cf87b6a1847906db669f746ea4ab02cd3d4ca25abc9b0109 android.hardware.neuralnetworks@1.2::types
-745295adfd826de650eedaf8cc6979f52a1cf30b04ea7a089a132d0089475e95 android.hardware.neuralnetworks@1.3::types
+9e758e208d14f7256e0885d6d8ad0b61121b21d8c313864f981727ae55bffd16 android.hardware.neuralnetworks@1.3::types
 38d65fb20c60a5b823298560fc0825457ecdc49603a4b4e94bf81511790737da android.hardware.radio@1.4::types
 954c334efd80e8869b66d1ce5fe2755712d96ba4b3c38d415739c330af5fb4cb android.hardware.radio@1.5::types
 
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 7ec60648c5..5f5ee0360c 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -5743,8 +5743,8 @@ struct Request {
      * Input data and information to be used in the execution of a prepared
      * model.
      *
-     * The index of the input corresponds to the index in Model.inputIndexes.
-     * E.g., input[i] corresponds to Model.inputIndexes[i].
+     * The index of the input corresponds to the index in Model.main.inputIndexes.
+     * E.g., input[i] corresponds to Model.main.inputIndexes[i].
      */
     vec<RequestArgument> inputs;
 
@@ -5752,8 +5752,8 @@ struct Request {
      * Output data and information to be used in the execution of a prepared
      * model.
      *
-     * The index of the output corresponds to the index in Model.outputIndexes.
-     * E.g., output[i] corresponds to Model.outputIndexes[i].
+     * The index of the output corresponds to the index in Model.main.outputIndexes.
+     * E.g., output[i] corresponds to Model.main.outputIndexes[i].
      */
     vec<RequestArgument> outputs;
 
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index 9cffc7a32c..2901d18525 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -508,8 +508,8 @@ struct Request {
      * Input data and information to be used in the execution of a prepared
      * model.
      *
-     * The index of the input corresponds to the index in Model.inputIndexes.
-     * E.g., input[i] corresponds to Model.inputIndexes[i].
+     * The index of the input corresponds to the index in Model.main.inputIndexes.
+     * E.g., input[i] corresponds to Model.main.inputIndexes[i].
      */
     vec<RequestArgument> inputs;
 
@@ -517,8 +517,8 @@ struct Request {
      * Output data and information to be used in the execution of a prepared
      * model.
      *
-     * The index of the output corresponds to the index in Model.outputIndexes.
-     * E.g., output[i] corresponds to Model.outputIndexes[i].
+     * The index of the output corresponds to the index in Model.main.outputIndexes.
+     * E.g., output[i] corresponds to Model.main.outputIndexes[i].
      */
     vec<RequestArgument> outputs;
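
A closing illustration (again an editor's note, not part of the patches): the second patch matters because a 1.3 Model holds a main subgraph plus referenced subgraphs, so a Request's inputs and outputs are resolved against Model.main.inputIndexes and Model.main.outputIndexes rather than against fields on Model itself. A rough C++ sketch of that indexing relationship follows; the structs are simplified stand-ins for the generated HIDL types, and operandForRequestInput is a hypothetical helper.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified stand-ins for the generated 1.3 HIDL types.
    struct Operand { float scale; int32_t zeroPoint; /* type, dimensions, ... */ };
    struct Subgraph {
        std::vector<Operand> operands;
        std::vector<uint32_t> inputIndexes;   // operand indexes of the subgraph's inputs
        std::vector<uint32_t> outputIndexes;  // operand indexes of the subgraph's outputs
    };
    struct Model { Subgraph main; std::vector<Subgraph> referenced; /* ... */ };

    // Request.inputs[i] carries data for the operand named by
    // Model.main.inputIndexes[i]; outputs work the same way.
    const Operand& operandForRequestInput(const Model& model, size_t i) {
        return model.main.operands.at(model.main.inputIndexes.at(i));
    }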