From 351ca59582d39547278f3b00eaf9727b0bb3444c Mon Sep 17 00:00:00 2001
From: Slava Shklyaev
Date: Fri, 13 Dec 2019 12:21:44 +0000
Subject: [PATCH 1/2] Remove neuralnetworks@1.3::Model.Extension* in favor of
 1.2 counterparts

The types are the same as in 1.2. No changes are expected.

Bug: 136735929
Test: m
Change-Id: I7431d2e9263fafa0e63b8b1b40f6715e3832d17c
Merged-In: I7431d2e9263fafa0e63b8b1b40f6715e3832d17c
(cherry picked from commit 8a179f34995909213055af7817d305f7c5cc6577)
---
 current.txt                  |  2 +-
 neuralnetworks/1.3/types.hal | 42 ++++++------------------------------
 neuralnetworks/1.3/types.t   | 42 ++++++------------------------------
 3 files changed, 13 insertions(+), 73 deletions(-)

diff --git a/current.txt b/current.txt
index 523f408c64..4463075929 100644
--- a/current.txt
+++ b/current.txt
@@ -626,7 +626,7 @@ ac429fca0da4ce91218768ec31b64ded88251f8a26d8c4f27c06abdc5b1926d9 android.hardwar
 234cc547d63d2f24a447aee0a9a76cab68b31c080adadc5a960598b827a69fa2 android.hardware.neuralnetworks@1.3::IDevice
 058b48f0e2e725bb2b3fa2b7917b0f0a696383d03a4c57afe26f0eadb6a7af28 android.hardware.neuralnetworks@1.3::IPreparedModel
 94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-2576ba54711218ce0d7f207baa533fca9af3c630756938ede6e73fe197b7ea38 android.hardware.neuralnetworks@1.3::types
+1435cf1724f9f89ff5f97d4aa6fe2a031b0ef43034cb5801b16229dc2ecfea82 android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 6c8fe43312..bb924c26ba 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -20,6 +20,8 @@ import @1.0::DataLocation;
 import @1.0::OperandLifeTime;
 import @1.0::PerformanceInfo;
 import @1.0::RequestArgument;
+import @1.2::Model.ExtensionNameAndPrefix;
+import @1.2::Model.ExtensionTypeEncoding;
 import @1.2::OperandType;
 import @1.2::OperationType;
 import @1.2::SymmPerChannelQuantParams;
@@ -5157,9 +5159,9 @@ struct Model {
      * {@link OperandTypeRange::BASE_MAX} or
      * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
      * as an extension operand. The low
-     * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
-     * correspond to the type ID within the extension and the high
-     * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
+     * {@link @1.2::Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the
+     * value correspond to the type ID within the extension and the high
+     * {@link @1.2::Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
      * the "prefix", which maps uniquely to the extension name.
      *
      * For example, if a model contains an operation whose value is
@@ -5172,39 +5174,7 @@ struct Model {
      * prefix corresponding to each extension name and at most one extension
      * name corresponding to each prefix.
      */
-    vec<ExtensionNameAndPrefix> extensionNameToPrefix;
-
-    /**
-     * A correspondence between an extension name and a prefix of operand and
-     * operation type values.
-     */
-    struct ExtensionNameAndPrefix {
-        /**
-         * The extension name.
-         *
-         * See {@link Extension::name} for the format specification.
-         */
-        string name;
-
-        /**
-         * The unique extension identifier within the model.
-         *
-         * See {@link Model::extensionNameToPrefix}.
-         */
-        uint16_t prefix;
-    };
-
-    /**
-     * Numeric values of extension operand and operation types have the
-     * following structure:
-     * - 16 high bits represent the "prefix", which corresponds uniquely to the
-     *   extension name.
-     * - 16 low bits represent the type ID within the extension.
-     */
-    enum ExtensionTypeEncoding : uint8_t {
-        HIGH_BITS_PREFIX = 16,
-        LOW_BITS_TYPE = 16,
-    };
+    vec<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix;
 };
 
 /**
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index b1c72a9a31..b4c37697a1 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -22,6 +22,8 @@ import @1.0::DataLocation;
 import @1.0::OperandLifeTime;
 import @1.0::PerformanceInfo;
 import @1.0::RequestArgument;
+import @1.2::Model.ExtensionNameAndPrefix;
+import @1.2::Model.ExtensionTypeEncoding;
 import @1.2::OperandType;
 import @1.2::OperationType;
 import @1.2::SymmPerChannelQuantParams;
@@ -341,9 +343,9 @@ struct Model {
      * {@link OperandTypeRange::BASE_MAX} or
      * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
      * as an extension operand. The low
-     * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
-     * correspond to the type ID within the extension and the high
-     * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
+     * {@link @1.2::Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the
+     * value correspond to the type ID within the extension and the high
+     * {@link @1.2::Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
      * the "prefix", which maps uniquely to the extension name.
      *
     * For example, if a model contains an operation whose value is
@@ -356,39 +358,7 @@ struct Model {
      * prefix corresponding to each extension name and at most one extension
      * name corresponding to each prefix.
      */
-    vec<ExtensionNameAndPrefix> extensionNameToPrefix;
-
-    /**
-     * A correspondence between an extension name and a prefix of operand and
-     * operation type values.
-     */
-    struct ExtensionNameAndPrefix {
-        /**
-         * The extension name.
-         *
-         * See {@link Extension::name} for the format specification.
-         */
-        string name;
-
-        /**
-         * The unique extension identifier within the model.
-         *
-         * See {@link Model::extensionNameToPrefix}.
-         */
-        uint16_t prefix;
-    };
-
-    /**
-     * Numeric values of extension operand and operation types have the
-     * following structure:
-     * - 16 high bits represent the "prefix", which corresponds uniquely to the
-     *   extension name.
-     * - 16 low bits represent the type ID within the extension.
- */ - enum ExtensionTypeEncoding : uint8_t { - HIGH_BITS_PREFIX = 16, - LOW_BITS_TYPE = 16, - }; + vec<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix; }; /** From f8124a861f2828db7415876c3d1aaee84279bd51 Mon Sep 17 00:00:00 2001 From: Slava Shklyaev Date: Fri, 13 Dec 2019 12:24:35 +0000 Subject: [PATCH 2/2] Add NNAPI control flow Bug: 136735929 Bug: 139181916 Test: m Change-Id: I7a75175f00fc98df626c40ea669021ccd40130e0 Merged-In: I7a75175f00fc98df626c40ea669021ccd40130e0 (cherry picked from commit a785a3faacaed173b7e8c697dfc48f791ae8c79c) --- current.txt | 4 +- neuralnetworks/1.3/IDevice.hal | 11 +- neuralnetworks/1.3/types.hal | 223 +++++++++++++++--- neuralnetworks/1.3/types.t | 126 ++++++++-- .../functional/CompilationCachingTests.cpp | 2 +- .../vts/functional/GeneratedTestHarness.cpp | 13 +- .../1.3/vts/functional/ValidateModel.cpp | 117 ++++----- 7 files changed, 373 insertions(+), 123 deletions(-) diff --git a/current.txt b/current.txt index 4463075929..eae0d8758d 100644 --- a/current.txt +++ b/current.txt @@ -623,10 +623,10 @@ bbeee9604128ede83ee755b67e73b5ad29e6e1dbac9ec41fea6ffe2745b0c50a android.hardwar adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation ac429fca0da4ce91218768ec31b64ded88251f8a26d8c4f27c06abdc5b1926d9 android.hardware.keymaster@4.1::types 4b5c8546533db9412fec6d32c0ef42b22e5e68dbf390c775ec3c22bb2d501102 android.hardware.neuralnetworks@1.3::IBuffer -234cc547d63d2f24a447aee0a9a76cab68b31c080adadc5a960598b827a69fa2 android.hardware.neuralnetworks@1.3::IDevice +5a6b75f13f0e010a4268defa4f627b862ab2899fb04f9d985194a25bd8f9fe0d android.hardware.neuralnetworks@1.3::IDevice 058b48f0e2e725bb2b3fa2b7917b0f0a696383d03a4c57afe26f0eadb6a7af28 android.hardware.neuralnetworks@1.3::IPreparedModel 94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback -1435cf1724f9f89ff5f97d4aa6fe2a031b0ef43034cb5801b16229dc2ecfea82 android.hardware.neuralnetworks@1.3::types +12c51f9d04a52324510419aeee3e37bb3607e6900556cdde79774d80ed989855 android.hardware.neuralnetworks@1.3::types 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal index 9afd77830d..8dc41f7c21 100644 --- a/neuralnetworks/1.3/IDevice.hal +++ b/neuralnetworks/1.3/IDevice.hal @@ -48,9 +48,14 @@ interface IDevice extends @1.2::IDevice { /** * Gets the supported operations in a model. * - * getSupportedOperations indicates which operations of a model are fully - * supported by the vendor driver. If an operation may not be supported for - * any reason, getSupportedOperations must return false for that operation. + * getSupportedOperations indicates which operations of the top-level + * subgraph are fully supported by the vendor driver. If an operation may + * not be supported for any reason, getSupportedOperations must return + * false for that operation. + * + * The {@link OperationType::IF} and {@link OperationType::WHILE} + * operations may only be fully supported if the vendor driver fully + * supports all operations in the referenced subgraphs. 
      *
      * @param model A model whose operations--and their corresponding operands--
      *              are to be verified by the driver.
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index bb924c26ba..a6d274a05a 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -17,7 +17,6 @@
 package android.hardware.neuralnetworks@1.3;
 
 import @1.0::DataLocation;
-import @1.0::OperandLifeTime;
 import @1.0::PerformanceInfo;
 import @1.0::RequestArgument;
 import @1.2::Model.ExtensionNameAndPrefix;
 import @1.2::Model.ExtensionTypeEncoding;
@@ -42,6 +41,13 @@ enum OperandType : @1.2::OperandType {
      */
     TENSOR_QUANT8_ASYMM_SIGNED = 14,
 
+    /**
+     * A reference to a subgraph.
+     *
+     * Must have the lifetime {@link OperandLifeTime::SUBGRAPH}.
+     */
+    SUBGRAPH = 15,
+
     /*
      * DEPRECATED. Since HAL version 1.2, extensions are the preferred
      * alternative to OEM operation and data types.
@@ -70,7 +76,7 @@ enum OperandTypeRange : uint32_t {
     BASE_MIN = 0,
     FUNDAMENTAL_MIN = 0,
-    FUNDAMENTAL_MAX = 14,
+    FUNDAMENTAL_MAX = 15,
     OEM_MIN = 10000,
     OEM_MAX = 10001,
     BASE_MAX = 0xFFFF,
@@ -4878,6 +4884,92 @@ enum OperationType : int32_t {
      */
     QUANTIZED_LSTM = 95,
 
+    /**
+     * Executes one of the two referenced subgraphs as determined by a boolean
+     * value.
+     *
+     * The inputs and outputs of the two referenced subgraphs must agree with the
+     * signature of this operation. That is, if the operation has (3 + n) inputs
+     * and m outputs, both subgraphs must have n inputs and m outputs with the same
+     * types as the corresponding operation inputs and outputs.
+     *
+     * Inputs:
+     * * 0: A value of type {@link OperandType::TENSOR_BOOL8} and shape [1]
+     *      that determines which of the two referenced subgraphs to execute.
+     * * 1: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
+     *      executed if the condition is true.
+     * * 2: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
+     *      executed if the condition is false.
+     * * 3 ~ (n + 2): Inputs to be passed to the subgraph selected for execution.
+     *
+     * Outputs:
+     * * 0 ~ (m - 1): Outputs produced by the selected subgraph.
+     */
+    IF = 96,
+
+    /**
+     * Executes the body subgraph until the condition subgraph outputs false.
+     *
+     * The inputs to this operation are the condition subgraph, the body subgraph,
+     * and operand values for the first iteration of the loop. The values are
+     * implicitly split into three groups of input-output, state-only, and
+     * input-only values, as described below.
+     *
+     * The outputs of this operation are the final values of input-output
+     * operands.
+     *
+     * Both the condition and body subgraph receive (m + k + n) inputs.
+     * * The first m (m >= 1) inputs are input-output operands. For the first
+     *   iteration, these are initialized from the corresponding inputs of the
+     *   WHILE operation. In subsequent iterations, their values come from the
+     *   corresponding outputs of the body subgraph produced during the previous
+     *   iteration.
+     * * The next k (k >= 0) inputs are state-only operands. They are similar to
+     *   the input-output operands, except that their values are no longer
+     *   available after the loop terminates.
+     * * The last n (n >= 0) inputs are input-only operands. Their values come
+     *   from the corresponding inputs of the WHILE operation.
+     *
+     * The body subgraph produces (m + k) outputs.
+     * * The first m outputs are input-output operands. They become the outputs
+     *   of the WHILE operation when a termination condition is reached.
+     * * The last k outputs are state-only operands. Their values are no longer
+     *   available after the loop terminates.
+     *
+     * The numbers m, k, and n are inferred by the driver as follows:
+     *     m = (WHILE operation output count)
+     *     k = (body subgraph output count) - m
+     *     n = (body subgraph input count) - m - k
+     *
+     * The pseudo-code below illustrates the flow of a WHILE operation with
+     * inputs condition, body, initial_input_output, initial_state, input_only
+     * (m = 1, k = 1, n = 1):
+     *
+     *     input_output = initial_input_output
+     *     state = initial_state
+     *     while condition(input_output, state, input_only):
+     *         input_output, state = body(input_output, state, input_only)
+     *     return input_output
+     *
+     * Inputs:
+     * * 0: A {@link OperandType::SUBGRAPH} reference to the condition
+     *      subgraph. The subgraph must have (m + k + n) inputs with
+     *      the same types as the corresponding inputs of the WHILE operation
+     *      and exactly one output of {@link OperandType::TENSOR_BOOL8}
+     *      and shape [1].
+     * * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
+     *      The subgraph must have (m + k + n) inputs and (m + k) outputs with
+     *      the same types as the corresponding inputs and outputs of the WHILE
+     *      operation.
+     * * (m inputs): Initial values for input-output operands.
+     * * (k inputs): Initial values for state-only operands.
+     * * (n inputs): Values for input-only operands.
+     *
+     * Outputs:
+     * * 0 ~ (m - 1): Outputs produced by the loop.
+     */
+    WHILE = 97,
+
     /**
      * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
      * OEM operation and data types.
@@ -4900,13 +4992,12 @@ enum OperationTypeRange : uint32_t {
     BASE_MIN = 0,
     FUNDAMENTAL_MIN = 0,
-    FUNDAMENTAL_MAX = 95,
+    FUNDAMENTAL_MAX = 97,
     OEM_MIN = 10000,
     OEM_MAX = 10000,
     BASE_MAX = 0xFFFF,
 };
-
 /**
  * The capabilities of a driver.
  *
@@ -4967,6 +5058,59 @@ struct Operation {
     vec<uint32_t> outputs;
 };
 
+/**
+ * How an operand is used.
+ */
+enum OperandLifeTime : int32_t {
+    /**
+     * The operand is internal to the model. It's created by an operation and
+     * consumed by other operations. It must be an output operand of
+     * exactly one operation.
+     */
+    TEMPORARY_VARIABLE,
+
+    /**
+     * The operand is an input of a subgraph. It must not be an output
+     * operand of any operation.
+     *
+     * An operand can't be both input and output of a subgraph.
+     */
+    SUBGRAPH_INPUT,
+
+    /**
+     * The operand is an output of a subgraph. It must be an output
+     * operand of exactly one operation.
+     *
+     * An operand can't be both input and output of a subgraph.
+     */
+    SUBGRAPH_OUTPUT,
+
+    /**
+     * The operand is a constant found in Model.operandValues. It must
+     * not be an output operand of any operation.
+     */
+    CONSTANT_COPY,
+
+    /**
+     * The operand is a constant that was specified via a Memory
+     * object. It must not be an output operand of any operation.
+     */
+    CONSTANT_REFERENCE,
+
+    /**
+     * The operand does not have a value. This is valid only for optional
+     * arguments of operations.
+     */
+    NO_VALUE,
+
+    /**
+     * The operand is a reference to a subgraph. It must be an input to one
+     * or more {@link OperationType::IF} or {@link OperationType::WHILE}
+     * operations.
+     */
+    SUBGRAPH,
+};
+
 /**
  * Describes one operand of the model's graph.
  */
@@ -5003,7 +5147,7 @@ struct Operand {
      *            . The operand has lifetime CONSTANT_COPY or
      *              CONSTANT_REFERENCE.
      *
-     *            . The operand has lifetime MODEL_INPUT. Fully
+     *            . The operand has lifetime SUBGRAPH_INPUT. Fully
      *              specified dimensions must either be present in the
      *              Operand or they must be provided in the corresponding
      *              RequestArgument.
@@ -5051,8 +5195,8 @@ struct Operand {
 
     /**
      * Where to find the data for this operand.
-     * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
-     * NO_VALUE:
+     * If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT,
+     * or NO_VALUE:
      * - All the fields must be 0.
      * If the lifetime is CONSTANT_COPY:
      * - location.poolIndex is 0.
@@ -5062,6 +5206,11 @@ struct Operand {
      * - location.poolIndex is set.
      * - location.offset is the offset in bytes into the specified pool.
      * - location.length is set.
+     * If the lifetime is SUBGRAPH:
+     * - location.poolIndex is 0.
+     * - location.offset is the index of the referenced subgraph in
+     *   {@link Model::referenced}.
+     * - location.length is 0.
      */
     DataLocation location;
@@ -5100,32 +5249,19 @@ struct Operand {
  */
 struct Model {
     /**
-     * All operands included in the model.
+     * The top-level subgraph.
      */
-    vec<Operand> operands;
+    Subgraph main;
 
     /**
-     * All operations included in the model.
+     * Referenced subgraphs.
      *
-     * The operations are sorted into execution order. Every operand
-     * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
-     * written before it is read.
-     */
-    vec<Operation> operations;
-
-    /**
-     * Input indexes of the model. There must be at least one.
+     * Each subgraph is referenced by the main subgraph or at least one other
+     * referenced subgraph.
      *
-     * Each value corresponds to the index of the operand in "operands".
+     * There must be no reference cycles.
      */
-    vec<uint32_t> inputIndexes;
-
-    /**
-     * Output indexes of the model. There must be at least one.
-     *
-     * Each value corresponds to the index of the operand in "operands".
-     */
-    vec<uint32_t> outputIndexes;
+    vec<Subgraph> referenced;
 
     /**
      * A byte buffer containing operand data that were copied into the model.
@@ -5177,6 +5313,39 @@ struct Model {
     vec<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix;
 };
 
+/**
+ * An excerpt of the execution graph.
+ */
+struct Subgraph {
+    /**
+     * All operands included in the subgraph.
+     */
+    vec<Operand> operands;
+
+    /**
+     * All operations included in the subgraph.
+     *
+     * The operations are sorted into execution order. Every operand
+     * with lifetime SUBGRAPH_OUTPUT or TEMPORARY_VARIABLE must be
+     * written before it is read.
+     */
+    vec<Operation> operations;
+
+    /**
+     * Input indexes of the subgraph. There must be at least one.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
+    vec<uint32_t> inputIndexes;
+
+    /**
+     * Output indexes of the subgraph. There must be at least one.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
+    vec<uint32_t> outputIndexes;
+};
+
 /**
  * A buffer descriptor. Describes the properties of a buffer.
  */
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index b4c37697a1..f3319e5cbe 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -19,7 +19,6 @@
 package android.hardware.neuralnetworks@1.3;
 
 import @1.0::DataLocation;
-import @1.0::OperandLifeTime;
 import @1.0::PerformanceInfo;
 import @1.0::RequestArgument;
 import @1.2::Model.ExtensionNameAndPrefix;
 import @1.2::Model.ExtensionTypeEncoding;
 import @1.2::OperandType;
 import @1.2::OperationType;
 import @1.2::SymmPerChannelQuantParams;
@@ -90,7 +89,6 @@ enum OperationTypeRange : uint32_t {
     BASE_MAX = 0xFFFF,
 };
 
-
 /**
  * The capabilities of a driver.
  *
@@ -151,6 +149,59 @@ struct Operation {
     vec<uint32_t> outputs;
 };
 
+/**
+ * How an operand is used.
+ */
+enum OperandLifeTime : int32_t {
+    /**
+     * The operand is internal to the model. It's created by an operation and
+     * consumed by other operations. It must be an output operand of
+     * exactly one operation.
+     */
+    TEMPORARY_VARIABLE,
+
+    /**
+     * The operand is an input of a subgraph. It must not be an output
+     * operand of any operation.
+     *
+     * An operand can't be both input and output of a subgraph.
+     */
+    SUBGRAPH_INPUT,
+
+    /**
+     * The operand is an output of a subgraph. It must be an output
+     * operand of exactly one operation.
+     *
+     * An operand can't be both input and output of a subgraph.
+     */
+    SUBGRAPH_OUTPUT,
+
+    /**
+     * The operand is a constant found in Model.operandValues. It must
+     * not be an output operand of any operation.
+     */
+    CONSTANT_COPY,
+
+    /**
+     * The operand is a constant that was specified via a Memory
+     * object. It must not be an output operand of any operation.
+     */
+    CONSTANT_REFERENCE,
+
+    /**
+     * The operand does not have a value. This is valid only for optional
+     * arguments of operations.
+     */
+    NO_VALUE,
+
+    /**
+     * The operand is a reference to a subgraph. It must be an input to one
+     * or more {@link OperationType::IF} or {@link OperationType::WHILE}
+     * operations.
+     */
+    SUBGRAPH,
+};
+
 /**
  * Describes one operand of the model's graph.
  */
@@ -187,7 +238,7 @@ struct Operand {
      *            . The operand has lifetime CONSTANT_COPY or
      *              CONSTANT_REFERENCE.
      *
-     *            . The operand has lifetime MODEL_INPUT. Fully
+     *            . The operand has lifetime SUBGRAPH_INPUT. Fully
      *              specified dimensions must either be present in the
      *              Operand or they must be provided in the corresponding
      *              RequestArgument.
@@ -235,8 +286,8 @@ struct Operand {
 
     /**
      * Where to find the data for this operand.
-     * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
-     * NO_VALUE:
+     * If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT,
+     * or NO_VALUE:
      * - All the fields must be 0.
     * If the lifetime is CONSTANT_COPY:
      * - location.poolIndex is 0.
@@ -246,6 +297,11 @@ struct Operand {
      * - location.poolIndex is set.
      * - location.offset is the offset in bytes into the specified pool.
      * - location.length is set.
+     * If the lifetime is SUBGRAPH:
+     * - location.poolIndex is 0.
+     * - location.offset is the index of the referenced subgraph in
+     *   {@link Model::referenced}.
+     * - location.length is 0.
      */
     DataLocation location;
@@ -284,32 +340,19 @@ struct Operand {
  */
 struct Model {
     /**
-     * All operands included in the model.
+     * The top-level subgraph.
      */
-    vec<Operand> operands;
+    Subgraph main;
 
     /**
-     * All operations included in the model.
+     * Referenced subgraphs.
     *
-     * The operations are sorted into execution order. Every operand
-     * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
-     * written before it is read.
-     */
-    vec<Operation> operations;
-
-    /**
-     * Input indexes of the model. There must be at least one.
+     * Each subgraph is referenced by the main subgraph or at least one other
+     * referenced subgraph.
      *
-     * Each value corresponds to the index of the operand in "operands".
+     * There must be no reference cycles.
      */
-    vec<uint32_t> inputIndexes;
-
-    /**
-     * Output indexes of the model. There must be at least one.
-     *
-     * Each value corresponds to the index of the operand in "operands".
-     */
-    vec<uint32_t> outputIndexes;
+    vec<Subgraph> referenced;
 
     /**
     * A byte buffer containing operand data that were copied into the model.
@@ -361,6 +404,39 @@ struct Model {
     vec<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix;
 };
 
+/**
+ * An excerpt of the execution graph.
+ */
+struct Subgraph {
+    /**
+     * All operands included in the subgraph.
+     */
+    vec<Operand> operands;
+
+    /**
+     * All operations included in the subgraph.
+     *
+     * The operations are sorted into execution order. Every operand
+     * with lifetime SUBGRAPH_OUTPUT or TEMPORARY_VARIABLE must be
+     * written before it is read.
+     */
+    vec<Operation> operations;
+
+    /**
+     * Input indexes of the subgraph. There must be at least one.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
+    vec<uint32_t> inputIndexes;
+
+    /**
+     * Output indexes of the subgraph. There must be at least one.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
+    vec<uint32_t> outputIndexes;
+};
+
 /**
  * A buffer descriptor. Describes the properties of a buffer.
  */
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index fe8d907d36..5cb466fe28 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -308,7 +308,7 @@ class CompilationCachingTestBase : public testing::Test {
                 model,
                 [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
                     ASSERT_EQ(ErrorStatus::NONE, status);
-                    ASSERT_EQ(supported.size(), model.operations.size());
+                    ASSERT_EQ(supported.size(), model.main.operations.size());
                     fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                      [](bool valid) { return valid; });
                 });
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 4f747f4afa..805d5b53aa 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -59,7 +59,6 @@ using hidl::memory::V1_0::IMemory;
 using implementation::PreparedModelCallback;
 using V1_0::DataLocation;
 using V1_0::ErrorStatus;
-using V1_0::OperandLifeTime;
 using V1_0::RequestArgument;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
@@ -269,10 +268,10 @@ Model createModel(const TestModel& testModel) {
         }
     }
 
-    return {.operands = std::move(operands),
-            .operations = std::move(operations),
-            .inputIndexes = testModel.inputIndexes,
-            .outputIndexes = testModel.outputIndexes,
+    return {.main = {.operands = std::move(operands),
+                     .operations = std::move(operations),
+                     .inputIndexes = testModel.inputIndexes,
+                     .outputIndexes = testModel.outputIndexes},
             .operandValues = std::move(operandValues),
             .pools = std::move(pools),
             .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
@@ -290,8 +289,8 @@ static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
 }
 
 static void makeOutputDimensionsUnspecified(Model* model) {
-    for (auto i : model->outputIndexes) {
-        auto& dims = model->operands[i].dimensions;
+    for (auto i : model->main.outputIndexes) {
+        auto& dims = model->main.operands[i].dimensions;
         std::fill(dims.begin(), dims.end(), 0);
     }
 }
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 8395111d78..cc862645a5 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -25,7 +25,6 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
-using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
 using V1_2::SymmPerChannelQuantParams;
 using HidlToken =
@@ -83,22 +82,22 @@ static void validate(const sp<IDevice>& device, const std::string& message, Mode
 }
 
 static uint32_t addOperand(Model* model) {
-    return hidl_vec_push_back(&model->operands,
+    return hidl_vec_push_back(&model->main.operands,
                               {
                                       .type = OperandType::INT32,
                                       .dimensions = {},
                                       .numberOfConsumers = 0,
                                       .scale = 0.0f,
                                       .zeroPoint = 0,
-                                      .lifetime = OperandLifeTime::MODEL_INPUT,
+                                      .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                                       .location = {.poolIndex = 0, .offset = 0, .length = 0},
                               });
 }
 
 static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
     uint32_t index = addOperand(model);
-    model->operands[index].numberOfConsumers = 1;
-    model->operands[index].lifetime = lifetime;
+    model->main.operands[index].numberOfConsumers = 1;
+    model->main.operands[index].lifetime = lifetime;
     return index;
 }
@@ -112,13 +111,13 @@ static const uint32_t invalidOperandTypes[] = {
 };
 
 static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+    for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
         for (uint32_t invalidOperandType : invalidOperandTypes) {
             const std::string message = "mutateOperandTypeTest: operand " +
                                         std::to_string(operand) + " set to value " +
                                         std::to_string(invalidOperandType);
             validate(device, message, model, [operand, invalidOperandType](Model* model) {
-                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+                model->main.operands[operand].type = static_cast<OperandType>(invalidOperandType);
             });
         }
     }
 }
@@ -150,15 +149,15 @@ static uint32_t getInvalidRank(OperandType type) {
 }
 
 static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
-        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+    for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+        const uint32_t invalidRank = getInvalidRank(model.main.operands[operand].type);
         if (invalidRank == 0) {
             continue;
         }
         const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
                                     " has rank of " + std::to_string(invalidRank);
         validate(device, message, model, [operand, invalidRank](Model* model) {
-            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+            model->main.operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
         });
     }
 }
@@ -190,12 +189,12 @@ static float getInvalidScale(OperandType type) {
 }
 
 static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
-        const float invalidScale = getInvalidScale(model.operands[operand].type);
+    for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+        const float invalidScale = getInvalidScale(model.main.operands[operand].type);
         const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
                                     " has scale of " + std::to_string(invalidScale);
         validate(device, message, model, [operand, invalidScale](Model* model) {
-            model->operands[operand].scale = invalidScale;
+            model->main.operands[operand].scale = invalidScale;
         });
     }
 }
@@ -229,15 +228,15 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
 }
 
 static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+    for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
         const std::vector<int32_t> invalidZeroPoints =
-                getInvalidZeroPoints(model.operands[operand].type);
+                getInvalidZeroPoints(model.main.operands[operand].type);
         for (int32_t invalidZeroPoint : invalidZeroPoints) {
             const std::string message = "mutateOperandZeroPointTest: operand " +
                                         std::to_string(operand) + " has zero point of " +
                                         std::to_string(invalidZeroPoint);
             validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
-                model->operands[operand].zeroPoint = invalidZeroPoint;
+                model->main.operands[operand].zeroPoint = invalidZeroPoint;
             });
         }
     }
 }
@@ -310,11 +309,11 @@ static void mutateOperand(Operand* operand, OperandType type) {
 
 static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
     // Do not test OEM types
-    if (type == model.operands[operand].type || type == OperandType::OEM ||
+    if (type == model.main.operands[operand].type || type == OperandType::OEM ||
         type == OperandType::TENSOR_OEM_BYTE) {
         return true;
     }
-    for (const Operation& operation : model.operations) {
+    for (const Operation& operation : model.main.operations) {
         // Skip mutateOperationOperandTypeTest for the following operations.
         // - LSH_PROJECTION's second argument is allowed to have any type.
         // - ARGMIN and ARGMAX's first argument can be any of
@@ -401,7 +400,7 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con
 }
 
 static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+    for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
         for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
             if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
                 continue;
             }
             const std::string message = "mutateOperationOperandTypeTest: operand " +
                                         std::to_string(operand) + " set to type " +
                                         toString(invalidOperandType);
             validate(device, message, model, [operand, invalidOperandType](Model* model) {
-                mutateOperand(&model->operands[operand], invalidOperandType);
+                mutateOperand(&model->main.operands[operand], invalidOperandType);
             });
         }
     }
 }
 
 ///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
 
@@ -425,13 +424,13 @@ static const uint32_t invalidOperationTypes[] = {
 };
 
 static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
         for (uint32_t invalidOperationType : invalidOperationTypes) {
             const std::string message = "mutateOperationTypeTest: operation " +
                                         std::to_string(operation) + " set to value " +
                                         std::to_string(invalidOperationType);
             validate(device, message, model, [operation, invalidOperationType](Model* model) {
-                model->operations[operation].type =
+                model->main.operations[operation].type =
                         static_cast<OperationType>(invalidOperationType);
             });
         }
     }
 }
 
 ///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
 
 static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
-        const uint32_t invalidOperand = model.operands.size();
-        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.main.operands.size();
+        for (size_t input = 0; input < model.main.operations[operation].inputs.size(); ++input) {
             const std::string message = "mutateOperationInputOperandIndexTest: operation " +
                                         std::to_string(operation) + " input " +
                                         std::to_string(input);
             validate(device, message, model, [operation, input, invalidOperand](Model* model) {
-                model->operations[operation].inputs[input] = invalidOperand;
+                model->main.operations[operation].inputs[input] = invalidOperand;
             });
         }
     }
 }
 
 ///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
 
 static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
-        const uint32_t invalidOperand = model.operands.size();
-        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.main.operands.size();
+        for (size_t output = 0; output < model.main.operations[operation].outputs.size();
+             ++output) {
             const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
                                         std::to_string(operation) + " output " +
                                         std::to_string(output);
             validate(device, message, model, [operation, output, invalidOperand](Model* model) {
-                model->operations[operation].outputs[output] = invalidOperand;
+                model->main.operations[operation].outputs[output] = invalidOperand;
             });
         }
     }
 }
 
 ///////////////////////// REMOVE OPERAND /////////////////////////
 
@@ -485,17 +485,17 @@ static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32
 }
 
 static void removeOperand(Model* model, uint32_t index) {
-    hidl_vec_removeAt(&model->operands, index);
-    for (Operation& operation : model->operations) {
+    hidl_vec_removeAt(&model->main.operands, index);
+    for (Operation& operation : model->main.operations) {
         removeValueAndDecrementGreaterValues(&operation.inputs, index);
         removeValueAndDecrementGreaterValues(&operation.outputs, index);
     }
-    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
-    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->main.inputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->main.outputIndexes, index);
 }
 
 static bool removeOperandSkip(size_t operand, const Model& model) {
-    for (const Operation& operation : model.operations) {
+    for (const Operation& operation : model.main.operations) {
         // Skip removeOperandTest for the following operations.
         // - SPLIT's outputs are not checked during prepareModel.
         if (operation.type == OperationType::SPLIT) {
@@ -520,7 +520,7 @@ static bool removeOperandSkip(size_t operand, const Model& model) {
 }
 
 static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+    for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
         if (removeOperandSkip(operand, model)) {
             continue;
         }
         const std::string message = "removeOperandTest: operand " + std::to_string(operand);
         validate(device, message, model,
                  [operand](Model* model) { removeOperand(model, operand); });
     }
 }
 
 ///////////////////////// REMOVE OPERATION /////////////////////////
 
 static void removeOperation(Model* model, uint32_t index) {
-    for (uint32_t operand : model->operations[index].inputs) {
-        model->operands[operand].numberOfConsumers--;
+    for (uint32_t operand : model->main.operations[index].inputs) {
+        model->main.operands[operand].numberOfConsumers--;
     }
-    hidl_vec_removeAt(&model->operations, index);
+    hidl_vec_removeAt(&model->main.operations, index);
 }
 
 static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
         const std::string message = "removeOperationTest: operation " + std::to_string(operation);
         validate(device, message, model,
                  [operation](Model* model) { removeOperation(model, operation); });
     }
 }
@@ -615,9 +615,9 @@ static bool removeOperationInputSkip(const Operation& op, size_t input) {
 }
 
 static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
-        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
-            const Operation& op = model.operations[operation];
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+        for (size_t input = 0; input < model.main.operations[operation].inputs.size(); ++input) {
+            const Operation& op = model.main.operations[operation];
             if (removeOperationInputSkip(op, input)) {
                 continue;
             }
             const std::string message = "removeOperationInputTest: operation " +
                                         std::to_string(operation) + ", input " +
                                         std::to_string(input);
             validate(device, message, model, [operation, input](Model* model) {
-                uint32_t operand = model->operations[operation].inputs[input];
-                model->operands[operand].numberOfConsumers--;
-                hidl_vec_removeAt(&model->operations[operation].inputs, input);
+                uint32_t operand = model->main.operations[operation].inputs[input];
+                model->main.operands[operand].numberOfConsumers--;
+                hidl_vec_removeAt(&model->main.operations[operation].inputs, input);
             });
         }
     }
 }
 
 ///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
 
 static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
-        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+        for (size_t output = 0; output < model.main.operations[operation].outputs.size();
+             ++output) {
             const std::string message = "removeOperationOutputTest: operation " +
                                         std::to_string(operation) + ", output " +
                                         std::to_string(output);
             validate(device, message, model, [operation, output](Model* model) {
-                hidl_vec_removeAt(&model->operations[operation].outputs, output);
+                hidl_vec_removeAt(&model->main.operations[operation].outputs, output);
             });
         }
     }
 }
@@ -669,15 +670,15 @@ static bool addOperationInputSkip(const Operation& op) {
 }
 
 static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
-        if (addOperationInputSkip(model.operations[operation])) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+        if (addOperationInputSkip(model.main.operations[operation])) {
             continue;
         }
         const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
-            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
-            hidl_vec_push_back(&model->operations[operation].inputs, index);
-            hidl_vec_push_back(&model->inputIndexes, index);
+            uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_INPUT);
+            hidl_vec_push_back(&model->main.operations[operation].inputs, index);
+            hidl_vec_push_back(&model->main.inputIndexes, index);
         });
     }
 }
 
 ///////////////////////// ADD OPERATION OUTPUT /////////////////////////
 
 static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
-    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+    for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
         const std::string message =
                 "addOperationOutputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
-            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
-            hidl_vec_push_back(&model->operations[operation].outputs, index);
-            hidl_vec_push_back(&model->outputIndexes, index);
+            uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_OUTPUT);
+            hidl_vec_push_back(&model->main.operations[operation].outputs, index);
+            hidl_vec_push_back(&model->main.outputIndexes, index);
        });
     }
 }
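Editor's note (not part of the patch series): the sketch below illustrates how the pieces added by patch 2 fit together — a SUBGRAPH operand in Model.main ties an IF or WHILE operation to an entry in Model.referenced. It is a minimal example against the generated V1_3 types, not driver or VTS code from the patches; the helper name makeSubgraphOperand is hypothetical. The location encoding (poolIndex = 0, offset = index into Model::referenced, length = 0) is the rule documented in the Operand.location hunk above.

    #include <android/hardware/neuralnetworks/1.3/types.h>

    using android::hardware::neuralnetworks::V1_3::Operand;
    using android::hardware::neuralnetworks::V1_3::OperandLifeTime;
    using android::hardware::neuralnetworks::V1_3::OperandType;

    // Builds an operand that refers to model.referenced[subgraphIndex]. An IF
    // operation in the main subgraph lists two such operands (inputs 1 and 2)
    // for its "then" and "else" branches; a WHILE lists them as inputs 0 and 1
    // for its condition and body subgraphs.
    static Operand makeSubgraphOperand(uint32_t subgraphIndex) {
        return {
                .type = OperandType::SUBGRAPH,
                .dimensions = {},
                .numberOfConsumers = 1,  // consumed by the IF/WHILE operation
                .scale = 0.0f,
                .zeroPoint = 0,
                .lifetime = OperandLifeTime::SUBGRAPH,
                // Per the SUBGRAPH lifetime rules: poolIndex is 0, offset is
                // the index of the referenced subgraph, and length is 0.
                .location = {.poolIndex = 0, .offset = subgraphIndex, .length = 0},
        };
    }

Note the asymmetry this encoding creates: CONSTANT_COPY and CONSTANT_REFERENCE operands point into operandValues or a memory pool, while control-flow operands reuse DataLocation to index Model::referenced — so validation logic (such as ValidateModel.cpp above) would be expected to check location.offset against referenced.size() rather than against a pool.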