From dc5d7c26e2816bb4387ec9e08a25ff87b4347152 Mon Sep 17 00:00:00 2001 From: Kevin Rocard Date: Mon, 8 Apr 2019 11:15:07 -0700 Subject: [PATCH 01/19] Audio HAL: Add missing device to XSD The XSD has to be kept manually synced to the HAL definition. When some formats were introduced and the corresponding enum values were added in the HAL .hal, the XSD was not updated. Test: xmllint --noout --schema hardware/interfaces/audio/4.0/config/audio_policy_configuration.xsd --xinclude out/target/product/*/vendor/etc/audio_policy_configuration.xml Bug: 128967080 Change-Id: I832398f325d4de18e0deb644f11060fe27d9bdb5 Signed-off-by: Kevin Rocard --- audio/4.0/config/audio_policy_configuration.xsd | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/audio/4.0/config/audio_policy_configuration.xsd b/audio/4.0/config/audio_policy_configuration.xsd index 58bab227f7..f26e41b423 100644 --- a/audio/4.0/config/audio_policy_configuration.xsd +++ b/audio/4.0/config/audio_policy_configuration.xsd @@ -357,6 +357,10 @@ + + + + From 82299438b59a1eda41529edb08419cb514247e5e Mon Sep 17 00:00:00 2001 From: Brian Duddie Date: Mon, 7 Oct 2019 15:53:27 -0700 Subject: [PATCH 02/19] Add sensors 1.0 default HAL to uhid group Ensures it can access /dev/uinput in Android Q, sepolicy permitting. Bug: 142105193 Test: confirm hall sensor works again on marlin Change-Id: I585c32d4da4bdc0917068e4d81adeca43d257e56 --- sensors/1.0/default/android.hardware.sensors@1.0-service.rc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc index b54842d66f..db340af5cd 100644 --- a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc +++ b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc @@ -1,6 +1,6 @@ service vendor.sensors-hal-1-0 /vendor/bin/hw/android.hardware.sensors@1.0-service class hal user system - group system wakelock + group system wakelock uhid capabilities BLOCK_SUSPEND rlimit rtprio 10 10 From 53fc8ed1c1a8e77da6c9d38bd62f1e3726e678a9 Mon Sep 17 00:00:00 2001 From: Joshua Duong Date: Thu, 10 Oct 2019 18:32:45 +0000 Subject: [PATCH 03/19] Revert "Audio HAL: Add missing device to XSD" This reverts commit dc5d7c26e2816bb4387ec9e08a25ff87b4347152. Reason for revert: qt-dev build break Change-Id: I9871b313e2ad23cec2d57019ffd5993a5c31caef --- audio/4.0/config/audio_policy_configuration.xsd | 4 ---- 1 file changed, 4 deletions(-) diff --git a/audio/4.0/config/audio_policy_configuration.xsd b/audio/4.0/config/audio_policy_configuration.xsd index f26e41b423..58bab227f7 100644 --- a/audio/4.0/config/audio_policy_configuration.xsd +++ b/audio/4.0/config/audio_policy_configuration.xsd @@ -357,10 +357,6 @@ - - - - From abf6b1c889242c0f73bf270e8dfb868b15d82f5b Mon Sep 17 00:00:00 2001 From: Kevin Rocard Date: Mon, 8 Apr 2019 11:15:07 -0700 Subject: [PATCH 04/19] DO NOT MERGE: Audio HAL: Add missing device to XSD The XSD has to be kept manually synced to the HAL definition. When some formats were introduced and the corresponding enum values were added in the HAL .hal, the XSD was not updated. This change is for P branch only. 
Test: xmllint --noout --schema hardware/interfaces/audio/4.0/config/audio_policy_configuration.xsd --xinclude out/target/product/*/vendor/etc/audio_policy_configuration.xml Bug: 128967080 Change-Id: Iad91c510b9b908fdf5cabc46c61d7c687f1acd1e --- audio/4.0/config/audio_policy_configuration.xsd | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/audio/4.0/config/audio_policy_configuration.xsd b/audio/4.0/config/audio_policy_configuration.xsd index 58bab227f7..f26e41b423 100644 --- a/audio/4.0/config/audio_policy_configuration.xsd +++ b/audio/4.0/config/audio_policy_configuration.xsd @@ -357,6 +357,10 @@ + + + + From 7f0ee628b142bdb75469b064491d61f7048204d8 Mon Sep 17 00:00:00 2001 From: Lev Proleev Date: Thu, 8 Aug 2019 14:08:31 +0100 Subject: [PATCH 05/19] Create NNAPI HAL v1.3 and add TENSOR_QUANT8_ASYMM_SIGNED OperandType Bug: 137828494 Bug: 139120468 Bug: 136735770 Test: mma Change-Id: I28f74e4b364fec1d7431a96cf5687256b3106069 Merged-In: I28f74e4b364fec1d7431a96cf5687256b3106069 (cherry picked from commit 5a7b67ab8fafd171f07d0ba99338f85c42993e0f) --- current.txt | 2 + neuralnetworks/1.2/vts/functional/Android.bp | 1 + neuralnetworks/1.3/Android.bp | 21 ++ neuralnetworks/1.3/IDevice.hal | 171 +++++++++ neuralnetworks/1.3/types.hal | 361 +++++++++++++++++++ 5 files changed, 556 insertions(+) create mode 100644 neuralnetworks/1.3/Android.bp create mode 100644 neuralnetworks/1.3/IDevice.hal create mode 100644 neuralnetworks/1.3/types.hal diff --git a/current.txt b/current.txt index 21db2328f8..4c5f15500d 100644 --- a/current.txt +++ b/current.txt @@ -586,6 +586,8 @@ fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardwar # HALs released in Android R 07d0a252b2d8fa35887908a996ba395cf392968395fc30afab791f46e0c22a52 android.hardware.boot@1.1::IBootControl 74049a402be913963edfdd80828a53736570e9d8124a1bf18166b6ed46a6b0ab android.hardware.boot@1.1::types +34515afa2bb792d3c6d8495a5f5d907d179c8507ca5e55c10050d02ae1d516ef android.hardware.neuralnetworks@1.3::IDevice +e2d20d4eb24f40b44a3766d05f77052581cb3f4df35fb48c0cc5d9cdcf5c872e android.hardware.neuralnetworks@1.3::types 544049dcda3f943ad67d83d5277f06681a3782982a9af5a78b5d4e8d295d061a android.hardware.vibrator@1.4::IVibrator 5e1c12efbbba89c9143d10b1b90eceff8bc79aa079f5106215b528e104fef101 android.hardware.vibrator@1.4::IVibratorCallback 033eae03c09ebc75e82db37bc39995dfaa9086745577b44d9e14e9ccb48bd8cc android.hardware.vibrator@1.4::types diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp index 89ab62a0a5..9c50d36ec5 100644 --- a/neuralnetworks/1.2/vts/functional/Android.bp +++ b/neuralnetworks/1.2/vts/functional/Android.bp @@ -37,6 +37,7 @@ cc_test { "android.hardware.neuralnetworks@1.0", "android.hardware.neuralnetworks@1.1", "android.hardware.neuralnetworks@1.2", + "android.hardware.neuralnetworks@1.3", "android.hidl.allocator@1.0", "android.hidl.memory@1.0", "libgmock", diff --git a/neuralnetworks/1.3/Android.bp b/neuralnetworks/1.3/Android.bp new file mode 100644 index 0000000000..0615ec67dd --- /dev/null +++ b/neuralnetworks/1.3/Android.bp @@ -0,0 +1,21 @@ +// This file is autogenerated by hidl-gen -Landroidbp. 
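+// The module below registers the android.hardware.neuralnetworks@1.3 package with the
+// HIDL build, compiling types.hal and IDevice.hal against the 1.0-1.2 interfaces it extends.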
+ +hidl_interface { + name: "android.hardware.neuralnetworks@1.3", + root: "android.hardware", + vndk: { + enabled: true, + }, + srcs: [ + "types.hal", + "IDevice.hal", + ], + interfaces: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", + "android.hidl.base@1.0", + "android.hidl.safe_union@1.0", + ], + gen_java: false, +} diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal new file mode 100644 index 0000000000..ee36fb4e51 --- /dev/null +++ b/neuralnetworks/1.3/IDevice.hal @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.hardware.neuralnetworks@1.3; + +import @1.0::ErrorStatus; +import @1.1::ExecutionPreference; +import @1.2::Constant; +import @1.2::DeviceType; +import @1.2::Extension; +import @1.2::IDevice; +import @1.2::IPreparedModelCallback; + +/** + * This interface represents a device driver. + */ +interface IDevice extends @1.2::IDevice { + /** + * Gets the capabilities of a driver. + * + * @return status Error status of the call, must be: + * - NONE if successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * @return capabilities Capabilities of the driver. + */ + getCapabilities_1_3() generates (ErrorStatus status, Capabilities capabilities); + + /** + * Gets the supported operations in a model. + * + * getSupportedOperations indicates which operations of a model are fully + * supported by the vendor driver. If an operation may not be supported for + * any reason, getSupportedOperations must return false for that operation. + * + * @param model A model whose operations--and their corresponding operands-- + * are to be verified by the driver. + * @return status Error status of the call, must be: + * - NONE if successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if provided model is invalid + * @return supportedOperations A list of supported operations, where true + * indicates the operation is supported and false indicates the + * operation is not supported. The index of "supported" corresponds with + * the index of the operation it is describing. + */ + getSupportedOperations_1_3(Model model) + generates (ErrorStatus status, vec supportedOperations); + + /** + * Asynchronously creates a prepared model for execution and optionally + * saves it into cache files. + * + * prepareModel is used to make any necessary transformations to or + * alternative representations to a model for execution, possibly including + * transformations on the constant data, optimization on the model's graph, + * or compilation into the device's native binary format. The model itself + * is not changed. 
+ * + * Optionally, caching information may be provided for the driver to save + * the prepared model to cache files for faster model compilation time when + * the same model preparation is requested in the future. There are two + * types of cache file handles provided to the driver: model cache and data + * cache. For more information on the two types of cache handles, refer to + * getNumberOfCacheFilesNeeded. + * + * The file descriptors must be opened with read and write permission. A + * file may have any size, and the corresponding file descriptor may have + * any offset. The driver must truncate a file to zero size before writing + * to that file. The file descriptors may be closed by the client once the + * asynchronous preparation has finished. The driver must dup a file + * descriptor if it wants to get access to the cache file later. + * + * The model is prepared asynchronously with respect to the caller. The + * prepareModel function must verify the inputs to the preparedModel + * function related to preparing the model (as opposed to saving the + * prepared model to cache) are correct. If there is an error, prepareModel + * must immediately invoke the callback with the appropriate ErrorStatus + * value and nullptr for the IPreparedModel, then return with the same + * ErrorStatus. If the inputs to the prepareModel function that are related + * to preparing the model are valid and there is no error, prepareModel must + * launch an asynchronous task to prepare the model in the background, and + * immediately return from prepareModel with ErrorStatus::NONE. If the + * asynchronous task fails to launch, prepareModel must immediately invoke + * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the + * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE. + * + * When the asynchronous task has finished preparing the model, it must + * immediately invoke the callback function provided as an input to + * prepareModel. If the model was prepared successfully, the callback object + * must be invoked with an error status of ErrorStatus::NONE and the + * produced IPreparedModel object. If an error occurred preparing the model, + * the callback object must be invoked with the appropriate ErrorStatus + * value and nullptr for the IPreparedModel. + * + * Optionally, the driver may save the prepared model to cache during the + * asynchronous preparation. Any error that occurs when saving to cache must + * not affect the status of preparing the model. Even if the input arguments + * related to the cache may be invalid, or the driver may fail to save to + * cache, the prepareModel function must finish preparing the model. The + * driver may choose not to save to cache even if the caching information is + * provided and valid. + * + * The only information that may be unknown to the model at this stage is + * the shape of the tensors, which may only be known at execution time. As + * such, some driver services may return partially prepared models, where + * the prepared model may only be finished when it is paired with a set of + * inputs to the model. Note that the same prepared model object may be used + * with different shapes of inputs on different (possibly concurrent) + * executions. + * + * Multiple threads may call prepareModel on the same model concurrently. + * + * @param model The model to be prepared for execution. + * @param preference Indicates the intended execution behavior of a prepared + * model. 
+ * @param modelCache A vector of handles with each entry holding exactly one + * cache file descriptor for the security-sensitive cache. The length of + * the vector must either be 0 indicating that caching information is + * not provided, or match the numModelCache returned from + * getNumberOfCacheFilesNeeded. The cache handles will be provided in + * the same order when retrieving the preparedModel from cache files + * with prepareModelFromCache. + * @param dataCache A vector of handles with each entry holding exactly one + * cache file descriptor for the constants' cache. The length of the + * vector must either be 0 indicating that caching information is not + * provided, or match the numDataCache returned from + * getNumberOfCacheFilesNeeded. The cache handles will be provided in + * the same order when retrieving the preparedModel from cache files + * with prepareModelFromCache. + * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN + * identifying the prepared model. The same token will be provided when + * retrieving the prepared model from the cache files with + * prepareModelFromCache. Tokens should be chosen to have a low rate of + * collision for a particular application. The driver cannot detect a + * collision; a collision will result in a failed execution or in a + * successful execution that produces incorrect output values. If both + * modelCache and dataCache are empty indicating that caching + * information is not provided, this token must be ignored. + * @param callback A callback object used to return the error status of + * preparing the model for execution and the prepared model if + * successful, nullptr otherwise. The callback object's notify function + * must be called exactly once, even if the model could not be prepared. + * @return status Error status of launching a task which prepares the model + * in the background; must be: + * - NONE if preparation task is successfully launched + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if one of the input arguments related to preparing + * the model is invalid + */ + prepareModel_1_3(Model model, ExecutionPreference preference, + vec modelCache, vec dataCache, + uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token, + IPreparedModelCallback callback) + generates (ErrorStatus status); +}; diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal new file mode 100644 index 0000000000..db5dd51017 --- /dev/null +++ b/neuralnetworks/1.3/types.hal @@ -0,0 +1,361 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package android.hardware.neuralnetworks@1.3; + +import @1.0::DataLocation; +import @1.0::OperandLifeTime; +import @1.0::PerformanceInfo; +import @1.2::OperandType; +import @1.2::OperationType; +import @1.2::SymmPerChannelQuantParams; + +import android.hidl.safe_union@1.0::Monostate; + +/** + * NOTE: Since NNAPI 1.2, OEM operation and data type are deprecated. Extensions + * are the preferred alternative. + * + * NOTE: Adding a new fundamental type requires updating the value of + * OperandTypeRange::FUNDAMENTAL_MAX. + */ +enum OperandType : @1.2::OperandType { + /** + * A tensor of 8 bit signed integers that represent real numbers. + * + * Attached to this tensor are two numbers that can be used to convert the + * 8 bit integer to the real value and vice versa. These two numbers are: + * - scale: a 32 bit floating point value greater than zero. + * - zeroPoint: a 32 bit integer, in range [-128, 127]. + * + * The formula is: + * real_value = (integer_value - zeroPoint) * scale. + * + * Available since API level 30. + */ + TENSOR_QUANT8_ASYMM_SIGNED = 14, +}; + +/** + * The range of operand values in the OperandType enum. + */ +enum OperandTypeRange : uint32_t { + BASE_MIN = 0, + FUNDAMENTAL_MIN = 0, + FUNDAMENTAL_MAX = 14, + OEM_MIN = 10000, + OEM_MAX = 10001, + BASE_MAX = 0xFFFF, +}; + + +/** + * The capabilities of a driver. + * + * Performance of an operation comes from the type of its first operand. + * This represents performance for non extension operand types. + */ +struct Capabilities { + /** + * Driver performance when operating on float32 data but performing + * calculations with range and/or precision as low as that of the IEEE + * 754 16-bit floating-point format. + */ + PerformanceInfo relaxedFloat32toFloat16PerformanceScalar; + PerformanceInfo relaxedFloat32toFloat16PerformanceTensor; + + /** + * Driver performance when operating on a particular data type. + * In the case of float32 data, this is used when the calculations + * are not relaxed. + */ + struct OperandPerformance { + OperandType type; + PerformanceInfo info; + }; + + /** + * Performance by operand type. Must be sorted by OperandType. + * If a particular OperandType is not present in operandPerformance, + * its performance is treated as + * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }. + */ + vec operandPerformance; +}; + +/** + * Describes one operand of the model's graph. + */ +struct Operand { + /** + * The data type. + * + * Besides the values listed in {@link OperandType}, any value above + * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted + * as an extension type according to {@link Model::extensionNameToPrefix}. + */ + OperandType type; + + /** + * Dimensions of the operand. + * + * For a scalar operand, dimensions.size() must be 0. + * + * A tensor operand with all dimensions specified has "fully + * specified" dimensions. Whenever possible (i.e., whenever the + * dimensions are known at model construction time), a tensor + * operand should have (but is not required to have) fully + * specified dimensions, in order to enable the best possible + * performance. + * + * If a tensor operand's dimensions are not fully specified, the + * dimensions of the operand are deduced from the operand + * dimensions and values of the operation for which that operand + * is an output. + * + * In the following situations, a tensor operand's dimensions must + * be fully specified: + * + * . The operand has lifetime CONSTANT_COPY or + * CONSTANT_REFERENCE. + * + * . 
The operand has lifetime MODEL_INPUT. Fully + * specified dimensions must either be present in the + * Operand or they must be provided in the corresponding + * RequestArgument. + * EXCEPTION: If the input is optional and omitted + * (by setting the hasNoValue field of the corresponding + * RequestArgument to true) then it need not have fully + * specified dimensions. + * + * A tensor operand with some number of unspecified dimensions is + * represented by setting each unspecified dimension to 0. + * + * A tensor operand with unspecified rank is represented by providing + * an empty dimensions vector. + */ + vec dimensions; + + /** + * The number of times this operand appears as an operation input. + * + * (For example, if this operand appears once in one operation's + * input list, and three times in another operation's input list, + * then numberOfConsumers = 4.) + */ + uint32_t numberOfConsumers; + + /** + * Quantized scale of the operand. + * + * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or + * TENSOR_INT32. + */ + float scale; + + /** + * Quantized zero-point offset of the operand. + * + * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM. + */ + int32_t zeroPoint; + + /** + * How the operand is used. + */ + OperandLifeTime lifetime; + + /** + * Where to find the data for this operand. + * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or + * NO_VALUE: + * - All the fields must be 0. + * If the lifetime is CONSTANT_COPY: + * - location.poolIndex is 0. + * - location.offset is the offset in bytes into Model.operandValues. + * - location.length is set. + * If the lifetime is CONSTANT_REFERENCE: + * - location.poolIndex is set. + * - location.offset is the offset in bytes into the specified pool. + * - location.length is set. + */ + DataLocation location; + + /** + * Additional parameters specific to a particular operand type. + */ + safe_union ExtraParams { + /** + * No additional parameters. + */ + Monostate none; + + /** + * Symmetric per-channel quantization parameters. + * + * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL. + */ + SymmPerChannelQuantParams channelQuant; + + /** + * Extension operand parameters. + * + * The framework treats this as an opaque data blob. + * The format is up to individual extensions. + */ + vec extension; + } extraParams; +}; + +/** + * Describes one operation of the model's graph. + */ +struct Operation { + /** + * The operation type. + */ + OperationType type; + + /** + * Describes the table that contains the indexes of the inputs of the + * operation. The offset is the index in the operandIndexes table. + */ + vec inputs; + + /** + * Describes the table that contains the indexes of the outputs of the + * operation. The offset is the index in the operandIndexes table. + */ + vec outputs; +}; + +/** + * A Neural Network Model. + * + * This includes not only the execution graph, but also constant data such as + * weights or scalars added at construction time. The only information that + * may not be known is the shape of the input tensors. + */ +struct Model { + /** + * All operands included in the model. + */ + vec operands; + + /** + * All operations included in the model. + * + * The operations are sorted into execution order. Every operand + * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be + * written before it is read. + */ + vec operations; + + /** + * Input indexes of the model. There must be at least one. 
+ * + * Each value corresponds to the index of the operand in "operands". + */ + vec inputIndexes; + + /** + * Output indexes of the model. There must be at least one. + * + * Each value corresponds to the index of the operand in "operands". + */ + vec outputIndexes; + + /** + * A byte buffer containing operand data that were copied into the model. + * + * An operand's value must be located here if and only if Operand::lifetime + * equals OperandLifeTime::CONSTANT_COPY. + */ + vec operandValues; + + /** + * A collection of shared memory pools containing operand values. + * + * An operand's value must be located here if and only if Operand::lifetime + * equals OperandLifeTime::CONSTANT_REFERENCE. + */ + vec pools; + + /** + * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or + * precision as low as that of the IEEE 754 16-bit floating-point format. + * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the + * range and precision of the IEEE 754 32-bit floating-point format. + */ + bool relaxComputationFloat32toFloat16; + + /** + * The mapping between extension names and prefixes of operand and + * operation type values. + * + * An operand or operation whose numeric type value is above + * {@link OperandTypeRange::BASE_MAX} or + * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted + * as an extension operand. The low + * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value + * correspond to the type ID within the extension and the high + * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode + * the "prefix", which maps uniquely to the extension name. + * + * For example, if a model contains an operation whose value is + * 0xAAAABBBB and extensionNameToPrefix contains an entry with + * prefix=0xAAAA and name="vendor.test.test_extension", then + * the operation should be interpreted as the operation 0xBBBB + * of the extension named vendor.test.test_extension. + * + * This is a one-to-one correspondence. That is, there must be at most one + * prefix corresponding to each extension name and at most one extension + * name corresponding to each prefix. + */ + vec extensionNameToPrefix; + + /** + * A correspondence between an extension name and a prefix of operand and + * operation type values. + */ + struct ExtensionNameAndPrefix { + /** + * The extension name. + * + * See {@link Extension::name} for the format specification. + */ + string name; + + /** + * The unique extension identifier within the model. + * + * See {@link Model::extensionNameToPrefix}. + */ + uint16_t prefix; + }; + + /** + * Numeric values of extension operand and operation types have the + * following structure: + * - 16 high bits represent the "prefix", which corresponds uniquely to the + * extension name. + * - 16 low bits represent the type ID within the extension. 
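+ *
+ * For example, a numeric type value of 0xAAAABBBB decomposes into the prefix
+ * 0xAAAA (the high 16 bits) and the within-extension type ID 0xBBBB (the low
+ * 16 bits).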
+ */ + enum ExtensionTypeEncoding : uint8_t { + HIGH_BITS_PREFIX = 16, + LOW_BITS_TYPE = 16, + }; +}; From 4d00307c5c167d72b8e81b2a134f7ab4a5e05fbd Mon Sep 17 00:00:00 2001 From: Lev Proleev Date: Fri, 30 Aug 2019 11:35:34 +0100 Subject: [PATCH 06/19] Copy VTS tests from v1.2 to v1.3 So that it's easier to see what actually has changed in VTS tests for version 1.3 Bug: 139120468 Test: m Change-Id: I09797f5f3898501a008186a22dd411b00e9e2c67 Merged-In: I09797f5f3898501a008186a22dd411b00e9e2c67 (cherry picked from commit 3b13b55ac1532647ee6f261489d23ca4269c1440) --- neuralnetworks/1.3/vts/OWNERS | 16 + .../1.3/vts/functional/BasicTests.cpp | 114 ++ .../1.3/vts/functional/Callbacks.cpp | 143 ++ .../functional/CompilationCachingTests.cpp | 1374 +++++++++++++++++ .../vts/functional/GeneratedTestHarness.cpp | 408 +++++ .../1.3/vts/functional/GeneratedTestHarness.h | 65 + .../1.3/vts/functional/TestAssertions.cpp | 141 ++ .../1.3/vts/functional/ValidateBurst.cpp | 400 +++++ .../1.3/vts/functional/ValidateModel.cpp | 713 +++++++++ .../1.3/vts/functional/ValidateRequest.cpp | 168 ++ .../vts/functional/VtsHalNeuralnetworks.cpp | 171 ++ .../1.3/vts/functional/VtsHalNeuralnetworks.h | 57 + .../vts/functional/include/1.2/Callbacks.h | 325 ++++ 13 files changed, 4095 insertions(+) create mode 100644 neuralnetworks/1.3/vts/OWNERS create mode 100644 neuralnetworks/1.3/vts/functional/BasicTests.cpp create mode 100644 neuralnetworks/1.3/vts/functional/Callbacks.cpp create mode 100644 neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp create mode 100644 neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp create mode 100644 neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h create mode 100644 neuralnetworks/1.3/vts/functional/TestAssertions.cpp create mode 100644 neuralnetworks/1.3/vts/functional/ValidateBurst.cpp create mode 100644 neuralnetworks/1.3/vts/functional/ValidateModel.cpp create mode 100644 neuralnetworks/1.3/vts/functional/ValidateRequest.cpp create mode 100644 neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp create mode 100644 neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h create mode 100644 neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h diff --git a/neuralnetworks/1.3/vts/OWNERS b/neuralnetworks/1.3/vts/OWNERS new file mode 100644 index 0000000000..b5a8e1f473 --- /dev/null +++ b/neuralnetworks/1.3/vts/OWNERS @@ -0,0 +1,16 @@ +# Neuralnetworks team +butlermichael@google.com +dgross@google.com +jeanluc@google.com +levp@google.com +miaowang@google.com +mikie@google.com +mks@google.com +pszczepaniak@google.com +slavash@google.com +vddang@google.com +xusongw@google.com + +# VTS team +yim@google.com +yuexima@google.com diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp new file mode 100644 index 0000000000..8e82c5376e --- /dev/null +++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +namespace android::hardware::neuralnetworks::V1_2::vts::functional { + +using V1_0::DeviceStatus; +using V1_0::ErrorStatus; +using V1_0::PerformanceInfo; + +// create device test +TEST_P(NeuralnetworksHidlTest, CreateDevice) {} + +// status test +TEST_P(NeuralnetworksHidlTest, StatusTest) { + Return status = kDevice->getStatus(); + ASSERT_TRUE(status.isOk()); + EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast(status)); +} + +// initialization +TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { + using OperandPerformance = Capabilities::OperandPerformance; + Return ret = kDevice->getCapabilities_1_2([](ErrorStatus status, + const Capabilities& capabilities) { + EXPECT_EQ(ErrorStatus::NONE, status); + + auto isPositive = [](const PerformanceInfo& perf) { + return perf.execTime > 0.0f && perf.powerUsage > 0.0f; + }; + + EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + const auto& opPerf = capabilities.operandPerformance; + EXPECT_TRUE(std::all_of( + opPerf.begin(), opPerf.end(), + [isPositive](const OperandPerformance& a) { return isPositive(a.info); })); + EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(), + [](const OperandPerformance& a, const OperandPerformance& b) { + return a.type < b.type; + })); + }); + EXPECT_TRUE(ret.isOk()); +} + +// device version test +TEST_P(NeuralnetworksHidlTest, GetDeviceVersionStringTest) { + Return ret = + kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_LT(0, version.size()); + }); + EXPECT_TRUE(ret.isOk()); +} + +// device type test +TEST_P(NeuralnetworksHidlTest, GetDeviceTypeTest) { + Return ret = kDevice->getType([](ErrorStatus status, DeviceType type) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU || + type == DeviceType::GPU || type == DeviceType::ACCELERATOR); + }); + EXPECT_TRUE(ret.isOk()); +} + +// device supported extensions test +TEST_P(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) { + Return ret = kDevice->getSupportedExtensions( + [](ErrorStatus status, const hidl_vec& extensions) { + EXPECT_EQ(ErrorStatus::NONE, status); + for (auto& extension : extensions) { + std::string extensionName = extension.name; + EXPECT_FALSE(extensionName.empty()); + for (char c : extensionName) { + EXPECT_TRUE(('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '_' || + c == '.') + << "Extension name contains an illegal character: " << c; + } + EXPECT_NE(extensionName.find('.'), std::string::npos) + << "Extension name must start with the reverse domain name of the " + "vendor"; + } + }); + EXPECT_TRUE(ret.isOk()); +} + +// getNumberOfCacheFilesNeeded test +TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) { + Return ret = kDevice->getNumberOfCacheFilesNeeded( + [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_LE(numModelCache, + static_cast(Constant::MAX_NUMBER_OF_CACHE_FILES)); + EXPECT_LE(numDataCache, static_cast(Constant::MAX_NUMBER_OF_CACHE_FILES)); + }); + EXPECT_TRUE(ret.isOk()); +} +} // namespace android::hardware::neuralnetworks::V1_2::vts::functional diff --git 
a/neuralnetworks/1.3/vts/functional/Callbacks.cpp b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
new file mode 100644
index 0000000000..3972ad6ff2
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Callbacks"
+
+#include "1.2/Callbacks.h"
+
+#include <android-base/logging.h>
+
+#include <limits>
+
+namespace android::hardware::neuralnetworks::V1_2::implementation {
+
+using V1_0::ErrorStatus;
+
+constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
+                              .timeInDriver = std::numeric_limits<uint64_t>::max()};
+
+// PreparedModelCallback methods begin here
+
+Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    {
+        std::lock_guard<std::mutex> hold(mMutex);
+
+        // quick-return if object has already been notified
+        if (mNotified) {
+            return Void();
+        }
+
+        // store results and mark as notified
+        mErrorStatus = errorStatus;
+        mPreparedModel = preparedModel;
+        mNotified = true;
+    }
+
+    mCondition.notify_all();
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    return notify(errorStatus, preparedModel);
+}
+
+void PreparedModelCallback::wait() const {
+    std::unique_lock<std::mutex> lock(mMutex);
+    mCondition.wait(lock, [this] { return mNotified; });
+}
+
+ErrorStatus PreparedModelCallback::getStatus() const {
+    wait();
+    return mErrorStatus;
+}
+
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const {
+    wait();
+    return mPreparedModel;
+}
+
+// ExecutionCallback methods begin here
+
+Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
+    notifyInternal(errorStatus, {}, kNoTiming);
+    return Void();
+}
+
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
+    if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() == 0) {
+            LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
+            notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+            return Void();
+        }
+    } else if (errorStatus != ErrorStatus::NONE) {
+        // outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
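+        // A non-empty vector here violates that contract, so the callback records
+        // GENERAL_FAILURE rather than propagating the bogus shapes.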
+ if (outputShapes.size() != 0) { + LOG(ERROR) << "Notified with non-empty output shape vector when error status is " + "neither NONE nor OUTPUT_INSUFFICIENT_SIZE"; + notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + return Void(); + } + } + notifyInternal(errorStatus, outputShapes, timing); + return Void(); +} + +void ExecutionCallback::wait() const { + std::unique_lock lock(mMutex); + mCondition.wait(lock, [this] { return mNotified; }); +} + +ErrorStatus ExecutionCallback::getStatus() const { + wait(); + return mErrorStatus; +} + +const std::vector& ExecutionCallback::getOutputShapes() const { + wait(); + return mOutputShapes; +} + +Timing ExecutionCallback::getTiming() const { + wait(); + return mTiming; +} + +void ExecutionCallback::notifyInternal(ErrorStatus errorStatus, + const hidl_vec& outputShapes, + const Timing& timing) { + { + std::lock_guard hold(mMutex); + + // quick-return if object has already been notified + if (mNotified) { + return; + } + + mErrorStatus = errorStatus; + mOutputShapes = outputShapes; + mTiming = timing; + mNotified = true; + } + mCondition.notify_all(); +} + +} // namespace android::hardware::neuralnetworks::V1_2::implementation diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp new file mode 100644 index 0000000000..2130a76b75 --- /dev/null +++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp @@ -0,0 +1,1374 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "1.2/Callbacks.h" +#include "GeneratedTestHarness.h" +#include "MemoryUtils.h" +#include "TestHarness.h" +#include "Utils.h" +#include "VtsHalNeuralnetworks.h" + +// Forward declaration of the mobilenet generated test models in +// frameworks/ml/nn/runtime/test/generated/. +namespace generated_tests::mobilenet_224_gender_basic_fixed { +const test_helper::TestModel& get_test_model(); +} // namespace generated_tests::mobilenet_224_gender_basic_fixed + +namespace generated_tests::mobilenet_quantized { +const test_helper::TestModel& get_test_model(); +} // namespace generated_tests::mobilenet_quantized + +namespace android::hardware::neuralnetworks::V1_2::vts::functional { + +using namespace test_helper; +using implementation::PreparedModelCallback; +using V1_0::ErrorStatus; +using V1_1::ExecutionPreference; + +namespace float32_model { + +constexpr auto get_test_model = generated_tests::mobilenet_224_gender_basic_fixed::get_test_model; + +} // namespace float32_model + +namespace quant8_model { + +constexpr auto get_test_model = generated_tests::mobilenet_quantized::get_test_model; + +} // namespace quant8_model + +namespace { + +enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY }; + +// Creates cache handles based on provided file groups. 
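+// Each file is opened with the requested AccessMode, and the resulting fds are wrapped in a
+// native_handle_t owned by the corresponding hidl_handle.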
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle. +void createCacheHandles(const std::vector>& fileGroups, + const std::vector& mode, hidl_vec* handles) { + handles->resize(fileGroups.size()); + for (uint32_t i = 0; i < fileGroups.size(); i++) { + std::vector fds; + for (const auto& file : fileGroups[i]) { + int fd; + if (mode[i] == AccessMode::READ_ONLY) { + fd = open(file.c_str(), O_RDONLY); + } else if (mode[i] == AccessMode::WRITE_ONLY) { + fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); + } else if (mode[i] == AccessMode::READ_WRITE) { + fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + } else { + FAIL(); + } + ASSERT_GE(fd, 0); + fds.push_back(fd); + } + native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0); + ASSERT_NE(cacheNativeHandle, nullptr); + std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]); + (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true); + } +} + +void createCacheHandles(const std::vector>& fileGroups, AccessMode mode, + hidl_vec* handles) { + createCacheHandles(fileGroups, std::vector(fileGroups.size(), mode), handles); +} + +// Create a chain of broadcast operations. The second operand is always constant tensor [1]. +// For simplicity, activation scalar is shared. The second operand is not shared +// in the model to let driver maintain a non-trivial size of constant data and the corresponding +// data locations in cache. +// +// --------- activation -------- +// ↓ ↓ ↓ ↓ +// E.g. input -> ADD -> ADD -> ADD -> ... -> ADD -> output +// ↑ ↑ ↑ ↑ +// [1] [1] [1] [1] +// +// This function assumes the operation is either ADD or MUL. +template +TestModel createLargeTestModelImpl(TestOperationType op, uint32_t len) { + EXPECT_TRUE(op == TestOperationType::ADD || op == TestOperationType::MUL); + + // Model operations and operands. + std::vector operations(len); + std::vector operands(len * 2 + 2); + + // The activation scalar, value = 0. + operands[0] = { + .type = TestOperandType::INT32, + .dimensions = {}, + .numberOfConsumers = len, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = TestOperandLifeTime::CONSTANT_COPY, + .data = TestBuffer::createFromVector({0}), + }; + + // The buffer value of the constant second operand. The logical value is always 1.0f. + CppType bufferValue; + // The scale of the first and second operand. + float scale1, scale2; + if (operandType == TestOperandType::TENSOR_FLOAT32) { + bufferValue = 1.0f; + scale1 = 0.0f; + scale2 = 0.0f; + } else if (op == TestOperationType::ADD) { + bufferValue = 1; + scale1 = 1.0f; + scale2 = 1.0f; + } else { + // To satisfy the constraint on quant8 MUL: input0.scale * input1.scale < output.scale, + // set input1 to have scale = 0.5f and bufferValue = 2, i.e. 1.0f in floating point. + bufferValue = 2; + scale1 = 1.0f; + scale2 = 0.5f; + } + + for (uint32_t i = 0; i < len; i++) { + const uint32_t firstInputIndex = i * 2 + 1; + const uint32_t secondInputIndex = firstInputIndex + 1; + const uint32_t outputIndex = secondInputIndex + 1; + + // The first operation input. + operands[firstInputIndex] = { + .type = operandType, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = scale1, + .zeroPoint = 0, + .lifetime = (i == 0 ? TestOperandLifeTime::MODEL_INPUT + : TestOperandLifeTime::TEMPORARY_VARIABLE), + .data = (i == 0 ? TestBuffer::createFromVector({1}) : TestBuffer()), + }; + + // The second operation input, value = 1. 
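+        // Deliberately not shared between operations, so the driver keeps a non-trivial
+        // amount of constant data and distinct data locations in the cache.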
+ operands[secondInputIndex] = { + .type = operandType, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = scale2, + .zeroPoint = 0, + .lifetime = TestOperandLifeTime::CONSTANT_COPY, + .data = TestBuffer::createFromVector({bufferValue}), + }; + + // The operation. All operations share the same activation scalar. + // The output operand is created as an input in the next iteration of the loop, in the case + // of all but the last member of the chain; and after the loop as a model output, in the + // case of the last member of the chain. + operations[i] = { + .type = op, + .inputs = {firstInputIndex, secondInputIndex, /*activation scalar*/ 0}, + .outputs = {outputIndex}, + }; + } + + // For TestOperationType::ADD, output = 1 + 1 * len = len + 1 + // For TestOperationType::MUL, output = 1 * 1 ^ len = 1 + CppType outputResult = static_cast(op == TestOperationType::ADD ? len + 1u : 1u); + + // The model output. + operands.back() = { + .type = operandType, + .dimensions = {1}, + .numberOfConsumers = 0, + .scale = scale1, + .zeroPoint = 0, + .lifetime = TestOperandLifeTime::MODEL_OUTPUT, + .data = TestBuffer::createFromVector({outputResult}), + }; + + return { + .operands = std::move(operands), + .operations = std::move(operations), + .inputIndexes = {1}, + .outputIndexes = {len * 2 + 1}, + .isRelaxed = false, + }; +} + +} // namespace + +// Tag for the compilation caching tests. +class CompilationCachingTestBase : public testing::Test { + protected: + CompilationCachingTestBase(sp device, OperandType type) + : kDevice(std::move(device)), kOperandType(type) {} + + void SetUp() override { + testing::Test::SetUp(); + ASSERT_NE(kDevice.get(), nullptr); + + // Create cache directory. The cache directory and a temporary cache file is always created + // to test the behavior of prepareModelFromCache, even when caching is not supported. + char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX"; + char* cacheDir = mkdtemp(cacheDirTemp); + ASSERT_NE(cacheDir, nullptr); + mCacheDir = cacheDir; + mCacheDir.push_back('/'); + + Return ret = kDevice->getNumberOfCacheFilesNeeded( + [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { + EXPECT_EQ(ErrorStatus::NONE, status); + mNumModelCache = numModelCache; + mNumDataCache = numDataCache; + }); + EXPECT_TRUE(ret.isOk()); + mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0; + + // Create empty cache files. + mTmpCache = mCacheDir + "tmp"; + for (uint32_t i = 0; i < mNumModelCache; i++) { + mModelCache.push_back({mCacheDir + "model" + std::to_string(i)}); + } + for (uint32_t i = 0; i < mNumDataCache; i++) { + mDataCache.push_back({mCacheDir + "data" + std::to_string(i)}); + } + // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files. + hidl_vec modelHandle, dataHandle, tmpHandle; + createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle); + createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle); + createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle); + + if (!mIsCachingSupported) { + LOG(INFO) << "NN VTS: Early termination of test because vendor service does not " + "support compilation caching."; + std::cout << "[ ] Early termination of test because vendor service does not " + "support compilation caching." + << std::endl; + } + } + + void TearDown() override { + // If the test passes, remove the tmp directory. Otherwise, keep it for debugging purposes. 
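+        // FTW_DEPTH visits children before their parent directory, so remove() can
+        // delete the files first and the emptied directories afterwards.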
+ if (!testing::Test::HasFailure()) { + // Recursively remove the cache directory specified by mCacheDir. + auto callback = [](const char* entry, const struct stat*, int, struct FTW*) { + return remove(entry); + }; + nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS); + } + testing::Test::TearDown(); + } + + // Model and examples creators. According to kOperandType, the following methods will return + // either float32 model/examples or the quant8 variant. + TestModel createTestModel() { + if (kOperandType == OperandType::TENSOR_FLOAT32) { + return float32_model::get_test_model(); + } else { + return quant8_model::get_test_model(); + } + } + + TestModel createLargeTestModel(OperationType op, uint32_t len) { + if (kOperandType == OperandType::TENSOR_FLOAT32) { + return createLargeTestModelImpl( + static_cast(op), len); + } else { + return createLargeTestModelImpl( + static_cast(op), len); + } + } + + // See if the service can handle the model. + bool isModelFullySupported(const Model& model) { + bool fullySupportsModel = false; + Return supportedCall = kDevice->getSupportedOperations_1_2( + model, + [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec& supported) { + ASSERT_EQ(ErrorStatus::NONE, status); + ASSERT_EQ(supported.size(), model.operations.size()); + fullySupportsModel = std::all_of(supported.begin(), supported.end(), + [](bool valid) { return valid; }); + }); + EXPECT_TRUE(supportedCall.isOk()); + return fullySupportsModel; + } + + void saveModelToCache(const Model& model, const hidl_vec& modelCache, + const hidl_vec& dataCache, + sp* preparedModel = nullptr) { + if (preparedModel != nullptr) *preparedModel = nullptr; + + // Launch prepare model. + sp preparedModelCallback = new PreparedModelCallback(); + hidl_array cacheToken(mToken); + Return prepareLaunchStatus = + kDevice->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, + modelCache, dataCache, cacheToken, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(static_cast(prepareLaunchStatus), ErrorStatus::NONE); + + // Retrieve prepared model. + preparedModelCallback->wait(); + ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE); + if (preparedModel != nullptr) { + *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel()) + .withDefault(nullptr); + } + } + + bool checkEarlyTermination(ErrorStatus status) { + if (status == ErrorStatus::GENERAL_FAILURE) { + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "save the prepared model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "save the prepared model that it does not support." + << std::endl; + return true; + } + return false; + } + + bool checkEarlyTermination(const Model& model) { + if (!isModelFullySupported(model)) { + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "prepare model that it does not support." + << std::endl; + return true; + } + return false; + } + + void prepareModelFromCache(const hidl_vec& modelCache, + const hidl_vec& dataCache, + sp* preparedModel, ErrorStatus* status) { + // Launch prepare model from cache. 
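+        // Unlike saveModelToCache, a launch failure is reported through *status so the
+        // caller can assert on the expected error code.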
+ sp preparedModelCallback = new PreparedModelCallback(); + hidl_array cacheToken(mToken); + Return prepareLaunchStatus = kDevice->prepareModelFromCache( + modelCache, dataCache, cacheToken, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + if (static_cast(prepareLaunchStatus) != ErrorStatus::NONE) { + *preparedModel = nullptr; + *status = static_cast(prepareLaunchStatus); + return; + } + + // Retrieve prepared model. + preparedModelCallback->wait(); + *status = preparedModelCallback->getStatus(); + *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel()) + .withDefault(nullptr); + } + + // Absolute path to the temporary cache directory. + std::string mCacheDir; + + // Groups of file paths for model and data cache in the tmp cache directory, initialized with + // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles + // and the inner vector is for fds held by each handle. + std::vector> mModelCache; + std::vector> mDataCache; + + // A separate temporary file path in the tmp cache directory. + std::string mTmpCache; + + uint8_t mToken[static_cast(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {}; + uint32_t mNumModelCache; + uint32_t mNumDataCache; + uint32_t mIsCachingSupported; + + const sp kDevice; + // The primary data type of the testModel. + const OperandType kOperandType; +}; + +using CompilationCachingTestParam = std::tuple; + +// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first +// pass running with float32 models and the second pass running with quant8 models. +class CompilationCachingTest : public CompilationCachingTestBase, + public testing::WithParamInterface { + protected: + CompilationCachingTest() + : CompilationCachingTestBase(getData(std::get(GetParam())), + std::get(GetParam())) {} +}; + +TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) { + // Create test HIDL model and compile. + const TestModel& testModel = createTestModel(); + const Model model = createModel(testModel); + if (checkEarlyTermination(model)) return; + sp preparedModel = nullptr; + + // Save the compilation to cache. + { + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(model, modelCache, dataCache); + } + + // Retrieve preparedModel from cache. + { + preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (!mIsCachingSupported) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + ASSERT_EQ(preparedModel, nullptr); + return; + } else if (checkEarlyTermination(status)) { + ASSERT_EQ(preparedModel, nullptr); + return; + } else { + ASSERT_EQ(status, ErrorStatus::NONE); + ASSERT_NE(preparedModel, nullptr); + } + } + + // Execute and verify results. + EvaluatePreparedModel(preparedModel, testModel, + /*testDynamicOutputShape=*/false); +} + +TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) { + // Create test HIDL model and compile. + const TestModel& testModel = createTestModel(); + const Model model = createModel(testModel); + if (checkEarlyTermination(model)) return; + sp preparedModel = nullptr; + + // Save the compilation to cache. 
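+    // The cache files are first seeded with two dummy bytes so that both saving and
+    // retrieval must cope with non-empty files and non-zero fd offsets.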
+ { + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + uint8_t dummyBytes[] = {0, 0}; + // Write a dummy integer to the cache. + // The driver should be able to handle non-empty cache and non-zero fd offset. + for (uint32_t i = 0; i < modelCache.size(); i++) { + ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes, + sizeof(dummyBytes)), + sizeof(dummyBytes)); + } + for (uint32_t i = 0; i < dataCache.size(); i++) { + ASSERT_EQ( + write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)), + sizeof(dummyBytes)); + } + saveModelToCache(model, modelCache, dataCache); + } + + // Retrieve preparedModel from cache. + { + preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + uint8_t dummyByte = 0; + // Advance the offset of each handle by one byte. + // The driver should be able to handle non-zero fd offset. + for (uint32_t i = 0; i < modelCache.size(); i++) { + ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0); + } + for (uint32_t i = 0; i < dataCache.size(); i++) { + ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0); + } + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (!mIsCachingSupported) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + ASSERT_EQ(preparedModel, nullptr); + return; + } else if (checkEarlyTermination(status)) { + ASSERT_EQ(preparedModel, nullptr); + return; + } else { + ASSERT_EQ(status, ErrorStatus::NONE); + ASSERT_NE(preparedModel, nullptr); + } + } + + // Execute and verify results. + EvaluatePreparedModel(preparedModel, testModel, + /*testDynamicOutputShape=*/false); +} + +TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) { + // Create test HIDL model and compile. + const TestModel& testModel = createTestModel(); + const Model model = createModel(testModel); + if (checkEarlyTermination(model)) return; + + // Test with number of model cache files greater than mNumModelCache. + { + hidl_vec modelCache, dataCache; + // Pass an additional cache file for model cache. + mModelCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.pop_back(); + sp preparedModel = nullptr; + saveModelToCache(model, modelCache, dataCache, &preparedModel); + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + EvaluatePreparedModel(preparedModel, testModel, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of model cache files smaller than mNumModelCache. + if (mModelCache.size() > 0) { + hidl_vec modelCache, dataCache; + // Pop out the last cache file. 
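+        // The popped path is pushed back after the handles are created, leaving
+        // mModelCache intact for the remaining test cases.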
+        auto tmp = mModelCache.back();
+        mModelCache.pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache.push_back(tmp);
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Test with number of data cache files greater than mNumDataCache.
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        // Pass an additional cache file for data cache.
+        mDataCache.push_back({mTmpCache});
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache.pop_back();
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Test with number of data cache files smaller than mNumDataCache.
+    if (mDataCache.size() > 0) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        // Pop out the last cache file.
+        auto tmp = mDataCache.back();
+        mDataCache.pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache.push_back(tmp);
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+    // Create test HIDL model and compile.
+    const TestModel& testModel = createTestModel();
+    const Model model = createModel(testModel);
+    if (checkEarlyTermination(model)) return;
+
+    // Save the compilation to cache.
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(model, modelCache, dataCache);
+    }
+
+    // Test with number of model cache files greater than mNumModelCache.
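+    // (The blocks below accept either INVALID_ARGUMENT or GENERAL_FAILURE: a driver may
+    // reasonably report either one for a malformed set of cache handles, so the test does
+    // not insist on a single error code.)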
+    {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        mModelCache.push_back({mTmpCache});
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache.pop_back();
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Test with number of model cache files smaller than mNumModelCache.
+    if (mModelCache.size() > 0) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        auto tmp = mModelCache.back();
+        mModelCache.pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache.push_back(tmp);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Test with number of data cache files greater than mNumDataCache.
+    {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        mDataCache.push_back({mTmpCache});
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache.pop_back();
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Test with number of data cache files smaller than mNumDataCache.
+    if (mDataCache.size() > 0) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        auto tmp = mDataCache.back();
+        mDataCache.pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache.push_back(tmp);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
+    // Create test HIDL model and compile.
+    const TestModel& testModel = createTestModel();
+    const Model model = createModel(testModel);
+    if (checkEarlyTermination(model)) return;
+
+    // Go through each handle in model cache, test with NumFd greater than 1.
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        // Pass an invalid number of fds for handle i.
+        mModelCache[i].push_back(mTmpCache);
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache[i].pop_back();
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in model cache, test with NumFd equal to 0.
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        // Pass an invalid number of fds for handle i.
+        auto tmp = mModelCache[i].back();
+        mModelCache[i].pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache[i].push_back(tmp);
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in data cache, test with NumFd greater than 1.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        // Pass an invalid number of fds for handle i.
+        mDataCache[i].push_back(mTmpCache);
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache[i].pop_back();
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in data cache, test with NumFd equal to 0.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        // Pass an invalid number of fds for handle i.
+        auto tmp = mDataCache[i].back();
+        mDataCache[i].pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache[i].push_back(tmp);
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
+    // Create test HIDL model and compile.
+    const TestModel& testModel = createTestModel();
+    const Model model = createModel(testModel);
+    if (checkEarlyTermination(model)) return;
+
+    // Save the compilation to cache.
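+    // (A valid save is performed first so that well-formed cache files exist on disk;
+    // each block below then corrupts only the number of fds passed at retrieval time,
+    // making the fd-count check the one thing under test.)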
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(model, modelCache, dataCache);
+    }
+
+    // Go through each handle in model cache, test with NumFd greater than 1.
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        mModelCache[i].push_back(mTmpCache);
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache[i].pop_back();
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in model cache, test with NumFd equal to 0.
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        auto tmp = mModelCache[i].back();
+        mModelCache[i].pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mModelCache[i].push_back(tmp);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in data cache, test with NumFd greater than 1.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        mDataCache[i].push_back(mTmpCache);
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache[i].pop_back();
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in data cache, test with NumFd equal to 0.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        auto tmp = mDataCache[i].back();
+        mDataCache[i].pop_back();
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        mDataCache[i].push_back(tmp);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::GENERAL_FAILURE) {
+            ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
+    // Create test HIDL model and compile.
+    const TestModel& testModel = createTestModel();
+    const Model model = createModel(testModel);
+    if (checkEarlyTermination(model)) return;
+    std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+    std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
+
+    // Go through each handle in model cache, test with invalid access mode.
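+    // (saveModelToCache() needs to write through these fds, so a READ_ONLY fd is the
+    // invalid mode exercised here; the mirror test below hands WRITE_ONLY fds to
+    // prepareModelFromCache(), which needs to read.)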
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        modelCacheMode[i] = AccessMode::READ_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        modelCacheMode[i] = AccessMode::READ_WRITE;
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in data cache, test with invalid access mode.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        dataCacheMode[i] = AccessMode::READ_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        dataCacheMode[i] = AccessMode::READ_WRITE;
+        sp<IPreparedModel> preparedModel = nullptr;
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
+        ASSERT_NE(preparedModel, nullptr);
+        // Execute and verify results.
+        EvaluatePreparedModel(preparedModel, testModel,
+                              /*testDynamicOutputShape=*/false);
+        // Check if prepareModelFromCache fails.
+        preparedModel = nullptr;
+        ErrorStatus status;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        if (status != ErrorStatus::INVALID_ARGUMENT) {
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        }
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
+    // Create test HIDL model and compile.
+    const TestModel& testModel = createTestModel();
+    const Model model = createModel(testModel);
+    if (checkEarlyTermination(model)) return;
+    std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+    std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
+
+    // Save the compilation to cache.
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(model, modelCache, dataCache);
+    }
+
+    // Go through each handle in model cache, test with invalid access mode.
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        modelCacheMode[i] = AccessMode::WRITE_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        modelCacheMode[i] = AccessMode::READ_WRITE;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+
+    // Go through each handle in data cache, test with invalid access mode.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        dataCacheMode[i] = AccessMode::WRITE_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        dataCacheMode[i] = AccessMode::READ_WRITE;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+// Copy file contents between file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+// The outer vector sizes must match and the inner vectors must have size = 1.
+static void copyCacheFiles(const std::vector<std::vector<std::string>>& from,
+                           const std::vector<std::vector<std::string>>& to) {
+    constexpr size_t kBufferSize = 1000000;
+    uint8_t buffer[kBufferSize];
+
+    ASSERT_EQ(from.size(), to.size());
+    for (uint32_t i = 0; i < from.size(); i++) {
+        ASSERT_EQ(from[i].size(), 1u);
+        ASSERT_EQ(to[i].size(), 1u);
+        int fromFd = open(from[i][0].c_str(), O_RDONLY);
+        int toFd = open(to[i][0].c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+        ASSERT_GE(fromFd, 0);
+        ASSERT_GE(toFd, 0);
+
+        ssize_t readBytes;
+        while ((readBytes = read(fromFd, &buffer, kBufferSize)) > 0) {
+            ASSERT_EQ(write(toFd, &buffer, readBytes), readBytes);
+        }
+        ASSERT_GE(readBytes, 0);
+
+        close(fromFd);
+        close(toFd);
+    }
+}
+
+// Number of operations in the large test model.
+constexpr uint32_t kLargeModelSize = 100;
+constexpr uint32_t kNumIterationsTOCTOU = 100;
+
+TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
+    if (!mIsCachingSupported) return;
+
+    // Create test models and check if fully supported by the service.
+    const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+    const Model modelMul = createModel(testModelMul);
+    if (checkEarlyTermination(modelMul)) return;
+    const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+    const Model modelAdd = createModel(testModelAdd);
+    if (checkEarlyTermination(modelAdd)) return;
+
+    // Save the modelMul compilation to cache.
+    auto modelCacheMul = mModelCache;
+    for (auto& cache : modelCacheMul) {
+        cache[0].append("_mul");
+    }
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(modelMul, modelCache, dataCache);
+    }
+
+    // Use a different token for modelAdd.
+    mToken[0]++;
+
+    // This test is probabilistic, so we run it multiple times.
+    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
+        // Save the modelAdd compilation to cache.
+        {
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+
+            // Spawn a thread to copy the cache content concurrently while saving to cache.
+            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
+            saveModelToCache(modelAdd, modelCache, dataCache);
+            thread.join();
+        }
+
+        // Retrieve preparedModel from cache.
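+        // (The helper thread copies modelMul's cache files, saved above under the "_mul"
+        // names, over modelAdd's files while the save is still in flight, so the retrieval
+        // below may see a self-consistent cache, a stale one, or a torn mix of the two.)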
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+
+            // The preparation may fail or succeed, but must not crash. If the preparation succeeds,
+            // the prepared model must be executed with the correct result and not crash.
+            if (status != ErrorStatus::NONE) {
+                ASSERT_EQ(preparedModel, nullptr);
+            } else {
+                ASSERT_NE(preparedModel, nullptr);
+                EvaluatePreparedModel(preparedModel, testModelAdd,
+                                      /*testDynamicOutputShape=*/false);
+            }
+        }
+    }
+}
+
+TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
+    if (!mIsCachingSupported) return;
+
+    // Create test models and check if fully supported by the service.
+    const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+    const Model modelMul = createModel(testModelMul);
+    if (checkEarlyTermination(modelMul)) return;
+    const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+    const Model modelAdd = createModel(testModelAdd);
+    if (checkEarlyTermination(modelAdd)) return;
+
+    // Save the modelMul compilation to cache.
+    auto modelCacheMul = mModelCache;
+    for (auto& cache : modelCacheMul) {
+        cache[0].append("_mul");
+    }
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(modelMul, modelCache, dataCache);
+    }
+
+    // Use a different token for modelAdd.
+    mToken[0]++;
+
+    // This test is probabilistic, so we run it multiple times.
+    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
+        // Save the modelAdd compilation to cache.
+        {
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(modelAdd, modelCache, dataCache);
+        }
+
+        // Retrieve preparedModel from cache.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+
+            // Spawn a thread to copy the cache content concurrently while preparing from cache.
+            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+            thread.join();
+
+            // The preparation may fail or succeed, but must not crash. If the preparation succeeds,
+            // the prepared model must be executed with the correct result and not crash.
+            if (status != ErrorStatus::NONE) {
+                ASSERT_EQ(preparedModel, nullptr);
+            } else {
+                ASSERT_NE(preparedModel, nullptr);
+                EvaluatePreparedModel(preparedModel, testModelAdd,
+                                      /*testDynamicOutputShape=*/false);
+            }
+        }
+    }
+}
+
+TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
+    if (!mIsCachingSupported) return;
+
+    // Create test models and check if fully supported by the service.
+    const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+    const Model modelMul = createModel(testModelMul);
+    if (checkEarlyTermination(modelMul)) return;
+    const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+    const Model modelAdd = createModel(testModelAdd);
+    if (checkEarlyTermination(modelAdd)) return;
+
+    // Save the modelMul compilation to cache.
+    auto modelCacheMul = mModelCache;
+    for (auto& cache : modelCacheMul) {
+        cache[0].append("_mul");
+    }
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(modelMul, modelCache, dataCache);
+    }
+
+    // Use a different token for modelAdd.
+    mToken[0]++;
+
+    // Save the modelAdd compilation to cache.
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(modelAdd, modelCache, dataCache);
+    }
+
+    // Replace the model cache of modelAdd with modelMul.
+    copyCacheFiles(modelCacheMul, mModelCache);
+
+    // Retrieve the preparedModel from cache, expect failure.
+    {
+        sp<IPreparedModel> preparedModel = nullptr;
+        ErrorStatus status;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+        ASSERT_EQ(preparedModel, nullptr);
+    }
+}
+
+static const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());
+static const auto kOperandTypeChoices =
+        testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
+
+std::string printCompilationCachingTest(
+        const testing::TestParamInfo<CompilationCachingTestParam>& info) {
+    const auto& [namedDevice, operandType] = info.param;
+    const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+    return gtestCompliantName(getName(namedDevice) + "_" + type);
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest,
+                        testing::Combine(kNamedDeviceChoices, kOperandTypeChoices),
+                        printCompilationCachingTest);
+
+using CompilationCachingSecurityTestParam = std::tuple<NamedDevice, OperandType, uint32_t>;
+
+class CompilationCachingSecurityTest
+    : public CompilationCachingTestBase,
+      public testing::WithParamInterface<CompilationCachingSecurityTestParam> {
+  protected:
+    CompilationCachingSecurityTest()
+        : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+                                     std::get<OperandType>(GetParam())) {}
+
+    void SetUp() {
+        CompilationCachingTestBase::SetUp();
+        generator.seed(kSeed);
+    }
+
+    // Get a random integer within a closed range [lower, upper].
+    template <typename T>
+    T getRandomInt(T lower, T upper) {
+        std::uniform_int_distribution<T> dis(lower, upper);
+        return dis(generator);
+    }
+
+    // Randomly flip one single bit of the cache entry.
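+    // (For instance, if the byte read back is 0x5A and the random bit index is 3, the
+    // byte written in its place is 0x5A ^ 0x08 == 0x52 -- a single-bit corruption.)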
+    void flipOneBitOfCache(const std::string& filename, bool* skip) {
+        FILE* pFile = fopen(filename.c_str(), "r+");
+        ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+        long int fileSize = ftell(pFile);
+        if (fileSize == 0) {
+            fclose(pFile);
+            *skip = true;
+            return;
+        }
+        ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+        int readByte = fgetc(pFile);
+        ASSERT_NE(readByte, EOF);
+        ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+        ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+        fclose(pFile);
+        *skip = false;
+    }
+
+    // Randomly append bytes to the cache entry.
+    void appendBytesToCache(const std::string& filename, bool* skip) {
+        FILE* pFile = fopen(filename.c_str(), "a");
+        uint32_t appendLength = getRandomInt(1, 256);
+        for (uint32_t i = 0; i < appendLength; i++) {
+            ASSERT_NE(fputc(getRandomInt(0, 255), pFile), EOF);
+        }
+        fclose(pFile);
+        *skip = false;
+    }
+
+    enum class ExpectedResult { GENERAL_FAILURE, NOT_CRASH };
+
+    // Test if the driver behaves as expected when given corrupted cache or token.
+    // The modifier will be invoked after save to cache but before prepare from cache.
+    // The modifier accepts one pointer argument "skip" as the returning value, indicating
+    // whether the test should be skipped or not.
+    void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
+        const TestModel& testModel = createTestModel();
+        const Model model = createModel(testModel);
+        if (checkEarlyTermination(model)) return;
+
+        // Save the compilation to cache.
+        {
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(model, modelCache, dataCache);
+        }
+
+        bool skip = false;
+        modifier(&skip);
+        if (skip) return;
+
+        // Retrieve preparedModel from cache.
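+        // (The two expectations map to the two cache types: model cache is
+        // security-sensitive, so corruption must surface as GENERAL_FAILURE; data cache
+        // only has to avoid crashing, which is what the NOT_CRASH arm below allows.)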
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+
+            switch (expected) {
+                case ExpectedResult::GENERAL_FAILURE:
+                    ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+                    ASSERT_EQ(preparedModel, nullptr);
+                    break;
+                case ExpectedResult::NOT_CRASH:
+                    ASSERT_EQ(preparedModel == nullptr, status != ErrorStatus::NONE);
+                    break;
+                default:
+                    FAIL();
+            }
+        }
+    }
+
+    const uint32_t kSeed = std::get<uint32_t>(GetParam());
+    std::mt19937 generator;
+};
+
+TEST_P(CompilationCachingSecurityTest, CorruptedModelCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
+                           [this, i](bool* skip) { flipOneBitOfCache(mModelCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthModelCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
+                           [this, i](bool* skip) { appendBytesToCache(mModelCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, CorruptedDataCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        testCorruptedCache(ExpectedResult::NOT_CRASH,
+                           [this, i](bool* skip) { flipOneBitOfCache(mDataCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthDataCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        testCorruptedCache(ExpectedResult::NOT_CRASH,
+                           [this, i](bool* skip) { appendBytesToCache(mDataCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongToken) {
+    if (!mIsCachingSupported) return;
+    testCorruptedCache(ExpectedResult::GENERAL_FAILURE, [this](bool* skip) {
+        // Randomly flip one single bit in mToken.
+        uint32_t ind =
+                getRandomInt(0u, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
+        mToken[ind] ^= (1U << getRandomInt(0, 7));
+        *skip = false;
+    });
+}
+
+std::string printCompilationCachingSecurityTest(
+        const testing::TestParamInfo<CompilationCachingSecurityTestParam>& info) {
+    const auto& [namedDevice, operandType, seed] = info.param;
+    const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+    return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + std::to_string(seed));
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
+                        testing::Combine(kNamedDeviceChoices, kOperandTypeChoices,
+                                         testing::Range(0U, 10U)),
+                        printCompilationCachingSecurityTest);
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000000..2beec983e0
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GeneratedTestHarness.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using namespace test_helper;
+using hidl::memory::V1_0::IMemory;
+using implementation::ExecutionCallback;
+using implementation::PreparedModelCallback;
+using V1_0::DataLocation;
+using V1_0::ErrorStatus;
+using V1_0::OperandLifeTime;
+using V1_0::Request;
+using V1_1::ExecutionPreference;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+
+Model createModel(const TestModel& testModel) {
+    // Model operands.
+    hidl_vec<Operand> operands(testModel.operands.size());
+    size_t constCopySize = 0, constRefSize = 0;
+    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+        const auto& op = testModel.operands[i];
+
+        DataLocation loc = {};
+        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+            loc = {.poolIndex = 0,
+                   .offset = static_cast<uint32_t>(constCopySize),
+                   .length = static_cast<uint32_t>(op.data.size())};
+            constCopySize += op.data.alignedSize();
+        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+            loc = {.poolIndex = 0,
+                   .offset = static_cast<uint32_t>(constRefSize),
+                   .length = static_cast<uint32_t>(op.data.size())};
+            constRefSize += op.data.alignedSize();
+        }
+
+        Operand::ExtraParams extraParams;
+        if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+            extraParams.channelQuant(SymmPerChannelQuantParams{
+                    .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
+        }
+
+        operands[i] = {.type = static_cast<OperandType>(op.type),
+                       .dimensions = op.dimensions,
+                       .numberOfConsumers = op.numberOfConsumers,
+                       .scale = op.scale,
+                       .zeroPoint = op.zeroPoint,
+                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
+                       .location = loc,
+                       .extraParams = std::move(extraParams)};
+    }
+
+    // Model operations.
+    hidl_vec<Operation> operations(testModel.operations.size());
+    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
+                   [](const TestOperation& op) -> Operation {
+                       return {.type = static_cast<OperationType>(op.type),
+                               .inputs = op.inputs,
+                               .outputs = op.outputs};
+                   });
+
+    // Constant copies.
+    hidl_vec<uint8_t> operandValues(constCopySize);
+    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+        const auto& op = testModel.operands[i];
+        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+            const uint8_t* begin = op.data.get<uint8_t>();
+            const uint8_t* end = begin + op.data.size();
+            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
+        }
+    }
+
+    // Shared memory.
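+    // (Pool 0 is the single shared-memory region the model uses for constants: the
+    // CONSTANT_REFERENCE offsets computed above index into it, mirroring how the
+    // CONSTANT_COPY offsets index into operandValues.)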
+    hidl_vec<hidl_memory> pools = {};
+    if (constRefSize > 0) {
+        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+        CHECK_NE(pools[0].size(), 0u);
+
+        // load data
+        sp<IMemory> mappedMemory = mapMemory(pools[0]);
+        CHECK(mappedMemory.get() != nullptr);
+        uint8_t* mappedPtr =
+                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+        CHECK(mappedPtr != nullptr);
+
+        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+            const auto& op = testModel.operands[i];
+            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+                const uint8_t* begin = op.data.get<uint8_t>();
+                const uint8_t* end = begin + op.data.size();
+                std::copy(begin, end, mappedPtr + operands[i].location.offset);
+            }
+        }
+    }
+
+    return {.operands = std::move(operands),
+            .operations = std::move(operations),
+            .inputIndexes = testModel.inputIndexes,
+            .outputIndexes = testModel.outputIndexes,
+            .operandValues = std::move(operandValues),
+            .pools = std::move(pools),
+            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
+}
+
+static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
+    const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
+    return byteSize > 1u;
+}
+
+static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
+    auto& length = request->outputs[outputIndex].location.length;
+    ASSERT_GT(length, 1u);
+    length -= 1u;
+}
+
+static void makeOutputDimensionsUnspecified(Model* model) {
+    for (auto i : model->outputIndexes) {
+        auto& dims = model->operands[i].dimensions;
+        std::fill(dims.begin(), dims.end(), 0);
+    }
+}
+
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
+                                                const Request& request, MeasureTiming measure,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute_1_2(request, measure, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
+                                                const Request& request, MeasureTiming measure,
+                                                hidl_vec<OutputShape>* outputShapes,
+                                                Timing* timing) {
+    ErrorStatus result;
+    Return<void> ret = preparedModel->executeSynchronously(
+            request, measure,
+            [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+                                            const Timing& time) {
+                result = error;
+                *outputShapes = shapes;
+                *timing = time;
+            });
+    if (!ret.isOk()) {
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    return result;
+}
+static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+        const sp<IPreparedModel>& preparedModel) {
+    return android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+}
+enum class Executor { ASYNC, SYNC, BURST };
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+                           Executor executor, MeasureTiming measure, OutputType outputType) {
+    // If output0 is not larger than one byte, we cannot test with an insufficient buffer.
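+    // (makeOutputInsufficientSize() shrinks output0's length by exactly one byte, which
+    // is why a one-byte output leaves nothing to shrink.)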
+    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+        return;
+    }
+
+    Request request = createRequest(testModel);
+    if (outputType == OutputType::INSUFFICIENT) {
+        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
+    }
+
+    ErrorStatus executionStatus;
+    hidl_vec<OutputShape> outputShapes;
+    Timing timing;
+    switch (executor) {
+        case Executor::ASYNC: {
+            SCOPED_TRACE("asynchronous");
+
+            // launch execution
+            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+            Return<ErrorStatus> executionLaunchStatus =
+                    ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+            ASSERT_TRUE(executionLaunchStatus.isOk());
+            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+            // retrieve execution status
+            executionCallback->wait();
+            executionStatus = executionCallback->getStatus();
+            outputShapes = executionCallback->getOutputShapes();
+            timing = executionCallback->getTiming();
+
+            break;
+        }
+        case Executor::SYNC: {
+            SCOPED_TRACE("synchronous");
+
+            // execute
+            Return<ErrorStatus> executionReturnStatus =
+                    ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+            ASSERT_TRUE(executionReturnStatus.isOk());
+            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+            break;
+        }
+        case Executor::BURST: {
+            SCOPED_TRACE("burst");
+
+            // create burst
+            const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
+                    CreateBurst(preparedModel);
+            ASSERT_NE(nullptr, controller.get());
+
+            // create memory keys
+            std::vector<intptr_t> keys(request.pools.size());
+            for (size_t i = 0; i < keys.size(); ++i) {
+                keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+            }
+
+            // execute burst
+            std::tie(executionStatus, outputShapes, timing) =
+                    controller->compute(request, measure, keys);
+
+            break;
+        }
+    }
+
+    if (outputType != OutputType::FULLY_SPECIFIED &&
+        executionStatus == ErrorStatus::GENERAL_FAILURE) {
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                     "execute model that it does not support.";
+        std::cout << "[          ]   Early termination of test because vendor service cannot "
+                     "execute model that it does not support."
+                  << std::endl;
+        GTEST_SKIP();
+    }
+    if (measure == MeasureTiming::NO) {
+        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+    } else {
+        if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+            EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+        }
+    }
+
+    switch (outputType) {
+        case OutputType::FULLY_SPECIFIED:
+            // If the model output operands are fully specified, outputShapes must be
+            // either empty, or have the same number of elements as the number of outputs.
+            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+            ASSERT_TRUE(outputShapes.size() == 0 ||
+                        outputShapes.size() == testModel.outputIndexes.size());
+            break;
+        case OutputType::UNSPECIFIED:
+            // If the model output operands are not fully specified, outputShapes must have
+            // the same number of elements as the number of outputs.
+            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
+            break;
+        case OutputType::INSUFFICIENT:
+            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+            ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
+            ASSERT_FALSE(outputShapes[0].isSufficient);
+            return;
+    }
+
+    // Go through all outputs, check returned output shapes.
+    for (uint32_t i = 0; i < outputShapes.size(); i++) {
+        EXPECT_TRUE(outputShapes[i].isSufficient);
+        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
+        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
+        EXPECT_EQ(expect, actual);
+    }
+
+    // Retrieve execution results.
+    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+
+    // We want "close-enough" results.
+    checkResults(testModel, outputs);
+}
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+                           bool testDynamicOutputShape) {
+    if (testDynamicOutputShape) {
+        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+                              OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+                              OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+                              OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+                              OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+                              OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+                              OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+                              OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+                              OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+                              OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+                              OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+                              OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+                              OutputType::INSUFFICIENT);
+    } else {
+        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+                              OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+                              OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+                              OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+                              OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+                              OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+                              OutputType::FULLY_SPECIFIED);
+    }
+}
+
+void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+    Model model = createModel(testModel);
+    if (testDynamicOutputShape) {
+        makeOutputDimensionsUnspecified(&model);
+    }
+
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
+
+    EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
+}
+
+void GeneratedTestBase::SetUp() {
+    testing::TestWithParam<GeneratedTestParam>::SetUp();
+    ASSERT_NE(kDevice, nullptr);
+}
+
+std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
+    return TestModelManager::get().getTestModels(filter);
+}
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
+    const auto& [namedDevice, namedModel] = info.param;
+    return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
+}
+
+// Tag for the generated tests
+class GeneratedTest : public GeneratedTestBase {};
+
+// Tag for the dynamic output shape tests
+class DynamicOutputShapeTest : public GeneratedTest {};
+
+TEST_P(GeneratedTest, Test) {
+    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
+}
+
+TEST_P(DynamicOutputShapeTest, Test) {
+    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
+}
+
+INSTANTIATE_GENERATED_TEST(GeneratedTest,
+                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+
+INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
+                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
new file mode 100644
index 0000000000..dfc980c169
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+
+#include
+#include
+#include
+#include
+#include
+#include "1.0/Utils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using NamedModel = Named<const test_helper::TestModel*>;
+using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
+
+class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
+  protected:
+    void SetUp() override;
+    const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
+    const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
+};
+
+using FilterFn = std::function<bool(const test_helper::TestModel&)>;
+std::vector<NamedModel> getNamedModels(const FilterFn& filter);
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
+
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                     \
+    INSTANTIATE_TEST_SUITE_P(TestGenerated, TestSuite,                                    \
+                             testing::Combine(testing::ValuesIn(getNamedDevices()),       \
+                                              testing::ValuesIn(getNamedModels(filter))), \
+                             printGeneratedTest)
+
+// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
+// TODO: Clean up the hierarchy for ValidationTest.
+class ValidationTest : public GeneratedTestBase {};
+
+Model createModel(const test_helper::TestModel& testModel);
+
+void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
+                           const test_helper::TestModel& testModel, bool testDynamicOutputShape);
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
new file mode 100644
index 0000000000..a0aa3c37d1
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include "TestHarness.h"
+
+namespace android::hardware::neuralnetworks::V1_2 {
+
+// Make sure that the HIDL enums are compatible with the values defined in
+// frameworks/ml/nn/tools/test_generator/test_harness/include/TestHarness.h.
+using namespace test_helper;
+#define CHECK_TEST_ENUM(EnumType, enumValue) \
+    static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
+
+CHECK_TEST_ENUM(OperandType, FLOAT32);
+CHECK_TEST_ENUM(OperandType, INT32);
+CHECK_TEST_ENUM(OperandType, UINT32);
+CHECK_TEST_ENUM(OperandType, TENSOR_FLOAT32);
+CHECK_TEST_ENUM(OperandType, TENSOR_INT32);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM);
+CHECK_TEST_ENUM(OperandType, BOOL);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_SYMM);
+CHECK_TEST_ENUM(OperandType, TENSOR_FLOAT16);
+CHECK_TEST_ENUM(OperandType, TENSOR_BOOL8);
+CHECK_TEST_ENUM(OperandType, FLOAT16);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM_PER_CHANNEL);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_ASYMM);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM);
+
+CHECK_TEST_ENUM(OperationType, ADD);
+CHECK_TEST_ENUM(OperationType, AVERAGE_POOL_2D);
+CHECK_TEST_ENUM(OperationType, CONCATENATION);
+CHECK_TEST_ENUM(OperationType, CONV_2D);
+CHECK_TEST_ENUM(OperationType, DEPTHWISE_CONV_2D);
+CHECK_TEST_ENUM(OperationType, DEPTH_TO_SPACE);
+CHECK_TEST_ENUM(OperationType, DEQUANTIZE);
+CHECK_TEST_ENUM(OperationType, EMBEDDING_LOOKUP);
+CHECK_TEST_ENUM(OperationType, FLOOR);
+CHECK_TEST_ENUM(OperationType, FULLY_CONNECTED);
+CHECK_TEST_ENUM(OperationType, HASHTABLE_LOOKUP);
+CHECK_TEST_ENUM(OperationType, L2_NORMALIZATION);
+CHECK_TEST_ENUM(OperationType, L2_POOL_2D);
+CHECK_TEST_ENUM(OperationType, LOCAL_RESPONSE_NORMALIZATION);
+CHECK_TEST_ENUM(OperationType, LOGISTIC);
+CHECK_TEST_ENUM(OperationType, LSH_PROJECTION);
+CHECK_TEST_ENUM(OperationType, LSTM);
+CHECK_TEST_ENUM(OperationType, MAX_POOL_2D);
+CHECK_TEST_ENUM(OperationType, MUL);
+CHECK_TEST_ENUM(OperationType, RELU);
+CHECK_TEST_ENUM(OperationType, RELU1);
+CHECK_TEST_ENUM(OperationType, RELU6);
+CHECK_TEST_ENUM(OperationType, RESHAPE);
+CHECK_TEST_ENUM(OperationType, RESIZE_BILINEAR);
+CHECK_TEST_ENUM(OperationType, RNN); +CHECK_TEST_ENUM(OperationType, SOFTMAX); +CHECK_TEST_ENUM(OperationType, SPACE_TO_DEPTH); +CHECK_TEST_ENUM(OperationType, SVDF); +CHECK_TEST_ENUM(OperationType, TANH); +CHECK_TEST_ENUM(OperationType, BATCH_TO_SPACE_ND); +CHECK_TEST_ENUM(OperationType, DIV); +CHECK_TEST_ENUM(OperationType, MEAN); +CHECK_TEST_ENUM(OperationType, PAD); +CHECK_TEST_ENUM(OperationType, SPACE_TO_BATCH_ND); +CHECK_TEST_ENUM(OperationType, SQUEEZE); +CHECK_TEST_ENUM(OperationType, STRIDED_SLICE); +CHECK_TEST_ENUM(OperationType, SUB); +CHECK_TEST_ENUM(OperationType, TRANSPOSE); +CHECK_TEST_ENUM(OperationType, ABS); +CHECK_TEST_ENUM(OperationType, ARGMAX); +CHECK_TEST_ENUM(OperationType, ARGMIN); +CHECK_TEST_ENUM(OperationType, AXIS_ALIGNED_BBOX_TRANSFORM); +CHECK_TEST_ENUM(OperationType, BIDIRECTIONAL_SEQUENCE_LSTM); +CHECK_TEST_ENUM(OperationType, BIDIRECTIONAL_SEQUENCE_RNN); +CHECK_TEST_ENUM(OperationType, BOX_WITH_NMS_LIMIT); +CHECK_TEST_ENUM(OperationType, CAST); +CHECK_TEST_ENUM(OperationType, CHANNEL_SHUFFLE); +CHECK_TEST_ENUM(OperationType, DETECTION_POSTPROCESSING); +CHECK_TEST_ENUM(OperationType, EQUAL); +CHECK_TEST_ENUM(OperationType, EXP); +CHECK_TEST_ENUM(OperationType, EXPAND_DIMS); +CHECK_TEST_ENUM(OperationType, GATHER); +CHECK_TEST_ENUM(OperationType, GENERATE_PROPOSALS); +CHECK_TEST_ENUM(OperationType, GREATER); +CHECK_TEST_ENUM(OperationType, GREATER_EQUAL); +CHECK_TEST_ENUM(OperationType, GROUPED_CONV_2D); +CHECK_TEST_ENUM(OperationType, HEATMAP_MAX_KEYPOINT); +CHECK_TEST_ENUM(OperationType, INSTANCE_NORMALIZATION); +CHECK_TEST_ENUM(OperationType, LESS); +CHECK_TEST_ENUM(OperationType, LESS_EQUAL); +CHECK_TEST_ENUM(OperationType, LOG); +CHECK_TEST_ENUM(OperationType, LOGICAL_AND); +CHECK_TEST_ENUM(OperationType, LOGICAL_NOT); +CHECK_TEST_ENUM(OperationType, LOGICAL_OR); +CHECK_TEST_ENUM(OperationType, LOG_SOFTMAX); +CHECK_TEST_ENUM(OperationType, MAXIMUM); +CHECK_TEST_ENUM(OperationType, MINIMUM); +CHECK_TEST_ENUM(OperationType, NEG); +CHECK_TEST_ENUM(OperationType, NOT_EQUAL); +CHECK_TEST_ENUM(OperationType, PAD_V2); +CHECK_TEST_ENUM(OperationType, POW); +CHECK_TEST_ENUM(OperationType, PRELU); +CHECK_TEST_ENUM(OperationType, QUANTIZE); +CHECK_TEST_ENUM(OperationType, QUANTIZED_16BIT_LSTM); +CHECK_TEST_ENUM(OperationType, RANDOM_MULTINOMIAL); +CHECK_TEST_ENUM(OperationType, REDUCE_ALL); +CHECK_TEST_ENUM(OperationType, REDUCE_ANY); +CHECK_TEST_ENUM(OperationType, REDUCE_MAX); +CHECK_TEST_ENUM(OperationType, REDUCE_MIN); +CHECK_TEST_ENUM(OperationType, REDUCE_PROD); +CHECK_TEST_ENUM(OperationType, REDUCE_SUM); +CHECK_TEST_ENUM(OperationType, ROI_ALIGN); +CHECK_TEST_ENUM(OperationType, ROI_POOLING); +CHECK_TEST_ENUM(OperationType, RSQRT); +CHECK_TEST_ENUM(OperationType, SELECT); +CHECK_TEST_ENUM(OperationType, SIN); +CHECK_TEST_ENUM(OperationType, SLICE); +CHECK_TEST_ENUM(OperationType, SPLIT); +CHECK_TEST_ENUM(OperationType, SQRT); +CHECK_TEST_ENUM(OperationType, TILE); +CHECK_TEST_ENUM(OperationType, TOPK_V2); +CHECK_TEST_ENUM(OperationType, TRANSPOSE_CONV_2D); +CHECK_TEST_ENUM(OperationType, UNIDIRECTIONAL_SEQUENCE_LSTM); +CHECK_TEST_ENUM(OperationType, UNIDIRECTIONAL_SEQUENCE_RNN); +CHECK_TEST_ENUM(OperationType, RESIZE_NEAREST_NEIGHBOR); + +#undef CHECK_TEST_ENUM + +} // namespace android::hardware::neuralnetworks::V1_2 diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp new file mode 100644 index 0000000000..1d4493d208 --- /dev/null +++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp 
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "ExecutionBurstServer.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include
+#include
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using nn::ExecutionBurstController;
+using nn::RequestChannelSender;
+using nn::ResultChannelReceiver;
+using V1_0::ErrorStatus;
+using V1_0::Request;
+using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
+
+// This constant value represents the length of an FMQ that is large enough to
+// return a result from a burst execution for all of the generated test cases.
+constexpr size_t kExecutionBurstChannelLength = 1024;
+
+// This constant value represents a length of an FMQ that is not large enough
+// to return a result from a burst execution for some of the generated test
+// cases.
+constexpr size_t kExecutionBurstChannelSmallLength = 8;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static bool badTiming(Timing timing) {
+    return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
+static void createBurst(const sp<IPreparedModel>& preparedModel,
+                        const sp<ExecutionBurstCallback>& callback,
+                        std::unique_ptr<RequestChannelSender>* sender,
+                        std::unique_ptr<ResultChannelReceiver>* receiver,
+                        sp<IBurstContext>* context,
+                        size_t resultChannelLength = kExecutionBurstChannelLength) {
+    ASSERT_NE(nullptr, preparedModel.get());
+    ASSERT_NE(nullptr, sender);
+    ASSERT_NE(nullptr, receiver);
+    ASSERT_NE(nullptr, context);
+
+    // create FMQ objects
+    auto [fmqRequestChannel, fmqRequestDescriptor] =
+            RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
+    auto [fmqResultChannel, fmqResultDescriptor] =
+            ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
+    ASSERT_NE(nullptr, fmqRequestChannel.get());
+    ASSERT_NE(nullptr, fmqResultChannel.get());
+    ASSERT_NE(nullptr, fmqRequestDescriptor);
+    ASSERT_NE(nullptr, fmqResultDescriptor);
+
+    // configure burst
+    ErrorStatus errorStatus;
+    sp<IBurstContext> burstContext;
+    const Return<void> ret = preparedModel->configureExecutionBurst(
+            callback, *fmqRequestDescriptor, *fmqResultDescriptor,
+            [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
+                errorStatus = status;
+                burstContext = context;
+            });
+    ASSERT_TRUE(ret.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, errorStatus);
+    ASSERT_NE(nullptr, burstContext.get());
+
+    // return values
+    *sender = std::move(fmqRequestChannel);
+    *receiver = std::move(fmqResultChannel);
+    *context = burstContext;
+}
+
+static void createBurstWithResultChannelLength(
+        const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
+        std::shared_ptr<ExecutionBurstController>* controller) {
+    ASSERT_NE(nullptr, preparedModel.get());
+    ASSERT_NE(nullptr, controller);
+
+    // create FMQ objects
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+
+static void createBurstWithResultChannelLength(
+        const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
+        std::shared_ptr<ExecutionBurstController>* controller) {
+    ASSERT_NE(nullptr, preparedModel.get());
+    ASSERT_NE(nullptr, controller);
+
+    // create FMQ objects
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context,
+                                        resultChannelLength));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // return values
+    *controller = std::make_shared<ExecutionBurstController>(std::move(sender),
+                                                             std::move(receiver), context,
+                                                             callback);
+}
+
+// Primary validation function. This function will take a valid serialized
+// request, apply a mutation to it to invalidate the serialized request, then
+// pass it to interface calls that use the serialized request. Note that the
+// serialized request here is passed by value, and any mutation to the
+// serialized request does not leave this function.
+static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                     const std::string& message, std::vector<FmqRequestDatum> serialized,
+                     const std::function<void(std::vector<FmqRequestDatum>*)>& mutation) {
+    mutation(&serialized);
+
+    // skip if packet is too large to send
+    if (serialized.size() > kExecutionBurstChannelLength) {
+        return;
+    }
+
+    SCOPED_TRACE(message);
+
+    // send invalid packet
+    ASSERT_TRUE(sender->sendPacket(serialized));
+
+    // receive error
+    auto results = receiver->getBlocking();
+    ASSERT_TRUE(results.has_value());
+    const auto [status, outputShapes, timing] = std::move(*results);
+    EXPECT_NE(ErrorStatus::NONE, status);
+    EXPECT_EQ(0u, outputShapes.size());
+    EXPECT_TRUE(badTiming(timing));
+}
+
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// creates pre-set invalid packet entries for convenience.
+static std::vector<FmqRequestDatum> createBadRequestPacketEntries() {
+    const FmqRequestDatum::PacketInformation packetInformation = {
+            /*.packetSize=*/10, /*.numberOfInputOperands=*/10, /*.numberOfOutputOperands=*/10,
+            /*.numberOfPools=*/10};
+    const FmqRequestDatum::OperandInformation operandInformation = {
+            /*.hasNoValue=*/false, /*.location=*/{}, /*.numberOfDimensions=*/10};
+    const int32_t invalidPoolIdentifier = std::numeric_limits<int32_t>::max();
+    std::vector<FmqRequestDatum> bad(7);
+    bad[0].packetInformation(packetInformation);
+    bad[1].inputOperandInformation(operandInformation);
+    bad[2].inputOperandDimensionValue(0);
+    bad[3].outputOperandInformation(operandInformation);
+    bad[4].outputOperandDimensionValue(0);
+    bad[5].poolIdentifier(invalidPoolIdentifier);
+    bad[6].measureTiming(MeasureTiming::YES);
+    return bad;
+}
+
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// retrieves pre-set invalid packet entries for convenience. This function
+// caches these data so they can be reused on subsequent validation checks.
+static const std::vector<FmqRequestDatum>& getBadRequestPacketEntries() {
+    static const std::vector<FmqRequestDatum> bad = createBadRequestPacketEntries();
+    return bad;
+}
+
+///////////////////////// REMOVE DATUM ////////////////////////////////////
+
+static void removeDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                            const std::vector<FmqRequestDatum>& serialized) {
+    for (size_t index = 0; index < serialized.size(); ++index) {
+        const std::string message = "removeDatum: removed datum at index " + std::to_string(index);
+        validate(sender, receiver, message, serialized,
+                 [index](std::vector<FmqRequestDatum>* serialized) {
+                     serialized->erase(serialized->begin() + index);
+                 });
+    }
+}
+
+///////////////////////// ADD DATUM ////////////////////////////////////
+
+static void addDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                         const std::vector<FmqRequestDatum>& serialized) {
+    const std::vector<FmqRequestDatum>& extra = getBadRequestPacketEntries();
+    for (size_t index = 0; index <= serialized.size(); ++index) {
+        for (size_t type = 0; type < extra.size(); ++type) {
+            const std::string message = "addDatum: added datum type " + std::to_string(type) +
+                                        " at index " + std::to_string(index);
+            validate(sender, receiver, message, serialized,
+                     [index, type, &extra](std::vector<FmqRequestDatum>* serialized) {
+                         serialized->insert(serialized->begin() + index, extra[type]);
+                     });
+        }
+    }
+}
+
+///////////////////////// MUTATE DATUM ////////////////////////////////////
+
+static bool interestingCase(const FmqRequestDatum& lhs, const FmqRequestDatum& rhs) {
+    using Discriminator = FmqRequestDatum::hidl_discriminator;
+
+    const bool differentValues = (lhs != rhs);
+    const bool sameDiscriminator = (lhs.getDiscriminator() == rhs.getDiscriminator());
+    const auto discriminator = rhs.getDiscriminator();
+    const bool isDimensionValue = (discriminator == Discriminator::inputOperandDimensionValue ||
+                                   discriminator == Discriminator::outputOperandDimensionValue);
+
+    return differentValues && !(sameDiscriminator && isDimensionValue);
+}
+
+static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                            const std::vector<FmqRequestDatum>& serialized) {
+    const std::vector<FmqRequestDatum>& change = getBadRequestPacketEntries();
+    for (size_t index = 0; index < serialized.size(); ++index) {
+        for (size_t type = 0; type < change.size(); ++type) {
+            if (interestingCase(serialized[index], change[type])) {
+                const std::string message = "mutateDatum: changed datum at index " +
+                                            std::to_string(index) + " to datum type " +
+                                            std::to_string(type);
+                validate(sender, receiver, message, serialized,
+                         [index, type, &change](std::vector<FmqRequestDatum>* serialized) {
+                             (*serialized)[index] = change[type];
+                         });
+            }
+        }
+    }
+}
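+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): validate() accepts any mutation, so ad-hoc corruptions
+// beyond the remove/add/mutate sweeps above are easy to express.
+[[maybe_unused]] static void exampleTruncatePacketTest(
+        RequestChannelSender* sender, ResultChannelReceiver* receiver,
+        const std::vector<FmqRequestDatum>& serialized) {
+    validate(sender, receiver, "truncate: kept only the first datum", serialized,
+             [](std::vector<FmqRequestDatum>* serialized) { serialized->resize(1); });
+}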
+
+///////////////////////// BURST VALIDATION TESTS ////////////////////////////////////
+
+static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
+                                       const Request& request) {
+    // create burst
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // load memory into callback slots
+    std::vector<intptr_t> keys;
+    keys.reserve(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                   [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+    const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+    // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
+    // subsequent slot validation testing)
+    ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+        return slot != std::numeric_limits<int32_t>::max();
+    }));
+
+    // serialize the request
+    const auto serialized = android::nn::serialize(request, MeasureTiming::YES, slots);
+
+    // validations
+    removeDatumTest(sender.get(), receiver.get(), serialized);
+    addDatumTest(sender.get(), receiver.get(), serialized);
+    mutateDatumTest(sender.get(), receiver.get(), serialized);
+}
+
+// This test validates that when the Result message size exceeds length of the
+// result FMQ, the service instance gracefully fails and returns an error.
+static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
+                                   const Request& request) {
+    // create regular burst
+    std::shared_ptr<ExecutionBurstController> controllerRegular;
+    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+            preparedModel, kExecutionBurstChannelLength, &controllerRegular));
+    ASSERT_NE(nullptr, controllerRegular.get());
+
+    // create burst with small output channel
+    std::shared_ptr<ExecutionBurstController> controllerSmall;
+    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+            preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
+    ASSERT_NE(nullptr, controllerSmall.get());
+
+    // load memory into callback slots
+    std::vector<intptr_t> keys(request.pools.size());
+    for (size_t i = 0; i < keys.size(); ++i) {
+        keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+    }
+
+    // collect serialized result by running regular burst
+    const auto [statusRegular, outputShapesRegular, timingRegular] =
+            controllerRegular->compute(request, MeasureTiming::NO, keys);
+
+    // skip test if regular burst output isn't useful for testing a failure
+    // caused by having too small of a length for the result FMQ
+    const std::vector<FmqResultDatum> serialized =
+            android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+    if (statusRegular != ErrorStatus::NONE ||
+        serialized.size() <= kExecutionBurstChannelSmallLength) {
+        return;
+    }
+
+    // by this point, execution should fail because the result channel isn't
+    // large enough to return the serialized result
+    const auto [statusSmall, outputShapesSmall, timingSmall] =
+            controllerSmall->compute(request, MeasureTiming::NO, keys);
+    EXPECT_NE(ErrorStatus::NONE, statusSmall);
+    EXPECT_EQ(0u, outputShapesSmall.size());
+    EXPECT_TRUE(badTiming(timingSmall));
+}
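+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): the minimum result channel length for a given execution
+// result is simply the size of its serialization, which is what the test above
+// exploits when it compares against kExecutionBurstChannelSmallLength.
+[[maybe_unused]] static size_t exampleMinimumResultChannelLength(
+        ErrorStatus status, const std::vector<OutputShape>& outputShapes, Timing timing) {
+    return android::nn::serialize(status, outputShapes, timing).size();
+}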
+
+static bool isSanitized(const FmqResultDatum& datum) {
+    using Discriminator = FmqResultDatum::hidl_discriminator;
+
+    // check to ensure the padding values in the returned
+    // FmqResultDatum::OperandInformation are initialized to 0
+    if (datum.getDiscriminator() == Discriminator::operandInformation) {
+        static_assert(
+                offsetof(FmqResultDatum::OperandInformation, isSufficient) == 0,
+                "unexpected value for offset of FmqResultDatum::OperandInformation::isSufficient");
+        static_assert(
+                sizeof(FmqResultDatum::OperandInformation::isSufficient) == 1,
+                "unexpected value for size of FmqResultDatum::OperandInformation::isSufficient");
+        static_assert(offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) == 4,
+                      "unexpected value for offset of "
+                      "FmqResultDatum::OperandInformation::numberOfDimensions");
+        static_assert(sizeof(FmqResultDatum::OperandInformation::numberOfDimensions) == 4,
+                      "unexpected value for size of "
+                      "FmqResultDatum::OperandInformation::numberOfDimensions");
+        static_assert(sizeof(FmqResultDatum::OperandInformation) == 8,
+                      "unexpected value for size of "
+                      "FmqResultDatum::OperandInformation");
+
+        constexpr size_t paddingOffset =
+                offsetof(FmqResultDatum::OperandInformation, isSufficient) +
+                sizeof(FmqResultDatum::OperandInformation::isSufficient);
+        constexpr size_t paddingSize =
+                offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) - paddingOffset;
+
+        FmqResultDatum::OperandInformation initialized{};
+        std::memset(&initialized, 0, sizeof(initialized));
+
+        const char* initializedPaddingStart =
+                reinterpret_cast<const char*>(&initialized) + paddingOffset;
+        const char* datumPaddingStart =
+                reinterpret_cast<const char*>(&datum.operandInformation()) + paddingOffset;
+
+        return std::memcmp(datumPaddingStart, initializedPaddingStart, paddingSize) == 0;
+    }
+
+    // there are no other padding initialization checks required, so return true
+    // for any sum-type that isn't FmqResultDatum::OperandInformation
+    return true;
+}
+
+static void validateBurstSanitized(const sp<IPreparedModel>& preparedModel,
+                                   const Request& request) {
+    // create burst
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // load memory into callback slots
+    std::vector<intptr_t> keys;
+    keys.reserve(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                   [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+    const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+    // send valid request
+    ASSERT_TRUE(sender->send(request, MeasureTiming::YES, slots));
+
+    // receive valid result
+    auto serialized = receiver->getPacketBlocking();
+    ASSERT_TRUE(serialized.has_value());
+
+    // sanitize result
+    ASSERT_TRUE(std::all_of(serialized->begin(), serialized->end(), isSanitized))
+            << "The result serialized data is not properly sanitized";
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, request));
+    ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request));
+    ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, request));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
new file mode 100644
index 0000000000..30530beacc
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -0,0 +1,713 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using implementation::PreparedModelCallback;
+using V1_0::ErrorStatus;
+using V1_0::OperandLifeTime;
+using V1_1::ExecutionPreference;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
+                                           const Model& model) {
+    SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
+
+    Return<void> ret = device->getSupportedOperations_1_2(
+            model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            });
+    EXPECT_TRUE(ret.isOk());
+}
+
+static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
+                                 const Model& model, ExecutionPreference preference) {
+    SCOPED_TRACE(message + " [prepareModel_1_2]");
+
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    Return<ErrorStatus> prepareLaunchStatus =
+            device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
+                                     hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
+static bool validExecutionPreference(ExecutionPreference preference) {
+    return preference == ExecutionPreference::LOW_POWER ||
+           preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+           preference == ExecutionPreference::SUSTAINED_SPEED;
+}
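+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): any value outside the three preferences above fails the
+// range check, e.g. the out-of-range sentinel below.
+[[maybe_unused]] static bool examplePreferenceOutOfRange() {
+    const auto invalid = static_cast<ExecutionPreference>(
+            static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1);
+    return !validExecutionPreference(invalid);  // expected to be true
+}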
+
+// Primary validation function. This function will take a valid model, apply a
+// mutation to it to invalidate the model, then pass it to interface calls that
+// use the model. Note that the model here is passed by value, and any mutation
+// to the model does not leave this function.
+static void validate(const sp<IDevice>& device, const std::string& message, Model model,
+                     const std::function<void(Model*)>& mutation,
+                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
+    mutation(&model);
+    if (validExecutionPreference(preference)) {
+        validateGetSupportedOperations(device, message, model);
+    }
+    validatePrepareModel(device, message, model, preference);
+}
+
+static uint32_t addOperand(Model* model) {
+    return hidl_vec_push_back(&model->operands,
+                              {
+                                      .type = OperandType::INT32,
+                                      .dimensions = {},
+                                      .numberOfConsumers = 0,
+                                      .scale = 0.0f,
+                                      .zeroPoint = 0,
+                                      .lifetime = OperandLifeTime::MODEL_INPUT,
+                                      .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                              });
+}
+
+static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
+    uint32_t index = addOperand(model);
+    model->operands[index].numberOfConsumers = 1;
+    model->operands[index].lifetime = lifetime;
+    return index;
+}
+
+///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
+
+static const uint32_t invalidOperandTypes[] = {
+        static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN) - 1,
+        static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1,
+        static_cast<uint32_t>(OperandTypeRange::OEM_MIN) - 1,
+        static_cast<uint32_t>(OperandTypeRange::OEM_MAX) + 1,
+};
+
+static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        for (uint32_t invalidOperandType : invalidOperandTypes) {
+            const std::string message = "mutateOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to value " +
+                                        std::to_string(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE OPERAND RANK /////////////////////////
+
+static uint32_t getInvalidRank(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT16:
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::BOOL:
+            return 1;
+        case OperandType::TENSOR_BOOL8:
+        case OperandType::TENSOR_FLOAT16:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT8_SYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
+        case OperandType::TENSOR_QUANT16_SYMM:
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+            return 0;
+        default:
+            return 0;
+    }
+}
+
+static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+        if (invalidRank == 0) {
+            continue;
+        }
+        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
+                                    " has rank of " + std::to_string(invalidRank);
+        validate(device, message, model, [operand, invalidRank](Model* model) {
+            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
+
+static float getInvalidScale(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT16:
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::BOOL:
+        case OperandType::TENSOR_BOOL8:
+        case OperandType::TENSOR_FLOAT16:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+            return 1.0f;
+        case OperandType::TENSOR_INT32:
+            return -1.0f;
+        case OperandType::TENSOR_QUANT8_SYMM:
+        case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
+        case OperandType::TENSOR_QUANT16_SYMM:
+            return 0.0f;
+        default:
+            return 0.0f;
+    }
+}
+
+static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const float invalidScale = getInvalidScale(model.operands[operand].type);
+        const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
+                                    " has scale of " + std::to_string(invalidScale);
+        validate(device, message, model, [operand, invalidScale](Model* model) {
+            model->operands[operand].scale = invalidScale;
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
+
+static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT16:
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::BOOL:
+        case OperandType::TENSOR_BOOL8:
+        case OperandType::TENSOR_FLOAT16:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+            return {1};
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return {-1, 256};
+        case OperandType::TENSOR_QUANT8_SYMM:
+            return {-129, -1, 1, 128};
+        case OperandType::TENSOR_QUANT16_ASYMM:
+            return {-1, 65536};
+        case OperandType::TENSOR_QUANT16_SYMM:
+            return {-32769, -1, 1, 32768};
+        default:
+            return {};
+    }
+}
+
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::vector<int32_t> invalidZeroPoints =
+                getInvalidZeroPoints(model.operands[operand].type);
+        for (int32_t invalidZeroPoint : invalidZeroPoints) {
+            const std::string message = "mutateOperandZeroPointTest: operand " +
+                                        std::to_string(operand) + " has zero point of " +
+                                        std::to_string(invalidZeroPoint);
+            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
+                model->operands[operand].zeroPoint = invalidZeroPoint;
+            });
+        }
+    }
+}
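+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): for TENSOR_QUANT8_ASYMM the valid zero-point interval is
+// [0, 255], which is why {-1, 256} are the invalid probes chosen above.
+[[maybe_unused]] static bool exampleQuant8AsymmZeroPointIsInvalid(int32_t zeroPoint) {
+    return zeroPoint < 0 || zeroPoint > 255;
+}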
+
+///////////////////////// VALIDATE EXTRA ??? /////////////////////////
+
+// TODO: Operand::lifetime
+// TODO: Operand::location
+
+///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
+
+static void mutateOperand(Operand* operand, OperandType type) {
+    Operand newOperand = *operand;
+    newOperand.type = type;
+    switch (type) {
+        case OperandType::FLOAT16:
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::BOOL:
+            newOperand.dimensions = hidl_vec<uint32_t>();
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_BOOL8:
+        case OperandType::TENSOR_FLOAT16:
+        case OperandType::TENSOR_FLOAT32:
+            newOperand.dimensions =
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_INT32:
+            newOperand.dimensions =
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT8_SYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
+        case OperandType::TENSOR_QUANT16_SYMM:
+            newOperand.dimensions =
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
+            break;
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
+            newOperand.dimensions =
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+
+            SymmPerChannelQuantParams channelQuant;
+            channelQuant.channelDim = 0;
+            channelQuant.scales = hidl_vec<float>(
+                    operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
+                                                   : 0);
+            for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
+                channelQuant.scales[i] = 1.0f;
+            }
+            newOperand.extraParams.channelQuant(std::move(channelQuant));
+        } break;
+        case OperandType::OEM:
+        case OperandType::TENSOR_OEM_BYTE:
+        default:
+            break;
+    }
+    *operand = newOperand;
+}
+
+static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
+    // Do not test OEM types
+    if (type == model.operands[operand].type || type == OperandType::OEM ||
+        type == OperandType::TENSOR_OEM_BYTE) {
+        return true;
+    }
+    for (const Operation& operation : model.operations) {
+        // Skip mutateOperationOperandTypeTest for the following operations.
+        // - LSH_PROJECTION's second argument is allowed to have any type.
+        // - ARGMIN and ARGMAX's first argument can be any of
+        //   TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
+        // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
+        // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+        // - DEQUANTIZE input can be any of
+        //   TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can
+        //   be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+        // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+        // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL.
+        // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL.
+        // - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL.
+        // - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL.
+        switch (operation.type) {
+            case OperationType::LSH_PROJECTION: {
+                if (operand == operation.inputs[1]) {
+                    return true;
+                }
+            } break;
+            case OperationType::CAST:
+            case OperationType::ARGMAX:
+            case OperationType::ARGMIN: {
+                if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
+                    type == OperandType::TENSOR_INT32 ||
+                    type == OperandType::TENSOR_QUANT8_ASYMM) {
+                    return true;
+                }
+            } break;
+            case OperationType::QUANTIZE:
+            case OperationType::RANDOM_MULTINOMIAL: {
+                if (operand == operation.inputs[0] &&
+                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+                    return true;
+                }
+            } break;
+            case OperationType::DEQUANTIZE: {
+                if (operand == operation.inputs[0] &&
+                    (type == OperandType::TENSOR_QUANT8_ASYMM ||
+                     type == OperandType::TENSOR_QUANT8_SYMM ||
+                     type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
+                    return true;
+                }
+                if (operand == operation.outputs[0] &&
+                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+                    return true;
+                }
+            } break;
+            case OperationType::TRANSPOSE_CONV_2D:
+            case OperationType::GROUPED_CONV_2D:
+            case OperationType::DEPTHWISE_CONV_2D:
+            case OperationType::CONV_2D: {
+                if (operand == operation.inputs[1] &&
+                    (type == OperandType::TENSOR_QUANT8_ASYMM ||
+                     type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
+                    return true;
+                }
+            } break;
+            default:
+                break;
+        }
+    }
+    return false;
+}
+
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
+            if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
+                continue;
+            }
+            const std::string message = "mutateOperationOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to type " +
+                                        toString(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                mutateOperand(&model->operands[operand], invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
+
+static const uint32_t invalidOperationTypes[] = {
+        static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MAX) + 1,
+        static_cast<uint32_t>(OperationTypeRange::OEM_MIN) - 1,
+        static_cast<uint32_t>(OperationTypeRange::OEM_MAX) + 1,
+};
+
+static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (uint32_t invalidOperationType : invalidOperationTypes) {
+            const std::string message = "mutateOperationTypeTest: operation " +
+                                        std::to_string(operation) + " set to value " +
+                                        std::to_string(invalidOperationType);
+            validate(device, message, model, [operation, invalidOperationType](Model* model) {
+                model->operations[operation].type =
+                        static_cast<OperationType>(invalidOperationType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
+                model->operations[operation].inputs[input] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
+                model->operations[operation].outputs[output] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
+
+static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
+    if (vec) {
+        // remove elements matching "value"
+        auto last = std::remove(vec->begin(), vec->end(), value);
+        vec->resize(std::distance(vec->begin(), last));
+
+        // decrement elements exceeding "value" (note: post-decrement would
+        // return the operand unchanged, so subtract explicitly)
+        std::transform(vec->begin(), vec->end(), vec->begin(),
+                       [value](uint32_t v) { return v > value ? v - 1 : v; });
+    }
+}
+
+static void removeOperand(Model* model, uint32_t index) {
+    hidl_vec_removeAt(&model->operands, index);
+    for (Operation& operation : model->operations) {
+        removeValueAndDecrementGreaterValues(&operation.inputs, index);
+        removeValueAndDecrementGreaterValues(&operation.outputs, index);
+    }
+    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+}
+
+static bool removeOperandSkip(size_t operand, const Model& model) {
+    for (const Operation& operation : model.operations) {
+        // Skip removeOperandTest for the following operations.
+        // - SPLIT's outputs are not checked during prepareModel.
+        if (operation.type == OperationType::SPLIT) {
+            for (const size_t outOperand : operation.outputs) {
+                if (operand == outOperand) {
+                    return true;
+                }
+            }
+        }
+        // BIDIRECTIONAL_SEQUENCE_LSTM and BIDIRECTIONAL_SEQUENCE_RNN can have either one or two
+        // outputs depending on their mergeOutputs parameter.
+        if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_LSTM ||
+            operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) {
+            for (const size_t outOperand : operation.outputs) {
+                if (operand == outOperand) {
+                    return true;
+                }
+            }
+        }
+    }
+    return false;
+}
+
+static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        if (removeOperandSkip(operand, model)) {
+            continue;
+        }
+        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
+        validate(device, message, model,
+                 [operand](Model* model) { removeOperand(model, operand); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION /////////////////////////
+
+static void removeOperation(Model* model, uint32_t index) {
+    for (uint32_t operand : model->operations[index].inputs) {
+        model->operands[operand].numberOfConsumers--;
+    }
+    hidl_vec_removeAt(&model->operations, index);
+}
+
+static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
+        validate(device, message, model,
+                 [operation](Model* model) { removeOperation(model, operation); });
+    }
+}
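+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): removing operand 1 from inputs {0, 1, 2} erases the match
+// and renumbers the surviving indices, yielding {0, 1}.
+[[maybe_unused]] static hidl_vec<uint32_t> exampleRenumberAfterRemove() {
+    hidl_vec<uint32_t> inputs = {0, 1, 2};
+    removeValueAndDecrementGreaterValues(&inputs, 1);
+    return inputs;  // contains {0, 1}
+}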
+
+///////////////////////// REMOVE OPERATION INPUT /////////////////////////
+
+static bool removeOperationInputSkip(const Operation& op, size_t input) {
+    // Skip removeOperationInputTest for the following operations.
+    // - CONCATENATION has at least 2 inputs, with the last element being INT32.
+    // - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
+    //   SPACE_TO_DEPTH, DEPTH_TO_SPACE, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
+    //   layout parameter.
+    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
+    //   parameter.
+    switch (op.type) {
+        case OperationType::CONCATENATION: {
+            if (op.inputs.size() > 2 && input != op.inputs.size() - 1) {
+                return true;
+            }
+        } break;
+        case OperationType::DEPTHWISE_CONV_2D: {
+            if ((op.inputs.size() == 12 && input == 11) || (op.inputs.size() == 9 && input == 8)) {
+                return true;
+            }
+        } break;
+        case OperationType::CONV_2D:
+        case OperationType::AVERAGE_POOL_2D:
+        case OperationType::MAX_POOL_2D:
+        case OperationType::L2_POOL_2D: {
+            if ((op.inputs.size() == 11 && input == 10) || (op.inputs.size() == 8 && input == 7)) {
+                return true;
+            }
+        } break;
+        case OperationType::RESIZE_BILINEAR: {
+            if (op.inputs.size() == 4 && input == 3) {
+                return true;
+            }
+        } break;
+        case OperationType::SPACE_TO_DEPTH:
+        case OperationType::DEPTH_TO_SPACE:
+        case OperationType::BATCH_TO_SPACE_ND: {
+            if (op.inputs.size() == 3 && input == 2) {
+                return true;
+            }
+        } break;
+        case OperationType::SPACE_TO_BATCH_ND: {
+            if (op.inputs.size() == 4 && input == 3) {
+                return true;
+            }
+        } break;
+        case OperationType::L2_NORMALIZATION: {
+            if (op.inputs.size() == 2 && input == 1) {
+                return true;
+            }
+        } break;
+        case OperationType::LOCAL_RESPONSE_NORMALIZATION: {
+            if (op.inputs.size() == 6 && input == 5) {
+                return true;
+            }
+        } break;
+        case OperationType::SOFTMAX: {
+            if (op.inputs.size() == 3 && input == 2) {
+                return true;
+            }
+        } break;
+        default:
+            break;
+    }
+    return false;
+}
+
+static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const Operation& op = model.operations[operation];
+            if (removeOperationInputSkip(op, input)) {
+                continue;
+            }
+            const std::string message = "removeOperationInputTest: operation " +
+                                        std::to_string(operation) + ", input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input](Model* model) {
+                uint32_t operand = model->operations[operation].inputs[input];
+                model->operands[operand].numberOfConsumers--;
+                hidl_vec_removeAt(&model->operations[operation].inputs, input);
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
+
+static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "removeOperationOutputTest: operation " +
+                                        std::to_string(operation) + ", output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output](Model* model) {
+                hidl_vec_removeAt(&model->operations[operation].outputs, output);
+            });
+        }
+    }
+}
+
+///////////////////////// MODEL VALIDATION /////////////////////////
+
+// TODO: remove model input
+// TODO: remove model output
+// TODO: add unused operation
+
+///////////////////////// ADD OPERATION INPUT /////////////////////////
+
+static bool addOperationInputSkip(const Operation& op) {
+    // Skip addOperationInputTest for the following operations.
+    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional INT32 axis
+    //   parameter.
+    if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
+        (op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
+        (op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
+        return true;
+    }
+    return false;
+}
+
+static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        if (addOperationInputSkip(model.operations[operation])) {
+            continue;
+        }
+        const std::string message =
+                "addOperationInputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
+            hidl_vec_push_back(&model->operations[operation].inputs, index);
+            hidl_vec_push_back(&model->inputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// ADD OPERATION OUTPUT /////////////////////////
+
+static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message =
+                "addOperationOutputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
+            hidl_vec_push_back(&model->operations[operation].outputs, index);
+            hidl_vec_push_back(&model->outputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
+
+static const int32_t invalidExecutionPreferences[] = {
+        static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
+        static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
+};
+
+static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
+    for (int32_t preference : invalidExecutionPreferences) {
+        const std::string message =
+                "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+        validate(
+                device, message, model, [](Model*) {},
+                static_cast<ExecutionPreference>(preference));
+    }
+}
+
+////////////////////////// ENTRY POINT //////////////////////////////
+
+void validateModel(const sp<IDevice>& device, const Model& model) {
+    mutateOperandTypeTest(device, model);
+    mutateOperandRankTest(device, model);
+    mutateOperandScaleTest(device, model);
+    mutateOperandZeroPointTest(device, model);
+    mutateOperationOperandTypeTest(device, model);
+    mutateOperationTypeTest(device, model);
+    mutateOperationInputOperandIndexTest(device, model);
+    mutateOperationOutputOperandIndexTest(device, model);
+    removeOperandTest(device, model);
+    removeOperationTest(device, model);
+    removeOperationInputTest(device, model);
+    removeOperationOutputTest(device, model);
+    addOperationInputTest(device, model);
+    addOperationOutputTest(device, model);
+    mutateExecutionPreferenceTest(device, model);
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
new file mode 100644
index 0000000000..f25ee62617
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using implementation::ExecutionCallback;
+using V1_0::ErrorStatus;
+using V1_0::Request;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static bool badTiming(Timing timing) {
+    return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
+// Primary validation function. This function will take a valid request, apply
+// a mutation to it to invalidate the request, then pass it to interface calls
+// that use the request. Note that the request here is passed by value, and any
+// mutation to the request does not leave this function.
+static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
+                     Request request, const std::function<void(Request*)>& mutation) {
+    mutation(&request);
+
+    // We'd like to test both with timing requested and without timing
+    // requested. Rather than running each test both ways, we'll decide whether
+    // to request timing by hashing the message. We do not use std::hash because
+    // it is not guaranteed stable across executions.
+    char hash = 0;
+    for (auto c : message) {
+        hash ^= c;
+    }
+    MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
+
+    // asynchronous
+    {
+        SCOPED_TRACE(message + " [execute_1_2]");
+
+        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+        Return<ErrorStatus> executeLaunchStatus =
+                preparedModel->execute_1_2(request, measure, executionCallback);
+        ASSERT_TRUE(executeLaunchStatus.isOk());
+        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+        executionCallback->wait();
+        ErrorStatus executionReturnStatus = executionCallback->getStatus();
+        const auto& outputShapes = executionCallback->getOutputShapes();
+        Timing timing = executionCallback->getTiming();
+        ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+        ASSERT_EQ(outputShapes.size(), 0);
+        ASSERT_TRUE(badTiming(timing));
+    }
+
+    // synchronous
+    {
+        SCOPED_TRACE(message + " [executeSynchronously]");
+
+        Return<void> executeStatus = preparedModel->executeSynchronously(
+                request, measure,
+                [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+                   const Timing& timing) {
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+                    EXPECT_EQ(outputShapes.size(), 0);
+                    EXPECT_TRUE(badTiming(timing));
+                });
+        ASSERT_TRUE(executeStatus.isOk());
+    }
+
+    // burst
+    {
+        SCOPED_TRACE(message + " [burst]");
+
+        // create burst
+        std::shared_ptr<::android::nn::ExecutionBurstController> burst =
+                android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+        ASSERT_NE(nullptr, burst.get());
+
+        // create memory keys
+        std::vector<intptr_t> keys(request.pools.size());
+        for (size_t i = 0; i < keys.size(); ++i) {
+            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+        }
+
+        // execute and verify
+        ErrorStatus error;
+        std::vector<OutputShape> outputShapes;
+        Timing timing;
+        std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
+        EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+        EXPECT_EQ(outputShapes.size(), 0);
+        EXPECT_TRUE(badTiming(timing));
+
+        // additional burst testing
+        if (request.pools.size() > 0) {
+            // valid free
+            burst->freeMemory(keys.front());
+
+            // negative test: invalid free of unknown (blank) memory
+            burst->freeMemory(intptr_t{});
+
+            // negative test: double free of memory
+            burst->freeMemory(keys.front());
+        }
+    }
+}
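+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): the XOR fold used above is stable across process runs,
+// unlike std::hash; e.g. "ab" always folds to 'a' ^ 'b' == 3.
+[[maybe_unused]] static char exampleStableHash(const std::string& message) {
+    char hash = 0;
+    for (char c : message) {
+        hash ^= c;
+    }
+    return hash;
+}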
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t input = 0; input < request.inputs.size(); ++input) {
+        const std::string message = "removeInput: removed input " + std::to_string(input);
+        validate(preparedModel, message, request,
+                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+    }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t output = 0; output < request.outputs.size(); ++output) {
+        const std::string message = "removeOutput: removed output " + std::to_string(output);
+        validate(preparedModel, message, request,
+                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+    }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    removeInputTest(preparedModel, request);
+    removeOutputTest(preparedModel, request);
+}
+
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
+    Return<void> executeStatus = preparedModel->executeSynchronously(
+            request, MeasureTiming::NO,
+            [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+               const Timing& timing) {
+                ASSERT_NE(ErrorStatus::NONE, error);
+                EXPECT_EQ(outputShapes.size(), 0);
+                EXPECT_TRUE(badTiming(timing));
+            });
+    ASSERT_TRUE(executeStatus.isOk());
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
new file mode 100644
index 0000000000..4fbd0e270f
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include <algorithm>
+#include <android-base/logging.h>
+#include <hidl/ServiceManagement.h>
+#include <iostream>
+
+#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using V1_0::ErrorStatus;
+using V1_0::Request;
+using V1_1::ExecutionPreference;
+
+// internal helper function
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+    *preparedModel = nullptr;
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    const Return<void> supportedCall = device->getSupportedOperations_1_2(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedCall.isOk());
+
+    // launch prepare model
+    const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = getPreparedModel_1_2(preparedModelCallback);
+
+    // The getSupportedOperations_1_2 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_2 is called, and
+    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
+                     "model that it does not support.";
+        std::cout << "[          ] Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
+                  << std::endl;
+        GTEST_SKIP();
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
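+
+// Illustrative sketch (not part of the upstream test suite; the function name
+// is an assumption): the expected calling pattern for createPreparedModel().
+// A null result after the call means the test was skipped, not that it failed.
+[[maybe_unused]] static void exampleCreatePreparedModelUsage(const sp<IDevice>& device,
+                                                             const Model& model) {
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;  // model unsupported; GTEST_SKIP() already recorded
+    // ... exercise preparedModel ...
+}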
+
+void NeuralnetworksHidlTest::SetUp() {
+    testing::TestWithParam<NeuralnetworksHidlTestParam>::SetUp();
+    ASSERT_NE(kDevice, nullptr);
+}
+
+static NamedDevice makeNamedDevice(const std::string& name) {
+    return {name, IDevice::getService(name)};
+}
+
+static std::vector<NamedDevice> getNamedDevicesImpl() {
+    // Retrieves the name of all service instances that implement IDevice,
+    // including any Lazy HAL instances.
+    const std::vector<std::string> names = hardware::getAllHalInstanceNames(IDevice::descriptor);
+
+    // Get a handle to each device and pair it with its name.
+    std::vector<NamedDevice> namedDevices;
+    namedDevices.reserve(names.size());
+    std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
+    return namedDevices;
+}
+
+const std::vector<NamedDevice>& getNamedDevices() {
+    const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
+    return devices;
+}
+
+std::string printNeuralnetworksHidlTest(
+        const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info) {
+    return gtestCompliantName(getName(info.param));
+}
+
+INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
+
+// Forward declaration from ValidateModel.cpp
+void validateModel(const sp<IDevice>& device, const Model& model);
+// Forward declaration from ValidateRequest.cpp
+void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+// Forward declaration from ValidateRequest.cpp
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+// Forward declaration from ValidateBurst.cpp
+void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
+    validateModel(device, model);
+
+    // Create IPreparedModel.
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
+
+    validateRequest(preparedModel, request);
+    validateBurst(preparedModel, request);
+}
+
+void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
+    // TODO: Should this always succeed?
+    //       What if the invalid input is part of the model (i.e., a parameter).
+    validateModel(device, model);
+
+    // Create IPreparedModel.
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
+
+    validateRequestFailure(preparedModel, request);
+}
+
+TEST_P(ValidationTest, Test) {
+    const Model model = createModel(kTestModel);
+    const Request request = createRequest(kTestModel);
+    if (kTestModel.expectFailure) {
+        validateFailure(kDevice, model, request);
+    } else {
+        validateEverything(kDevice, model, request);
+    }
+}
+
+INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
+
+sp<IPreparedModel> getPreparedModel_1_2(const sp<PreparedModelCallback>& callback) {
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+    return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
new file mode 100644
index 0000000000..d01336eccd
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
+
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <gtest/gtest.h>
+#include <string>
+#include <vector>
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+
+using NamedDevice = Named<sp<IDevice>>;
+using NeuralnetworksHidlTestParam = NamedDevice;
+
+class NeuralnetworksHidlTest : public testing::TestWithParam<NeuralnetworksHidlTestParam> {
+  protected:
+    void SetUp() override;
+    const sp<IDevice> kDevice = getData(GetParam());
+};
+
+const std::vector<NamedDevice>& getNamedDevices();
+
+std::string printNeuralnetworksHidlTest(
+        const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info);
+
+#define INSTANTIATE_DEVICE_TEST(TestSuite)                                                 \
+    INSTANTIATE_TEST_SUITE_P(PerInstance, TestSuite, testing::ValuesIn(getNamedDevices()), \
+                             printNeuralnetworksHidlTest)
+
+// Create an IPreparedModel object. If the model cannot be prepared,
+// "preparedModel" will be nullptr instead.
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel);
+
+// Utility function to get PreparedModel from callback and downcast to V1_2.
+sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback);
+
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h b/neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h
new file mode 100644
index 0000000000..bf4792cc6b
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
+
+#include <android-base/thread_annotations.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/Status.h>
+#include <condition_variable>
+#include <mutex>
+
+/*
+ * The Callback classes are used internally by the NeuralNetworks runtime to
+ * synchronize between different threads. An asynchronous task is launched
+ * paired with a callback object. When a client thread requires the output
+ * generated by the asynchronous task, the client thread can wait for the result
+ * and be blocked until it has completed. Any wait may safely be called
+ * concurrently, even on the same callback object. When the asynchronous task
+ * has finished its workload, it must immediately call "notify*". If the
+ * asynchronous task has failed to launch, the function that tried to launch the
+ * asynchronous task must immediately call "notify*". This "notify*" call
+ * awakens any client threads waiting on the callback object.
+ *
+ * These classes exist to enable synchronization across HIDL. When
+ * synchronization is only required in the same process, consider using
+ * std::future, std::mutex, std::condition_variable, or std::experimental::latch
+ * instead.
+ */
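+
+/*
+ * Illustrative usage sketch (mirrors how the VTS tests drive these callbacks;
+ * not normative documentation, and the variable names are assumptions):
+ *
+ *     sp<PreparedModelCallback> callback = new PreparedModelCallback();
+ *     device->prepareModel_1_2(model, preference, modelCache, dataCache,
+ *                              token, callback);
+ *     callback->wait();
+ *     if (callback->getStatus() == V1_0::ErrorStatus::NONE) {
+ *         sp<V1_0::IPreparedModel> prepared = callback->getPreparedModel();
+ *     }
+ */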
+ */ + Return notify(V1_0::ErrorStatus status, + const sp& preparedModel) override; + + /** + * IPreparedModelCallback::notify_1_2 marks the callback object with the + * return status of the asynchronous model preparation along with the + * prepared model, and allows all prior and future wait calls on the + * PreparedModelCallback object to proceed. + * + * Either IPreparedModelCallback::notify or + * IPreparedModelCallback::notify_1_2 must be called on a given + * PreparedModelCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. + * + * @param status Error status returned from asynchronously preparing the + * model; will be: + * - NONE if the asynchronous preparation was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if the input model is invalid + * @param preparedModel Returned model that has been prepared for execution, + * nullptr if the model was unable to be prepared. + */ + Return notify_1_2(V1_0::ErrorStatus status, + const sp& preparedModel) override; + + /** + * PreparedModelCallback::wait blocks until notify* has been called on the + * callback object. + */ + void wait() const; + + /** + * Retrieves the error status returned from the asynchronous task launched + * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished + * asynchronously preparing the model, this call will block until the + * asynchronous task notifies the object. + * + * @return status Error status returned from asynchronously preparing the + * model; will be: + * - NONE if the asynchronous preparation was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if the input model is invalid + */ + V1_0::ErrorStatus getStatus() const; + + /** + * Retrieves the model that has been prepared for execution from the + * asynchronous task launched by IDevice::prepareModel*. If + * IDevice::prepareModel* has not finished asynchronously preparing the + * model, this call will block until the asynchronous task notifies the + * object. + * + * @return preparedModel Returned model that has been prepared for + * execution, nullptr if the model was unable to be prepared. + */ + sp getPreparedModel() const; + + private: + mutable std::mutex mMutex; + mutable std::condition_variable mCondition; + bool mNotified GUARDED_BY(mMutex) = false; + V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE; + sp mPreparedModel; +}; + +/** + * The ExecutionCallback class is used to receive the results of the execution + * from a task executing asynchronously with respect to the runtime. If a + * calling thread calls wait or get* on a ExecutionCallback object and the + * corresponding asynchronous task has not finished the execution, the calling + * thread will block until the asynchronous task has either called notify or + * notify_1_2. + * + * If the callback object is notified more than once, only the results of the + * first call to notify* are used, and the results from subsequent calls are + * discarded. + * + * This callback object is passed as an argument to IPreparedModel::execute*. 
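+ *
+ * Typical client-side use is a launch / wait / collect sequence (a sketch
+ * only, assuming a prepared model "preparedModel" and a request "request"
+ * already exist):
+ *
+ *     sp<ExecutionCallback> callback = new ExecutionCallback();
+ *     Return<V1_0::ErrorStatus> launchStatus =
+ *             preparedModel->execute_1_2(request, MeasureTiming::NO, callback);
+ *     if (!launchStatus.isOk()) return;  // transport error
+ *     callback->wait();
+ *     V1_0::ErrorStatus status = callback->getStatus();
+ *     const std::vector<OutputShape>& shapes = callback->getOutputShapes();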
+ */
+class ExecutionCallback : public IExecutionCallback {
+  public:
+    /**
+     * IExecutionCallback::notify marks the callback object with the return
+     * status of the asynchronous execution that held this callback and enables
+     * all prior and future wait calls on the ExecutionCallback object to
+     * proceed.
+     *
+     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+     * be called on a given ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large
+     *       enough to store the resultant values
+     *     - INVALID_ARGUMENT if the input request is invalid
+     */
+    Return<void> notify(V1_0::ErrorStatus status) override;
+
+    /**
+     * IExecutionCallback::notify_1_2 marks the callback object with the results
+     * (error status, dynamic output shapes, and timing information) of the
+     * asynchronous execution that held this callback and enables all prior and
+     * future wait calls on the ExecutionCallback object to proceed.
+     *
+     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+     * be called on a given ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *       error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *       not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *       invalid
+     * @param outputShapes A list of shape information of model output operands.
+     *     The index into "outputShapes" corresponds to the index of the output
+     *     operand in the Request outputs vector. outputShapes must be empty
+     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
+     *     when launching the execution and status is NONE, all times must be
+     *     reported as UINT64_MAX. A driver may choose to report any time as
+     *     UINT64_MAX, indicating that particular measurement is not available.
+     */
+    Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                            const Timing& timing) override;
+
+    // An overload of the latest notify interface to hide the version from ExecutionBuilder.
+    Return<void> notify(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                        const Timing& timing) {
+        return notify_1_2(status, outputShapes, timing);
+    }
+
+    /**
+     * ExecutionCallback::wait blocks until notify* has been called on the
+     * callback object.
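+     *
+     * The accompanying Callbacks.cpp implements this as a predicate wait on
+     * a condition variable, which is robust against spurious wakeups:
+     *
+     *     std::unique_lock<std::mutex> lock(mMutex);
+     *     mCondition.wait(lock, [this] { return mNotified; });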
+     */
+    void wait() const;
+
+    /**
+     * Retrieves the error status returned from the asynchronous task launched
+     * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If
+     * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished
+     * asynchronously executing, this call will block until the asynchronous
+     * task notifies the object.
+     *
+     * @return status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *       error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *       not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *       invalid
+     */
+    V1_0::ErrorStatus getStatus() const;
+
+    /**
+     * Retrieves the output shapes returned from the asynchronous task launched
+     * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not
+     * finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, an
+     * empty vector will be returned.
+     *
+     * @return outputShapes A list of shape information of model output
+     *     operands. The index into "outputShapes" corresponds to the index of
+     *     the output operand in the Request outputs vector. outputShapes must
+     *     be empty unless the status is either NONE or
+     *     OUTPUT_INSUFFICIENT_SIZE. outputShapes may be empty if the status is
+     *     NONE and all model output operands are fully-specified at execution
+     *     time. outputShapes must have the same number of elements as the
+     *     number of model output operands if the status is
+     *     OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
+     *     at least one output operand that is not fully-specified.
+     */
+    const std::vector<OutputShape>& getOutputShapes() const;
+
+    /**
+     * Retrieves the duration of execution of the asynchronous task launched by
+     * IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not
+     * finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, every
+     * time must be UINT64_MAX.
+     *
+     * @return timing Duration of the execution. Every time must be UINT64_MAX
+     *     unless the status is NONE.
+     */
+    Timing getTiming() const;
+
+  private:
+    /*
+     * ExecutionCallback::notifyInternal stores the results of the execution
+     * (status, output shapes, and timing information) in the ExecutionCallback
+     * object before any call to wait or get* return. It then enables all prior
+     * and future wait calls on the ExecutionCallback object to proceed.
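+     *
+     * As in the notify* methods, the first notification wins: notifyInternal
+     * takes mMutex, returns early if mNotified is already set, and otherwise
+     * stores the results, sets mNotified, and wakes all waiters via
+     * mCondition.notify_all().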
+ */ + void notifyInternal(V1_0::ErrorStatus errorStatus, const hidl_vec& outputShapes, + const Timing& timing); + + // members + mutable std::mutex mMutex; + mutable std::condition_variable mCondition; + bool mNotified GUARDED_BY(mMutex) = false; + V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE; + std::vector mOutputShapes = {}; + Timing mTiming = {}; +}; + +} // namespace android::hardware::neuralnetworks::V1_2::implementation + +#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H From 5ef23f16ea21b1620ecdbfabb39104f1a20941b4 Mon Sep 17 00:00:00 2001 From: Lev Proleev Date: Fri, 30 Aug 2019 11:57:18 +0100 Subject: [PATCH 07/19] Modify NNAPI VTS tests to run on version 1.3 Bug: 139120468 Test: VtsHalNeuralnetworksV1_3TargetTest Change-Id: I4654dc75c17f8801103015dc1da91663dfa28d52 Merged-In: I4654dc75c17f8801103015dc1da91663dfa28d52 (cherry picked from commit b49dadfb64d585b768b5bcf4f4a61bd3b93e87d1) --- neuralnetworks/1.2/vts/functional/Android.bp | 19 +- neuralnetworks/1.3/vts/functional/Android.bp | 58 ++++ .../1.3/vts/functional/BasicTests.cpp | 62 +--- .../1.3/vts/functional/Callbacks.cpp | 143 -------- .../functional/CompilationCachingTests.cpp | 13 +- .../vts/functional/GeneratedTestHarness.cpp | 18 +- .../1.3/vts/functional/GeneratedTestHarness.h | 19 +- .../1.3/vts/functional/TestAssertions.cpp | 9 +- .../1.3/vts/functional/ValidateBurst.cpp | 11 +- .../1.3/vts/functional/ValidateModel.cpp | 21 +- .../1.3/vts/functional/ValidateRequest.cpp | 10 +- .../vts/functional/VtsHalNeuralnetworks.cpp | 20 +- .../1.3/vts/functional/VtsHalNeuralnetworks.h | 19 +- .../vts/functional/include/1.2/Callbacks.h | 325 ------------------ 14 files changed, 170 insertions(+), 577 deletions(-) create mode 100644 neuralnetworks/1.3/vts/functional/Android.bp delete mode 100644 neuralnetworks/1.3/vts/functional/Callbacks.cpp delete mode 100644 neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp index 9c50d36ec5..bdca0e95e0 100644 --- a/neuralnetworks/1.2/vts/functional/Android.bp +++ b/neuralnetworks/1.2/vts/functional/Android.bp @@ -14,12 +14,28 @@ // limitations under the License. // +cc_library_static { + name: "VtsHalNeuralNetworksV1_2Callbacks", + defaults: ["VtsHalTargetTestDefaults"], + export_include_dirs: ["include"], + srcs: [ + "Callbacks.cpp", + ], + static_libs: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", + ], + header_libs: [ + "libbase_headers", + ] +} + cc_test { name: "VtsHalNeuralnetworksV1_2TargetTest", defaults: ["VtsHalTargetTestDefaults"], srcs: [ "BasicTests.cpp", - "Callbacks.cpp", "CompilationCachingTests.cpp", "GeneratedTestHarness.cpp", "TestAssertions.cpp", @@ -45,6 +61,7 @@ cc_test { "libneuralnetworks_generated_test_harness", "libneuralnetworks_utils", "VtsHalNeuralNetworksV1_0_utils", + "VtsHalNeuralNetworksV1_2Callbacks", ], whole_static_libs: [ "neuralnetworks_generated_V1_0_example", diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp new file mode 100644 index 0000000000..90ce852e3e --- /dev/null +++ b/neuralnetworks/1.3/vts/functional/Android.bp @@ -0,0 +1,58 @@ +// +// Copyright (C) 2019 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +cc_test { + name: "VtsHalNeuralNetworksV1_3TargetTest", + defaults: ["VtsHalTargetTestDefaults"], + srcs: [ + "BasicTests.cpp", + "CompilationCachingTests.cpp", + "GeneratedTestHarness.cpp", + "TestAssertions.cpp", + "ValidateBurst.cpp", + "ValidateModel.cpp", + "ValidateRequest.cpp", + "VtsHalNeuralnetworks.cpp", + ], + shared_libs: [ + "libfmq", + "libnativewindow", + ], + static_libs: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", + "android.hardware.neuralnetworks@1.3", + "android.hidl.allocator@1.0", + "android.hidl.memory@1.0", + "libgmock", + "libhidlmemory", + "libneuralnetworks_generated_test_harness", + "libneuralnetworks_utils", + "VtsHalNeuralNetworksV1_0_utils", + "VtsHalNeuralNetworksV1_2Callbacks", + ], + whole_static_libs: [ + "neuralnetworks_generated_V1_0_example", + "neuralnetworks_generated_V1_1_example", + "neuralnetworks_generated_V1_2_example", + "neuralnetworks_generated_V1_3_example", + ], + header_libs: [ + "libneuralnetworks_headers", + ], + test_suites: ["general-tests"], +} diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp index 8e82c5376e..b64dc2f61b 100644 --- a/neuralnetworks/1.3/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp @@ -18,11 +18,14 @@ #include "VtsHalNeuralnetworks.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { using V1_0::DeviceStatus; using V1_0::ErrorStatus; using V1_0::PerformanceInfo; +using V1_2::Constant; +using V1_2::DeviceType; +using V1_2::Extension; // create device test TEST_P(NeuralnetworksHidlTest, CreateDevice) {} @@ -37,7 +40,7 @@ TEST_P(NeuralnetworksHidlTest, StatusTest) { // initialization TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { using OperandPerformance = Capabilities::OperandPerformance; - Return ret = kDevice->getCapabilities_1_2([](ErrorStatus status, + Return ret = kDevice->getCapabilities_1_3([](ErrorStatus status, const Capabilities& capabilities) { EXPECT_EQ(ErrorStatus::NONE, status); @@ -58,57 +61,4 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { }); EXPECT_TRUE(ret.isOk()); } - -// device version test -TEST_P(NeuralnetworksHidlTest, GetDeviceVersionStringTest) { - Return ret = - kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_LT(0, version.size()); - }); - EXPECT_TRUE(ret.isOk()); -} - -// device type test -TEST_P(NeuralnetworksHidlTest, GetDeviceTypeTest) { - Return ret = kDevice->getType([](ErrorStatus status, DeviceType type) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU || - type == DeviceType::GPU || type == DeviceType::ACCELERATOR); - }); - EXPECT_TRUE(ret.isOk()); -} - -// device supported extensions test -TEST_P(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) { - Return ret = kDevice->getSupportedExtensions( - [](ErrorStatus status, const 
hidl_vec& extensions) { - EXPECT_EQ(ErrorStatus::NONE, status); - for (auto& extension : extensions) { - std::string extensionName = extension.name; - EXPECT_FALSE(extensionName.empty()); - for (char c : extensionName) { - EXPECT_TRUE(('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '_' || - c == '.') - << "Extension name contains an illegal character: " << c; - } - EXPECT_NE(extensionName.find('.'), std::string::npos) - << "Extension name must start with the reverse domain name of the " - "vendor"; - } - }); - EXPECT_TRUE(ret.isOk()); -} - -// getNumberOfCacheFilesNeeded test -TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) { - Return ret = kDevice->getNumberOfCacheFilesNeeded( - [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_LE(numModelCache, - static_cast(Constant::MAX_NUMBER_OF_CACHE_FILES)); - EXPECT_LE(numDataCache, static_cast(Constant::MAX_NUMBER_OF_CACHE_FILES)); - }); - EXPECT_TRUE(ret.isOk()); -} -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/Callbacks.cpp b/neuralnetworks/1.3/vts/functional/Callbacks.cpp deleted file mode 100644 index 3972ad6ff2..0000000000 --- a/neuralnetworks/1.3/vts/functional/Callbacks.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#define LOG_TAG "Callbacks" - -#include "1.2/Callbacks.h" - -#include - -#include - -namespace android::hardware::neuralnetworks::V1_2::implementation { - -using V1_0::ErrorStatus; - -constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits::max(), - .timeInDriver = std::numeric_limits::max()}; - -// PreparedModelCallback methods begin here - -Return PreparedModelCallback::notify(ErrorStatus errorStatus, - const sp& preparedModel) { - { - std::lock_guard hold(mMutex); - - // quick-return if object has already been notified - if (mNotified) { - return Void(); - } - - // store results and mark as notified - mErrorStatus = errorStatus; - mPreparedModel = preparedModel; - mNotified = true; - } - - mCondition.notify_all(); - return Void(); -} - -Return PreparedModelCallback::notify_1_2(ErrorStatus errorStatus, - const sp& preparedModel) { - return notify(errorStatus, preparedModel); -} - -void PreparedModelCallback::wait() const { - std::unique_lock lock(mMutex); - mCondition.wait(lock, [this] { return mNotified; }); -} - -ErrorStatus PreparedModelCallback::getStatus() const { - wait(); - return mErrorStatus; -} - -sp PreparedModelCallback::getPreparedModel() const { - wait(); - return mPreparedModel; -} - -// ExecutionCallback methods begin here - -Return ExecutionCallback::notify(ErrorStatus errorStatus) { - notifyInternal(errorStatus, {}, kNoTiming); - return Void(); -} - -Return ExecutionCallback::notify_1_2(ErrorStatus errorStatus, - const hidl_vec& outputShapes, - const Timing& timing) { - if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE. - if (outputShapes.size() == 0) { - LOG(ERROR) << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE"; - notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); - return Void(); - } - } else if (errorStatus != ErrorStatus::NONE) { - // outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE. 
- if (outputShapes.size() != 0) { - LOG(ERROR) << "Notified with non-empty output shape vector when error status is " - "neither NONE nor OUTPUT_INSUFFICIENT_SIZE"; - notifyInternal(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); - return Void(); - } - } - notifyInternal(errorStatus, outputShapes, timing); - return Void(); -} - -void ExecutionCallback::wait() const { - std::unique_lock lock(mMutex); - mCondition.wait(lock, [this] { return mNotified; }); -} - -ErrorStatus ExecutionCallback::getStatus() const { - wait(); - return mErrorStatus; -} - -const std::vector& ExecutionCallback::getOutputShapes() const { - wait(); - return mOutputShapes; -} - -Timing ExecutionCallback::getTiming() const { - wait(); - return mTiming; -} - -void ExecutionCallback::notifyInternal(ErrorStatus errorStatus, - const hidl_vec& outputShapes, - const Timing& timing) { - { - std::lock_guard hold(mMutex); - - // quick-return if object has already been notified - if (mNotified) { - return; - } - - mErrorStatus = errorStatus; - mOutputShapes = outputShapes; - mTiming = timing; - mNotified = true; - } - mCondition.notify_all(); -} - -} // namespace android::hardware::neuralnetworks::V1_2::implementation diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp index 2130a76b75..0ac4738fff 100644 --- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp +++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp @@ -45,12 +45,15 @@ namespace generated_tests::mobilenet_quantized { const test_helper::TestModel& get_test_model(); } // namespace generated_tests::mobilenet_quantized -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { using namespace test_helper; -using implementation::PreparedModelCallback; using V1_0::ErrorStatus; using V1_1::ExecutionPreference; +using V1_2::Constant; +using V1_2::IPreparedModel; +using V1_2::OperationType; +using V1_2::implementation::PreparedModelCallback; namespace float32_model { @@ -302,7 +305,7 @@ class CompilationCachingTestBase : public testing::Test { // See if the service can handle the model. 
bool isModelFullySupported(const Model& model) { bool fullySupportsModel = false; - Return supportedCall = kDevice->getSupportedOperations_1_2( + Return supportedCall = kDevice->getSupportedOperations_1_3( model, [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec& supported) { ASSERT_EQ(ErrorStatus::NONE, status); @@ -323,7 +326,7 @@ class CompilationCachingTestBase : public testing::Test { sp preparedModelCallback = new PreparedModelCallback(); hidl_array cacheToken(mToken); Return prepareLaunchStatus = - kDevice->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, + kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache, dataCache, cacheToken, preparedModelCallback); ASSERT_TRUE(prepareLaunchStatus.isOk()); ASSERT_EQ(static_cast(prepareLaunchStatus), ErrorStatus::NONE); @@ -1371,4 +1374,4 @@ INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest, testing::Range(0U, 10U)), printCompilationCachingSecurityTest); -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp index 2beec983e0..16a7d70fb5 100644 --- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp +++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp @@ -27,6 +27,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -44,17 +47,24 @@ #include "Utils.h" #include "VtsHalNeuralnetworks.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { using namespace test_helper; using hidl::memory::V1_0::IMemory; -using implementation::ExecutionCallback; -using implementation::PreparedModelCallback; using V1_0::DataLocation; using V1_0::ErrorStatus; using V1_0::OperandLifeTime; using V1_0::Request; using V1_1::ExecutionPreference; +using V1_2::Constant; +using V1_2::IPreparedModel; +using V1_2::MeasureTiming; +using V1_2::OperationType; +using V1_2::OutputShape; +using V1_2::SymmPerChannelQuantParams; +using V1_2::Timing; +using V1_2::implementation::ExecutionCallback; +using V1_2::implementation::PreparedModelCallback; using HidlToken = hidl_array(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT }; @@ -405,4 +415,4 @@ INSTANTIATE_GENERATED_TEST(GeneratedTest, INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) { return !testModel.expectFailure; }); -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h index dfc980c169..b9277cfd4a 100644 --- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h +++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h @@ -14,19 +14,19 @@ * limitations under the License. 
*/ -#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H -#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H +#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H +#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H -#include #include -#include +#include +#include #include #include #include "1.0/Utils.h" #include "TestHarness.h" #include "VtsHalNeuralnetworks.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { using NamedModel = Named; using GeneratedTestParam = std::tuple; @@ -55,11 +55,12 @@ class ValidationTest : public GeneratedTestBase {}; Model createModel(const test_helper::TestModel& testModel); -void PrepareModel(const sp& device, const Model& model, sp* preparedModel); +void PrepareModel(const sp& device, const Model& model, + sp* preparedModel); -void EvaluatePreparedModel(const sp& preparedModel, +void EvaluatePreparedModel(const sp& preparedModel, const test_helper::TestModel& testModel, bool testDynamicOutputShape); -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional -#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H +#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp index a0aa3c37d1..7361078eca 100644 --- a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp +++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp @@ -14,10 +14,10 @@ * limitations under the License. */ -#include +#include #include "TestHarness.h" -namespace android::hardware::neuralnetworks::V1_2 { +namespace android::hardware::neuralnetworks::V1_3 { // Make sure that the HIDL enums are compatible with the values defined in // frameworks/ml/nn/tools/test_generator/test_harness/include/TestHarness.h. 
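Each CHECK_TEST_ENUM assertion in the next hunk compiles to a static_assert,
so any drift between the test-harness enums and the HIDL enums fails at build
time. For instance, CHECK_TEST_ENUM(OperandType, FLOAT32) expands to:

    static_assert(static_cast<OperandType>(TestOperandType::FLOAT32) ==
                  OperandType::FLOAT32);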
@@ -25,6 +25,8 @@ using namespace test_helper; #define CHECK_TEST_ENUM(EnumType, enumValue) \ static_assert(static_cast(Test##EnumType::enumValue) == EnumType::enumValue) +using V1_2::OperationType; + CHECK_TEST_ENUM(OperandType, FLOAT32); CHECK_TEST_ENUM(OperandType, INT32); CHECK_TEST_ENUM(OperandType, UINT32); @@ -39,6 +41,7 @@ CHECK_TEST_ENUM(OperandType, FLOAT16); CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM_PER_CHANNEL); CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_ASYMM); CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM); +CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM_SIGNED); CHECK_TEST_ENUM(OperationType, ADD); CHECK_TEST_ENUM(OperationType, AVERAGE_POOL_2D); @@ -138,4 +141,4 @@ CHECK_TEST_ENUM(OperationType, RESIZE_NEAREST_NEIGHBOR); #undef CHECK_TEST_ENUM -} // namespace android::hardware::neuralnetworks::V1_2 +} // namespace android::hardware::neuralnetworks::V1_3 diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp index 1d4493d208..95f9f427b2 100644 --- a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp +++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp @@ -28,13 +28,20 @@ #include #include -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { using nn::ExecutionBurstController; using nn::RequestChannelSender; using nn::ResultChannelReceiver; using V1_0::ErrorStatus; using V1_0::Request; +using V1_2::FmqRequestDatum; +using V1_2::FmqResultDatum; +using V1_2::IBurstCallback; +using V1_2::IBurstContext; +using V1_2::IPreparedModel; +using V1_2::MeasureTiming; +using V1_2::Timing; using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback; // This constant value represents the length of an FMQ that is large enough to @@ -397,4 +404,4 @@ void validateBurst(const sp& preparedModel, const Request& reque ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, request)); } -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp index 30530beacc..44b32a9fec 100644 --- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp @@ -21,21 +21,26 @@ #include "GeneratedTestHarness.h" #include "VtsHalNeuralnetworks.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { -using implementation::PreparedModelCallback; using V1_0::ErrorStatus; using V1_0::OperandLifeTime; using V1_1::ExecutionPreference; -using HidlToken = hidl_array(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; +using V1_2::IPreparedModel; +using V1_2::OperationType; +using V1_2::OperationTypeRange; +using V1_2::SymmPerChannelQuantParams; +using V1_2::implementation::PreparedModelCallback; +using HidlToken = + hidl_array(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; ///////////////////////// UTILITY FUNCTIONS ///////////////////////// static void validateGetSupportedOperations(const sp& device, const std::string& message, const Model& model) { - SCOPED_TRACE(message + " [getSupportedOperations_1_2]"); + SCOPED_TRACE(message + " [getSupportedOperations_1_3]"); - Return ret = device->getSupportedOperations_1_2( + Return ret = device->getSupportedOperations_1_3( model, [&](ErrorStatus status, const 
hidl_vec&) { EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); }); @@ -44,11 +49,11 @@ static void validateGetSupportedOperations(const sp& device, const std: static void validatePrepareModel(const sp& device, const std::string& message, const Model& model, ExecutionPreference preference) { - SCOPED_TRACE(message + " [prepareModel_1_2]"); + SCOPED_TRACE(message + " [prepareModel_1_3]"); sp preparedModelCallback = new PreparedModelCallback(); Return prepareLaunchStatus = - device->prepareModel_1_2(model, preference, hidl_vec(), + device->prepareModel_1_3(model, preference, hidl_vec(), hidl_vec(), HidlToken(), preparedModelCallback); ASSERT_TRUE(prepareLaunchStatus.isOk()); ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); @@ -710,4 +715,4 @@ void validateModel(const sp& device, const Model& model) { mutateExecutionPreferenceTest(device, model); } -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp index f25ee62617..612212382c 100644 --- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp +++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp @@ -24,11 +24,15 @@ #include "Utils.h" #include "VtsHalNeuralnetworks.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { -using implementation::ExecutionCallback; using V1_0::ErrorStatus; using V1_0::Request; +using V1_2::IPreparedModel; +using V1_2::MeasureTiming; +using V1_2::OutputShape; +using V1_2::Timing; +using V1_2::implementation::ExecutionCallback; ///////////////////////// UTILITY FUNCTIONS ///////////////////////// @@ -165,4 +169,4 @@ void validateRequestFailure(const sp& preparedModel, const Reque ASSERT_TRUE(executeStatus.isOk()); } -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp index 4fbd0e270f..4f0e150b32 100644 --- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp +++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp @@ -26,13 +26,15 @@ #include "GeneratedTestHarness.h" #include "TestHarness.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { -using implementation::PreparedModelCallback; -using HidlToken = hidl_array(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; +using HidlToken = + hidl_array(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; using V1_0::ErrorStatus; using V1_0::Request; using V1_1::ExecutionPreference; +using V1_2::IPreparedModel; +using V1_2::implementation::PreparedModelCallback; // internal helper function void createPreparedModel(const sp& device, const Model& model, @@ -42,7 +44,7 @@ void createPreparedModel(const sp& device, const Model& model, // see if service can handle model bool fullySupportsModel = false; - const Return supportedCall = device->getSupportedOperations_1_2( + const Return supportedCall = device->getSupportedOperations_1_3( model, [&fullySupportsModel](ErrorStatus status, const hidl_vec& supported) { ASSERT_EQ(ErrorStatus::NONE, status); ASSERT_NE(0ul, supported.size()); @@ -53,7 +55,7 @@ void createPreparedModel(const sp& device, 
const Model& model, // launch prepare model const sp preparedModelCallback = new PreparedModelCallback(); - const Return prepareLaunchStatus = device->prepareModel_1_2( + const Return prepareLaunchStatus = device->prepareModel_1_3( model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec(), hidl_vec(), HidlToken(), preparedModelCallback); ASSERT_TRUE(prepareLaunchStatus.isOk()); @@ -64,8 +66,8 @@ void createPreparedModel(const sp& device, const Model& model, const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); *preparedModel = getPreparedModel_1_2(preparedModelCallback); - // The getSupportedOperations_1_2 call returns a list of operations that are - // guaranteed not to fail if prepareModel_1_2 is called, and + // The getSupportedOperations_1_3 call returns a list of operations that are + // guaranteed not to fail if prepareModel_1_3 is called, and // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed. // If a driver has any doubt that it can prepare an operation, it must // return false. So here, if a driver isn't sure if it can support an @@ -163,9 +165,9 @@ TEST_P(ValidationTest, Test) { INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; }); -sp getPreparedModel_1_2(const sp& callback) { +sp getPreparedModel_1_2(const sp& callback) { sp preparedModelV1_0 = callback->getPreparedModel(); return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr); } -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h index d01336eccd..fc654ce8f0 100644 --- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h +++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h @@ -14,17 +14,17 @@ * limitations under the License. */ -#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H -#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H +#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H +#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H -#include #include -#include +#include +#include #include #include "1.0/Utils.h" #include "1.2/Callbacks.h" -namespace android::hardware::neuralnetworks::V1_2::vts::functional { +namespace android::hardware::neuralnetworks::V1_3::vts::functional { using NamedDevice = Named>; using NeuralnetworksHidlTestParam = NamedDevice; @@ -47,11 +47,12 @@ std::string printNeuralnetworksHidlTest( // Create an IPreparedModel object. If the model cannot be prepared, // "preparedModel" will be nullptr instead. void createPreparedModel(const sp& device, const Model& model, - sp* preparedModel); + sp* preparedModel); // Utility function to get PreparedModel from callback and downcast to V1_2. 
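+// (A sketch of what the downcast does: castFrom performs a checked HIDL
+// interface downcast, and withDefault(nullptr) maps a failed cast or a
+// transport error to nullptr, which callers are expected to test for.)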
-sp getPreparedModel_1_2(const sp& callback); +sp getPreparedModel_1_2( + const sp& callback); -} // namespace android::hardware::neuralnetworks::V1_2::vts::functional +} // namespace android::hardware::neuralnetworks::V1_3::vts::functional -#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H +#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H diff --git a/neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h b/neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h deleted file mode 100644 index bf4792cc6b..0000000000 --- a/neuralnetworks/1.3/vts/functional/include/1.2/Callbacks.h +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H -#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * The Callback classes are used internally by the NeuralNetworks runtime to - * synchronize between different threads. An asynchronous task is launched - * paired with a callback object. When a client thread requires the output being - * generated by the asynchronous task, the client thread can wait for the result - * and be blocked until it has completed. Any wait may safely be called - * concurrently, even on the same callback object. When the asynchronous task - * has finished its workload, it must immediately call "notify*". If the - * asynchronous task has failed to launch, the function that tried to launch the - * asynchronous task must immediately call "notify*". This "notify*" call - * awakens any client threads waiting on the callback object. - * - * These classes exist to enable synchronization across HIDL. When - * synchronization is only required in the same process, consider using - * std::future, std::mutex, std::condition_variable, or std::experimental::latch - * instead. - */ - -namespace android::hardware::neuralnetworks::V1_2::implementation { - -/** - * The PreparedModelCallback class is used to receive the error status of - * preparing a model as well as the prepared model from a task executing - * asynchronously with respect to the runtime. If a calling thread calls wait - * or get* on a PreparedModelCallback object and the corresponding asynchronous - * task has not finished preparing the model, the calling thread will block - * until the asynchronous task has either called notify or notify_1_2. - * - * If the callback object is notified more than once, only the results of the - * first call to notify* are used, and the results from subsequent calls are - * discarded. - * - * This callback object is passed as an argument to IDevice::prepareModel*. 
- */ -class PreparedModelCallback : public IPreparedModelCallback { - public: - /** - * IPreparedModelCallback::notify marks the callback object with the return - * status of the asynchronous model preparation along with the prepared - * model, and allows all prior and future wait calls on the - * PreparedModelCallback object to proceed. - * - * Either IPreparedModelCallback::notify or - * IPreparedModelCallback::notify_1_2 must be called on a given - * PreparedModelCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - * @param preparedModel Returned model that has been prepared for execution, - * nullptr if the model was unable to be prepared. - */ - Return notify(V1_0::ErrorStatus status, - const sp& preparedModel) override; - - /** - * IPreparedModelCallback::notify_1_2 marks the callback object with the - * return status of the asynchronous model preparation along with the - * prepared model, and allows all prior and future wait calls on the - * PreparedModelCallback object to proceed. - * - * Either IPreparedModelCallback::notify or - * IPreparedModelCallback::notify_1_2 must be called on a given - * PreparedModelCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - * @param preparedModel Returned model that has been prepared for execution, - * nullptr if the model was unable to be prepared. - */ - Return notify_1_2(V1_0::ErrorStatus status, - const sp& preparedModel) override; - - /** - * PreparedModelCallback::wait blocks until notify* has been called on the - * callback object. - */ - void wait() const; - - /** - * Retrieves the error status returned from the asynchronous task launched - * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished - * asynchronously preparing the model, this call will block until the - * asynchronous task notifies the object. - * - * @return status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - */ - V1_0::ErrorStatus getStatus() const; - - /** - * Retrieves the model that has been prepared for execution from the - * asynchronous task launched by IDevice::prepareModel*. If - * IDevice::prepareModel* has not finished asynchronously preparing the - * model, this call will block until the asynchronous task notifies the - * object. - * - * @return preparedModel Returned model that has been prepared for - * execution, nullptr if the model was unable to be prepared. 
- */ - sp getPreparedModel() const; - - private: - mutable std::mutex mMutex; - mutable std::condition_variable mCondition; - bool mNotified GUARDED_BY(mMutex) = false; - V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE; - sp mPreparedModel; -}; - -/** - * The ExecutionCallback class is used to receive the results of the execution - * from a task executing asynchronously with respect to the runtime. If a - * calling thread calls wait or get* on a ExecutionCallback object and the - * corresponding asynchronous task has not finished the execution, the calling - * thread will block until the asynchronous task has either called notify or - * notify_1_2. - * - * If the callback object is notified more than once, only the results of the - * first call to notify* are used, and the results from subsequent calls are - * discarded. - * - * This callback object is passed as an argument to IPreparedModel::execute*. - */ -class ExecutionCallback : public IExecutionCallback { - public: - /** - * IExecutionCallback::notify marks the callback object with the return - * status of the asynchronous execution that held this callback and enables - * all prior and future wait calls on the ExecutionCallback object to - * proceed. - * - * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must - * be called on a given ExecutionCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from launching the asynchronous task - * (if the launch fails) or from the asynchronous task itself (if the - * launch succeeds). Must be: - * - NONE if the asynchronous execution was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large - * enough to store the resultant values - * - INVALID_ARGUMENT if the input request is invalid - */ - Return notify(V1_0::ErrorStatus status) override; - - /** - * IExecutionCallback::notify_1_2 marks the callback object with the results - * (error status, dynamic output shapes, and timing information) of the - * asynchronous execution that held this callback and enables all prior and - * future wait calls on the ExecutionCallback object to proceed. - * - * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must - * be called on a given ExecutionCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from launching the asynchronous task - * (if the launch fails) or from the asynchronous task itself (if the - * launch succeeds). Must be: - * - NONE if the asynchronous execution was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if the asynchronous task resulted in an unspecified - * error - * - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is - * not large enough to store the corresponding output - * - INVALID_ARGUMENT if one of the input arguments to prepareModel is - * invalid - * @param outputShapes A list of shape information of model output operands. - * The index into "outputShapes" corresponds to the index of the output - * operand in the Request outputs vector. 
outputShapes must be empty
-     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
-     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
-     *     when launching the execution and status is NONE, all times must be
-     *     reported as UINT64_MAX. A driver may choose to report any time as
-     *     UINT64_MAX, indicating that particular measurement is not available.
-     */
-    Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
-                            const Timing& timing) override;
-
-    // An overload of the latest notify interface to hide the version from ExecutionBuilder.
-    Return<void> notify(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
-                        const Timing& timing) {
-        return notify_1_2(status, outputShapes, timing);
-    }
-
-    /**
-     * ExecutionCallback::wait blocks until notify* has been called on the
-     * callback object.
-     */
-    void wait() const;
-
-    /**
-     * Retrieves the error status returned from the asynchronous task launched
-     * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If
-     * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished
-     * asynchronously executing, this call will block until the asynchronous
-     * task notifies the object.
-     *
-     * @return status Error status returned from launching the asynchronous task
-     *     (if the launch fails) or from the asynchronous task itself (if the
-     *     launch succeeds). Must be:
-     *     - NONE if the asynchronous execution was successful
-     *     - DEVICE_UNAVAILABLE if driver is offline or busy
-     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
-     *       error
-     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
-     *       not large enough to store the corresponding output
-     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
-     *       invalid
-     */
-    V1_0::ErrorStatus getStatus() const;
-
-    /**
-     * Retrieves the output shapes returned from the asynchronous task launched
-     * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not
-     * finished asynchronously executing, this call will block until the
-     * asynchronous task notifies the object.
-     *
-     * If the asynchronous task was launched by IPreparedModel::execute, an
-     * empty vector will be returned.
-     *
-     * @return outputShapes A list of shape information of model output
-     *     operands. The index into "outputShapes" corresponds to the index of
-     *     the output operand in the Request outputs vector. outputShapes must
-     *     be empty unless the status is either NONE or
-     *     OUTPUT_INSUFFICIENT_SIZE. outputShapes may be empty if the status is
-     *     NONE and all model output operands are fully-specified at execution
-     *     time. outputShapes must have the same number of elements as the
-     *     number of model output operands if the status is
-     *     OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
-     *     at least one output operand that is not fully-specified.
-     */
-    const std::vector<OutputShape>& getOutputShapes() const;
-
-    /**
-     * Retrieves the duration of execution of the asynchronous task launched by
-     * IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not
-     * finished asynchronously executing, this call will block until the
-     * asynchronous task notifies the object.
-     *
-     * If the asynchronous task was launched by IPreparedModel::execute, every
-     * time must be UINT64_MAX.
-     *
-     * @return timing Duration of the execution. Every time must be UINT64_MAX
-     *     unless the status is NONE.
- */ - Timing getTiming() const; - - private: - /* - * ExecutionCallback::notifyInternal stores the results of the execution - * (status, output shapes, and timing information) in the ExecutionCallback - * object before any call to wait or get* return. It then enables all prior - * and future wait calls on the ExecutionCallback object to proceed. - */ - void notifyInternal(V1_0::ErrorStatus errorStatus, const hidl_vec& outputShapes, - const Timing& timing); - - // members - mutable std::mutex mMutex; - mutable std::condition_variable mCondition; - bool mNotified GUARDED_BY(mMutex) = false; - V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE; - std::vector mOutputShapes = {}; - Timing mTiming = {}; -}; - -} // namespace android::hardware::neuralnetworks::V1_2::implementation - -#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H From 780abb8b6d3550dc4f7211b977b54be590723380 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Fri, 4 Oct 2019 14:32:51 -0700 Subject: [PATCH 08/19] Increase neuralnetworks compatibility to 1.3 Bug: 139120468 Test: mma Change-Id: I66247588f8e42ebd85cc95b844f2d352adc81462 Merged-In: I66247588f8e42ebd85cc95b844f2d352adc81462 (cherry picked from commit 7259f3ab24a5f61c99e74c5cbf7ffa8fb3a9e163) --- compatibility_matrices/compatibility_matrix.current.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml index 6a89dab562..8332df29eb 100644 --- a/compatibility_matrices/compatibility_matrix.current.xml +++ b/compatibility_matrices/compatibility_matrix.current.xml @@ -307,7 +307,7 @@ android.hardware.neuralnetworks - 1.0-2 + 1.0-3 IDevice .* From 17030494e3671fe216befcbaf8b08a7627cbb17c Mon Sep 17 00:00:00 2001 From: Kevin Rocard Date: Thu, 13 Dec 2018 11:33:17 -0800 Subject: [PATCH 09/19] DO NOT MERGE: Audio VTS: Wait after stream close Due to asynchronous nature of the destruction of server-side objects it is required to flush IPC messages to the server and wait to avoid flakiness due to an attempt to open the stream while it's still not closed on the server side. This patch is specific for Android P release. Test: atest VtsHalAudioV4_0TargetTest Bug: 118655804 Change-Id: I1a5ec28bce9802ec654c139153ec4aa6786474e5 --- .../functional/AudioPrimaryHidlHalTest.cpp | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp index 46c228a19f..71d91db360 100644 --- a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp +++ b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp @@ -191,7 +191,7 @@ TEST_F(AudioHidlTest, OpenPrimaryDeviceUsingGetDevice) { // flushCommand makes sure all local command are sent, thus should reduce // the latency between local and remote destruction. 
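+    // (Note that usleep takes microseconds: the old usleep(100) below slept
+    // for only 100 us, while the intended wait is 100 ms, i.e. 100*1000.)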
IPCThreadState::self()->flushCommands(); - usleep(100); + usleep(100*1000); } ////////////////////////////////////////////////////////////////////////////// @@ -698,13 +698,27 @@ class OpenStreamTest : public AudioConfigPrimaryTest, Return closeStream() { open = false; - return stream->close(); + auto res = stream->close(); + stream.clear(); + waitForStreamDestruction(); + return res; + } + + void waitForStreamDestruction() { + // FIXME: there is no way to know when the remote IStream is being destroyed + // Binder does not support testing if an object is alive, thus + // wait for 100ms to let the binder destruction propagates and + // the remote device has the time to be destroyed. + // flushCommand makes sure all local command are sent, thus should reduce + // the latency between local and remote destruction. + IPCThreadState::self()->flushCommands(); + usleep(100*1000); } private: void TearDown() override { if (open) { - ASSERT_OK(stream->close()); + ASSERT_OK(closeStream()); } } @@ -1051,8 +1065,12 @@ TEST_IO_STREAM(getMmapPositionNoMmap, "Get a stream Mmap position before mapping ASSERT_RESULT(invalidStateOrNotSupported, stream->stop())) TEST_IO_STREAM(close, "Make sure a stream can be closed", ASSERT_OK(closeStream())) -TEST_IO_STREAM(closeTwice, "Make sure a stream can not be closed twice", ASSERT_OK(closeStream()); - ASSERT_RESULT(Result::INVALID_STATE, closeStream())) +TEST_IO_STREAM(closeTwice, "Make sure a stream can not be closed twice", + auto streamCopy = stream; + ASSERT_OK(closeStream()); + ASSERT_RESULT(Result::INVALID_STATE, streamCopy->close()); + streamCopy.clear(); + waitForStreamDestruction()) static void testCreateTooBigMmapBuffer(IStream* stream) { MmapBufferInfo info; From 2e5961c0cb33a9f7d72e10c3eb0858e29403f99f Mon Sep 17 00:00:00 2001 From: Brian Duddie Date: Mon, 7 Oct 2019 15:53:27 -0700 Subject: [PATCH 10/19] Add sensors 1.0 default HAL to uhid group Ensures it can access /dev/uinput in Android Q, sepolicy permitting. 
Bug: 142105193 Test: confirm hall sensor works again on marlin Change-Id: I585c32d4da4bdc0917068e4d81adeca43d257e56 (cherry picked from commit 82299438b59a1eda41529edb08419cb514247e5e) --- sensors/1.0/default/android.hardware.sensors@1.0-service.rc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc index b54842d66f..db340af5cd 100644 --- a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc +++ b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc @@ -1,6 +1,6 @@ service vendor.sensors-hal-1-0 /vendor/bin/hw/android.hardware.sensors@1.0-service class hal user system - group system wakelock + group system wakelock uhid capabilities BLOCK_SUSPEND rlimit rtprio 10 10 From 09638bb7c83ffdc8bec2280725c1d8afeb700782 Mon Sep 17 00:00:00 2001 From: Jordan Jozwiak Date: Thu, 1 Aug 2019 18:48:03 -0700 Subject: [PATCH 11/19] DO NOT MERGE Add DISTANCE_DISPLAY_UNIT to google VHAL Bug: 138816759 Test: aae app vhal apply google & dump properties Change-Id: I6a4d571975d5fe7ba6419cd4c37b734f21162f41 (cherry picked from commit 2619443e0a8b5bb8ca19c6c5761c8f6a834e4936) Merged-In: I6a4d571975d5fe7ba6419cd4c37b734f21162f41 --- .../vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h index a46de24b16..77053cfb60 100644 --- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h +++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h @@ -545,6 +545,13 @@ const ConfigDeclaration kVehicleProperties[]{ .areaConfigs = {VehicleAreaConfig{.areaId = (0)}}}, .initialValue = {.int32Values = {(int)VehicleUnit::FAHRENHEIT}}}, + {.config = {.prop = toInt(VehicleProperty::DISTANCE_DISPLAY_UNITS), + .access = VehiclePropertyAccess::READ_WRITE, + .changeMode = VehiclePropertyChangeMode::ON_CHANGE, + .configArray = {(int)VehicleUnit::KILOMETER, (int)VehicleUnit::MILE}, + .areaConfigs = {VehicleAreaConfig{.areaId = (0)}}}, + .initialValue = {.int32Values = {(int)VehicleUnit::MILE}}}, + {.config = { .prop = toInt(VehicleProperty::NIGHT_MODE), From 88213e319332ec5063e32db97a8f2f8f466cd268 Mon Sep 17 00:00:00 2001 From: Jordan Jozwiak Date: Thu, 1 Aug 2019 18:48:03 -0700 Subject: [PATCH 12/19] DO NOT MERGE Add DISTANCE_DISPLAY_UNIT to google VHAL Bug: 138816759 Test: aae app vhal apply google & dump properties Change-Id: I6a4d571975d5fe7ba6419cd4c37b734f21162f41 (cherry picked from commit 2619443e0a8b5bb8ca19c6c5761c8f6a834e4936) --- .../vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h index 2c5e5ccac8..8a89322a75 100644 --- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h +++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h @@ -568,6 +568,16 @@ const ConfigDeclaration kVehicleProperties[]{ }, .initialValue = {.int32Values = {(int)VehicleUnit::FAHRENHEIT}}}, + {.config = + { + .prop = toInt(VehicleProperty::DISTANCE_DISPLAY_UNITS), + .access = VehiclePropertyAccess::READ_WRITE, + .changeMode = VehiclePropertyChangeMode::ON_CHANGE, + .configArray = {(int)VehicleUnit::KILOMETER, (int)VehicleUnit::MILE}, + .areaConfigs = {VehicleAreaConfig{.areaId = (0)}} + }, + 
.initialValue = {.int32Values = {(int)VehicleUnit::MILE}}}, + {.config = { .prop = toInt(VehicleProperty::NIGHT_MODE), From 1ed70316c5f420876e120e1e40b93e268dcff12f Mon Sep 17 00:00:00 2001 From: Mikhail Naganov Date: Mon, 4 Nov 2019 13:31:02 -0800 Subject: [PATCH 13/19] DO NOT MERGE: audio: Skip tests if audio HAL service lacks "primary" device Non-default audio service (e.g. MSD) is allowed not to have a "primary" device. In this case tests that require it can be skipped. This is Android P specific version of the change. Bug: 139321356 Bug: 141433379 Test: vts-tradefed run commandAndExit vts -m VtsHalAudioV4_0Target on a device with "msd" audio HAL module Change-Id: I3b999664130013294cebd26976a1b18354926a5e --- .../functional/AudioPrimaryHidlHalTest.cpp | 140 +++++++++++++++--- 1 file changed, 118 insertions(+), 22 deletions(-) diff --git a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp index 71d91db360..545d6c0970 100644 --- a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp +++ b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp @@ -155,6 +155,11 @@ class AudioHidlTest : public HidlTest { protected: // Cache the devicesFactory retrieval to speed up each test by ~0.5s static sp devicesFactory; + + static bool isPrimaryDeviceOptional() { + // It's OK not to have "primary" device on non-default audio HAL service. + return environment->getServiceName() != kDefaultServiceName; + } }; sp AudioHidlTest::devicesFactory; @@ -171,19 +176,7 @@ TEST_F(AudioHidlTest, OpenDeviceInvalidParameter) { ASSERT_TRUE(device == nullptr); } -TEST_F(AudioHidlTest, OpenPrimaryDeviceUsingGetDevice) { - doc::test("Calling openDevice(\"primary\") should return the primary device."); - { - Result result; - sp baseDevice; - ASSERT_OK(devicesFactory->openDevice("primary", returnIn(result, baseDevice))); - ASSERT_OK(result); - ASSERT_TRUE(baseDevice != nullptr); - - Return> primaryDevice = IPrimaryDevice::castFrom(baseDevice); - ASSERT_TRUE(primaryDevice.isOk()); - ASSERT_TRUE(sp(primaryDevice) != nullptr); - } // Destroy local IDevice proxy +static void waitForDeviceDestruction() { // FIXME: there is no way to know when the remote IDevice is being destroyed // Binder does not support testing if an object is alive, thus // wait for 100ms to let the binder destruction propagates and @@ -194,6 +187,25 @@ TEST_F(AudioHidlTest, OpenPrimaryDeviceUsingGetDevice) { usleep(100*1000); } +TEST_F(AudioHidlTest, OpenPrimaryDeviceUsingGetDevice) { + doc::test("Calling openDevice(\"primary\") should return the primary device."); + struct WaitExecutor { + ~WaitExecutor() { waitForDeviceDestruction(); } + } waitExecutor; // Make sure we wait for the device destruction on exiting from the test. 
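+        // The local struct is an RAII guard: its destructor runs on every
+        // exit path from this test, including the early SUCCEED() return
+        // below and any fatal ASSERT_* failure, so the destruction wait
+        // happens exactly once.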
+ Result result; + sp baseDevice; + ASSERT_OK(devicesFactory->openDevice("primary", returnIn(result, baseDevice))); + if (result != Result::OK && isPrimaryDeviceOptional()) { + return SUCCEED() << "No primary device on this factory"; + } + ASSERT_OK(result); + ASSERT_TRUE(baseDevice != nullptr); + + Return> primaryDevice = IPrimaryDevice::castFrom(baseDevice); + ASSERT_TRUE(primaryDevice.isOk()); + ASSERT_TRUE(sp(primaryDevice) != nullptr); +} + ////////////////////////////////////////////////////////////////////////////// /////////////////////////////// openDevice primary /////////////////////////// ////////////////////////////////////////////////////////////////////////////// @@ -204,29 +216,44 @@ class AudioPrimaryHidlTest : public AudioHidlTest { /** Primary HAL test are NOT thread safe. */ void SetUp() override { ASSERT_NO_FATAL_FAILURE(AudioHidlTest::SetUp()); // setup base - if (device == nullptr) { - Result result; - ASSERT_OK(devicesFactory->openPrimaryDevice(returnIn(result, device))); - ASSERT_OK(result); - ASSERT_TRUE(device != nullptr); - - environment->registerTearDown([] { device.clear(); }); + initPrimaryDevice(); + if (device == nullptr && isPrimaryDeviceOptional()) { + return SUCCEED() << "No primary device on this factory"; + } } + ASSERT_TRUE(device != nullptr); } protected: // Cache the device opening to speed up each test by ~0.5s static sp device; + + static void initPrimaryDevice() { + ASSERT_TRUE(devicesFactory != nullptr); + Result result; + ASSERT_OK(devicesFactory->openPrimaryDevice(returnIn(result, device))); + ASSERT_OK(result); + if (device != nullptr) { + environment->registerTearDown([] { device.clear(); }); + } + } }; sp AudioPrimaryHidlTest::device; +#define SKIP_IF_NO_DEVICE \ + if (!device) { \ + doc::partialTest("No primary device on this factory"); \ + return; \ + } \ + TEST_F(AudioPrimaryHidlTest, OpenPrimaryDevice) { doc::test("Test the openDevice (called in SetUp)"); } TEST_F(AudioPrimaryHidlTest, Init) { doc::test("Test that the audio primary hal initialized correctly"); + SKIP_IF_NO_DEVICE; ASSERT_OK(device->initCheck()); } @@ -250,6 +277,7 @@ class AccessorPrimaryHidlTest : public AudioPrimaryHidlTest { void testAccessors(const string& propertyName, const Initial expectedInitial, list valuesToTest, Setter setter, Getter getter, const vector& invalidValues = {}) { + SKIP_IF_NO_DEVICE; const auto expectedResults = {Result::OK, optionality == OPTIONAL ? 
Result::NOT_SUPPORTED : Result::OK}; @@ -332,6 +360,7 @@ class AudioPatchPrimaryHidlTest : public AudioPrimaryHidlTest { TEST_F(AudioPatchPrimaryHidlTest, AudioPatches) { doc::test("Test if audio patches are supported"); + SKIP_IF_NO_DEVICE; if (!areAudioPatchesSupported()) { doc::partialTest("Audio patches are not supported"); return; @@ -431,6 +460,7 @@ class AudioCaptureConfigPrimaryTest : public AudioConfigPrimaryTest, public ::testing::WithParamInterface { protected: void inputBufferSizeTest(const AudioConfig& audioConfig, bool supportRequired) { + SKIP_IF_NO_DEVICE; uint64_t bufferSize; ASSERT_OK(device->getInputBufferSize(audioConfig, returnIn(res, bufferSize))); @@ -487,6 +517,7 @@ INSTANTIATE_TEST_CASE_P( TEST_F(AudioPrimaryHidlTest, setScreenState) { doc::test("Check that the hal can receive the screen state"); + SKIP_IF_NO_DEVICE; for (bool turnedOn : {false, true, true, false, false}) { ASSERT_RESULT(okOrNotSupported, device->setScreenState(turnedOn)); } @@ -498,6 +529,7 @@ TEST_F(AudioPrimaryHidlTest, setScreenState) { TEST_F(AudioPrimaryHidlTest, getParameters) { doc::test("Check that the hal can set and get parameters"); + SKIP_IF_NO_DEVICE; hidl_vec context; hidl_vec keys; hidl_vec values; @@ -513,6 +545,7 @@ TEST_F(AudioPrimaryHidlTest, getParameters) { TEST_F(AudioPrimaryHidlTest, GetMicrophonesTest) { doc::test("Make sure getMicrophones always succeeds"); + SKIP_IF_NO_DEVICE; hidl_vec microphones; ASSERT_OK(device->getMicrophones(returnIn(res, microphones))); ASSERT_OK(res); @@ -628,16 +661,19 @@ static void testDebugDump(DebugDump debugDump) { TEST_F(AudioPrimaryHidlTest, DebugDump) { doc::test("Check that the hal can dump its state without error"); + SKIP_IF_NO_DEVICE; testDebugDump([](const auto& handle) { return device->debug(handle, {/* options */}); }); } TEST_F(AudioPrimaryHidlTest, DebugDumpInvalidArguments) { doc::test("Check that the hal dump doesn't crash on invalid arguments"); + SKIP_IF_NO_DEVICE; ASSERT_OK(device->debug(hidl_handle(), {/* options */})); } TEST_F(AudioPrimaryHidlTest, SetConnectedState) { doc::test("Check that the HAL can be notified of device connection and deconnection"); + SKIP_IF_NO_DEVICE; using AD = AudioDevice; for (auto deviceType : {AD::OUT_HDMI, AD::OUT_WIRED_HEADPHONE, AD::IN_USB_HEADSET}) { SCOPED_TRACE("device=" + ::testing::PrintToString(deviceType)); @@ -654,6 +690,13 @@ TEST_F(AudioPrimaryHidlTest, SetConnectedState) { ASSERT_OK(ret); } } + + // Because there is no way of knowing if the devices were connected before + // calling setConnectedState, there is no way to restore the HAL to its + // initial state. To workaround this, destroy the HAL at the end of this test. 
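+    // `device` is a static shared across all AudioPrimaryHidlTest cases,
+    // so it must be re-opened here for the tests that run after this one.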
+ device.clear(); + waitForDeviceDestruction(); + ASSERT_NO_FATAL_FAILURE(initPrimaryDevice()); } ////////////////////////////////////////////////////////////////////////////// @@ -666,6 +709,7 @@ class OpenStreamTest : public AudioConfigPrimaryTest, protected: template void testOpen(Open openStream, const AudioConfig& config) { + SKIP_IF_NO_DEVICE; // FIXME: Open a stream without an IOHandle // This is not required to be accepted by hal implementations AudioIoHandle ioHandle = (AudioIoHandle)AudioHandleConsts::AUDIO_IO_HANDLE_NONE; @@ -720,6 +764,7 @@ class OpenStreamTest : public AudioConfigPrimaryTest, if (open) { ASSERT_OK(closeStream()); } + AudioConfigPrimaryTest::TearDown(); } protected: @@ -732,8 +777,9 @@ class OpenStreamTest : public AudioConfigPrimaryTest, ////////////////////////////// openOutputStream ////////////////////////////// class OutputStreamTest : public OpenStreamTest { - virtual void SetUp() override { + void SetUp() override { ASSERT_NO_FATAL_FAILURE(OpenStreamTest::SetUp()); // setup base + if (!device && !HasFailure()) return; // do not attempt to use 'device' address.device = AudioDevice::OUT_DEFAULT; const AudioConfig& config = GetParam(); // TODO: test all flag combination @@ -773,8 +819,9 @@ INSTANTIATE_TEST_CASE_P( ////////////////////////////// openInputStream ////////////////////////////// class InputStreamTest : public OpenStreamTest { - virtual void SetUp() override { + void SetUp() override { ASSERT_NO_FATAL_FAILURE(OpenStreamTest::SetUp()); // setup base + if (!device && !HasFailure()) return; // do not attempt to use 'device' address.device = AudioDevice::IN_DEFAULT; const AudioConfig& config = GetParam(); // TODO: test all supported flags and source @@ -825,15 +872,23 @@ static R extract(Return ret) { return ret; } +#define SKIP_IF_NO_STREAM \ + if (!stream) { \ + doc::partialTest("No primary device on this factory"); \ + return; \ + } + /* Could not find a way to write a test for two parametrized class fixure * thus use this macro do duplicate tests for Input and Output stream */ #define TEST_IO_STREAM(test_name, documentation, code) \ TEST_P(InputStreamTest, test_name) { \ doc::test(documentation); \ + SKIP_IF_NO_STREAM; \ code; \ } \ TEST_P(OutputStreamTest, test_name) { \ doc::test(documentation); \ + SKIP_IF_NO_STREAM; \ code; \ } @@ -862,6 +917,7 @@ static void testCapabilityGetter(const string& name, IStream* stream, Return (IStream::*getter)(), Return (IStream::*setter)(Property), bool currentMustBeSupported = true) { + SKIP_IF_NO_STREAM; hidl_vec capabilities; auto ret = capablityGetter(stream, capabilities); if (ret == Result::NOT_SUPPORTED) { @@ -931,6 +987,7 @@ TEST_IO_STREAM(SupportedFormat, "Check that the stream format is declared as sup &IStream::getFormat, &IStream::setFormat)) static void testGetDevices(IStream* stream, AudioDevice expectedDevice) { + SKIP_IF_NO_STREAM; hidl_vec devices; Result res; ASSERT_OK(stream->getDevices(returnIn(res, devices))); @@ -950,6 +1007,7 @@ TEST_IO_STREAM(GetDevices, "Check that the stream device == the one it was opene : testGetDevices(stream.get(), address.device)) static void testSetDevices(IStream* stream, const DeviceAddress& address) { + SKIP_IF_NO_STREAM; DeviceAddress otherAddress = address; otherAddress.device = (address.device & AudioDevice::BIT_IN) == 0 ? 
AudioDevice::OUT_SPEAKER : AudioDevice::IN_BUILTIN_MIC; @@ -963,6 +1021,7 @@ TEST_IO_STREAM(SetDevices, "Check that the stream can be rerouted to SPEAKER or : testSetDevices(stream.get(), address)) static void testGetAudioProperties(IStream* stream, AudioConfig expectedConfig) { + SKIP_IF_NO_STREAM; uint32_t sampleRateHz; hidl_bitfield mask; AudioFormat format; @@ -984,6 +1043,7 @@ TEST_IO_STREAM(SetHwAvSync, "Try to set hardware sync to an invalid value", ASSERT_RESULT(okOrNotSupportedOrInvalidArgs, stream->setHwAvSync(666))) static void checkGetHwAVSync(IDevice* device) { + SKIP_IF_NO_DEVICE; Result res; AudioHwSync sync; ASSERT_OK(device->getHwAvSync(returnIn(res, sync))); @@ -996,6 +1056,7 @@ TEST_IO_STREAM(GetHwAvSync, "Get hardware sync can not fail", checkGetHwAVSync(d static void checkGetNoParameter(IStream* stream, hidl_vec keys, initializer_list expectedResults) { + SKIP_IF_NO_STREAM; hidl_vec context; hidl_vec parameters; Result res; @@ -1073,6 +1134,7 @@ TEST_IO_STREAM(closeTwice, "Make sure a stream can not be closed twice", waitForStreamDestruction()) static void testCreateTooBigMmapBuffer(IStream* stream) { + SKIP_IF_NO_STREAM; MmapBufferInfo info; Result res; // Assume that int max is a value too big to be allocated @@ -1087,6 +1149,7 @@ TEST_IO_STREAM(CreateTooBigMmapBuffer, "Create mmap buffer too big should fail", testCreateTooBigMmapBuffer(stream.get())) static void testGetMmapPositionOfNonMmapedStream(IStream* stream) { + SKIP_IF_NO_STREAM; Result res; MmapPosition position; ASSERT_OK(stream->getMmapPosition(returnIn(res, position))); @@ -1103,6 +1166,7 @@ TEST_IO_STREAM(GetMmapPositionOfNonMmapedStream, TEST_P(InputStreamTest, GetAudioSource) { doc::test("Retrieving the audio source of an input stream should always succeed"); + SKIP_IF_NO_STREAM; AudioSource source; ASSERT_OK(stream->getAudioSource(returnIn(res, source))); if (res == Result::NOT_SUPPORTED) { @@ -1137,12 +1201,14 @@ static void testOptionalUnitaryGain(std::function(float)> setGain TEST_P(InputStreamTest, SetGain) { doc::test("The gain of an input stream should only be set between [0,1]"); + SKIP_IF_NO_STREAM; testOptionalUnitaryGain([this](float volume) { return stream->setGain(volume); }, "InputStream::setGain"); } static void testPrepareForReading(IStreamIn* stream, uint32_t frameSize, uint32_t framesCount) { Result res; + SKIP_IF_NO_STREAM; // Ignore output parameters as the call should fail ASSERT_OK(stream->prepareForReading(frameSize, framesCount, [&res](auto r, auto&, auto&, auto&, auto&) { res = r; })); @@ -1151,11 +1217,13 @@ static void testPrepareForReading(IStreamIn* stream, uint32_t frameSize, uint32_ TEST_P(InputStreamTest, PrepareForReadingWithZeroBuffer) { doc::test("Preparing a stream for reading with a 0 sized buffer should fail"); + SKIP_IF_NO_STREAM; testPrepareForReading(stream.get(), 0, 0); } TEST_P(InputStreamTest, PrepareForReadingWithHugeBuffer) { doc::test("Preparing a stream for reading with a 2^32 sized buffer should fail"); + SKIP_IF_NO_STREAM; testPrepareForReading(stream.get(), 1, std::numeric_limits::max()); } @@ -1163,12 +1231,14 @@ TEST_P(InputStreamTest, PrepareForReadingCheckOverflow) { doc::test( "Preparing a stream for reading with a overflowing sized buffer should " "fail"); + SKIP_IF_NO_STREAM; auto uintMax = std::numeric_limits::max(); testPrepareForReading(stream.get(), uintMax, uintMax); } TEST_P(InputStreamTest, GetInputFramesLost) { doc::test("The number of frames lost on a never started stream should be 0"); + SKIP_IF_NO_STREAM; auto ret = 
stream->getInputFramesLost(); ASSERT_IS_OK(ret); uint32_t framesLost{ret}; @@ -1179,6 +1249,7 @@ TEST_P(InputStreamTest, getCapturePosition) { doc::test( "The capture position of a non prepared stream should not be " "retrievable or 0"); + SKIP_IF_NO_STREAM; uint64_t frames; uint64_t time; ASSERT_OK(stream->getCapturePosition(returnIn(res, frames, time))); @@ -1191,6 +1262,7 @@ TEST_P(InputStreamTest, getCapturePosition) { TEST_P(InputStreamTest, updateSinkMetadata) { doc::test("The HAL should not crash on metadata change"); + SKIP_IF_NO_STREAM; hidl_enum_iterator range; // Test all possible track configuration @@ -1217,6 +1289,7 @@ TEST_P(InputStreamTest, updateSinkMetadata) { TEST_P(OutputStreamTest, getLatency) { doc::test("Make sure latency is over 0"); + SKIP_IF_NO_STREAM; auto result = stream->getLatency(); ASSERT_IS_OK(result); ASSERT_GT(result, 0U); @@ -1224,12 +1297,14 @@ TEST_P(OutputStreamTest, getLatency) { TEST_P(OutputStreamTest, setVolume) { doc::test("Try to set the output volume"); + SKIP_IF_NO_STREAM; testOptionalUnitaryGain([this](float volume) { return stream->setVolume(volume, volume); }, "setVolume"); } static void testPrepareForWriting(IStreamOut* stream, uint32_t frameSize, uint32_t framesCount) { Result res; + SKIP_IF_NO_STREAM; // Ignore output parameters as the call should fail ASSERT_OK(stream->prepareForWriting(frameSize, framesCount, [&res](auto r, auto&, auto&, auto&, auto&) { res = r; })); @@ -1238,11 +1313,13 @@ static void testPrepareForWriting(IStreamOut* stream, uint32_t frameSize, uint32 TEST_P(OutputStreamTest, PrepareForWriteWithZeroBuffer) { doc::test("Preparing a stream for writing with a 0 sized buffer should fail"); + SKIP_IF_NO_STREAM; testPrepareForWriting(stream.get(), 0, 0); } TEST_P(OutputStreamTest, PrepareForWriteWithHugeBuffer) { doc::test("Preparing a stream for writing with a 2^32 sized buffer should fail"); + SKIP_IF_NO_STREAM; testPrepareForWriting(stream.get(), 1, std::numeric_limits::max()); } @@ -1250,6 +1327,7 @@ TEST_P(OutputStreamTest, PrepareForWritingCheckOverflow) { doc::test( "Preparing a stream for writing with a overflowing sized buffer should " "fail"); + SKIP_IF_NO_STREAM; auto uintMax = std::numeric_limits::max(); testPrepareForWriting(stream.get(), uintMax, uintMax); } @@ -1270,6 +1348,7 @@ struct Capability { TEST_P(OutputStreamTest, SupportsPauseAndResumeAndDrain) { doc::test("Implementation must expose pause, resume and drain capabilities"); + SKIP_IF_NO_STREAM; Capability(stream.get()); } @@ -1288,6 +1367,7 @@ static void checkInvalidStateOr0(Result res, Value value) { TEST_P(OutputStreamTest, GetRenderPosition) { doc::test("A new stream render position should be 0 or INVALID_STATE"); + SKIP_IF_NO_STREAM; uint32_t dspFrames; ASSERT_OK(stream->getRenderPosition(returnIn(res, dspFrames))); if (res == Result::NOT_SUPPORTED) { @@ -1299,6 +1379,7 @@ TEST_P(OutputStreamTest, GetRenderPosition) { TEST_P(OutputStreamTest, GetNextWriteTimestamp) { doc::test("A new stream next write timestamp should be 0 or INVALID_STATE"); + SKIP_IF_NO_STREAM; uint64_t timestampUs; ASSERT_OK(stream->getNextWriteTimestamp(returnIn(res, timestampUs))); if (res == Result::NOT_SUPPORTED) { @@ -1316,6 +1397,7 @@ class MockOutCallbacks : public IStreamOutCallback { }; static bool isAsyncModeSupported(IStreamOut* stream) { + if (!stream) return false; auto res = stream->setCallback(new MockOutCallbacks); stream->clearCallback(); // try to restore the no callback state, ignore // any error @@ -1352,6 +1434,7 @@ TEST_P(OutputStreamTest, Resume) { 
doc::test( "If supported, a stream should fail to resume if not previously " "paused"); + SKIP_IF_NO_STREAM; if (!Capability(stream.get()).resume) { doc::partialTest("The output stream does not support resume"); return; @@ -1363,6 +1446,7 @@ TEST_P(OutputStreamTest, Pause) { doc::test( "If supported, a stream should fail to pause if not previously " "started"); + SKIP_IF_NO_STREAM; if (!Capability(stream.get()).pause) { doc::partialTest("The output stream does not support pause"); return; @@ -1380,16 +1464,19 @@ static void testDrain(IStreamOut* stream, AudioDrain type) { TEST_P(OutputStreamTest, DrainAll) { doc::test("If supported, a stream should always succeed to drain"); + SKIP_IF_NO_STREAM; testDrain(stream.get(), AudioDrain::ALL); } TEST_P(OutputStreamTest, DrainEarlyNotify) { doc::test("If supported, a stream should always succeed to drain"); + SKIP_IF_NO_STREAM; testDrain(stream.get(), AudioDrain::EARLY_NOTIFY); } TEST_P(OutputStreamTest, FlushStop) { doc::test("If supported, a stream should always succeed to flush"); + SKIP_IF_NO_STREAM; auto ret = stream->flush(); ASSERT_IS_OK(ret); if (ret == Result::NOT_SUPPORTED) { @@ -1403,6 +1490,7 @@ TEST_P(OutputStreamTest, GetPresentationPositionStop) { doc::test( "If supported, a stream should always succeed to retrieve the " "presentation position"); + SKIP_IF_NO_STREAM; uint64_t frames; TimeSpec mesureTS; ASSERT_OK(stream->getPresentationPosition(returnIn(res, frames, mesureTS))); @@ -1430,11 +1518,13 @@ TEST_P(OutputStreamTest, GetPresentationPositionStop) { TEST_P(OutputStreamTest, SelectPresentation) { doc::test("Verify that presentation selection does not crash"); + SKIP_IF_NO_STREAM; ASSERT_RESULT(okOrNotSupported, stream->selectPresentation(0, 0)); } TEST_P(OutputStreamTest, updateSourceMetadata) { doc::test("The HAL should not crash on metadata change"); + SKIP_IF_NO_STREAM; hidl_enum_iterator usageRange; hidl_enum_iterator contentRange; @@ -1470,11 +1560,13 @@ TEST_P(OutputStreamTest, updateSourceMetadata) { TEST_F(AudioPrimaryHidlTest, setVoiceVolume) { doc::test("Make sure setVoiceVolume only succeed if volume is in [0,1]"); + SKIP_IF_NO_DEVICE; testUnitaryGain([](float volume) { return device->setVoiceVolume(volume); }); } TEST_F(AudioPrimaryHidlTest, setMode) { doc::test("Make sure setMode always succeeds if mode is valid and fails otherwise"); + SKIP_IF_NO_DEVICE; // Test Invalid values for (int mode : {-2, -1, int(AudioMode::IN_COMMUNICATION) + 1}) { ASSERT_RESULT(Result::INVALID_ARGUMENTS, device->setMode(AudioMode(mode))) @@ -1491,6 +1583,7 @@ TEST_F(AudioPrimaryHidlTest, setBtHfpSampleRate) { doc::test( "Make sure setBtHfpSampleRate either succeeds or " "indicates that it is not supported at all, or that the provided value is invalid"); + SKIP_IF_NO_DEVICE; for (auto samplingRate : {8000, 16000, 22050, 24000}) { ASSERT_RESULT(okOrNotSupportedOrInvalidArgs, device->setBtHfpSampleRate(samplingRate)); } @@ -1500,6 +1593,7 @@ TEST_F(AudioPrimaryHidlTest, setBtHfpVolume) { doc::test( "Make sure setBtHfpVolume is either not supported or " "only succeed if volume is in [0,1]"); + SKIP_IF_NO_DEVICE; auto ret = device->setBtHfpVolume(0.0); ASSERT_TRUE(ret.isOk()); if (ret == Result::NOT_SUPPORTED) { @@ -1513,11 +1607,13 @@ TEST_F(AudioPrimaryHidlTest, setBtScoHeadsetDebugName) { doc::test( "Make sure setBtScoHeadsetDebugName either succeeds or " "indicates that it is not supported"); + SKIP_IF_NO_DEVICE; ASSERT_RESULT(okOrNotSupported, device->setBtScoHeadsetDebugName("test")); } TEST_F(AudioPrimaryHidlTest, updateRotation) { 
doc::test("Check that the hal can receive the current rotation"); + SKIP_IF_NO_DEVICE; for (Rotation rotation : {Rotation::DEG_0, Rotation::DEG_90, Rotation::DEG_180, Rotation::DEG_270, Rotation::DEG_0}) { ASSERT_RESULT(okOrNotSupported, device->updateRotation(rotation)); From 8dd7db3204fb962a555775176a1c498978ec530c Mon Sep 17 00:00:00 2001 From: Alistair Delva Date: Thu, 3 Oct 2019 06:48:29 -0700 Subject: [PATCH 14/19] Merge "Make gralloc0 pass-through mapper handle layers" am: b756e23e5c am: 6982d1aec5 am: 6c3213c940 am: afc577e108 Change-Id: I54f2e01bf8ffe43dbe786a73cfd7818661230090 (cherry picked from commit fc8d43692ca2d42c4de724e7992d2ca5478b81f2) --- .../passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h b/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h index 18fbb6d035..8540068a48 100644 --- a/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h +++ b/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h @@ -37,6 +37,10 @@ class Gralloc0HalImpl : public V2_0::passthrough::detail::Gralloc0HalImpl { Error validateBufferSize(const native_handle_t* bufferHandle, const IMapper::BufferDescriptorInfo& descriptorInfo, uint32_t stride) override { + if (descriptorInfo.layerCount != 1) { + return Error::BAD_VALUE; + } + if (!mModule->validateBufferSize) { return Error::NONE; } From d79abbb0d359dd037e6d0b4cc022fd26cea3ebaa Mon Sep 17 00:00:00 2001 From: Robert Shih Date: Sun, 17 Nov 2019 23:54:21 -0800 Subject: [PATCH 15/19] default drm hidl: Fix decrypt destination base ptr Bug: 144351324 Test: poc_CryptoPlugin_155 Change-Id: Id5d221cd6978d55c46c0368aceb10c1d2f559fd9 --- drm/1.0/default/CryptoPlugin.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/drm/1.0/default/CryptoPlugin.cpp b/drm/1.0/default/CryptoPlugin.cpp index 6626c0172f..c9383ff0fc 100644 --- a/drm/1.0/default/CryptoPlugin.cpp +++ b/drm/1.0/default/CryptoPlugin.cpp @@ -143,6 +143,7 @@ namespace implementation { return Void(); } + base = static_cast(static_cast(destBase->getPointer())); destPtr = static_cast(base + destination.nonsecureMemory.offset); } else if (destination.type == BufferType::NATIVE_HANDLE) { if (!secure) { From dbce8697bd270b7144e9c43db44b0bca7b3eec16 Mon Sep 17 00:00:00 2001 From: Prachi Hande Date: Thu, 14 Nov 2019 17:57:44 -0800 Subject: [PATCH 16/19] VmsUtils: Add new methods to parse availability state messages These methods are needed in the HAL client to parse sequence number and associated layers when the availablity of the VMS layers change. HAL client relies on these messages by either explicitly requesting the availability or through availability change message sent by the Car service. Bug: 144434783 Fixes: 144434783 Test: Added new tests for the new methods. Ran the tests on Hawk. 
Change-Id: I09497640367a894e1dfb6143ac3bbdb63d64b53e --- .../common/include/vhal_v2_0/VmsUtils.h | 26 +++- .../2.0/default/common/src/VmsUtils.cpp | 114 ++++++++++++--- .../2.0/default/tests/VmsUtils_test.cpp | 134 +++++++++++++++++- 3 files changed, 247 insertions(+), 27 deletions(-) diff --git a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h index 8ee3c545dc..f8b10cacf8 100644 --- a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h +++ b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h @@ -61,7 +61,7 @@ struct VmsLayer { struct VmsLayerAndPublisher { VmsLayerAndPublisher(VmsLayer layer, int publisher_id) - : layer(layer), publisher_id(publisher_id) {} + : layer(std::move(layer)), publisher_id(publisher_id) {} VmsLayer layer; int publisher_id; }; @@ -69,6 +69,8 @@ struct VmsLayerAndPublisher { // A VmsAssociatedLayer is used by subscribers to specify which publisher IDs // are acceptable for a given layer. struct VmsAssociatedLayer { + VmsAssociatedLayer(VmsLayer layer, std::vector publisher_ids) + : layer(std::move(layer)), publisher_ids(std::move(publisher_ids)) {} VmsLayer layer; std::vector publisher_ids; }; @@ -77,7 +79,7 @@ struct VmsAssociatedLayer { // its dependencies. Dependencies can be empty. struct VmsLayerOffering { VmsLayerOffering(VmsLayer layer, std::vector dependencies) - : layer(layer), dependencies(dependencies) {} + : layer(std::move(layer)), dependencies(std::move(dependencies)) {} VmsLayerOffering(VmsLayer layer) : layer(layer), dependencies() {} VmsLayer layer; std::vector dependencies; @@ -87,7 +89,7 @@ struct VmsLayerOffering { // with the specified publisher ID. struct VmsOffers { VmsOffers(int publisher_id, std::vector offerings) - : publisher_id(publisher_id), offerings(offerings) {} + : publisher_id(publisher_id), offerings(std::move(offerings)) {} int publisher_id; std::vector offerings; }; @@ -231,6 +233,24 @@ VmsSessionStatus parseStartSessionMessage(const VehiclePropValue& start_session, const int current_service_id, const int current_client_id, int* new_service_id); +// Returns true if the new sequence number of the availability state message is greater than +// the last seen availability sequence number. +bool isAvailabilitySequenceNumberNewer(const VehiclePropValue& availability_state, + const int last_seen_availability_sequence_number); + +// Returns sequence number of the availability state message. +int32_t getSequenceNumberForAvailabilityState(const VehiclePropValue& availability_state); + +// Takes a availability state message and returns the associated layers that are +// available to publish data. +// +// A subscriber can use this function when receiving an availability response or availability +// change message to determine which associated layers are ready to publish data. +// The caller of this function can optionally decide to not consume these layers +// if the availability change has the sequence number less than the last seen +// sequence number. 
+std::vector getAvailableLayers(const VehiclePropValue& availability_state); + } // namespace vms } // namespace V2_0 } // namespace vehicle diff --git a/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp b/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp index 9eba905901..a65cded261 100644 --- a/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp +++ b/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp @@ -219,12 +219,9 @@ std::vector getSubscribedLayers(const VehiclePropValue& subscriptions_ if (isValidVmsMessage(subscriptions_state) && (parseMessageType(subscriptions_state) == VmsMessageType::SUBSCRIPTIONS_CHANGE || parseMessageType(subscriptions_state) == VmsMessageType::SUBSCRIPTIONS_RESPONSE) && - subscriptions_state.value.int32Values.size() > kSubscriptionStateSequenceNumberIndex) { - const int32_t num_of_layers = subscriptions_state.value.int32Values[toInt( - VmsSubscriptionsStateIntegerValuesIndex::NUMBER_OF_LAYERS)]; - const int32_t num_of_associated_layers = subscriptions_state.value.int32Values[toInt( - VmsSubscriptionsStateIntegerValuesIndex ::NUMBER_OF_ASSOCIATED_LAYERS)]; - + subscriptions_state.value.int32Values.size() > + toInt(VmsSubscriptionsStateIntegerValuesIndex::NUMBER_OF_LAYERS)) { + int subscriptions_state_int_size = subscriptions_state.value.int32Values.size(); std::unordered_set offered_layers; for (const auto& offer : offers.offerings) { offered_layers.insert(offer.layer); @@ -232,33 +229,52 @@ std::vector getSubscribedLayers(const VehiclePropValue& subscriptions_ std::vector subscribed_layers; int current_index = toInt(VmsSubscriptionsStateIntegerValuesIndex::SUBSCRIPTIONS_START); + // Add all subscribed layers which are offered by the current publisher. + const int32_t num_of_layers = subscriptions_state.value.int32Values[toInt( + VmsSubscriptionsStateIntegerValuesIndex::NUMBER_OF_LAYERS)]; for (int i = 0; i < num_of_layers; i++) { + if (subscriptions_state_int_size < current_index + kLayerSize) { + return {}; + } VmsLayer layer = VmsLayer(subscriptions_state.value.int32Values[current_index], subscriptions_state.value.int32Values[current_index + 1], subscriptions_state.value.int32Values[current_index + 2]); if (offered_layers.find(layer) != offered_layers.end()) { - subscribed_layers.push_back(layer); + subscribed_layers.push_back(std::move(layer)); } current_index += kLayerSize; } + // Add all subscribed associated layers which are offered by the current publisher. // For this, we need to check if the associated layer has a publisher ID which is // same as that of the current publisher. 
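        // The rewritten loop below guards every read of int32Values with a
        // bounds check against subscriptions_state_int_size, so a truncated
        // or malformed message is rejected instead of causing an
        // out-of-bounds read.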
- for (int i = 0; i < num_of_associated_layers; i++) { - VmsLayer layer = VmsLayer(subscriptions_state.value.int32Values[current_index], - subscriptions_state.value.int32Values[current_index + 1], - subscriptions_state.value.int32Values[current_index + 2]); - current_index += kLayerSize; - if (offered_layers.find(layer) != offered_layers.end()) { - int32_t num_of_publisher_ids = subscriptions_state.value.int32Values[current_index]; - current_index++; - for (int j = 0; j < num_of_publisher_ids; j++) { - if (subscriptions_state.value.int32Values[current_index] == - offers.publisher_id) { - subscribed_layers.push_back(layer); - } + if (subscriptions_state_int_size > + toInt(VmsSubscriptionsStateIntegerValuesIndex::NUMBER_OF_ASSOCIATED_LAYERS)) { + const int32_t num_of_associated_layers = subscriptions_state.value.int32Values[toInt( + VmsSubscriptionsStateIntegerValuesIndex::NUMBER_OF_ASSOCIATED_LAYERS)]; + + for (int i = 0; i < num_of_associated_layers; i++) { + if (subscriptions_state_int_size < current_index + kLayerSize) { + return {}; + } + VmsLayer layer = VmsLayer(subscriptions_state.value.int32Values[current_index], + subscriptions_state.value.int32Values[current_index + 1], + subscriptions_state.value.int32Values[current_index + 2]); + current_index += kLayerSize; + if (offered_layers.find(layer) != offered_layers.end() && + subscriptions_state_int_size > current_index) { + int32_t num_of_publisher_ids = + subscriptions_state.value.int32Values[current_index]; current_index++; + for (int j = 0; j < num_of_publisher_ids; j++) { + if (subscriptions_state_int_size > current_index && + subscriptions_state.value.int32Values[current_index] == + offers.publisher_id) { + subscribed_layers.push_back(std::move(layer)); + } + current_index++; + } } } } @@ -300,6 +316,64 @@ VmsSessionStatus parseStartSessionMessage(const VehiclePropValue& start_session, return VmsSessionStatus::kInvalidMessage; } +bool isAvailabilitySequenceNumberNewer(const VehiclePropValue& availability_state, + const int last_seen_availability_sequence_number) { + return (isValidVmsMessage(availability_state) && + (parseMessageType(availability_state) == VmsMessageType::AVAILABILITY_CHANGE || + parseMessageType(availability_state) == VmsMessageType::AVAILABILITY_RESPONSE) && + availability_state.value.int32Values.size() > kAvailabilitySequenceNumberIndex && + availability_state.value.int32Values[kAvailabilitySequenceNumberIndex] > + last_seen_availability_sequence_number); +} + +int32_t getSequenceNumberForAvailabilityState(const VehiclePropValue& availability_state) { + if (isValidVmsMessage(availability_state) && + (parseMessageType(availability_state) == VmsMessageType::AVAILABILITY_CHANGE || + parseMessageType(availability_state) == VmsMessageType::AVAILABILITY_RESPONSE) && + availability_state.value.int32Values.size() > kAvailabilitySequenceNumberIndex) { + return availability_state.value.int32Values[kAvailabilitySequenceNumberIndex]; + } + return -1; +} + +std::vector getAvailableLayers(const VehiclePropValue& availability_state) { + if (isValidVmsMessage(availability_state) && + (parseMessageType(availability_state) == VmsMessageType::AVAILABILITY_CHANGE || + parseMessageType(availability_state) == VmsMessageType::AVAILABILITY_RESPONSE) && + availability_state.value.int32Values.size() > + toInt(VmsAvailabilityStateIntegerValuesIndex::NUMBER_OF_ASSOCIATED_LAYERS)) { + int availability_state_int_size = availability_state.value.int32Values.size(); + const int32_t num_of_associated_layers = 
availability_state.value.int32Values[toInt( + VmsAvailabilityStateIntegerValuesIndex::NUMBER_OF_ASSOCIATED_LAYERS)]; + int current_index = toInt(VmsAvailabilityStateIntegerValuesIndex::LAYERS_START); + std::vector available_layers; + for (int i = 0; i < num_of_associated_layers; i++) { + if (availability_state_int_size < current_index + kLayerSize) { + return {}; + } + VmsLayer layer = VmsLayer(availability_state.value.int32Values[current_index], + availability_state.value.int32Values[current_index + 1], + availability_state.value.int32Values[current_index + 2]); + current_index += kLayerSize; + std::vector publisher_ids; + if (availability_state_int_size > current_index) { + int32_t num_of_publisher_ids = availability_state.value.int32Values[current_index]; + current_index++; + for (int j = 0; j < num_of_publisher_ids; j++) { + if (availability_state_int_size > current_index) { + publisher_ids.push_back( + availability_state.value.int32Values[current_index]); + current_index++; + } + } + } + available_layers.emplace_back(layer, std::move(publisher_ids)); + } + return available_layers; + } + return {}; +} + } // namespace vms } // namespace V2_0 } // namespace vehicle diff --git a/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp b/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp index 8b547f1733..a48d19ca58 100644 --- a/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp +++ b/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp @@ -279,7 +279,7 @@ void testSubscribedLayers(VmsMessageType type) { VmsOffers offers = {123, {VmsLayerOffering(VmsLayer(1, 0, 1), {VmsLayer(4, 1, 1)}), VmsLayerOffering(VmsLayer(2, 0, 1))}}; - auto message = createBaseVmsMessage(2); + auto message = createBaseVmsMessage(16); message->value.int32Values = hidl_vec{toInt(type), 1234, // sequence number 2, // number of layers @@ -308,9 +308,28 @@ TEST(VmsUtilsTest, subscribedLayersForResponse) { testSubscribedLayers(VmsMessageType::SUBSCRIPTIONS_RESPONSE); } +void testGetSubscribedLayersMalformedData(VmsMessageType type) { + VmsOffers offers = {123, + {VmsLayerOffering(VmsLayer(1, 0, 1), {VmsLayer(4, 1, 1)}), + VmsLayerOffering(VmsLayer(2, 0, 1))}}; + auto message = createBaseVmsMessage(2); + message->value.int32Values = hidl_vec{toInt(type), 1234}; // sequence number + EXPECT_TRUE(isValidVmsMessage(*message)); + auto result = getSubscribedLayers(*message, offers); + EXPECT_EQ(static_cast(result.size()), 0); +} + +TEST(VmsUtilsTest, subscribedLayersForMalformedChange) { + testGetSubscribedLayersMalformedData(VmsMessageType::SUBSCRIPTIONS_CHANGE); +} + +TEST(VmsUtilsTest, subscribedLayersForMalformedResponse) { + testGetSubscribedLayersMalformedData(VmsMessageType::SUBSCRIPTIONS_RESPONSE); +} + void testSubscribedLayersWithDifferentSubtype(VmsMessageType type) { VmsOffers offers = {123, {VmsLayerOffering(VmsLayer(1, 0, 1))}}; - auto message = createBaseVmsMessage(2); + auto message = createBaseVmsMessage(7); message->value.int32Values = hidl_vec{toInt(type), 1234, // sequence number 1, // number of layers @@ -332,7 +351,7 @@ TEST(VmsUtilsTest, subscribedLayersWithDifferentSubtypeForResponse) { void subscribedLayersWithDifferentVersion(VmsMessageType type) { VmsOffers offers = {123, {VmsLayerOffering(VmsLayer(1, 0, 1))}}; - auto message = createBaseVmsMessage(2); + auto message = createBaseVmsMessage(7); message->value.int32Values = hidl_vec{toInt(type), 1234, // sequence number 1, // number of layers @@ -353,7 +372,7 @@ TEST(VmsUtilsTest, subscribedLayersWithDifferentVersionForResponse) { void 
subscribedLayersWithDifferentPublisherId(VmsMessageType type) { VmsOffers offers = {123, {VmsLayerOffering(VmsLayer(1, 0, 1))}}; - auto message = createBaseVmsMessage(2); + auto message = createBaseVmsMessage(9); message->value.int32Values = hidl_vec{toInt(type), 1234, // sequence number 0, // number of layers @@ -475,6 +494,113 @@ TEST(VmsUtilsTest, startSessionInvalidMessageFormat) { EXPECT_EQ(new_service_id, 123); } +TEST(VmsUtilsTest, newAvailabilitySequenceNumberForExistingSmallerNumberForChange) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_CHANGE), 1234}; + EXPECT_TRUE(isAvailabilitySequenceNumberNewer(*message, 1233)); +} + +TEST(VmsUtilsTest, newAvailabilitySequenceNumberForExistingSmallerNumberForResponse) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_RESPONSE), 1234}; + EXPECT_TRUE(isAvailabilitySequenceNumberNewer(*message, 1233)); +} + +TEST(VmsUtilsTest, newAvailabilitySequenceNumberForExistingGreaterNumberForChange) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_CHANGE), 1234}; + EXPECT_FALSE(isAvailabilitySequenceNumberNewer(*message, 1235)); +} + +TEST(VmsUtilsTest, newAvailabilitySequenceNumberForExistingGreaterNumberForResponse) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_RESPONSE), 1234}; + EXPECT_FALSE(isAvailabilitySequenceNumberNewer(*message, 1235)); +} + +TEST(VmsUtilsTest, newAvailabilitySequenceNumberForSameNumberForChange) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_CHANGE), 1234}; + EXPECT_FALSE(isAvailabilitySequenceNumberNewer(*message, 1234)); +} + +TEST(VmsUtilsTest, newAvailabilitySequenceNumberForSameNumberForResponse) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_RESPONSE), 1234}; + EXPECT_FALSE(isAvailabilitySequenceNumberNewer(*message, 1234)); +} + +TEST(VmsUtilsTest, validSequenceNumberForAvailabilityChange) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_CHANGE), 1234}; + EXPECT_EQ(getSequenceNumberForAvailabilityState(*message), 1234); +} + +TEST(VmsUtilsTest, validSequenceNumberForAvailabilityResponse) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = + hidl_vec{toInt(VmsMessageType::AVAILABILITY_RESPONSE), 1234}; + EXPECT_EQ(getSequenceNumberForAvailabilityState(*message), 1234); +} + +TEST(VmsUtilsTest, invalidAvailabilityState) { + auto message = createBaseVmsMessage(1); + EXPECT_EQ(getSequenceNumberForAvailabilityState(*message), -1); +} + +void testGetAvailableLayers(VmsMessageType type) { + auto message = createBaseVmsMessage(13); + message->value.int32Values = hidl_vec{toInt(type), + 1234, // sequence number + 2, // number of associated layers + 1, // associated layer 1 + 0, 1, + 2, // number of publisher IDs + 111, // publisher IDs + 123, + 2, // associated layer 2 + 0, 1, 0}; // number of publisher IDs + EXPECT_TRUE(isValidVmsMessage(*message)); + auto result = getAvailableLayers(*message); + EXPECT_EQ(static_cast(result.size()), 2); + EXPECT_EQ(result.at(0).layer, VmsLayer(1, 0, 1)); + EXPECT_EQ(result.at(0).publisher_ids.at(0), 111); + EXPECT_EQ(result.at(0).publisher_ids.at(1), 123); + 
EXPECT_EQ(result.at(1).layer, VmsLayer(2, 0, 1)); + EXPECT_EQ(static_cast(result.at(1).publisher_ids.size()), 0); +} + +TEST(VmsUtilsTest, availableLayersForChange) { + testGetAvailableLayers(VmsMessageType::AVAILABILITY_CHANGE); +} + +TEST(VmsUtilsTest, availableLayersForResponse) { + testGetAvailableLayers(VmsMessageType::AVAILABILITY_RESPONSE); +} + +void testGetAvailableLayersMalformedData(VmsMessageType type) { + auto message = createBaseVmsMessage(2); + message->value.int32Values = hidl_vec{toInt(type), 1234}; // sequence number + EXPECT_TRUE(isValidVmsMessage(*message)); + auto result = getAvailableLayers(*message); + EXPECT_EQ(static_cast(result.size()), 0); +} + +TEST(VmsUtilsTest, availableLayersForMalformedChange) { + testGetAvailableLayersMalformedData(VmsMessageType::AVAILABILITY_CHANGE); +} + +TEST(VmsUtilsTest, availableLayersForMalformedResponse) { + testGetAvailableLayersMalformedData(VmsMessageType::AVAILABILITY_RESPONSE); +} + } // namespace } // namespace vms From e6111852bc79d903848c72c353be7bde0e1e95f2 Mon Sep 17 00:00:00 2001 From: Mikhail Naganov Date: Tue, 26 Nov 2019 18:56:21 -0800 Subject: [PATCH 17/19] DO NOT MERGE: Audio HAL: do not test input stream if no Built-in mic on primary The test used to always test input stream, assuming that all devices had built-in device on the primary Module. Nevertheless, although uncommon, the mic could be on any module or even not exist. This patch makes sure that the input stream tests are only run if there is a Built-in mic on the primary module. This patch also fixes GetMicrophonesTest to accept NOT_SUPPORTED result. This patch is specific for Android P. Later versions already have these fixes. Bug: 114303641 Test: atest VtsHalAudioV4_0TargetTest on device with a built-in mic and on a device w/o Change-Id: I7289724e5a73c1ffd09ca990f681844bdc8f6b3e --- .../utility/include/utility/ValidateXml.h | 7 ++ .../test/utility/src/ValidateXml.cpp | 73 +++++++++++++++---- audio/core/4.0/vts/functional/Android.bp | 1 + .../functional/AudioPolicyConfiguration.cpp | 26 +++++++ .../vts/functional/AudioPolicyConfiguration.h | 22 ++++++ .../functional/AudioPrimaryHidlHalTest.cpp | 29 ++++++++ .../functional/ValidateAudioConfiguration.cpp | 7 +- 7 files changed, 148 insertions(+), 17 deletions(-) create mode 100644 audio/core/4.0/vts/functional/AudioPolicyConfiguration.cpp create mode 100644 audio/core/4.0/vts/functional/AudioPolicyConfiguration.h diff --git a/audio/common/all-versions/test/utility/include/utility/ValidateXml.h b/audio/common/all-versions/test/utility/include/utility/ValidateXml.h index 91adfc12c8..4abd3fa8e1 100644 --- a/audio/common/all-versions/test/utility/include/utility/ValidateXml.h +++ b/audio/common/all-versions/test/utility/include/utility/ValidateXml.h @@ -34,6 +34,10 @@ namespace utility { ::testing::AssertionResult validateXml(const char* xmlFilePathExpr, const char* xsdFilePathExpr, const char* xmlFilePath, const char* xsdFilePath); +std::vector findValidXmlFiles(const char* xsdFilePathExpr, + const char* xmlFileName, std::vector xmlFileLocations, const char* xsdFilePath, + std::vector* errors = nullptr); + /** Helper gtest ASSERT to test XML validity against an XSD. 
*/ #define ASSERT_VALID_XML(xmlFilePath, xsdFilePath) \ ASSERT_PRED_FORMAT2(::android::hardware::audio::common::test::utility::validateXml, \ @@ -78,6 +82,9 @@ template ::android::hardware::audio::common::test::utility::validateXmlMultipleLocations, \ xmlFileName, xmlFileLocations, xsdFilePath) +::testing::AssertionResult isNonEmptyXpath( + const char* xmlFilePath, const char* xpathQuery, bool* result); + } // namespace utility } // namespace test } // namespace common diff --git a/audio/common/all-versions/test/utility/src/ValidateXml.cpp b/audio/common/all-versions/test/utility/src/ValidateXml.cpp index 1a906d668b..126873d6f0 100644 --- a/audio/common/all-versions/test/utility/src/ValidateXml.cpp +++ b/audio/common/all-versions/test/utility/src/ValidateXml.cpp @@ -23,6 +23,8 @@ #include #define LIBXML_XINCLUDE_ENABLED #include +#define LIBXML_XPATH_ENABLED +#include #include #include @@ -47,6 +49,10 @@ template <> constexpr auto xmlDeleter = xmlSchemaFreeParserCtxt; template <> constexpr auto xmlDeleter = xmlSchemaFreeValidCtxt; +template <> +constexpr auto xmlDeleter = xmlXPathFreeContext; +template <> +constexpr auto xmlDeleter = xmlXPathFreeObject; /** @return a unique_ptr with the correct deleter for the libxml2 object. */ template @@ -129,6 +135,28 @@ struct Libxml2Global { return ::testing::AssertionSuccess(); } +std::vector findValidXmlFiles( + const char* xsdFilePathExpr, + const char* xmlFileName, std::vector xmlFileLocations, const char* xsdFilePath, + std::vector* errors) { + using namespace std::string_literals; + std::vector foundFiles; + for (const char* location : xmlFileLocations) { + std::string xmlFilePath = location + "/"s + xmlFileName; + if (access(xmlFilePath.c_str(), F_OK) != 0) { + // If the file does not exist ignore this location and fallback on the next one + continue; + } + auto result = validateXml("xmlFilePath", xsdFilePathExpr, xmlFilePath.c_str(), xsdFilePath); + if (!result) { + if (errors != nullptr) errors->push_back(result.message()); + } else { + foundFiles.push_back(xmlFilePath); + } + } + return foundFiles; +} + template ::testing::AssertionResult validateXmlMultipleLocations( const char* xmlFileNameExpr, const char* xmlFileLocationsExpr, const char* xsdFilePathExpr, @@ -136,20 +164,8 @@ template using namespace std::string_literals; std::vector errors; - std::vector foundFiles; - - for (const char* location : xmlFileLocations) { - std::string xmlFilePath = location + "/"s + xmlFileName; - if (access(xmlFilePath.c_str(), F_OK) != 0) { - // If the file does not exist ignore this location and fallback on the next one - continue; - } - foundFiles.push_back(" " + xmlFilePath + '\n'); - auto result = validateXml("xmlFilePath", xsdFilePathExpr, xmlFilePath.c_str(), xsdFilePath); - if (!result) { - errors.push_back(result.message()); - } - } + std::vector foundFiles = findValidXmlFiles( + xsdFilePathExpr, xmlFileName, xmlFileLocations, xsdFilePath, &errors); if (atLeastOneRequired && foundFiles.empty()) { errors.push_back("No xml file found in provided locations.\n"); @@ -175,6 +191,35 @@ template ::testing::AssertionResult validateXmlMultipleLocations(const ch std::vector, const char*); +::testing::AssertionResult isNonEmptyXpath( + const char* xmlFilePath, const char* xpathQuery, bool* result) { + Libxml2Global libxml2; + + auto context = [&]() { + return std::string() + " In: " + xmlFilePath + "\nLibxml2 errors:\n" + libxml2.getErrors(); + }; + + auto doc = make_xmlUnique(xmlReadFile(xmlFilePath, nullptr, 0)); + if (doc == nullptr) { + return 
::testing::AssertionFailure() << "Failed to parse xml\n" << context(); + } + if (xmlXIncludeProcess(doc.get()) == -1) { + return ::testing::AssertionFailure() << "Failed to resolve xincludes in xml\n" << context(); + } + auto xpathCtxt = make_xmlUnique(xmlXPathNewContext(doc.get())); + if (xpathCtxt == nullptr) { + return ::testing::AssertionFailure() << "Failed to create xpath context\n" << context(); + } + auto xpathObj = make_xmlUnique(xmlXPathEvalExpression(BAD_CAST xpathQuery, xpathCtxt.get())); + if (xpathObj == nullptr) { + return ::testing::AssertionFailure() << + "Failed to evaluate xpath: \'" << xpathQuery << "\'\n" << context(); + } + auto nodeSet = xpathObj.get()->nodesetval; + *result = nodeSet ? nodeSet->nodeNr != 0 : false; + return ::testing::AssertionSuccess(); +} + } // namespace utility } // namespace test } // namespace common diff --git a/audio/core/4.0/vts/functional/Android.bp b/audio/core/4.0/vts/functional/Android.bp index e3b376ca88..48a98b1feb 100644 --- a/audio/core/4.0/vts/functional/Android.bp +++ b/audio/core/4.0/vts/functional/Android.bp @@ -18,6 +18,7 @@ cc_test { name: "VtsHalAudioV4_0TargetTest", defaults: ["VtsHalTargetTestDefaults"], srcs: [ + "AudioPolicyConfiguration.cpp", "AudioPrimaryHidlHalTest.cpp", "ValidateAudioConfiguration.cpp" ], diff --git a/audio/core/4.0/vts/functional/AudioPolicyConfiguration.cpp b/audio/core/4.0/vts/functional/AudioPolicyConfiguration.cpp new file mode 100644 index 0000000000..254c018e37 --- /dev/null +++ b/audio/core/4.0/vts/functional/AudioPolicyConfiguration.cpp @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "AudioPolicyConfiguration.h" + +const char* kAudioPolicyConfigurationXml = "audio_policy_configuration.xml"; +const char* kAudioPolicyConfigurationXsd = + "/data/local/tmp/audio_policy_configuration_V4_0.xsd"; + +const std::vector& getApmConfigLocations() { + static const std::vector locations = {"/odm/etc", "/vendor/etc", "/system/etc"}; + return locations; +} diff --git a/audio/core/4.0/vts/functional/AudioPolicyConfiguration.h b/audio/core/4.0/vts/functional/AudioPolicyConfiguration.h new file mode 100644 index 0000000000..13a62ed730 --- /dev/null +++ b/audio/core/4.0/vts/functional/AudioPolicyConfiguration.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +extern const char* kAudioPolicyConfigurationXml; +extern const char* kAudioPolicyConfigurationXsd; + +const std::vector& getApmConfigLocations(); diff --git a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp index 71d91db360..308a4b51d1 100644 --- a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp +++ b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp @@ -45,12 +45,14 @@ #include +#include "AudioPolicyConfiguration.h" #include "utility/AssertOk.h" #include "utility/Documentation.h" #include "utility/EnvironmentTearDown.h" #define AUDIO_HAL_VERSION V4_0 #include "utility/PrettyPrintAudioTypes.h" #include "utility/ReturnIn.h" +#include "utility/ValidateXml.h" using std::initializer_list; using std::string; @@ -348,8 +350,29 @@ TEST_F(AudioPatchPrimaryHidlTest, AudioPatches) { /////////// TODO: move to the beginning of the file for easier update //////// ////////////////////////////////////////////////////////////////////////////// +static void hasDeviceTypeInModule( + const std::string& module, const std::string& device, bool* result) { + const std::vector configs = findValidXmlFiles( + "", kAudioPolicyConfigurationXml, getApmConfigLocations(), + kAudioPolicyConfigurationXsd); + *result = true; // If could not get the information, run all tests + ASSERT_EQ(1U, configs.size()); + std::string query = "/audioPolicyConfiguration/modules/module[@name=\"" + module + "\"]" + + "/devicePorts/devicePort[@type=\"" + device + "\"]"; + ASSERT_NO_FATAL_FAILURE(isNonEmptyXpath(configs[0].c_str(), query.c_str(), result)); +} + class AudioConfigPrimaryTest : public AudioPatchPrimaryHidlTest { public: + static bool primaryHasMic() { + static const bool hasMic = []() { + bool result; + hasDeviceTypeInModule("primary", "AUDIO_DEVICE_IN_BUILTIN_MIC", &result); + return result; + }(); + return hasMic; + } + // Cache result ? 
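    // Note on primaryHasMic() above: the function-local `static const bool`
    // is initialized by an immediately-invoked lambda, so the audio policy
    // XML is parsed once and the result cached; C++11 guarantees this
    // initialization is thread-safe.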
static const vector getRequiredSupportPlaybackAudioConfig() { return combineAudioConfig({AudioChannelMask::OUT_STEREO, AudioChannelMask::OUT_MONO}, @@ -369,10 +392,12 @@ class AudioConfigPrimaryTest : public AudioPatchPrimaryHidlTest { } static const vector getRequiredSupportCaptureAudioConfig() { + if (!primaryHasMic()) return {}; return combineAudioConfig({AudioChannelMask::IN_MONO}, {8000, 11025, 16000, 44100}, {AudioFormat::PCM_16_BIT}); } static const vector getRecommendedSupportCaptureAudioConfig() { + if (!primaryHasMic()) return {}; return combineAudioConfig({AudioChannelMask::IN_STEREO}, {22050, 48000}, {AudioFormat::PCM_16_BIT}); } @@ -515,6 +540,10 @@ TEST_F(AudioPrimaryHidlTest, GetMicrophonesTest) { doc::test("Make sure getMicrophones always succeeds"); hidl_vec microphones; ASSERT_OK(device->getMicrophones(returnIn(res, microphones))); + if (res == Result::NOT_SUPPORTED) { + doc::partialTest("getMicrophones is not supported"); + return; + } ASSERT_OK(res); if (microphones.size() > 0) { // When there is microphone on the phone, try to open an input stream diff --git a/audio/core/4.0/vts/functional/ValidateAudioConfiguration.cpp b/audio/core/4.0/vts/functional/ValidateAudioConfiguration.cpp index a64513fc81..7d929ce560 100644 --- a/audio/core/4.0/vts/functional/ValidateAudioConfiguration.cpp +++ b/audio/core/4.0/vts/functional/ValidateAudioConfiguration.cpp @@ -18,13 +18,14 @@ #include #include "utility/ValidateXml.h" +#include "AudioPolicyConfiguration.h" TEST(CheckConfig, audioPolicyConfigurationValidation) { RecordProperty("description", "Verify that the audio policy configuration file " "is valid according to the schema"); - std::vector locations = {"/odm/etc", "/vendor/etc", "/system/etc"}; - EXPECT_ONE_VALID_XML_MULTIPLE_LOCATIONS("audio_policy_configuration.xml", locations, - "/data/local/tmp/audio_policy_configuration_V4_0.xsd"); + EXPECT_ONE_VALID_XML_MULTIPLE_LOCATIONS( + kAudioPolicyConfigurationXml, getApmConfigLocations(), + kAudioPolicyConfigurationXsd); } From aadb064fa139fdc13f30fd9938f381c6a1ebf516 Mon Sep 17 00:00:00 2001 From: Hayden Gomes Date: Thu, 17 Oct 2019 14:53:11 -0700 Subject: [PATCH 18/19] Adding audiocontrol and evs hashes to current.txt - Added hashes for IAudioControl, IEvsCamera, IEvsCameraStream, IEvsDisplay, IEvsEnumeration, and evs@1.0::types Bug: 142877791 Test: Ran vts module VtsTrebleVendorVintfTest Change-Id: I542492b7cfa52a3e01ef2f24efe5a30f679e2a15 --- current.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/current.txt b/current.txt index 47e7d721cc..221ca20901 100644 --- a/current.txt +++ b/current.txt @@ -298,7 +298,13 @@ a91b547f5922f39fe4231d97fac1c3825c1c1b0c8ef7a5136689ceed37e8bfe9 android.hardwar 3661fa0623056922fdc4235ac5a9c91a2d066ab6f1ab4297e3b240fe302ba500 android.hardware.audio.effect@4.0::IPresetReverbEffect e88e520f8c98a62fccd8d5316c6687808f775de145d1405a7a9a66587ee6a001 android.hardware.audio.effect@4.0::IVirtualizerEffect fe28829dab10d171783b79ac9cc45412739f8ff275e90228d7c6370ef189b859 android.hardware.audio.effect@4.0::IVisualizerEffect -21c8a702579356480236c6851b5b2c16b9bd369ce12bdd6ffdc4626a89f34f73 android.hardware.audio.effect@4.0::types +21c8a702579356480236c6851b5b2c16b9bd369ce12bdd6ffdc4626a89f34f73 android.hardware.audio.effect@4.0::types +a0f93c768c353cecee6237fe479bce47404eb10b629fafe07e32a054fd67f2af android.hardware.automotive.audiocontrol@1.0::IAudioControl +f2904a4c108ad1b93eb2fa4e43b82bd01ce1ff26156316e49d1d9fc80dfecaad android.hardware.automotive.evs@1.0::IEvsCamera 
+94cba6ad04c83aa840de2ed52b74ba2126a26dd960225e61ac36703315279a80 android.hardware.automotive.evs@1.0::IEvsCameraStream +5ea36fb043d9e3b413219de3dfd7b046b48af4fda39f167f3528652e986cb76d android.hardware.automotive.evs@1.0::IEvsDisplay +b15c5d8f28be4f0469c11d184ebca616895f109d553a6c31018789d8c1bc0ac5 android.hardware.automotive.evs@1.0::IEvsEnumerator +3b17c1fdfc389e0abe626c37054954b07201127d890c2bc05d47613ec1f4de4f android.hardware.automotive.evs@1.0::types 42a06dc288f61b0690580f3d37b30b663c31d74d50bb58d0772386b550d5faab android.hardware.authsecret@1.0::IAuthSecret 32cc50cc2a7658ec613c0c2dd2accbf6a05113b749852879e818b8b7b438db19 android.hardware.bluetooth.a2dp@1.0::IBluetoothAudioHost ff4be64d7992f8bec97dff37f35450e79b3430c61f85f54322ce45bef229dc3b android.hardware.bluetooth.a2dp@1.0::IBluetoothAudioOffload From 5039b6099ea82f158f1318fd2be3a141dd0bd54e Mon Sep 17 00:00:00 2001 From: Etan Cohen Date: Mon, 25 Nov 2019 11:41:58 -0800 Subject: [PATCH 19/19] [AWARE] Protect string copy against buffer overflow Fixes: 143789898 Test: (Unit) atest com.android.server.wifi Test: ACTS ThroughputTest:test_iperf_single_ndp_aware_only_ib Test: (VTS) atest VtsHalWifiApV1_4TargetTest Change-Id: I5b8aa1d9a6388fe20cb7e1cd6a76d5e59e14d099 --- wifi/1.3/default/hidl_struct_util.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/wifi/1.3/default/hidl_struct_util.cpp b/wifi/1.3/default/hidl_struct_util.cpp index 2e4db70480..d305c09979 100644 --- a/wifi/1.3/default/hidl_struct_util.cpp +++ b/wifi/1.3/default/hidl_struct_util.cpp @@ -1819,7 +1819,13 @@ bool convertHidlNanDataPathInitiatorRequestToLegacy( convertHidlNanDataPathChannelCfgToLegacy( hidl_request.channelRequestType); legacy_request->channel = hidl_request.channel; - strcpy(legacy_request->ndp_iface, hidl_request.ifaceName.c_str()); + if (strnlen(hidl_request.ifaceName.c_str(), IFNAMSIZ + 1) == IFNAMSIZ + 1) { + LOG(ERROR) << "convertHidlNanDataPathInitiatorRequestToLegacy: " + "ifaceName too long"; + return false; + } + strncpy(legacy_request->ndp_iface, hidl_request.ifaceName.c_str(), + IFNAMSIZ + 1); legacy_request->ndp_cfg.security_cfg = (hidl_request.securityConfig.securityType != NanDataPathSecurityType::OPEN) @@ -1900,7 +1906,13 @@ bool convertHidlNanDataPathIndicationResponseToLegacy( ? legacy_hal::NAN_DP_REQUEST_ACCEPT : legacy_hal::NAN_DP_REQUEST_REJECT; legacy_request->ndp_instance_id = hidl_request.ndpInstanceId; - strcpy(legacy_request->ndp_iface, hidl_request.ifaceName.c_str()); + if (strnlen(hidl_request.ifaceName.c_str(), IFNAMSIZ + 1) == IFNAMSIZ + 1) { + LOG(ERROR) << "convertHidlNanDataPathIndicationResponseToLegacy: " + "ifaceName too long"; + return false; + } + strncpy(legacy_request->ndp_iface, hidl_request.ifaceName.c_str(), + IFNAMSIZ + 1); legacy_request->ndp_cfg.security_cfg = (hidl_request.securityConfig.securityType != NanDataPathSecurityType::OPEN)