diff --git a/current.txt b/current.txt index 83479f39a3..fe763279db 100644 --- a/current.txt +++ b/current.txt @@ -238,15 +238,16 @@ a432d6d9200248dc2126827bcd6cdea31dd65eff39b939f64585d27d915a5857 android.hardwar 619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler 0a159f81359cd4f71bbe00972ee8403ea79351fb7c0cd48be72ebb3e424dbaef android.hardware.radio@1.0::types 09342041e17c429fce0034b9096d17849122111436a5f0053e7e59500e1cb89c android.hardware.media.omx@1.0::IOmxStore -246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types 93eb3757ceaf21590fa4cd1d4a7dfe3b3794af5396100a6d25630879352abce9 android.hardware.neuralnetworks@1.0::IDevice f66f9a38541bf92001d3adcce678cd7e3da2262124befb460b1c9aea9492813b android.hardware.neuralnetworks@1.0::IExecutionCallback 953607822954435874f4b81686440a604e2a88cdd2d9164c6293f3d5772510d7 android.hardware.neuralnetworks@1.0::IPreparedModel 73e03573494ba96f0e711ab7f1956c5b2d54c3da690cd7ecf4d6d0f287447730 android.hardware.neuralnetworks@1.0::IPreparedModelCallback +246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback # Future changes to HALs 5804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice -088b30a9c9ce27bc955b08a03c38c208f8f65b51133053c7656c875479801b99 android.hardware.neuralnetworks@1.0::types +12e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback +702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types diff --git a/neuralnetworks/1.0/IExecutionCallback.hal b/neuralnetworks/1.0/IExecutionCallback.hal index ef0f4549dd..9c0616696d 100644 --- a/neuralnetworks/1.0/IExecutionCallback.hal +++ b/neuralnetworks/1.0/IExecutionCallback.hal @@ -28,7 +28,7 @@ interface IExecutionCallback { * ErrorStatus resulting from the execution. If the asynchronous task * is not launched, notify must be invoked with the appropriate error. * - * @return param Error status returned from launching the asynchronous task + * @param status Error status returned from launching the asynchronous task * (if the launch fails) or from the asynchronous task itself * (if the launch succeeds). Must be: * - NONE if the asynchronous execution was successful diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal index 12461e9ce7..8c07fcc324 100644 --- a/neuralnetworks/1.0/types.hal +++ b/neuralnetworks/1.0/types.hal @@ -24,38 +24,40 @@ package android.hardware.neuralnetworks@1.0; * Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors * with at least one dimension). Types not prefaced by TENSOR_* represent * scalar values and must have no dimensions. + * + * Although many types are defined, most operators accept just a few + * types. Most used are {@link OperandType::TENSOR_FLOAT32}, + * {@link OperandType::TENSOR_QUANT8_ASYMM}, + * and {@link OperandType::INT32}. */ enum OperandType : int32_t { - /** - * The following entries are used to declare scalars. - */ + /** A 32 bit floating point scalar value. */ FLOAT32 = 0, + /** A signed 32 bit integer scalar value. 
*/ INT32 = 1, + /** An unsigned 32 bit integer scalar value. */ UINT32 = 2, - /** - * The following entries are used to declare tensors. - */ + /** A tensor of 32 bit floating point values. */ TENSOR_FLOAT32 = 3, + /** A tensor of 32 bit integer values. */ TENSOR_INT32 = 4, - - /** - * A tensor of 8 bit integers that represent real numbers. + /** A tensor of 8 bit integers that represent real numbers. * * Attached to this tensor are two numbers that can be used to convert the * 8 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value - * - zero_value: a 32 bit integer + * - scale: a 32 bit floating point value greater than zero. + * - zeroPoint: a 32 bit integer, in range [0, 255]. * * The formula is: - * real_value = (integer_value - zero_value) * scale. + * real_value = (integer_value - zeroPoint) * scale. */ TENSOR_QUANT8_ASYMM = 5, - /** - * The following entries are OEM specific operand types. - */ + /** OEM specific scalar value. */ OEM = 10000, + + /** A tensor of OEM specific values. */ TENSOR_OEM_BYTE = 10001, }; @@ -66,9 +68,9 @@ enum OperandType : int32_t { */ enum OperationType : int32_t { /** - * Adds two tensors, elment-wise. + * Adds two tensors, element-wise. * - * Takes two input tensors of identical type and compatible dimensions. The output + * Takes two input tensors of identical type and compatible dimensions. The output * is the sum of both input tensors, optionally modified by an activation function. * * Two dimensions are compatible when: @@ -79,21 +81,25 @@ enum OperationType : int32_t { * It starts with the trailing dimensions, and works its way forward. * * Example: - * input1.dimension = {4, 1, 2} + * + * input1.dimension = {4, 1, 2} * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: up to 4 * * Inputs: - * 0: A tensor. - * 1: A tensor of the same type, and compatible dimensions as input0. - * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. - * Specifies the activation to invoke on the result of each addition. + * * 0: A tensor. + * * 1: A tensor of the same type, and compatible dimensions as input0. + * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. + * Specifies the activation to invoke on the result of each addition. * - * Ouputs: - * 0: The sum, a tensor of the same type as input0. + * Outputs: + * * 0: The sum, a tensor of the same type as input0. */ ADD = 0, @@ -102,29 +108,50 @@ enum OperationType : int32_t { * * The output dimensions are functions of the filter dimensions, stride, and padding. * - * The values in output Tensor is computed as: + * The values in the output tensor are computed as: + * * output[batch, row, col, channel] = * sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1) * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} - * Supported tensor rank: 4, with "NHWC" data layout. + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} * - * Inputs: - * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. - * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 
- * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 7: An INT32 value, specifying the filter width.
- * 8: An INT32 value, specifying the filter height.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels)
+ * data layout.
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 6: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the filter width.
+ * * 8: An INT32 value, specifying the filter height.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
 */
 AVERAGE_POOL_2D = 1,

@@ -134,19 +161,21 @@ enum OperationType : int32_t {
 *
 * The input tensors must have identical type and the same dimensions except the
 * dimension along the concatenation axis.
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4
 *
 * Inputs:
- * 0 ~ n: The list on n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]
- * n+1: An INT32 value, specifying the concatenation axis.
- * n+2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm].
+ * For inputs of {@link OperandType::TENSOR_QUANT8_ASYMM} type, all
+ * input tensors must have the same scale and zeroPoint.
+ * * n: An INT32 value, specifying the concatenation axis.
 *
- * Ouputs:
- * 0: The output, a tensor of the same type as the input tensors.
- The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ * Outputs:
+ * * 0: The output, a tensor of the same type as the input tensors.
+ * The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
 */
 CONCATENATION = 2,

@@ -158,7 +187,8 @@ enum OperationType : int32_t {
 *
 * The output dimensions are functions of the filter dimensions, stride, and padding.
 *
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
 * output[batch, row, col, channel] =
 * sum_{i, j} (
 * input[batch, row + i, col + j, k] *
@@ -166,77 +196,135 @@ enum OperationType : int32_t {
 * bias[channel]
 * )
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: 4, with "NHWC" data layout.
 *
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
- * specifying the filter.
- * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
- * should be of {@link OperandType::TENSOR_INT32}.
- * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. + * * 7: An INT32 value, specifying the stride when walking through input + * in the ‘width’ dimension. + * * 8: An INT32 value, specifying the stride when walking through input + * in the ‘height’ dimension. + * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. + * Specifies the activation to invoke on the result of each addition. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. + * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in], + * specifying the filter. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. + * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should + * also be of {@link OperandType::TENSOR_FLOAT32}. + * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias + * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and + * bias_scale == input_scale * filter_scale. + * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the + * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. + * * 4: An INT32 value, specifying the stride when walking through input + * in the ‘width’ dimension. + * * 5: An INT32 value, specifying the stride when walking through input + * in the ‘height’ dimension. + * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. + * Specifies the activation to invoke on the result of each addition. + * + * Outputs: + * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out]. + * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following + * condition must be satisfied: output_scale > input_scale * filter_scale. */ CONV_2D = 3, /** - * Performs an depthwise 2-D convolution operation. + * Performs a depthwise 2-D convolution operation. * * Given an input tensor of shape [batches, height, width, depth_in] and a filter - * tensor of shape [depth_out, filter_height, filter_width, depth_in] containing - * in_channels convolutional filters of depth 1, DEPTHWISE_CONV applies a different + * tensor of shape [1, filter_height, filter_width, depth_out] containing + * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different * filter to each input channel (expanding from 1 channel to channel_multiplier channels * for each), then concatenates the results together. * * The output has depth_out = depth_in * depth_multiplier channels. * The output dimensions are functions of the filter dimensions, stride, and padding. * - * The values in output Tensor is computed as: + * The values in the output tensor are computed as: + * * output[b, i, j, k * channel_multiplier + q] = * sum_{di, dj} ( * input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[di, dj, k, q] + * filter[1, di, dj, k * channel_multiplier + q] * ) * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: 4, with "NHWC" data layout. * - * Inputs: - * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. - * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], - * specifying the filter. 
- * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
- * should be of {@link OperandType::TENSOR_INT32}.
- * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 9: An INT32 value, specifying the depthwise multiplier.
- * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 8: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 9: An INT32 value, specifying the depthwise multiplier.
+ * * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input + * in the ‘height’ dimension. + * * 6: An INT32 value, specifying the depthwise multiplier. + * * 7: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. + * Specifies the activation to invoke on the result of each addition. + * + * Outputs: + * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out]. + * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following + * condition must be satisfied: output_scale > input_scale * filter_scale. */ DEPTHWISE_CONV_2D = 4, @@ -254,18 +342,20 @@ enum OperationType : int32_t { * input_height * block_size. * The depth of the input tensor must be divisible by block_size * block_size * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: 4, with "NHWC" data layout. * * Inputs: - * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. - * 1: An INT32 value, specifying the block_size. block_size must be >=1 and - * block_size * block_size must be a divisor of the input depth. + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. + * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and + * block_size * block_size must be a divisor of the input depth. * - * Ouputs: - * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size, - * depth/(block_size*block_size)]. + * Outputs: + * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size, + * depth/(block_size*block_size)]. */ DEPTH_TO_SPACE = 5, @@ -273,53 +363,69 @@ enum OperationType : int32_t { * Dequantizes the input tensor. * * The formula is: - * output = (input - zero_value) * scale. * - * Supported tensor types: {@link OperandType::TENSOR_QUANT8_ASYMM} + * output = (input - zeroPoint) * scale. + * + * Supported tensor types: + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: up to 4 * * Inputs: - * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}. + * * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}. * - * Ouputs: - * 0: The output tensor of same shape as input0, but with type - {@link OperandType::TENSOR_FLOAT32}. + * Outputs: + * * 0: The output tensor of same shape as input0, but with type + * {@link OperandType::TENSOR_FLOAT32}. */ DEQUANTIZE = 6, /** - * Looks up items from a given tensor. + * Looks up sub-tensors in the input tensor. * - * Each item in the output is a raw copy of the corresponding item in - * the input “values”. If the the given “lookup” indices are out of bounds, - * the op will fail and an error will be reported. + * This operator takes for input a tensor of values (Values) and + * a one-dimensional tensor of selection indices (Lookups). + * The output tensor is the concatenation of sub-tensors of Values as + * selected by Lookups. + * + * Think of Values as being sliced along its first dimension: + * The entries in Lookups select which slices are concatenated together + * to create the output tensor. + * + * For example, if Values has shape of [40, 200, 300] and + * Lookups has shape of [3], all three values found in Lookups are + * expected to be between 0 and 39. The resulting tensor must + * have shape of [3, 200, 300]. 
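The slicing rule described above for EMBEDDING_LOOKUP can be illustrated with a small host-side C++ sketch. The helper name, the float element type, and the flattened-buffer layout are illustrative assumptions, not part of the HAL:

#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <vector>

// Each index in `lookups` selects one slice along the first dimension of
// `values`; the selected slices are concatenated into the output.
// `sliceElems` is the number of elements in one slice (the product of all
// Values dimensions except the first).
std::vector<float> embeddingLookup(const std::vector<float>& values,
                                   int32_t firstDim, size_t sliceElems,
                                   const std::vector<int32_t>& lookups) {
    std::vector<float> output(lookups.size() * sliceElems);
    for (size_t i = 0; i < lookups.size(); ++i) {
        const int32_t idx = lookups[i];
        if (idx < 0 || idx >= firstDim) {
            // Per the doc above, an out-of-bounds lookup must fail.
            throw std::runtime_error("lookup index out of bounds");
        }
        std::memcpy(&output[i * sliceElems], &values[idx * sliceElems],
                    sliceElems * sizeof(float));
    }
    return output;
}

With Values of shape [40, 200, 300] and Lookups of shape [3], `firstDim` is 40, `sliceElems` is 200 * 300, and the result holds 3 * 200 * 300 elements, matching the [3, 200, 300] output shape above.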
+ *
+ * If a value in Lookups is out of bounds, the operation must fail
+ * and an error must be reported.
 *
 * Inputs:
- * * 0: Values. An n-D tensor of any type X (where n >= 2). E.g., if n is 2,
- * then the shape would be [lookup_dimension, values_dimension], where
- * “lookup_dimension” corresponds to the indexing dimension in the lookup
- * table, and “values_dimension” to the contents.
- * * 1: Lookups. An 1-D tensor of type T, of shape [lookup_size], where
- * “lookup_size” is the number of elements to look for, and each entry
- * corresponds to the first dimension of the “values” tensor.
+ * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32} type.
+ * The values are indices into the first dimension of Values.
+ * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+ * extracted.
 *
 * Output:
- * * 0: A n-D tensor of type X and the same rank and shape as the “values”
- * tensor, except for the first dimension which has size “lookup_size”.
+ * * 0: An n-D tensor with the same rank and shape as the Values
+ * tensor, except for the first dimension which has the same size
+ * as Lookups' only dimension.
 */
 EMBEDDING_LOOKUP = 7,

 /**
 * Computes element-wise floor() on the input tensor.
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
 * Supported tensor rank: up to 4
 *
 * Inputs:
- * 0: A tensor.
+ * * 0: A tensor.
 *
- * Ouputs:
- * 0: The output, a tensor of the same type and dimensions as input0.
+ * Outputs:
+ * * 0: The output tensor, of the same type and dimensions as the input tensor.
 */
 FLOOR = 8,

@@ -328,66 +434,104 @@ enum OperationType : int32_t {
 * tensor with each element in the output tensor.
 *
 * This layer implements the operation:
+ *
 * outputs = activation(inputs * weights’ + bias)
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4.
 *
 * Inputs:
- * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
- * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
- * [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
- * and “input_size” is the size of the input.
- * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where “num_units”
- * corresponds to the number of output nodes.
- * 2: A 1-D tensor, of shape [num_units], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}.
- * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
- * should be of {@link OperandType::TENSOR_INT32}.
- * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
+ * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
+ * [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
+ * and “input_size” is the size of the input.
+ * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
+ * "num_units" corresponds to the number of output nodes.
+ * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
 *
- * Ouputs:
- * 0: The output tensor, of shape [batch_size, num_units].
+ * Outputs:
+ * * 0: The output tensor, of shape [batch_size, num_units].
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
 */
 FULLY_CONNECTED = 9,

 /**
- * Looks up values of a hash table with given keys.
+ * Looks up sub-tensors in the input tensor using a key-value map.
+ *
+ * This operator takes for input a tensor of values (Values),
+ * a one-dimensional tensor of selection values (Lookups) and
+ * a one-dimensional tensor (Keys) that maps these values to Values
+ * indexes. The output tensor is the concatenation of sub-tensors of
+ * Values as selected by Lookups via Keys.
+ *
+ * Think of Values as being sliced along its outer-most dimension.
+ * The output is a concatenation of selected slices, with one slice
+ * for each entry of Lookups. The slice selected is the one at the
+ * same index as the Keys entry that matches the value in Lookups.
+ *
+ * For a hit, the corresponding sub-tensor of Values is included
+ * in the Output tensor. For a miss, the corresponding sub-tensor in
+ * Output must have zero values.
+ *
+ * For example, if Values has shape of [40, 200, 300],
+ * Keys should have a shape of [40]. If Lookups tensor has shape
+ * of [3], three slices are being concatenated, so the resulting tensor
+ * must have the shape of [3, 200, 300]. If the first entry in Lookups
+ * has the value 123456, that value must be located in Keys tensor.
+ * If the sixth entry of Keys contains 123456, the sixth slice of Values
+ * must be selected. If no entry in Keys has 123456, a slice of zeroes
+ * must be concatenated.
 *
 * Inputs:
- * * 0: Lookups. A 1-D int32 tensor with shape [ k ].
- * * 1: Keys. A 1-D int32 tensor with shape [ n ], *MUST* be sorted in
- * ascending order.
- * * 2: Values. A tensor with shape [ n … ].
+ * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ k ].
+ * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ n ];
+ * Keys and Values pair represent a map, i.e., the ith element
+ * in Keys (Keys[i]) is the key to select the ith sub-tensor
+ * in Values (Values[i]), where 0 <= i <= n-1.
+ * Keys tensor *MUST* be sorted in ascending order.
+ * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n.
 *
 * Outputs:
 * * 0: Output. A tensor with shape [ k …].
- * * 1: Hits. A uint8 tensor with shape [ k ] indicates whether the lookup
- * hits or not.
+ * * 1: Hits. A boolean tensor with shape [ k ] indicating whether the lookup
+ * hits (True) or not (False).
+ * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f.
+ * A non-zero byte represents True, a hit. A zero indicates otherwise.
 */
 HASHTABLE_LOOKUP = 10,

 /**
- * Applies L2 normalization along a the depth dimension.
+ * Applies L2 normalization along the depth dimension.
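The HASHTABLE_LOOKUP behavior documented above, with sorted Keys and zero-filled slices plus a zero Hits byte on a miss, can be sketched in host-side C++ as follows; the function name and the float element type are illustrative assumptions:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Keys must be sorted ascending, so each lookup can be a binary search.
// A miss leaves the corresponding output slice zero-filled and hits[i] == 0.
void hashtableLookup(const std::vector<int32_t>& lookups,
                     const std::vector<int32_t>& keys,
                     const std::vector<float>& values, size_t sliceElems,
                     std::vector<float>* output, std::vector<uint8_t>* hits) {
    output->assign(lookups.size() * sliceElems, 0.0f);
    hits->assign(lookups.size(), 0);
    for (size_t i = 0; i < lookups.size(); ++i) {
        auto it = std::lower_bound(keys.begin(), keys.end(), lookups[i]);
        if (it != keys.end() && *it == lookups[i]) {
            const size_t slice = static_cast<size_t>(it - keys.begin());
            std::memcpy(&(*output)[i * sliceElems], &values[slice * sliceElems],
                        sliceElems * sizeof(float));
            (*hits)[i] = 1;  // non-zero byte == True, a hit
        }
    }
}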
+ *
+ * The values in the output tensor are computed as:
 *
- * The values in output Tensor is computed as:
 * output[batch, row, col, channel] =
 * input[batch, row, col, channel] /
 * sqrt(sum_{c} pow(input[batch, row, col, c], 2))
 *
- * For x with more dimensions, independently normalizes each 1-D slice along dimension dim.
+ * For input tensor with more dimensions, independently normalizes each 1-D slice along dimension dim.
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width, and Channels).
 *
 * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth].
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
 */
 L2_NORMALIZATION = 11,

@@ -396,28 +540,48 @@ enum OperationType : int32_t {
 *
 * The output dimensions are functions of the filter dimensions, stride, and padding.
 *
- * The values in output Tensor is computed as:
+ * The values in the output tensor are computed as:
+ *
 * output[batch, row, col, channel] =
 * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
 * Supported tensor rank: 4, with "NHWC" data layout.
 *
- * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
- * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
- * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
- * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
- * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
- * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 7: An INT32 value, specifying the filter width.
- * 8: An INT32 value, specifying the filter height.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Both explicit padding and implicit padding are supported.
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 6: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the filter width.
+ * * 8: An INT32 value, specifying the filter height.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. + * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the + * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. + * * 2: An INT32 value, specifying the stride when walking through input + * in the ‘width’ dimension. + * * 3: An INT32 value, specifying the stride when walking through input + * in the ‘height’ dimension. + * * 4: An INT32 value, specifying the filter width. + * * 5: An INT32 value, specifying the filter height. + * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. + * Specifies the activation to invoke on the result of each addition. + * + * Outputs: + * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. */ L2_POOL_2D = 12, @@ -428,41 +592,49 @@ enum OperationType : int32_t { * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within depth_radius. * - * In details: + * The output is calculated using this formula: + * * sqr_sum[a, b, c, d] = * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2) * output = input / pow((bias + alpha * sqr_sum), beta) * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * Supported tensor rank: 4, with "NHWC" data layout. * * Inputs: - * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. - * 1: An INT32 value, specifying the radius of the normalization window. - * 2: A FLOAT32 value, specifying the bias, must not be zero. - * 3: A FLOAT32 value, specifying the scale factor, alpha. - * 4: A FLOAT32 value, specifying the exponent, beta. + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. + * * 1: An INT32 value, specifying the radius of the normalization window. + * * 2: A FLOAT32 value, specifying the bias, must not be zero. + * * 3: A FLOAT32 value, specifying the scale factor, alpha. + * * 4: A FLOAT32 value, specifying the exponent, beta. * - * Ouputs: - * 0: The output tensor of same shape as input0. + * Outputs: + * * 0: The output tensor of same shape as input0. */ LOCAL_RESPONSE_NORMALIZATION = 13, /** * Computes sigmoid activation on the input tensor element-wise. * - * In details: + * The output is calculated using this formula: + * * output = 1 / (1 + exp(-input)) * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: up to 4. * * Inputs: - * 0: A tensor, specifying the input. + * * 0: A tensor, specifying the input. * - * Ouputs: - * 0: The output tensor of same shape as input0. + * Outputs: + * * 0: The output tensor of same shape as input0. + * For {@link OperandType::TENSOR_QUANT8_ASYMM} type, + * the scale must be 1.f / 256 and the zeroPoint must be 0. */ LOGISTIC = 14, @@ -501,102 +673,165 @@ enum OperationType : int32_t { LSH_PROJECTION = 15, /** - * Long short-term memory unit (LSTM) recurrent network layer. 
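The explicit and implicit padding inputs that recur in AVERAGE_POOL_2D, CONV_2D, DEPTHWISE_CONV_2D, L2_POOL_2D, and MAX_POOL_2D above determine the output spatial size. The HAL text does not spell out the arithmetic; the sketch below assumes the conventional SAME/VALID definitions used by TensorFlow-style implementations, so treat it as an assumption rather than a quote from the spec:

#include <cstdint>

// Output size along one spatial dimension with explicit padding.
int32_t outputSizeExplicit(int32_t inSize, int32_t padHead, int32_t padTail,
                           int32_t filter, int32_t stride) {
    return (inSize - filter + padHead + padTail) / stride + 1;
}

// Output size with an implicit padding scheme (1 = SAME, 2 = VALID),
// assuming the conventional definitions: SAME is ceil(inSize / stride),
// VALID is ceil((inSize - filter + 1) / stride).
int32_t outputSizeImplicit(int32_t inSize, int32_t filter, int32_t stride,
                           int32_t scheme) {
    if (scheme == 1) return (inSize + stride - 1) / stride;  // SAME
    return (inSize - filter + stride) / stride;              // VALID
}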
+ * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
 *
- * The default non-peephole implementation is based on:
- * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
+ * The LSTM operation is described by the following equations.
+ *
+ * \f{eqnarray*}{
+ * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
+ * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
+ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell})& \\
+ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o)& \\
+ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) & if\ there\ is\ a\ projection; \\
+ * h_t =& & \\
+ * & o_t \odot g(C_t) & otherwise. \\
+ * \f}
+ * Where:
+ * * \f$x_t\f$ is the input,
+ * * \f$i_t\f$ is the input gate,
+ * * \f$f_t\f$ is the forget gate,
+ * * \f$C_t\f$ is the cell state,
+ * * \f$o_t\f$ is the output,
+ * * \f$h_t\f$ is the output state,
+ * * \f$\sigma\f$ is the logistic sigmoid function,
+ * * \f$g\f$ is the cell input and cell output activation function, usually \f$tanh\f$,
+ * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+ * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
+ * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+ * * \f$b_i\f$ is the input gate bias,
+ * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+ * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+ * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+ * * \f$b_f\f$ is the forget gate bias,
+ * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+ * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+ * * \f$b_c\f$ is the cell bias,
+ * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+ * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+ * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+ * * \f$b_o\f$ is the output gate bias,
+ * * \f$W_{proj}\f$ is the projection weight matrix,
+ * * \f$b_{proj}\f$ is the projection bias,
+ * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+ * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+ * * \f$\odot\f$ is the
+ * Hadamard product that takes two matrices and produces another
+ * matrix, each element of which is the product of the corresponding
+ * elements of the input matrices.
+ *
+ * The operation has the following independently optional inputs:
+ * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights (\f$W_{hi}\f$),
+ * cell-to-input weights (\f$W_{ci}\f$), and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values (i.e., all set to null). If they have no
+ * values, coupling of input and forget gates (CIFG) is used, in which case
+ * the input gate (\f$i_t\f$) is calculated using the following equation instead.
+ * \f{eqnarray*}{
+ * i_t = 1 - f_t
+ * \f}
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights (\f$W_{cf}\f$), and cell-to-output
+ * weights (\f$W_{co}\f$) either all have values or none of them have values.
+ * If they have values, the peephole optimization is used.
+ * * The projection weights (\f$W_{proj}\f$) are required only for the recurrent projection
+ * layer, and should otherwise have no value.
+ * * The projection bias (\f$b_{proj}\f$) may (but is not required to) have a value if the
+ * recurrent projection layer exists, and should otherwise have no value.
+ *
+ * References:
+ *
+ * The default non-peephole non-CIFG implementation is based on:
+ * http://www.bioinf.jku.at/publications/older/2604.pdf
 * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
 * Computation, 9(8):1735-1780, 1997.
 *
- * The peephole implementation is based on:
+ * The peephole implementation and projection layer are based on:
 * https://research.google.com/pubs/archive/43905.pdf
 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
 * recurrent neural network architectures for large scale acoustic modeling."
 * INTERSPEECH, 2014.
+ * (However, the concept of peephole optimization was introduced in work
+ * prior to this paper.)
 *
 * The coupling of input and forget gate (CIFG) is based on:
 * http://arxiv.org/pdf/1503.04069.pdf
 * Greff et al. "LSTM: A Search Space Odyssey"
 *
- * The class has the following independently optional inputs:
- * * If input gate (if CIFG): “input_to_forget_weights”,
- * “recurrent_to_input_weights”, “cell_to_input_weights”, “input_gate_bias”.
- * * If no peephole connections: “cell_to_input_weights”,
- * “cell_to_forget_weights”, “cell_to_output_weights”.
- * * If no projection layer: “projection_weights” and “projection_bias”.
- * * If no projection bias: “projection_bias”.
- *
- * Supported tensor types:
+ * Supported tensor types (type T):
 * * {@link OperandType::TENSOR_FLOAT32}
 *
 * Inputs:
- * * 0: Input.
+ * * 0: The input (\f$x_t\f$).
 * A 2-D tensor of type T, of shape [batch_size, input_size], where
 * “batch_size” corresponds to the batching dimension, and “input_size”
 * is the size of the input.
- * * 1: input_to_input_weights.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
 * A 2-D tensor of type T, of shape [num_units, input_size], where
 * “num_units” corresponds to the number of cell units.
- * * 2: input_to_forget_weights.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
 * A 2-D tensor of type T, of shape [num_units, input_size].
- * * 3: input_to_cell_weights.
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
 * A 2-D tensor of type T, of shape [num_units, input_size].
- * * 4: input_to_output_weights.
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
 * A 2-D tensor of type T, of shape [num_units, input_size].
- * * 5: recurrent_to_input_weights.
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
 * A 2-D tensor of type T, of shape [num_units, output_size], where
 * “output_size” corresponds to either the number of cell units (i.e.,
 * “num_units”), or the second dimension of the “projection_weights”, if
 * defined.
- * * 6: recurrent_to_forget_weights.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
 * A 2-D tensor of type T, of shape [num_units, output_size].
- * * 7: recurrent_to_cell_weights.
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
 * A 2-D tensor of type T, of shape [num_units, output_size].
- * * 8: recurrent_to_output_weights.
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
 * A 2-D tensor of type T, of shape [num_units, output_size].
- * * 9: cell_to_input_weights.
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
 * A 1-D tensor of type T, of shape [num_units].
- * * 10:cell_to_forget_weights.
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
 * A 1-D tensor of type T, of shape [num_units].
- * * 11:cell_to_output_weights.
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
 * A 1-D tensor of type T, of shape [num_units].
- * * 12:input_gate_bias.
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
 * A 1-D tensor of type T, of shape [num_units].
- * * 13:forget_gate_bias.
+ * * 13:The forget gate bias (\f$b_f\f$).
* A 1-D tensor of type T, of shape [num_units]. - * * 14:cell_bias. + * * 14:The cell bias (\f$b_c\f$). * A 1-D tensor of type T, of shape [num_units]. - * * 15:output_gate_bias. + * * 15:The output gate bias (\f$b_o\f$). * A 1-D tensor of type T, of shape [num_units]. - * * 16:projection_weights. + * * 16:The projection weights (\f$W_{proj}\f$). Optional. * A 2-D tensor of type T, of shape [output_size, num_units]. - * * 17:projection_bias. + * * 17:The projection bias (\f$b_{proj}\f$). Optional. * A 1-D tensor of type T, of shape [output_size]. - * - * Parameters: - * * 18:fused_activation_function. - * An (optional) ActivationFunctionType indicating the activation - * function. - * If “NONE” is specified then it results in a linear activation. - * * 19:cell_clip. - * A clipping threshold for the cell state, such that values are bound + * * 18:The output state (in) (\f$h_{t-1}\f$). + * A 2-D tensor of type T, of shape [batch_size, output_size]. + * * 19:The cell state (in) (\f$C_{t-1}\f$). + * A 2-D tensor of type T, of shape [batch_size, num_units]. + * * 20:The activation function (\f$g\f$). + * A value indicating the activation function: + * + * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such that values are bound * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is * disabled. - * * 20:proj_clip. - * A clipping threshold for the output from the projection layer, such + * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the projection layer, such * that values are bound within [-proj_clip, proj_clip]. If set to 0.0 * then clipping is disabled. * * Outputs: - * * 0: scratch_buffer. - * A 3-D tensor of type T, of shape [batch_size, num_cell, 4]. - * * 1: output_state. + * * 0: The scratch buffer. + * A 2-D tensor of type T, of shape [batch_size, num_units * 4] with + * CIFG, or [batch_size, num_units * 3] without CIFG. + * * 1: The output state (out) (\f$h_t\f$). * A 2-D tensor of type T, of shape [batch_size, output_size]. - * * 2: cell_state. + * * 2: The cell state (out) (\f$C_t\f$). * A 2-D tensor of type T, of shape [batch_size, num_units]. - * * 3: output. + * * 3: The output (\f$o_t\f$). * A 2-D tensor of type T, of shape [batch_size, output_size]. This is - * effectively the same as the current “output_state” value. + * effectively the same as the current “output state (out)” value. */ LSTM = 16, @@ -605,36 +840,56 @@ enum OperationType : int32_t { * * The output dimensions are functions of the filter dimensions, stride, and padding. * - * The values in output Tensor is computed as: + * The values in the output tensor are computed as: + * * output[batch, row, col, channel] = * max_{i, j} (input[batch, row + i, col + j, channel]) * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: 4, with "NHWC" data layout. * - * Inputs: - * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. - * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. - * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. - * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. - * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. - * 5: An INT32 value, specifying the output stride in the ‘width’ dimension. 
- * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
- * 7: An INT32 value, specifying the filter width.
- * 8: An INT32 value, specifying the filter height.
- * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * Both explicit padding and implicit padding are supported.
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * * 5: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 6: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 7: An INT32 value, specifying the filter width.
+ * * 8: An INT32 value, specifying the filter height.
+ * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An INT32 value, specifying the stride when walking through input
+ * in the ‘width’ dimension.
+ * * 3: An INT32 value, specifying the stride when walking through input
+ * in the ‘height’ dimension.
+ * * 4: An INT32 value, specifying the filter width.
+ * * 5: An INT32 value, specifying the filter height.
+ * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
 */
 MAX_POOL_2D = 17,

 /**
- * Multiplies two tensors, elment-wise.
+ * Multiplies two tensors, element-wise.
 *
- * Takes two input tensors of identical type and compatible dimensions. The output
+ * Takes two input tensors of identical type and compatible dimensions. The output
 * is the product of both input tensors, optionally modified by an activation function.
 *
 * Two dimensions are compatible when:
@@ -644,71 +899,85 @@ enum OperationType : int32_t {
 * The size of the resulting output is the maximum size along each dimension of the
 * input operands. It starts with the trailing dimensions, and works its way forward.
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4
 *
 * Inputs:
- * 0: A tensor.
- * 1: A tensor of the same type, and compatible dimensions as input0.
- * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
- * Specifies the activation to invoke on the result of each addition.
+ * * 0: A tensor.
+ * * 1: A tensor of the same type, and compatible dimensions as input0.
+ * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each multiplication.
 *
- * Ouputs:
- * 0: The product, a tensor of the same type as input0.
+ * Outputs:
+ * * 0: The product, a tensor of the same type as input0.
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+ * condition must be satisfied: output_scale > input1_scale * input2_scale.
 */
 MUL = 18,

 /**
 * Computes rectified linear activation on the input tensor element-wise.
 *
- * In details:
+ * The output is calculated using this formula:
+ *
 * output = max(0, input)
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4.
 *
 * Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
 *
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
 */
 RELU = 19,

 /**
 * Computes rectified linear 1 activation on the input tensor element-wise.
 *
- * In details:
+ * The output is calculated using this formula:
+ *
 * output = min(1.f, max(-1.f, input))
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4.
 *
 * Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
 *
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
 */
 RELU1 = 20,

 /**
 * Computes rectified linear 6 activation on the input tensor element-wise.
 *
- * In details:
+ * The output is calculated using this formula:
+ *
 * output = min(6, max(0, input))
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4.
 *
 * Inputs:
- * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input.
 *
- * Ouputs:
- * 0: The output tensor of same shape as input0.
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
 */
 RELU6 = 21,

@@ -718,36 +987,41 @@ enum OperationType : int32_t {
 * Given tensor, this operation returns a tensor that has the same values as tensor,
 * but with a newly specified shape.
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: up to 4.
 *
 * Inputs:
- * 0: A tensor, specifying the tensor to be reshaped.
- * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
- * of the output tensor. The number of elements implied by shape must be the same
- * as the number of elements in the input tensor.
+ * * 0: A tensor, specifying the tensor to be reshaped.
+ * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
+ * of the output tensor. The number of elements implied by shape must be the same
+ * as the number of elements in the input tensor.
 *
- * Ouputs:
- * 0: The output tensor, of shape specified by the input shape.
+ * Outputs:
+ * * 0: The output tensor, of shape specified by the input shape.
 */
 RESHAPE = 22,

 /**
 * Resizes images to given size using the bilinear interpretation.
 *
- * Resized images will be distorted if their original aspect ratio is not the same as input.
+ * Resized images must be distorted if their output aspect ratio is not the same as
+ * input aspect ratio.
+ *
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
 * Supported tensor rank: 4, with "NHWC" data layout.
 *
 * Inputs:
- * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * 1: An INT32 value, specifying the output width of the output tensor.
- * 2: An INT32 value, specifying the output height of the output tensor.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * * 1: An INT32 value, specifying the output height of the output tensor.
+ * * 2: An INT32 value, specifying the output width of the output tensor.
 *
- * Ouputs:
- * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
 */
 RESIZE_BILINEAR = 23,

@@ -766,7 +1040,7 @@ enum OperationType : int32_t {
 *
 * “activation” is the function passed as the “fused_activation_function”
 * argument (if not “NONE”).
 *
- * Supported tensor types:
+ * Supported tensor types (type T):
 * * {@link OperandType::TENSOR_FLOAT32}
 *
 * Inputs:
@@ -782,21 +1056,18 @@ enum OperationType : int32_t {
 * corresponding to the weights from each unit.
 * * 3: bias.
 * A 1-D tensor of type T, of shape [num_units].
- *
- * For FLOAT32 input tensor, bias must also be FLOAT32.
- * For UINT8 input tensor, bias must be INT32.
- *
- * Parameters
- * * 4: fused_activation_function.
- * An (optional) ActivationFunctionType indicating the activation
+ * * 4: hidden state (in).
+ * A 2-D tensor of type T, of shape [batch_size, num_units].
+ * * 5: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the activation
 * function. If “NONE” is specified then it results in a linear
 * activation.
 *
- * * 5: Hidden state.
+ * Outputs:
+ * * 0: hidden state (out).
 * A 2-D tensor of type T, of shape [batch_size, num_units].
 *
- * Outputs:
- * * 0: output.
+ * * 1: output.
 * A 2-D tensor of type T, of shape [batch_size, num_units]. This is
 * effectively the same as the current state value.
 */
@@ -806,21 +1077,26 @@ enum OperationType : int32_t {
 * Computes the softmax activation on the input tensor element-wise, per batch, by
 * normalizing the input vector so the maximum coefficient is zero.
 *
- * In details:
+ * The output is calculated using this formula:
+ *
 * output[batch, i] =
 * exp((input[batch, i] - max(input[batch, :])) * beta) /
 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
- * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
 * Supported tensor rank: 2 or 4.
 *
 * Inputs:
- * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
- * 1: A FLOAT32 value, specifying the scaling factor for the exponent, beta.
+ * * 0: A 2-D or 4-D tensor, specifying the input.
+ * * 1: A FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
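The SOFTMAX formula quoted above amounts to the following minimal sketch for one batch row of float values (the function name is an illustrative assumption):

#include <algorithm>
#include <cmath>
#include <vector>

// Subtract the row maximum for numerical stability, scale by beta,
// exponentiate, and normalize so the outputs sum to 1.
std::vector<float> softmax(const std::vector<float>& input, float beta) {
    const float maxVal = *std::max_element(input.begin(), input.end());
    std::vector<float> output(input.size());
    float sum = 0.0f;
    for (size_t i = 0; i < input.size(); ++i) {
        output[i] = std::exp((input[i] - maxVal) * beta);
        sum += output[i];
    }
    for (float& v : output) v /= sum;
    return output;
}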
     *
-     * Ouputs:
-     * 0: The output tensor of same shape as input0.
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM} type,
+     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
     */
    SOFTMAX = 25,

@@ -837,18 +1113,20 @@ enum OperationType : int32_t {
     * The depth of the output tensor is input_depth * block_size * block_size.
     * The input tensor's height and width must be divisible by block_size.
     *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
     * Supported tensor rank: 4, with "NHWC" data layout.
     *
     * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
-     *    block_size must be a divisor of both the input height and width.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+     *      block_size must be a divisor of both the input height and width.
     *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
-     *    depth*block_size*block_size].
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
+     *      depth*block_size*block_size].
     */
    SPACE_TO_DEPTH = 26,

@@ -872,8 +1150,8 @@ enum OperationType : int32_t {
     *
     * Specifically, for rank 1, this layer implements the operation:
     *
-     *     memory = push(conv1d(inputs, weights_feature, feature_dim, "VALID"));
-     *     outputs = activation(memory * weights_time + bias);
+     *     memory = push(conv1d(inputs, weights_feature, feature_dim, "PADDING_VALID"));
+     *     outputs = activation(memory * weights_time + bias);
     *
     * Where:
     * * “weights_feature” is a weights matrix that processes the inputs (by
@@ -890,7 +1168,7 @@ enum OperationType : int32_t {
     *   Each rank adds a dimension to the weights matrices by means of stacking
     *   the filters.
     *
-     * Supported tensor types:
+     * Supported tensor types (type T):
     * * {@link OperandType::TENSOR_FLOAT32}
     *
     * Inputs:
@@ -905,20 +1183,17 @@ enum OperationType : int32_t {
     *      A 2-D tensor of type T, of shape [num_units, memory_size], where
     *      “memory_size” corresponds to the fixed-size of the memory.
     * * 3: bias.
-     *      A optional 1-D tensor of type T, of shape [num_units].
-     *
-     *      For FLOAT32 input tensor, bias must also be FLOAT32.
-     *      For UINT8 input tensor, bias must be INT32.
-     *
-     * Parameters:
-     * * 4: rank.
+     *      An optional 1-D tensor of type T, of shape [num_units].
+     * * 4: state (in).
+     *      A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
+     * * 5: rank.
     *      The rank of the SVD approximation.
-     * * 5: fused_activation_function.
-     *      An (optional) ActivationFunctionType indicating the activation function.
+     * * 6: fused_activation_function.
+     *      An optional {@link FusedActivationFunc} value indicating the activation function.
     *      If “NONE” is specified then it results in a linear activation.
     *
     * Outputs:
-     * * 0: state.
+     * * 0: state (out).
     *      A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
     * * 1: output.
     *      A 2-D tensor of type T, of shape [batch_size, num_units].
@@ -928,17 +1203,20 @@ enum OperationType : int32_t {
     */
    SVDF = 27,

    /**
     * Computes hyperbolic tangent of input tensor element-wise.
     *
-     * In details:
+     * The output is calculated using this formula:
+     *
     *     output = tanh(input)
     *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
     * Supported tensor rank: up to 4.
     *
     * Inputs:
-     * 0: A tensor, specifying the input.
+     * * 0: A tensor, specifying the input.
     *
-     * Ouputs:
-     * 0: The output tensor of same shape as input0.
+     * Outputs:
+     * * 0: The output tensor of same shape as input0.
     */
    TANH = 28,

@@ -965,8 +1243,8 @@ enum FusedActivationFunc : int32_t {
 */
enum OperandLifeTime : int32_t {
    /**
-     * The operand is internal to the model. It's created by an operation
-     * and consumed by other operations.
+     * The operand is internal to the model. It's created by an operation and
+     * consumed by other operations.
     */
    TEMPORARY_VARIABLE,

@@ -1110,7 +1388,7 @@ struct Operand {
    /**
     * Where to find the data for this operand.
     * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or NO_VALUE:
-     * - All the fields will be 0.
+     * - All the fields must be 0.
     * If the lifetime is CONSTANT_COPY:
     * - location.poolIndex is 0.
     * - location.offset is the offset in bytes into Model.operandValues.
@@ -1218,7 +1496,7 @@ struct RequestArgument {
    /**
     * Updated dimension information.
     *
     * If dimensions.size() > 0, dimension information was provided along with the
-     * argument. This can be the case for models that accept inputs of varying size. 
+     * argument. This can be the case for models that accept inputs of varying size.
     * This can't change the rank, just the value of the dimensions that were
     * unspecified in the model.
     */
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 54dd14aba3..e28113bcdc 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -18,7 +18,6 @@ cc_library_static {
     name: "VtsHalNeuralnetworksTest_utils",
     srcs: [
         "Callbacks.cpp",
-        "Models.cpp",
         "GeneratedTestHarness.cpp",
     ],
     defaults: ["VtsHalTargetTestDefaults"],
@@ -41,14 +40,17 @@ cc_library_static {
 cc_test {
     name: "VtsHalNeuralnetworksV1_0TargetTest",
     srcs: [
-        "VtsHalNeuralnetworksV1_0.cpp",
-        "VtsHalNeuralnetworksV1_0BasicTest.cpp",
-        "VtsHalNeuralnetworksV1_0GeneratedTest.cpp",
+        "BasicTests.cpp",
+        "GeneratedTests.cpp",
+        "ValidateModel.cpp",
+        "ValidateRequest.cpp",
+        "ValidationTests.cpp",
+        "VtsHalNeuralnetworks.cpp",
     ],
     defaults: ["VtsHalTargetTestDefaults"],
     static_libs: [
-        "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.0",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libhidlmemory",
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
new file mode 100644
index 0000000000..945c4065e5
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+// create device test
+TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
+
+// status test
+TEST_F(NeuralnetworksHidlTest, StatusTest) {
+    Return<DeviceStatus> status = device->getStatus();
+    ASSERT_TRUE(status.isOk());
+    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
+}
+
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+    Return<void> ret =
+        device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+            EXPECT_EQ(ErrorStatus::NONE, status);
+            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 18c316706e..570a4fb74a 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -17,14 +17,6 @@ namespace neuralnetworks {
 namespace V1_0 {
 namespace implementation {
 
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
 /**
  * The CallbackBase class is used internally by the NeuralNetworks runtime to
  * synchronize between different threads. An asynchronous task is launched
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 8646a4cbb0..ed1fb944e6 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -179,7 +179,7 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)>
-void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
+void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
              std::function<bool(int)> is_ignored, const std::vector<MixedTypedExampleType>& examples) {
     V1_0::Model model = create_model();
@@ -223,7 +223,7 @@ void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_
     EvaluatePreparedModel(preparedModel, is_ignored, examples);
 }
 
-void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
+void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
              std::function<bool(int)> is_ignored, const std::vector<MixedTypedExampleType>& examples) {
     V1_1::Model model = create_model();
@@ -242,8 +242,8 @@ void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
similarity index 61%
rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp
rename to neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index b99aef7fc0..2107333e26 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -16,47 +16,33 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworksV1_0.h"
+#include "VtsHalNeuralnetworks.h"
 
 #include "Callbacks.h"
 #include "TestHarness.h"
+#include "Utils.h"
 
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
-using ::android::hardware::neuralnetworks::V1_0::IDevice;
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Capabilities;
-using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
-using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using ::android::hardware::neuralnetworks::V1_0::Model;
-using ::android::hardware::neuralnetworks::V1_0::OperationType;
-using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
-
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
 
 namespace generated_tests {
 using ::generated_tests::MixedTypedExampleType;
-extern void Execute(sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>, std::function<bool(int)>,
-                    const std::vector<MixedTypedExampleType>&);
+extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>,
+                    std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
 } // namespace generated_tests
 
 namespace V1_0 {
 namespace vts {
 namespace functional {
+
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
 
 // Mixed-typed examples
 typedef generated_tests::MixedTypedExampleType MixedTypedExample;
diff --git a/neuralnetworks/1.0/vts/functional/Models.cpp b/neuralnetworks/1.0/vts/functional/Models.cpp
deleted file mode 100644
index 180286a5b7..0000000000
--- a/neuralnetworks/1.0/vts/functional/Models.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "Models.h"
-#include "Utils.h"
-
-#include
-#include
-#include
-#include
-#include
-
-using ::android::sp;
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-
-// create a valid model
-V1_1::Model createValidTestModel_1_1() {
-    const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
-    const uint32_t size = operand2Data.size() * sizeof(float);
-
-    const uint32_t operand1 = 0;
-    const uint32_t operand2 = 1;
-    const uint32_t operand3 = 2;
-    const uint32_t operand4 = 3;
-
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 1},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 1},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = size},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 1},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-    };
-
-    const std::vector<Operation> operations = {{
-        .type = OperationType::ADD, .inputs = {operand1, operand2, operand3}, .outputs = {operand4},
-    }};
-
-    const std::vector<uint32_t> inputIndexes = {operand1};
-    const std::vector<uint32_t> outputIndexes = {operand4};
-    std::vector<uint8_t> operandValues(
-        reinterpret_cast<const uint8_t*>(operand2Data.data()),
-        reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
-    int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
-    operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
-                         reinterpret_cast<const uint8_t*>(&activation[1]));
-
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-// create first invalid model
-V1_1::Model createInvalidTestModel1_1_1() {
-    Model model = createValidTestModel_1_1();
-    model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */
-    return model;
-}
-
-// create second invalid model
-V1_1::Model createInvalidTestModel2_1_1() {
-    Model model = createValidTestModel_1_1();
-    const uint32_t operand1 = 0;
-    const uint32_t operand5 = 4;  // INVALID OPERAND
-    model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
-    return model;
-}
-
-V1_0::Model createValidTestModel_1_0() {
-    V1_1::Model model = createValidTestModel_1_1();
-    return nn::convertToV1_0(model);
-}
-
-V1_0::Model createInvalidTestModel1_1_0() {
-    V1_1::Model model = createInvalidTestModel1_1_1();
-    return nn::convertToV1_0(model);
-}
-
-V1_0::Model createInvalidTestModel2_1_0() {
-    V1_1::Model model = createInvalidTestModel2_1_1();
-    return nn::convertToV1_0(model);
-}
-
-// create a valid request
-Request createValidTestRequest() {
-    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
-    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
-
-    // prepare inputs
-    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
-    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
-    std::vector<RequestArgument> inputs = {{
-        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
-    }};
-    std::vector<RequestArgument> outputs = {{
-        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
-    }};
-    std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                      nn::allocateSharedMemory(outputSize)};
-    if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
-        return {};
-    }
-
-    // load data
-    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
-    if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
-        return {};
-    }
-    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
-    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
-    if (inputPtr == nullptr || outputPtr == nullptr) {
-        return {};
-    }
-    inputMemory->update();
-    outputMemory->update();
-    std::copy(inputData.begin(), inputData.end(), inputPtr);
-    std::copy(outputData.begin(), outputData.end(), outputPtr);
-    inputMemory->commit();
-    outputMemory->commit();
-
-    return {.inputs = inputs, .outputs = outputs, .pools = pools};
-}
-
-// create first invalid request
-Request createInvalidTestRequest1() {
-    Request request = createValidTestRequest();
-    const uint32_t INVALID = 2;
-    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
-    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
-    request.inputs[0].location = {
-        .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
-    return request;
-}
-
-// create second invalid request
-Request createInvalidTestRequest2() {
-    Request request = createValidTestRequest();
-    request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
-    return request;
-}
-
-} // namespace neuralnetworks
-} // namespace hardware
-} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h
index 93982351f4..a1fbe9278b 100644
--- a/neuralnetworks/1.0/vts/functional/Models.h
+++ b/neuralnetworks/1.0/vts/functional/Models.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,29 +14,187 @@
  * limitations under the License.
  */
 
+#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
+#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
+
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include
+#include "TestHarness.h"
+
+#include
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
 
-// create V1_1 model
-V1_1::Model createValidTestModel_1_1();
-V1_1::Model createInvalidTestModel1_1_1();
-V1_1::Model createInvalidTestModel2_1_1();
+using MixedTypedExample = generated_tests::MixedTypedExampleType;
 
-// create V1_0 model
-V1_0::Model createValidTestModel_1_0();
-V1_0::Model createInvalidTestModel1_1_0();
-V1_0::Model createInvalidTestModel2_1_0();
+#define FOR_EACH_TEST_MODEL(FN) \
+    FN(add_broadcast_quant8) \
+    FN(add) \
+    FN(add_quant8) \
+    FN(avg_pool_float_1) \
+    FN(avg_pool_float_2) \
+    FN(avg_pool_float_3) \
+    FN(avg_pool_float_4) \
+    FN(avg_pool_float_5) \
+    FN(avg_pool_quant8_1) \
+    FN(avg_pool_quant8_2) \
+    FN(avg_pool_quant8_3) \
+    FN(avg_pool_quant8_4) \
+    FN(avg_pool_quant8_5) \
+    FN(concat_float_1) \
+    FN(concat_float_2) \
+    FN(concat_float_3) \
+    FN(concat_quant8_1) \
+    FN(concat_quant8_2) \
+    FN(concat_quant8_3) \
+    FN(conv_1_h3_w2_SAME) \
+    FN(conv_1_h3_w2_VALID) \
+    FN(conv_3_h3_w2_SAME) \
+    FN(conv_3_h3_w2_VALID) \
+    FN(conv_float_2) \
+    FN(conv_float_channels) \
+    FN(conv_float_channels_weights_as_inputs) \
+    FN(conv_float_large) \
+    FN(conv_float_large_weights_as_inputs) \
+    FN(conv_float) \
+    FN(conv_float_weights_as_inputs) \
+    FN(conv_quant8_2) \
+    FN(conv_quant8_channels) \
+    FN(conv_quant8_channels_weights_as_inputs) \
+    FN(conv_quant8_large) \
+    FN(conv_quant8_large_weights_as_inputs) \
+    FN(conv_quant8) \
+    FN(conv_quant8_overflow) \
+    FN(conv_quant8_overflow_weights_as_inputs) \
+    FN(conv_quant8_weights_as_inputs) \
+    FN(depth_to_space_float_1) \
+    FN(depth_to_space_float_2) \
+    FN(depth_to_space_float_3) \
+    FN(depth_to_space_quant8_1) \
+    FN(depth_to_space_quant8_2) \
+    FN(depthwise_conv2d_float_2) \
+    FN(depthwise_conv2d_float_large_2) \
+    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
+    FN(depthwise_conv2d_float_large) \
+    FN(depthwise_conv2d_float_large_weights_as_inputs) \
+    FN(depthwise_conv2d_float) \
+    FN(depthwise_conv2d_float_weights_as_inputs) \
+    FN(depthwise_conv2d_quant8_2) \
+    FN(depthwise_conv2d_quant8_large) \
+    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
+    FN(depthwise_conv2d_quant8) \
+    FN(depthwise_conv2d_quant8_weights_as_inputs) \
+    FN(depthwise_conv) \
+    FN(dequantize) \
+    FN(embedding_lookup) \
+    FN(floor) \
+    FN(fully_connected_float_2) \
+    FN(fully_connected_float_large) \
+    FN(fully_connected_float_large_weights_as_inputs) \
+    FN(fully_connected_float) \
+    FN(fully_connected_float_weights_as_inputs) \
+    FN(fully_connected_quant8_2) \
+    FN(fully_connected_quant8_large) \
+    FN(fully_connected_quant8_large_weights_as_inputs) \
+    FN(fully_connected_quant8) \
+    FN(fully_connected_quant8_weights_as_inputs) \
+    FN(hashtable_lookup_float) \
+    FN(hashtable_lookup_quant8) \
+    FN(l2_normalization_2) \
+    FN(l2_normalization_large) \
+    FN(l2_normalization) \
+    FN(l2_pool_float_2) \
+    FN(l2_pool_float_large) \
+    FN(l2_pool_float) \
+    FN(local_response_norm_float_1) \
+    FN(local_response_norm_float_2) \
+    FN(local_response_norm_float_3) \
+    FN(local_response_norm_float_4) \
+    FN(logistic_float_1) \
+    FN(logistic_float_2) \
+    FN(logistic_quant8_1) \
+    FN(logistic_quant8_2) \
+    FN(lsh_projection_2) \
+    FN(lsh_projection) \
+    FN(lsh_projection_weights_as_inputs) \
+    FN(lstm2) \
+    FN(lstm2_state2) \
+    FN(lstm2_state) \
+    FN(lstm3) \
+    FN(lstm3_state2) \
+    FN(lstm3_state3) \
+    FN(lstm3_state) \
+    FN(lstm) \
+    FN(lstm_state2) \
+    FN(lstm_state) \
+    FN(max_pool_float_1) \
+    FN(max_pool_float_2) \
+    FN(max_pool_float_3) \
+    FN(max_pool_float_4) \
+    FN(max_pool_quant8_1) \
+    FN(max_pool_quant8_2) \
+    FN(max_pool_quant8_3) \
+    FN(max_pool_quant8_4) \
+    FN(mobilenet_224_gender_basic_fixed) \
+    FN(mobilenet_quantized) \
+    FN(mul_broadcast_quant8) \
+    FN(mul) \
+    FN(mul_quant8) \
+    FN(mul_relu) \
+    FN(relu1_float_1) \
+    FN(relu1_float_2) \
+    FN(relu1_quant8_1) \
+    FN(relu1_quant8_2) \
+    FN(relu6_float_1) \
+    FN(relu6_float_2) \
+    FN(relu6_quant8_1) \
+    FN(relu6_quant8_2) \
+    FN(relu_float_1) \
+    FN(relu_float_2) \
+    FN(relu_quant8_1) \
+    FN(relu_quant8_2) \
+    FN(reshape) \
+    FN(reshape_quant8) \
+    FN(reshape_quant8_weights_as_inputs) \
+    FN(reshape_weights_as_inputs) \
+    FN(resize_bilinear_2) \
+    FN(resize_bilinear) \
+    FN(rnn) \
+    FN(rnn_state) \
+    FN(softmax_float_1) \
+    FN(softmax_float_2) \
+    FN(softmax_quant8_1) \
+    FN(softmax_quant8_2) \
+    FN(space_to_depth_float_1) \
+    FN(space_to_depth_float_2) \
+    FN(space_to_depth_float_3) \
+    FN(space_to_depth_quant8_1) \
+    FN(space_to_depth_quant8_2) \
+    FN(svdf2) \
+    FN(svdf) \
+    FN(svdf_state) \
+    FN(tanh)
 
-// create the request
-V1_0::Request createValidTestRequest();
-V1_0::Request createInvalidTestRequest1();
-V1_0::Request createInvalidTestRequest2();
+#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
+    namespace function { \
+    extern std::vector<MixedTypedExample> examples; \
+    Model createTestModel(); \
+    }
+
+FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
+
+#undef FORWARD_DECLARE_GENERATED_OBJECTS
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
 } // namespace neuralnetworks
 } // namespace hardware
 } // namespace android
+
+#endif // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
new file mode 100644
index 0000000000..4f0697e931
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
+                                           const V1_0::Model& model) {
+    SCOPED_TRACE(message + " [getSupportedOperations]");
+
+    Return<void> ret =
+        device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
+                                 const V1_0::Model& model) {
+    SCOPED_TRACE(message + " [prepareModel]");
+
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
+// Primary validation function. This function will take a valid model, apply a
+// mutation to it to invalidate the model, then pass it to interface calls that
+// use the model. Note that the model here is passed by value, and any mutation
+// to the model does not leave this function.
+static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model,
+                     const std::function<void(Model*)>& mutation) {
+    mutation(&model);
+    validateGetSupportedOperations(device, message, model);
+    validatePrepareModel(device, message, model);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
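+// For example, removing index 1 from {10, 20, 30} first rotates it to
+// {10, 30, 20}, then resizes it down to {10, 30}.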
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+static uint32_t addOperand(Model* model) {
+    return hidl_vec_push_back(&model->operands,
+                              {
+                                  .type = OperandType::INT32,
+                                  .dimensions = {},
+                                  .numberOfConsumers = 0,
+                                  .scale = 0.0f,
+                                  .zeroPoint = 0,
+                                  .lifetime = OperandLifeTime::MODEL_INPUT,
+                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                              });
+}
+
+static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
+    uint32_t index = addOperand(model);
+    model->operands[index].numberOfConsumers = 1;
+    model->operands[index].lifetime = lifetime;
+    return index;
+}
+
+///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
+
+static const int32_t invalidOperandTypes[] = {
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
+};
+
+static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        for (int32_t invalidOperandType : invalidOperandTypes) {
+            const std::string message = "mutateOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to value " +
+                                        std::to_string(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE OPERAND RANK /////////////////////////
+
+static uint32_t getInvalidRank(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            return 1;
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0;
+        default:
+            return 0;
+    }
+}
+
+static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
+                                    " has rank of " + std::to_string(invalidRank);
+        validate(device, message, model, [operand, invalidRank](Model* model) {
+            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
+
+static float getInvalidScale(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+            return 1.0f;
+        case OperandType::TENSOR_INT32:
+            return -1.0f;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0.0f;
+        default:
+            return 0.0f;
+    }
+}
+
+static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const float invalidScale = getInvalidScale(model.operands[operand].type);
+        const std::string message = "mutateOperandScaleTest: operand " +
+                                    std::to_string(operand) + " has scale of " +
+                                    std::to_string(invalidScale);
+        validate(device, message, model, [operand, invalidScale](Model* model) {
+            model->operands[operand].scale = invalidScale;
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
+
+static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+            return {1};
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return {-1, 256};
+        default:
+            return {};
+    }
+}
+
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::vector<int32_t> invalidZeroPoints =
+            getInvalidZeroPoints(model.operands[operand].type);
+        for (int32_t invalidZeroPoint : invalidZeroPoints) {
+            const std::string message = "mutateOperandZeroPointTest: operand " +
+                                        std::to_string(operand) + " has zero point of " +
+                                        std::to_string(invalidZeroPoint);
+            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
+                model->operands[operand].zeroPoint = invalidZeroPoint;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE EXTRA ??? /////////////////////////
+
+// TODO: Operand::lifetime
+// TODO: Operand::location
+
+///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
+
+static void mutateOperand(Operand* operand, OperandType type) {
+    Operand newOperand = *operand;
+    newOperand.type = type;
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            newOperand.dimensions = hidl_vec<uint32_t>();
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_FLOAT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_INT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
+            break;
+        case OperandType::OEM:
+        case OperandType::TENSOR_OEM_BYTE:
+        default:
+            break;
+    }
+    *operand = newOperand;
+}
+
+static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) {
+    // LSH_PROJECTION's second argument is allowed to have any type. This is the
+    // only operation that currently has a type that can be anything independent
+    // from any other type. Changing the operand type to any other type will
+    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
+    // test.
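+    // (For example, mutating that operand from TENSOR_FLOAT32 to TENSOR_INT32
+    // still forms a valid model, so no INVALID_ARGUMENT could be expected.)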
+    for (const Operation& operation : model.operations) {
+        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        if (mutateOperationOperandTypeSkip(operand, model)) {
+            continue;
+        }
+        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
+            // Do not test OEM types
+            if (invalidOperandType == model.operands[operand].type ||
+                invalidOperandType == OperandType::OEM ||
+                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
+                continue;
+            }
+            const std::string message = "mutateOperationOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to type " +
+                                        toString(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                mutateOperand(&model->operands[operand], invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
+
+static const int32_t invalidOperationTypes[] = {
+    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
+    static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
+    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
+    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+};
+
+static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (int32_t invalidOperationType : invalidOperationTypes) {
+            const std::string message = "mutateOperationTypeTest: operation " +
+                                        std::to_string(operation) + " set to value " +
+                                        std::to_string(invalidOperationType);
+            validate(device, message, model, [operation, invalidOperationType](Model* model) {
+                model->operations[operation].type =
+                    static_cast<OperationType>(invalidOperationType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
+                                                 const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
+                model->operations[operation].inputs[input] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
+                                                  const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
+                model->operations[operation].outputs[output] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
+
+static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
+    if (vec) {
+        // remove elements matching "value"
+        auto last = std::remove(vec->begin(), vec->end(), value);
+        vec->resize(std::distance(vec->begin(), last));
+
+        // decrement elements exceeding "value"
+        std::transform(vec->begin(), vec->end(), vec->begin(),
+                       [value](uint32_t v) { return v > value ? v - 1 : v; });
+    }
+}
+
+static void removeOperand(Model* model, uint32_t index) {
+    hidl_vec_removeAt(&model->operands, index);
+    for (Operation& operation : model->operations) {
+        removeValueAndDecrementGreaterValues(&operation.inputs, index);
+        removeValueAndDecrementGreaterValues(&operation.outputs, index);
+    }
+    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+}
+
+static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
+        validate(device, message, model,
+                 [operand](Model* model) { removeOperand(model, operand); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION /////////////////////////
+
+static void removeOperation(Model* model, uint32_t index) {
+    for (uint32_t operand : model->operations[index].inputs) {
+        model->operands[operand].numberOfConsumers--;
+    }
+    hidl_vec_removeAt(&model->operations, index);
+}
+
+static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
+        validate(device, message, model,
+                 [operation](Model* model) { removeOperation(model, operation); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION INPUT /////////////////////////
+
+static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const V1_0::Operation& op = model.operations[operation];
+            // CONCATENATION has at least 2 inputs, with the last element being
+            // INT32. Skip this test if removing one of CONCATENATION's
+            // inputs still produces a valid model.
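+            // (For example, with inputs {t0, t1, t2, axis}, dropping t1 still
+            // leaves a well-formed two-tensor concatenation that a driver may
+            // legitimately accept.)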
+            if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 &&
+                input != op.inputs.size() - 1) {
+                continue;
+            }
+            const std::string message = "removeOperationInputTest: operation " +
+                                        std::to_string(operation) + ", input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input](Model* model) {
+                uint32_t operand = model->operations[operation].inputs[input];
+                model->operands[operand].numberOfConsumers--;
+                hidl_vec_removeAt(&model->operations[operation].inputs, input);
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
+
+static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "removeOperationOutputTest: operation " +
+                                        std::to_string(operation) + ", output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output](Model* model) {
+                hidl_vec_removeAt(&model->operations[operation].outputs, output);
+            });
+        }
+    }
+}
+
+///////////////////////// MODEL VALIDATION /////////////////////////
+
+// TODO: remove model input
+// TODO: remove model output
+// TODO: add unused operation
+
+///////////////////////// ADD OPERATION INPUT /////////////////////////
+
+static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
+            hidl_vec_push_back(&model->operations[operation].inputs, index);
+            hidl_vec_push_back(&model->inputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// ADD OPERATION OUTPUT /////////////////////////
+
+static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message =
+            "addOperationOutputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
+            hidl_vec_push_back(&model->operations[operation].outputs, index);
+            hidl_vec_push_back(&model->outputIndexes, index);
+        });
+    }
+}
+
+////////////////////////// ENTRY POINT //////////////////////////////
+
+void ValidationTest::validateModel(const V1_0::Model& model) {
+    mutateOperandTypeTest(device, model);
+    mutateOperandRankTest(device, model);
+    mutateOperandScaleTest(device, model);
+    mutateOperandZeroPointTest(device, model);
+    mutateOperationOperandTypeTest(device, model);
+    mutateOperationTypeTest(device, model);
+    mutateOperationInputOperandIndexTest(device, model);
+    mutateOperationOutputOperandIndexTest(device, model);
+    removeOperandTest(device, model);
+    removeOperationTest(device, model);
+    removeOperationInputTest(device, model);
+    removeOperationOutputTest(device, model);
+    addOperationInputTest(device, model);
+    addOperationOutputTest(device, model);
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
new file mode 100644
index 0000000000..08f2613c99
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
+using generated_tests::MixedTyped;
+using generated_tests::MixedTypedExampleType;
+using generated_tests::for_all;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            ASSERT_NE(0ul, supported.size());
+            fullySupportsModel =
+                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+        });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations call returns a list of operations that are
+    // guaranteed not to fail if prepareModel is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ] Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
+// Primary validation function. This function will take a valid request, apply a
+// mutation to it to invalidate the request, then pass it to interface calls
+// that use the request. Note that the request here is passed by value, and any
+// mutation to the request does not leave this function.
+static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
+                     Request request, const std::function<void(Request*)>& mutation) {
+    mutation(&request);
+    SCOPED_TRACE(message + " [execute]");
+
+    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+    ASSERT_NE(nullptr, executionCallback.get());
+    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+    ASSERT_TRUE(executeLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+    executionCallback->wait();
+    ErrorStatus executionReturnStatus = executionCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t input = 0; input < request.inputs.size(); ++input) {
+        const std::string message = "removeInput: removed input " + std::to_string(input);
+        validate(preparedModel, message, request,
+                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+    }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t output = 0; output < request.outputs.size(); ++output) {
+        const std::string message = "removeOutput: removed Output " + std::to_string(output);
+        validate(preparedModel, message, request,
+                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+    }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    std::vector<Request> requests;
+
+    for (auto& example : examples) {
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& outputs = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // This function only partially specifies the metadata (vector of RequestArguments).
+        // The contents are copied over below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            RequestArgument arg_empty = {
+                .hasNoValue = true,
+            };
+            inputs_info[index] = s ? arg : arg_empty;
+            inputSize += s;
+        });
+        // Compute offset for inputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                if (!i.hasNoValue) i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Compute offset for outputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+                                          nn::allocateSharedMemory(outputSize)};
+        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
+            return {};
+        }
+
+        // map pool
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        if (inputMemory == nullptr) {
+            return {};
+        }
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        if (inputPtr == nullptr) {
+            return {};
+        }
+
+        // initialize pool
+        inputMemory->update();
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+        inputMemory->commit();
+
+        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
+    }
+
+    return requests;
+}
+
+void ValidationTest::validateRequests(const V1_0::Model& model,
+                                      const std::vector<Request>& requests) {
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    // validate each request
+    for (const Request& request : requests) {
+        removeInputTest(preparedModel, request);
+        removeOutputTest(preparedModel, request);
+    }
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/ValidationTests.cpp b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
new file mode 100644
index 0000000000..98fc1c59f4
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "Models.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+// forward declarations
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName) \
+    TEST_F(ValidationTest, TestName) { \
+        const Model model = TestName::createTestModel(); \
+        const std::vector<Request> requests = createRequests(TestName::examples); \
+        validateModel(model); \
+        validateRequests(model, requests); \
+    }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
similarity index 64%
rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp
rename to neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index b14fb2c4c8..1ff3b66808 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -16,15 +16,7 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworksV1_0.h"
-#include "Utils.h"
-
-#include
-
-using ::android::hardware::hidl_memory;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
+#include "VtsHalNeuralnetworks.h"
 
 namespace android {
 namespace hardware {
@@ -33,11 +25,6 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-// allocator helper
-hidl_memory allocateSharedMemory(int64_t size) {
-    return nn::allocateSharedMemory(size);
-}
-
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -51,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
 }
 
 void NeuralnetworksHidlEnvironment::registerTestServices() {
-    registerTestService<IDevice>();
+    registerTestService<V1_0::IDevice>();
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
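+// (SetUp fetches a fresh IDevice handle from the service manager for every
+// test, and TearDown drops it, so individual tests stay independent.)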
+NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
+
 NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
 
 void NeuralnetworksHidlTest::SetUp() {
-    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+    ::testing::VtsHalHidlTargetTestBase::SetUp();
+    device = ::testing::VtsHalHidlTargetTestBase::getService<V1_0::IDevice>(
         NeuralnetworksHidlEnvironment::getInstance());
     ASSERT_NE(nullptr, device.get());
 }
 
-void NeuralnetworksHidlTest::TearDown() {}
+void NeuralnetworksHidlTest::TearDown() {
+    device = nullptr;
+    ::testing::VtsHalHidlTargetTestBase::TearDown();
+}
 
 } // namespace functional
 } // namespace vts
+
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+    return os << toString(errorStatus);
+}
+
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
+    return os << toString(deviceStatus);
+}
+
 } // namespace V1_0
 } // namespace neuralnetworks
 } // namespace hardware
 } // namespace android
+
+using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
+
+int main(int argc, char** argv) {
+    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
+    ::testing::InitGoogleTest(&argc, argv);
+    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
+
+    int status = RUN_ALL_TESTS();
+    return status;
+}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
similarity index 60%
rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h
rename to neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index fbb1607478..e79129b09f 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -18,16 +18,15 @@
 #define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
 
 #include
-#include
-#include
-#include
 #include
-#include
 #include
 #include
+
+#include
 #include
-#include
+#include
+#include
 
 namespace android {
 namespace hardware {
@@ -36,47 +35,47 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-hidl_memory allocateSharedMemory(int64_t size);
-
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment();
-    NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
+    ~NeuralnetworksHidlEnvironment() override;
 
 public:
-    ~NeuralnetworksHidlEnvironment() override;
     static NeuralnetworksHidlEnvironment* getInstance();
     void registerTestServices() override;
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
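+// (The ValidationTest and GeneratedTest subclasses below share this fixture,
+// so both suites run against the same registered service instance.)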
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { + DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest); + public: + NeuralnetworksHidlTest(); ~NeuralnetworksHidlTest() override; void SetUp() override; void TearDown() override; - sp device; + protected: + sp device; }; + +// Tag for the validation tests +class ValidationTest : public NeuralnetworksHidlTest { + protected: + void validateModel(const Model& model); + void validateRequests(const Model& model, const std::vector& request); +}; + +// Tag for the generated tests +class GeneratedTest : public NeuralnetworksHidlTest {}; + } // namespace functional } // namespace vts // pretty-print values for error messages - -template -::std::basic_ostream& operator<<(::std::basic_ostream& os, - V1_0::ErrorStatus errorStatus) { - return os << toString(errorStatus); -} - -template -::std::basic_ostream& operator<<(::std::basic_ostream& os, - V1_0::DeviceStatus deviceStatus) { - return os << toString(deviceStatus); -} +::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus); +::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus); } // namespace V1_0 } // namespace neuralnetworks diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp deleted file mode 100644 index 59e5b80612..0000000000 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#define LOG_TAG "neuralnetworks_hidl_hal_test" - -#include "VtsHalNeuralnetworksV1_0.h" - -#include "Callbacks.h" -#include "Models.h" -#include "TestHarness.h" - -#include -#include -#include - -using ::android::hardware::neuralnetworks::V1_0::IDevice; -using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; -using ::android::hardware::neuralnetworks::V1_0::Capabilities; -using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; -using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; -using ::android::hardware::neuralnetworks::V1_0::Model; -using ::android::hardware::neuralnetworks::V1_0::OperationType; -using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo; -using ::android::hardware::Return; -using ::android::hardware::Void; -using ::android::hardware::hidl_memory; -using ::android::hardware::hidl_string; -using ::android::hardware::hidl_vec; -using ::android::hidl::allocator::V1_0::IAllocator; -using ::android::hidl::memory::V1_0::IMemory; -using ::android::sp; - -namespace android { -namespace hardware { -namespace neuralnetworks { -namespace V1_0 { -namespace vts { -namespace functional { -using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; -using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; - -static void doPrepareModelShortcut(const sp& device, sp* preparedModel) { - ASSERT_NE(nullptr, preparedModel); - Model model = createValidTestModel_1_0(); - - // see if service can handle model - bool fullySupportsModel = false; - Return supportedOpsLaunchStatus = device->getSupportedOperations( - model, [&fullySupportsModel](ErrorStatus status, const hidl_vec& supported) { - ASSERT_EQ(ErrorStatus::NONE, status); - ASSERT_NE(0ul, supported.size()); - fullySupportsModel = - std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); - }); - ASSERT_TRUE(supportedOpsLaunchStatus.isOk()); - - // launch prepare model - sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); - Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchStatus.isOk()); - ASSERT_EQ(ErrorStatus::NONE, static_cast(prepareLaunchStatus)); - - // retrieve prepared model - preparedModelCallback->wait(); - ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); - *preparedModel = preparedModelCallback->getPreparedModel(); - - // The getSupportedOperations call returns a list of operations that are - // guaranteed not to fail if prepareModel is called, and - // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed. - // If a driver has any doubt that it can prepare an operation, it must - // return false. So here, if a driver isn't sure if it can support an - // operation, but reports that it successfully prepared the model, the test - // can continue. - if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) { - ASSERT_EQ(nullptr, preparedModel->get()); - LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " - "prepare model that it does not support."; - std::cout << "[ ] Early termination of test because vendor service cannot " - "prepare model that it does not support." 
- << std::endl; - return; - } - ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus); - ASSERT_NE(nullptr, preparedModel->get()); -} - -// create device test -TEST_F(NeuralnetworksHidlTest, CreateDevice) {} - -// status test -TEST_F(NeuralnetworksHidlTest, StatusTest) { - Return status = device->getStatus(); - ASSERT_TRUE(status.isOk()); - EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast(status)); -} - -// initialization -TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { - Return ret = - device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_LT(0.0f, capabilities.float32Performance.execTime); - EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); - EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); - EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); - }); - EXPECT_TRUE(ret.isOk()); -} - -// supported operations positive test -TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) { - Model model = createValidTestModel_1_0(); - Return ret = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_EQ(model.operations.size(), supported.size()); - }); - EXPECT_TRUE(ret.isOk()); -} - -// supported operations negative test 1 -TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) { - Model model = createInvalidTestModel1_1_0(); - Return ret = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); - (void)supported; - }); - EXPECT_TRUE(ret.isOk()); -} - -// supported operations negative test 2 -TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) { - Model model = createInvalidTestModel2_1_0(); - Return ret = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); - (void)supported; - }); - EXPECT_TRUE(ret.isOk()); -} - -// prepare simple model positive test -TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) { - sp preparedModel; - doPrepareModelShortcut(device, &preparedModel); -} - -// prepare simple model negative test 1 -TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) { - Model model = createInvalidTestModel1_1_0(); - sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); - Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); - - preparedModelCallback->wait(); - ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); - sp preparedModel = preparedModelCallback->getPreparedModel(); - EXPECT_EQ(nullptr, preparedModel.get()); -} - -// prepare simple model negative test 2 -TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) { - Model model = createInvalidTestModel2_1_0(); - sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); - Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); - - preparedModelCallback->wait(); - ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); 
- EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); - sp preparedModel = preparedModelCallback->getPreparedModel(); - EXPECT_EQ(nullptr, preparedModel.get()); -} - -// execute simple graph positive test -TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) { - std::vector outputData = {-1.0f, -1.0f, -1.0f, -1.0f}; - std::vector expectedData = {6.0f, 8.0f, 10.0f, 12.0f}; - const uint32_t OUTPUT = 1; - - sp preparedModel; - ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); - if (preparedModel == nullptr) { - return; - } - Request request = createValidTestRequest(); - - auto postWork = [&] { - sp outputMemory = mapMemory(request.pools[OUTPUT]); - if (outputMemory == nullptr) { - return false; - } - float* outputPtr = reinterpret_cast(static_cast(outputMemory->getPointer())); - if (outputPtr == nullptr) { - return false; - } - outputMemory->read(); - std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin()); - outputMemory->commit(); - return true; - }; - - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - executionCallback->on_finish(postWork); - Return executeLaunchStatus = preparedModel->execute(request, executionCallback); - ASSERT_TRUE(executeLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::NONE, static_cast(executeLaunchStatus)); - - executionCallback->wait(); - ErrorStatus executionReturnStatus = executionCallback->getStatus(); - EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus); - EXPECT_EQ(expectedData, outputData); -} - -// execute simple graph negative test 1 -TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) { - sp preparedModel; - ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); - if (preparedModel == nullptr) { - return; - } - Request request = createInvalidTestRequest1(); - - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - Return executeLaunchStatus = preparedModel->execute(request, executionCallback); - ASSERT_TRUE(executeLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(executeLaunchStatus)); - - executionCallback->wait(); - ErrorStatus executionReturnStatus = executionCallback->getStatus(); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); -} - -// execute simple graph negative test 2 -TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) { - sp preparedModel; - ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); - if (preparedModel == nullptr) { - return; - } - Request request = createInvalidTestRequest2(); - - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - Return executeLaunchStatus = preparedModel->execute(request, executionCallback); - ASSERT_TRUE(executeLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(executeLaunchStatus)); - - executionCallback->wait(); - ErrorStatus executionReturnStatus = executionCallback->getStatus(); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); -} - -} // namespace functional -} // namespace vts -} // namespace V1_0 -} // namespace neuralnetworks -} // namespace hardware -} // namespace android - -using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment; - -int main(int argc, char** argv) { - ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); - ::testing::InitGoogleTest(&argc, argv); - 
NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); - - int status = RUN_ALL_TESTS(); - return status; -} diff --git a/neuralnetworks/1.1/IDevice.hal b/neuralnetworks/1.1/IDevice.hal index d2c48433bb..1335bde193 100644 --- a/neuralnetworks/1.1/IDevice.hal +++ b/neuralnetworks/1.1/IDevice.hal @@ -102,6 +102,8 @@ interface IDevice extends @1.0::IDevice { * Multiple threads can call prepareModel on the same model concurrently. * * @param model The model to be prepared for execution. + * @param preference Indicates the intended execution behavior of a prepared + * model. * @param callback A callback object used to return the error status of * preparing the model for execution and the prepared model * if successful, nullptr otherwise. The callback object's @@ -115,6 +117,7 @@ interface IDevice extends @1.0::IDevice { * - INVALID_ARGUMENT if one of the input arguments is * invalid */ - prepareModel_1_1(Model model, IPreparedModelCallback callback) + prepareModel_1_1(Model model, ExecutionPreference preference, + IPreparedModelCallback callback) generates (ErrorStatus status); }; diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal index 1d470d636f..8290fbbb06 100644 --- a/neuralnetworks/1.1/types.hal +++ b/neuralnetworks/1.1/types.hal @@ -27,25 +27,24 @@ import @1.0::PerformanceInfo; */ enum OperationType : @1.0::OperationType { /** - * BatchToSpace for N-D tensors. + * BatchToSpace for N-dimensional tensors. * - * This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape + * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape * block_shape + [batch], interleaves these blocks back into the grid defined by the * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input. - * The spatial dimensions of this intermediate result are then optionally cropped - * according to the amount to crop to produce the output. + * * This is the reverse of SpaceToBatch. * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} - * Supported tensor rank: up to 4 + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * + * Supported tensor rank: 4 * * Inputs: - * 0: An n-D tensor, specifying the input. + * 0: An n-D tensor, specifying the tensor to be reshaped * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the * input tensor. All values must be >= 1. - * 2: A 1-D Tensor of type TENSOR_INT32, the amount to crop for each spatial diemension of the - * input tensor. All values must be >= 0. * * Outputs: * 0: A tensor of the same type as input0. @@ -53,9 +52,9 @@ enum OperationType : @1.0::OperationType { BATCH_TO_SPACE_ND = 29, /** - * Divides the second tensor from the first tensor, element-wise. + * Element-wise division of two tensors. * - * Takes two input tensors of identical OperandType and compatible dimensions. The output + * Takes two input tensors of identical type and compatible dimensions. The output * is the result of dividing the first input tensor by the second, optionally * modified by an activation function. 
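The compatibility rule above is the usual trailing-dimension broadcast. A self-contained C++ sketch (broadcastShape is a hypothetical helper for illustration, not part of the HAL) that reproduces the worked example shown next:

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // Computes the broadcast output shape for the element-wise ops
    // (ADD/DIV/SUB), or nullopt if the shapes are incompatible.
    static std::optional<std::vector<uint32_t>> broadcastShape(
            std::vector<uint32_t> a, std::vector<uint32_t> b) {
        if (a.size() < b.size()) std::swap(a, b);  // make "a" the longer shape
        std::vector<uint32_t> out = a;
        // Start with the trailing dimensions and work forward.
        for (size_t i = 0; i < b.size(); ++i) {
            uint32_t& x = out[out.size() - 1 - i];
            const uint32_t y = b[b.size() - 1 - i];
            if (x == y || y == 1) continue;   // equal, or y broadcasts
            if (x == 1) { x = y; continue; }  // x broadcasts
            return std::nullopt;              // incompatible dimensions
        }
        return out;
    }
    // broadcastShape({4, 1, 2}, {5, 4, 3, 1}) yields {5, 4, 3, 2}.
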
 *
@@ -71,7 +70,9 @@ enum OperationType : @1.0::OperationType {
 *          input2.dimension = {5, 4, 3, 1}
 *          output.dimension = {5, 4, 3, 2}
 *
- * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
 * Supported tensor rank: up to 4
 *
 * Inputs:
@@ -88,15 +89,17 @@ enum OperationType : @1.0::OperationType {
    /**
     * Computes the mean of elements across dimensions of a tensor.
     *
-    * Reduces input tensor along the dimensions given in axis. Unless keep_dims is true,
-    * the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is
-    * true, the reduced dimensions are retained with length 1.
+    * Reduces the input tensor along the given dimensions. Unless keep_dims is true,
+    * the rank of the tensor is reduced by 1 for each entry in axis.
+    * If keep_dims is true, the reduced dimensions are retained with length 1.
    *
-    * If axis has no entries, all dimensions are reduced, and a tensor with a single
-    * element is returned.
+    * If the dimensions to reduce have no entries, all dimensions are reduced, and a tensor
+    * with a single element is returned.
+    *
+    * Supported tensor types:
+    * * {@link OperandType::TENSOR_FLOAT32}
+    * * {@link OperandType::TENSOR_QUANT8_ASYMM}
    *
-    * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-    *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
    * Supported tensor rank: up to 4
    *
    * Inputs:
@@ -115,14 +118,18 @@ enum OperationType : @1.0::OperationType {
    *
    * This operation pads a tensor according to the specified paddings.
    *
-    * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-    *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+    * Supported tensor types:
+    * * {@link OperandType::TENSOR_FLOAT32}
+    * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+    *
    * Supported tensor rank: up to 4
    *
    * Inputs:
-    *      0: An n-D tensor, specifying the input.
-    *      1: A 2-D Tensor of type TENSOR_INT32. The paddings, before and after for each spatial dimension
-    *         of the input tensor.
+    *      0: An n-D tensor, specifying the tensor to be padded.
+    *      1: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
+    *         input tensor. The shape of the tensor must be {rank(input0), 2}.
+    *         padding[i, 0] specifies the number of elements to be padded in the front of dimension i.
+    *         padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
    *
    * Outputs:
    *      0: A tensor of the same type as input0.
@@ -130,7 +137,7 @@ enum OperationType : @1.0::OperationType {
    PAD = 32,

    /**
-    * SpaceToBatch for N-D tensors.
+    * SpaceToBatch for N-dimensional tensors.
    *
    * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
    * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
@@ -139,16 +146,20 @@ enum OperationType : @1.0::OperationType {
    * batch position. Prior to division into blocks, the spatial dimensions of the input are
    * optionally zero padded according to paddings.
    *
-    * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-    *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
-    * Supported tensor rank: up to 4
+    * Supported tensor types:
+    * * {@link OperandType::TENSOR_FLOAT32}
+    * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+    *
+    * Supported tensor rank: 4
    *
    * Inputs:
    *      0: An n-D tensor, specifying the input.
    *      1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
    *         input tensor. All values must be >= 1.
 *      2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
-    *         input tensor. All values must be >= 0.
+    *         input tensor. All values must be >= 0. The shape of the tensor must be {rank(input0), 2}.
+    *         padding[i, 0] specifies the number of elements to be padded in the front of dimension i.
+    *         padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
    *
    * Outputs:
    *      0: A tensor of the same type as input0.
@@ -160,17 +171,20 @@ enum OperationType : @1.0::OperationType {
    *
    * Given a tensor input, this operation returns a tensor of the same type with all
    * dimensions of size 1 removed. If you don't want to remove all size 1 dimensions,
-    * you can remove specific size 1 dimensions by specifying axis.
+    * you can remove specific size 1 dimensions by specifying the axes (input1).
+    *
+    * Supported tensor types:
+    * * {@link OperandType::TENSOR_FLOAT32}
+    * * {@link OperandType::TENSOR_QUANT8_ASYMM}
    *
-    * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-    *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
    * Supported tensor rank: up to 4
    *
    * Inputs:
-    *      0: An n-D tensor, specifying the input.
-    *      1: An 1-D Tensor of type TENSOR_INT32. The dimensions to squeeze. If None (the default),
-    *         squeezes all dimensions. If specified, only squeezes the dimensions listed. The dimension
-    *         index starts at 0. It is an error to squeeze a dimension that is not 1.
+    *      0: An n-D tensor, the tensor to be squeezed.
+    *      1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified,
+    *         only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
+    *         The dimension index starts at 0. An error must be reported if squeezing a dimension
+    *         that is not 1.
    *
    * Outputs:
    *      0: A tensor of the same type as input0. Contains the same data as input, but has one or more
@@ -181,23 +195,25 @@ enum OperationType : @1.0::OperationType {
    /**
     * Extracts a strided slice of a tensor.
     *
-    * This op extracts a slice of size (end-begin)/stride from the given input tensor.
-    * Starting at the location specified by begin the slice continues by adding
+    * Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given
+    * input tensor. Starting at the location specified by begin, the slice continues by adding
     * stride to the index until all dimensions are not less than end. Note that a stride can
     * be negative, which causes a reverse slice.
    *
-    * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-    *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+    * Supported tensor types:
+    * * {@link OperandType::TENSOR_FLOAT32}
+    * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+    *
    * Supported tensor rank: up to 4
    *
    * Inputs:
-    *      0: An n-D tensor, specifying the input.
+    *      0: An n-D tensor, specifying the tensor to be sliced.
    *      1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input
-    *         tensor to be sliced.
+    *         tensor to be sliced. The length must be rank(input0).
    *      2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input
-    *         tensor to be sliced.
+    *         tensor to be sliced. The length must be rank(input0).
    *      3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
-    *         tensor to be sliced.
+    *         tensor to be sliced. The length must be rank(input0).
    *
    * Outputs:
    *      0: A tensor of the same type as input0.
@@ -205,7 +221,7 @@ enum OperationType : @1.0::OperationType {
    STRIDED_SLICE = 35,

    /**
-    * Subtracts the second tensor from the first tensor, element-wise.
+ * Element-wise subtraction of two tensors. * * Takes two input tensors of identical type and compatible dimensions. The output * is the result of subtracting the second input tensor from the first one, optionally @@ -223,7 +239,9 @@ enum OperationType : @1.0::OperationType { * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * Supported tensor rank: up to 4 * * Inputs: @@ -240,18 +258,20 @@ enum OperationType : @1.0::OperationType { /** * Transposes the input tensor, permuting the dimensions according to the perm tensor. * - * The returned tensor's dimension i must correspond to the input dimension perm[i]. + * The returned tensor's dimension i corresponds to the input dimension perm[i]. * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor. * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. * - * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} - * {@link OperandType::TENSOR_QUANT8_ASYMM} + * Supported tensor types: + * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} + * * Supported tensor rank: up to 4 * * Inputs: - * 0: An n-D tensor, specifying the input. - * 1: A 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the input - * tensor. + * 0: An n-D tensor, specifying the tensor to be transposed. + * 1: An optional 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the + * input tensor. * * Outputs: * 0: A tensor of the same type as input0. @@ -362,3 +382,24 @@ struct Model { */ bool relaxComputationFloat32toFloat16; }; + +/** + * Execution preferences. + */ +enum ExecutionPreference : int32_t { + /** + * Prefer executing in a way that minimizes battery drain. + * This is desirable for compilations that will be executed often. + */ + LOW_POWER = 0, + /** + * Prefer returning a single answer as fast as possible, even if this causes + * more power consumption. + */ + FAST_SINGLE_ANSWER = 1, + /** + * Prefer maximizing the throughput of successive frames, for example when + * processing successive frames coming from the camera. + */ + SUSTAINED_SPEED = 2, +}; diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp index 623b44103a..f755c20be5 100644 --- a/neuralnetworks/1.1/vts/functional/Android.bp +++ b/neuralnetworks/1.1/vts/functional/Android.bp @@ -17,9 +17,12 @@ cc_test { name: "VtsHalNeuralnetworksV1_1TargetTest", srcs: [ - "VtsHalNeuralnetworksV1_1.cpp", - "VtsHalNeuralnetworksV1_1BasicTest.cpp", - "VtsHalNeuralnetworksV1_1GeneratedTest.cpp", + "BasicTests.cpp", + "GeneratedTests.cpp", + "ValidateModel.cpp", + "ValidateRequest.cpp", + "ValidationTests.cpp", + "VtsHalNeuralnetworks.cpp", ], defaults: ["VtsHalTargetTestDefaults"], static_libs: [ @@ -36,4 +39,13 @@ cc_test { "libneuralnetworks_generated_test_harness_headers", "libneuralnetworks_generated_tests", ], + // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal + // error in ld.gold. 
+ arch: { + arm: { + sanitize: { + never: true, + }, + }, + }, } diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp new file mode 100644 index 0000000000..ed59a2dd8c --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { + +// create device test +TEST_F(NeuralnetworksHidlTest, CreateDevice) {} + +// status test +TEST_F(NeuralnetworksHidlTest, StatusTest) { + Return status = device->getStatus(); + ASSERT_TRUE(status.isOk()); + EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast(status)); +} + +// initialization +TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { + Return ret = + device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_LT(0.0f, capabilities.float32Performance.execTime); + EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); + EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); + EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); + EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime); + EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage); + }); + EXPECT_TRUE(ret.isOk()); +} + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp similarity index 53% rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp rename to neuralnetworks/1.1/vts/functional/GeneratedTests.cpp index 025d9feda3..1f1cc7af9d 100644 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp +++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp @@ -16,54 +16,33 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" -#include "VtsHalNeuralnetworksV1_1.h" +#include "VtsHalNeuralnetworks.h" #include "Callbacks.h" #include "TestHarness.h" +#include "Utils.h" #include -#include -#include #include #include -using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; -using ::android::hardware::neuralnetworks::V1_0::Capabilities; -using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; -using ::android::hardware::neuralnetworks::V1_0::ErrorStatus; -using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; -using ::android::hardware::neuralnetworks::V1_0::Operand; -using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; -using ::android::hardware::neuralnetworks::V1_0::OperandType; -using 
::android::hardware::neuralnetworks::V1_0::Request; -using ::android::hardware::neuralnetworks::V1_1::IDevice; -using ::android::hardware::neuralnetworks::V1_1::Model; -using ::android::hardware::neuralnetworks::V1_1::Operation; -using ::android::hardware::neuralnetworks::V1_1::OperationType; -using ::android::hardware::Return; -using ::android::hardware::Void; -using ::android::hardware::hidl_memory; -using ::android::hardware::hidl_string; -using ::android::hardware::hidl_vec; -using ::android::hidl::allocator::V1_0::IAllocator; -using ::android::hidl::memory::V1_0::IMemory; -using ::android::sp; - namespace android { namespace hardware { namespace neuralnetworks { namespace generated_tests { using ::generated_tests::MixedTypedExampleType; -extern void Execute(sp&, std::function, std::function, - const std::vector&); +extern void Execute(const sp&, std::function, + std::function, const std::vector&); } // namespace generated_tests namespace V1_1 { namespace vts { namespace functional { + using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; +using ::android::nn::allocateSharedMemory; // Mixed-typed examples typedef generated_tests::MixedTypedExampleType MixedTypedExample; diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h new file mode 100644 index 0000000000..c3cadb5fe2 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/Models.h @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H +#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "TestHarness.h" + +#include +#include + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { + +using MixedTypedExample = generated_tests::MixedTypedExampleType; + +#define FOR_EACH_TEST_MODEL(FN) \ + FN(add) \ + FN(add_broadcast_quant8) \ + FN(add_quant8) \ + FN(add_relaxed) \ + FN(avg_pool_float_1) \ + FN(avg_pool_float_1_relaxed) \ + FN(avg_pool_float_2) \ + FN(avg_pool_float_2_relaxed) \ + FN(avg_pool_float_3) \ + FN(avg_pool_float_3_relaxed) \ + FN(avg_pool_float_4) \ + FN(avg_pool_float_4_relaxed) \ + FN(avg_pool_float_5) \ + FN(avg_pool_quant8_1) \ + FN(avg_pool_quant8_2) \ + FN(avg_pool_quant8_3) \ + FN(avg_pool_quant8_4) \ + FN(avg_pool_quant8_5) \ + FN(batch_to_space) \ + FN(batch_to_space_float_1) \ + FN(batch_to_space_quant8_1) \ + FN(concat_float_1) \ + FN(concat_float_1_relaxed) \ + FN(concat_float_2) \ + FN(concat_float_2_relaxed) \ + FN(concat_float_3) \ + FN(concat_float_3_relaxed) \ + FN(concat_quant8_1) \ + FN(concat_quant8_2) \ + FN(concat_quant8_3) \ + FN(conv_1_h3_w2_SAME) \ + FN(conv_1_h3_w2_SAME_relaxed) \ + FN(conv_1_h3_w2_VALID) \ + FN(conv_1_h3_w2_VALID_relaxed) \ + FN(conv_3_h3_w2_SAME) \ + FN(conv_3_h3_w2_SAME_relaxed) \ + FN(conv_3_h3_w2_VALID) \ + FN(conv_3_h3_w2_VALID_relaxed) \ + FN(conv_float) \ + FN(conv_float_2) \ + FN(conv_float_channels) \ + FN(conv_float_channels_relaxed) \ + FN(conv_float_channels_weights_as_inputs) \ + FN(conv_float_channels_weights_as_inputs_relaxed) \ + FN(conv_float_large) \ + FN(conv_float_large_relaxed) \ + FN(conv_float_large_weights_as_inputs) \ + FN(conv_float_large_weights_as_inputs_relaxed) \ + FN(conv_float_relaxed) \ + FN(conv_float_weights_as_inputs) \ + FN(conv_float_weights_as_inputs_relaxed) \ + FN(conv_quant8) \ + FN(conv_quant8_2) \ + FN(conv_quant8_channels) \ + FN(conv_quant8_channels_weights_as_inputs) \ + FN(conv_quant8_large) \ + FN(conv_quant8_large_weights_as_inputs) \ + FN(conv_quant8_overflow) \ + FN(conv_quant8_overflow_weights_as_inputs) \ + FN(conv_quant8_weights_as_inputs) \ + FN(depth_to_space_float_1) \ + FN(depth_to_space_float_1_relaxed) \ + FN(depth_to_space_float_2) \ + FN(depth_to_space_float_2_relaxed) \ + FN(depth_to_space_float_3) \ + FN(depth_to_space_float_3_relaxed) \ + FN(depth_to_space_quant8_1) \ + FN(depth_to_space_quant8_2) \ + FN(depthwise_conv) \ + FN(depthwise_conv2d_float) \ + FN(depthwise_conv2d_float_2) \ + FN(depthwise_conv2d_float_large) \ + FN(depthwise_conv2d_float_large_2) \ + FN(depthwise_conv2d_float_large_2_weights_as_inputs) \ + FN(depthwise_conv2d_float_large_relaxed) \ + FN(depthwise_conv2d_float_large_weights_as_inputs) \ + FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \ + FN(depthwise_conv2d_float_weights_as_inputs) \ + FN(depthwise_conv2d_quant8) \ + FN(depthwise_conv2d_quant8_2) \ + FN(depthwise_conv2d_quant8_large) \ + FN(depthwise_conv2d_quant8_large_weights_as_inputs) \ + FN(depthwise_conv2d_quant8_weights_as_inputs) \ + FN(depthwise_conv_relaxed) \ + FN(dequantize) \ + FN(div) \ + FN(embedding_lookup) \ + FN(embedding_lookup_relaxed) \ + FN(floor) \ + FN(floor_relaxed) \ + FN(fully_connected_float) \ + FN(fully_connected_float_2) \ + FN(fully_connected_float_large) \ + FN(fully_connected_float_large_weights_as_inputs) \ + FN(fully_connected_float_relaxed) \ + 
FN(fully_connected_float_weights_as_inputs) \ + FN(fully_connected_float_weights_as_inputs_relaxed) \ + FN(fully_connected_quant8) \ + FN(fully_connected_quant8_2) \ + FN(fully_connected_quant8_large) \ + FN(fully_connected_quant8_large_weights_as_inputs) \ + FN(fully_connected_quant8_weights_as_inputs) \ + FN(hashtable_lookup_float) \ + FN(hashtable_lookup_float_relaxed) \ + FN(hashtable_lookup_quant8) \ + FN(l2_normalization) \ + FN(l2_normalization_2) \ + FN(l2_normalization_large) \ + FN(l2_normalization_large_relaxed) \ + FN(l2_normalization_relaxed) \ + FN(l2_pool_float) \ + FN(l2_pool_float_2) \ + FN(l2_pool_float_large) \ + FN(l2_pool_float_relaxed) \ + FN(local_response_norm_float_1) \ + FN(local_response_norm_float_1_relaxed) \ + FN(local_response_norm_float_2) \ + FN(local_response_norm_float_2_relaxed) \ + FN(local_response_norm_float_3) \ + FN(local_response_norm_float_3_relaxed) \ + FN(local_response_norm_float_4) \ + FN(local_response_norm_float_4_relaxed) \ + FN(logistic_float_1) \ + FN(logistic_float_1_relaxed) \ + FN(logistic_float_2) \ + FN(logistic_float_2_relaxed) \ + FN(logistic_quant8_1) \ + FN(logistic_quant8_2) \ + FN(lsh_projection) \ + FN(lsh_projection_2) \ + FN(lsh_projection_2_relaxed) \ + FN(lsh_projection_relaxed) \ + FN(lsh_projection_weights_as_inputs) \ + FN(lsh_projection_weights_as_inputs_relaxed) \ + FN(lstm) \ + FN(lstm2) \ + FN(lstm2_relaxed) \ + FN(lstm2_state) \ + FN(lstm2_state2) \ + FN(lstm2_state2_relaxed) \ + FN(lstm2_state_relaxed) \ + FN(lstm3) \ + FN(lstm3_relaxed) \ + FN(lstm3_state) \ + FN(lstm3_state2) \ + FN(lstm3_state2_relaxed) \ + FN(lstm3_state3) \ + FN(lstm3_state3_relaxed) \ + FN(lstm3_state_relaxed) \ + FN(lstm_relaxed) \ + FN(lstm_state) \ + FN(lstm_state2) \ + FN(lstm_state2_relaxed) \ + FN(lstm_state_relaxed) \ + FN(max_pool_float_1) \ + FN(max_pool_float_1_relaxed) \ + FN(max_pool_float_2) \ + FN(max_pool_float_2_relaxed) \ + FN(max_pool_float_3) \ + FN(max_pool_float_3_relaxed) \ + FN(max_pool_float_4) \ + FN(max_pool_quant8_1) \ + FN(max_pool_quant8_2) \ + FN(max_pool_quant8_3) \ + FN(max_pool_quant8_4) \ + FN(mean) \ + FN(mean_float_1) \ + FN(mean_float_2) \ + FN(mean_quant8_1) \ + FN(mean_quant8_2) \ + FN(mobilenet_224_gender_basic_fixed) \ + FN(mobilenet_224_gender_basic_fixed_relaxed) \ + FN(mobilenet_quantized) \ + FN(mul) \ + FN(mul_broadcast_quant8) \ + FN(mul_quant8) \ + FN(mul_relaxed) \ + FN(mul_relu) \ + FN(mul_relu_relaxed) \ + FN(pad) \ + FN(pad_float_1) \ + FN(relu1_float_1) \ + FN(relu1_float_1_relaxed) \ + FN(relu1_float_2) \ + FN(relu1_float_2_relaxed) \ + FN(relu1_quant8_1) \ + FN(relu1_quant8_2) \ + FN(relu6_float_1) \ + FN(relu6_float_1_relaxed) \ + FN(relu6_float_2) \ + FN(relu6_float_2_relaxed) \ + FN(relu6_quant8_1) \ + FN(relu6_quant8_2) \ + FN(relu_float_1) \ + FN(relu_float_1_relaxed) \ + FN(relu_float_2) \ + FN(relu_quant8_1) \ + FN(relu_quant8_2) \ + FN(reshape) \ + FN(reshape_quant8) \ + FN(reshape_quant8_weights_as_inputs) \ + FN(reshape_relaxed) \ + FN(reshape_weights_as_inputs) \ + FN(reshape_weights_as_inputs_relaxed) \ + FN(resize_bilinear) \ + FN(resize_bilinear_2) \ + FN(resize_bilinear_relaxed) \ + FN(rnn) \ + FN(rnn_relaxed) \ + FN(rnn_state) \ + FN(rnn_state_relaxed) \ + FN(softmax_float_1) \ + FN(softmax_float_1_relaxed) \ + FN(softmax_float_2) \ + FN(softmax_float_2_relaxed) \ + FN(softmax_quant8_1) \ + FN(softmax_quant8_2) \ + FN(space_to_batch) \ + FN(space_to_batch_float_1) \ + FN(space_to_batch_float_2) \ + FN(space_to_batch_float_3) \ + FN(space_to_batch_quant8_1) \ + 
FN(space_to_batch_quant8_2) \ + FN(space_to_batch_quant8_3) \ + FN(space_to_depth_float_1) \ + FN(space_to_depth_float_1_relaxed) \ + FN(space_to_depth_float_2) \ + FN(space_to_depth_float_2_relaxed) \ + FN(space_to_depth_float_3) \ + FN(space_to_depth_float_3_relaxed) \ + FN(space_to_depth_quant8_1) \ + FN(space_to_depth_quant8_2) \ + FN(squeeze) \ + FN(squeeze_float_1) \ + FN(squeeze_quant8_1) \ + FN(strided_slice) \ + FN(strided_slice_float_1) \ + FN(strided_slice_float_10) \ + FN(strided_slice_float_2) \ + FN(strided_slice_float_3) \ + FN(strided_slice_float_4) \ + FN(strided_slice_float_5) \ + FN(strided_slice_float_6) \ + FN(strided_slice_float_7) \ + FN(strided_slice_float_8) \ + FN(strided_slice_float_9) \ + FN(strided_slice_qaunt8_10) \ + FN(strided_slice_quant8_1) \ + FN(strided_slice_quant8_2) \ + FN(strided_slice_quant8_3) \ + FN(strided_slice_quant8_4) \ + FN(strided_slice_quant8_5) \ + FN(strided_slice_quant8_6) \ + FN(strided_slice_quant8_7) \ + FN(strided_slice_quant8_8) \ + FN(strided_slice_quant8_9) \ + FN(sub) \ + FN(svdf) \ + FN(svdf2) \ + FN(svdf2_relaxed) \ + FN(svdf_relaxed) \ + FN(svdf_state) \ + FN(svdf_state_relaxed) \ + FN(tanh) \ + FN(tanh_relaxed) \ + FN(transpose) \ + FN(transpose_float_1) \ + FN(transpose_quant8_1) + +#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \ + namespace function { \ + extern std::vector examples; \ + Model createTestModel(); \ + } + +FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS) + +#undef FORWARD_DECLARE_GENERATED_OBJECTS + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +#endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp new file mode 100644 index 0000000000..3aa55f8c7d --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp @@ -0,0 +1,539 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+
+using V1_0::IPreparedModel;
+using V1_0::Operand;
+using V1_0::OperandLifeTime;
+using V1_0::OperandType;
+
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
+                                           const V1_1::Model& model) {
+    SCOPED_TRACE(message + " [getSupportedOperations_1_1]");
+
+    Return<void> ret =
+        device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
+                                 const V1_1::Model& model, ExecutionPreference preference) {
+    SCOPED_TRACE(message + " [prepareModel_1_1]");
+
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus =
+        device->prepareModel_1_1(model, preference, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
+static bool validExecutionPreference(ExecutionPreference preference) {
+    return preference == ExecutionPreference::LOW_POWER ||
+           preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+           preference == ExecutionPreference::SUSTAINED_SPEED;
+}
+
+// Primary validation function. This function will take a valid model, apply a
+// mutation to it to invalidate the model, then pass it to interface calls that
+// use the model. Note that the model here is passed by value, and any mutation
+// to the model does not leave this function.
+static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model,
+                     const std::function<void(Model*)>& mutation,
+                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
+    mutation(&model);
+    if (validExecutionPreference(preference)) {
+        validateGetSupportedOperations(device, message, model);
+    }
+    validatePrepareModel(device, message, model, preference);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
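For illustration, the same idiom on a plain std::vector (vec_removeAt is a hypothetical stand-in, not part of the suite); the hidl_vec helpers below do exactly this:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Rotate the element to be removed to the back, then shrink by one;
    // useful when the container (like hidl_vec) offers no erase().
    static void vec_removeAt(std::vector<uint32_t>* vec, uint32_t index) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
    // {10, 20, 30} with vec_removeAt(&v, 1) becomes {10, 30}.
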
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+static uint32_t addOperand(Model* model) {
+    return hidl_vec_push_back(&model->operands,
+                              {
+                                  .type = OperandType::INT32,
+                                  .dimensions = {},
+                                  .numberOfConsumers = 0,
+                                  .scale = 0.0f,
+                                  .zeroPoint = 0,
+                                  .lifetime = OperandLifeTime::MODEL_INPUT,
+                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                              });
+}
+
+static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
+    uint32_t index = addOperand(model);
+    model->operands[index].numberOfConsumers = 1;
+    model->operands[index].lifetime = lifetime;
+    return index;
+}
+
+///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
+
+static const int32_t invalidOperandTypes[] = {
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
+};
+
+static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        for (int32_t invalidOperandType : invalidOperandTypes) {
+            const std::string message = "mutateOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to value " +
+                                        std::to_string(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE OPERAND RANK /////////////////////////
+
+static uint32_t getInvalidRank(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            return 1;
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0;
+        default:
+            return 0;
+    }
+}
+
+static void mutateOperandRankTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
+                                    " has rank of " + std::to_string(invalidRank);
+        validate(device, message, model, [operand, invalidRank](Model* model) {
+            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
+
+static float getInvalidScale(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+            return 1.0f;
+        case OperandType::TENSOR_INT32:
+            return -1.0f;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0.0f;
+        default:
+            return 0.0f;
+    }
+}
+
+static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const float invalidScale = getInvalidScale(model.operands[operand].type);
+        const std::string message = "mutateOperandScaleTest: operand " +
std::to_string(operand) +
+                                    " has scale of " + std::to_string(invalidScale);
+        validate(device, message, model, [operand, invalidScale](Model* model) {
+            model->operands[operand].scale = invalidScale;
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
+
+static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+            return {1};
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return {-1, 256};
+        default:
+            return {};
+    }
+}
+
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::vector<int32_t> invalidZeroPoints =
+            getInvalidZeroPoints(model.operands[operand].type);
+        for (int32_t invalidZeroPoint : invalidZeroPoints) {
+            const std::string message = "mutateOperandZeroPointTest: operand " +
+                                        std::to_string(operand) + " has zero point of " +
+                                        std::to_string(invalidZeroPoint);
+            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
+                model->operands[operand].zeroPoint = invalidZeroPoint;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE EXTRA ??? /////////////////////////
+
+// TODO: Operand::lifetime
+// TODO: Operand::location
+
+///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
+
+static void mutateOperand(Operand* operand, OperandType type) {
+    Operand newOperand = *operand;
+    newOperand.type = type;
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            newOperand.dimensions = hidl_vec<uint32_t>();
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_FLOAT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_INT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
+            break;
+        case OperandType::OEM:
+        case OperandType::TENSOR_OEM_BYTE:
+        default:
+            break;
+    }
+    *operand = newOperand;
+}
+
+static bool mutateOperationOperandTypeSkip(size_t operand, const V1_1::Model& model) {
+    // LSH_PROJECTION's second argument is allowed to have any type. This is the
+    // only operation that currently has a type that can be anything independent
+    // from any other type. Changing the operand type to any other type will
+    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
+    // test.
+    for (const Operation& operation : model.operations) {
+        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        if (mutateOperationOperandTypeSkip(operand, model)) {
+            continue;
+        }
+        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
+            // Do not test OEM types
+            if (invalidOperandType == model.operands[operand].type ||
+                invalidOperandType == OperandType::OEM ||
+                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
+                continue;
+            }
+            const std::string message = "mutateOperationOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to type " +
+                                        toString(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                mutateOperand(&model->operands[operand], invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
+
+static const int32_t invalidOperationTypes[] = {
+    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
+    static_cast<int32_t>(OperationType::TRANSPOSE) + 1,      // upper bound fundamental
+    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
+    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+};
+
+static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (int32_t invalidOperationType : invalidOperationTypes) {
+            const std::string message = "mutateOperationTypeTest: operation " +
+                                        std::to_string(operation) + " set to value " +
+                                        std::to_string(invalidOperationType);
+            validate(device, message, model, [operation, invalidOperationType](Model* model) {
+                model->operations[operation].type =
+                    static_cast<OperationType>(invalidOperationType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
+                                                 const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
+                model->operations[operation].inputs[input] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
+                                                  const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
+                model->operations[operation].outputs[output] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERAND FROM EVERYTHING
/////////////////////////
+
+static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
+    if (vec) {
+        // remove elements matching "value"
+        auto last = std::remove(vec->begin(), vec->end(), value);
+        vec->resize(std::distance(vec->begin(), last));
+
+        // decrement elements exceeding "value"
+        std::transform(vec->begin(), vec->end(), vec->begin(),
+                       [value](uint32_t v) { return v > value ? v - 1 : v; });
+    }
+}
+
+static void removeOperand(Model* model, uint32_t index) {
+    hidl_vec_removeAt(&model->operands, index);
+    for (Operation& operation : model->operations) {
+        removeValueAndDecrementGreaterValues(&operation.inputs, index);
+        removeValueAndDecrementGreaterValues(&operation.outputs, index);
+    }
+    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+}
+
+static void removeOperandTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
+        validate(device, message, model,
+                 [operand](Model* model) { removeOperand(model, operand); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION /////////////////////////
+
+static void removeOperation(Model* model, uint32_t index) {
+    for (uint32_t operand : model->operations[index].inputs) {
+        model->operands[operand].numberOfConsumers--;
+    }
+    hidl_vec_removeAt(&model->operations, index);
+}
+
+static void removeOperationTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
+        validate(device, message, model,
+                 [operation](Model* model) { removeOperation(model, operation); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION INPUT /////////////////////////
+
+static void removeOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const V1_1::Operation& op = model.operations[operation];
+            // CONCATENATION has at least 2 inputs, with the last element being
+            // INT32. Skip this test if removing one of CONCATENATION's
+            // inputs still produces a valid model.
+            if (op.type == V1_1::OperationType::CONCATENATION && op.inputs.size() > 2 &&
+                input != op.inputs.size() - 1) {
+                continue;
+            }
+            const std::string message = "removeOperationInputTest: operation " +
+                                        std::to_string(operation) + ", input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input](Model* model) {
+                uint32_t operand = model->operations[operation].inputs[input];
+                model->operands[operand].numberOfConsumers--;
+                hidl_vec_removeAt(&model->operations[operation].inputs, input);
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
+
+static void removeOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "removeOperationOutputTest: operation " +
+                                        std::to_string(operation) + ", output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output](Model* model) {
+                hidl_vec_removeAt(&model->operations[operation].outputs, output);
+            });
+        }
+    }
+}
+
+///////////////////////// MODEL VALIDATION /////////////////////////
+
+// TODO: remove model input
+// TODO: remove model output
+// TODO: add unused operation
+
+///////////////////////// ADD OPERATION INPUT /////////////////////////
+
+static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
+            hidl_vec_push_back(&model->operations[operation].inputs, index);
+            hidl_vec_push_back(&model->inputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// ADD OPERATION OUTPUT /////////////////////////
+
+static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message =
+            "addOperationOutputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
+            hidl_vec_push_back(&model->operations[operation].outputs, index);
+            hidl_vec_push_back(&model->outputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
+
+static const int32_t invalidExecutionPreferences[] = {
+    static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
+    static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
+};
+
+static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (int32_t preference : invalidExecutionPreferences) {
+        const std::string message =
+            "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+        validate(device, message, model, [](Model*) {},
+                 static_cast<ExecutionPreference>(preference));
+    }
+}
+
+////////////////////////// ENTRY POINT //////////////////////////////
+
+void ValidationTest::validateModel(const V1_1::Model& model) {
+    mutateOperandTypeTest(device, model);
+    mutateOperandRankTest(device, model);
+    mutateOperandScaleTest(device, model);
+    mutateOperandZeroPointTest(device, model);
+    mutateOperationOperandTypeTest(device, model);
+    mutateOperationTypeTest(device,
model); + mutateOperationInputOperandIndexTest(device, model); + mutateOperationOutputOperandIndexTest(device, model); + removeOperandTest(device, model); + removeOperationTest(device, model); + removeOperationInputTest(device, model); + removeOperationOutputTest(device, model); + addOperationInputTest(device, model); + addOperationOutputTest(device, model); + mutateExecutionPreferenceTest(device, model); +} + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp new file mode 100644 index 0000000000..b42f561c7f --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +#include "Callbacks.h" +#include "TestHarness.h" +#include "Utils.h" + +#include +#include +#include + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { + +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; +using ::android::hidl::memory::V1_0::IMemory; +using generated_tests::MixedTyped; +using generated_tests::MixedTypedExampleType; +using generated_tests::for_all; + +///////////////////////// UTILITY FUNCTIONS ///////////////////////// + +static void createPreparedModel(const sp& device, const V1_1::Model& model, + sp* preparedModel) { + ASSERT_NE(nullptr, preparedModel); + + // see if service can handle model + bool fullySupportsModel = false; + Return supportedOpsLaunchStatus = device->getSupportedOperations_1_1( + model, [&fullySupportsModel](ErrorStatus status, const hidl_vec& supported) { + ASSERT_EQ(ErrorStatus::NONE, status); + ASSERT_NE(0ul, supported.size()); + fullySupportsModel = + std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedOpsLaunchStatus.isOk()); + + // launch prepare model + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return prepareLaunchStatus = device->prepareModel_1_1( + model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(ErrorStatus::NONE, static_cast(prepareLaunchStatus)); + + // retrieve prepared model + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + *preparedModel = preparedModelCallback->getPreparedModel(); + + // The getSupportedOperations_1_1 call returns a list of operations that are + // guaranteed not to fail if prepareModel_1_1 is called, and + // 'fullySupportsModel' is true i.f.f. 
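The remove-and-renumber step above is the subtle part of removeOperand: after an operand is deleted, every stored index greater than the removed one must shift down by one so the remaining references stay valid. A minimal, self-contained sketch of that behavior on a plain std::vector (the helper name and values are illustrative only, not part of the patch):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Mirrors removeValueAndDecrementGreaterValues, with std::vector standing in
// for hidl_vec.
static void removeValueAndRenumber(std::vector<uint32_t>* vec, uint32_t value) {
    // drop every element equal to "value"
    vec->erase(std::remove(vec->begin(), vec->end(), value), vec->end());
    // close the gap: indices above the removed one shift down by one
    std::transform(vec->begin(), vec->end(), vec->begin(),
                   [value](uint32_t v) { return v > value ? v - 1 : v; });
}

int main() {
    // indices {0, 2, 3} referencing a model whose operand 2 is removed
    std::vector<uint32_t> indexes = {0, 2, 3};
    removeValueAndRenumber(&indexes, 2);
    assert((indexes == std::vector<uint32_t>{0, 2}));  // old index 3 became 2
    return 0;
}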
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
new file mode 100644
index 0000000000..b42f561c7f
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
+using generated_tests::MixedTyped;
+using generated_tests::MixedTypedExampleType;
+using generated_tests::for_all;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
+                                sp<V1_0::IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            ASSERT_NE(0ul, supported.size());
+            fullySupportsModel =
+                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+        });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations_1_1 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_1 is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is
+    // guaranteed. If a driver has any doubt that it can prepare an operation,
+    // it must return false. So here, if a driver isn't sure if it can support
+    // an operation, but reports that it successfully prepared the model, the
+    // test can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
+// Primary validation function. This function will take a valid request, apply a
+// mutation to it to invalidate the request, then pass it to interface calls
+// that use the request. Note that the request here is passed by value, and any
+// mutation to the request does not leave this function.
+static void validate(const sp<V1_0::IPreparedModel>& preparedModel, const std::string& message,
+                     Request request, const std::function<void(Request*)>& mutation) {
+    mutation(&request);
+    SCOPED_TRACE(message + " [execute]");
+
+    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+    ASSERT_NE(nullptr, executionCallback.get());
+    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+    ASSERT_TRUE(executeLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+    executionCallback->wait();
+    ErrorStatus executionReturnStatus = executionCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<V1_0::IPreparedModel>& preparedModel,
+                            const Request& request) {
+    for (size_t input = 0; input < request.inputs.size(); ++input) {
+        const std::string message = "removeInput: removed input " + std::to_string(input);
+        validate(preparedModel, message, request,
+                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+    }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<V1_0::IPreparedModel>& preparedModel,
+                             const Request& request) {
+    for (size_t output = 0; output < request.outputs.size(); ++output) {
+        const std::string message = "removeOutput: removed output " + std::to_string(output);
+        validate(preparedModel, message, request,
+                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+    }
+}
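The rotate-then-shrink idiom used by hidl_vec_removeAt above is worth a standalone look, since hidl_vec exposes no erase(). A minimal sketch on std::vector, assuming only the standard library (the helper name is hypothetical):

#include <algorithm>
#include <cassert>
#include <vector>

template <typename T>
static void removeAt(std::vector<T>* vec, uint32_t index) {
    // shift [index + 1, end) left by one, parking the removed element last...
    std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
    // ...then drop it by shrinking the container by one
    vec->resize(vec->size() - 1);
}

int main() {
    std::vector<int> v = {10, 20, 30, 40};
    removeAt(&v, 1);
    assert((v == std::vector<int>{10, 30, 40}));
    return 0;
}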
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    std::vector<Request> requests;
+
+    for (auto& example : examples) {
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& outputs = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // This function only partially specifies the metadata (vector of RequestArguments).
+        // The contents are copied over below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            RequestArgument arg_empty = {
+                .hasNoValue = true,
+            };
+            inputs_info[index] = s ? arg : arg_empty;
+            inputSize += s;
+        });
+        // Compute offset for inputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                if (!i.hasNoValue) i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Compute offset for outputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+                                          nn::allocateSharedMemory(outputSize)};
+        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
+            return {};
+        }
+
+        // map pool
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        if (inputMemory == nullptr) {
+            return {};
+        }
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        if (inputPtr == nullptr) {
+            return {};
+        }
+
+        // initialize pool
+        inputMemory->update();
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+        inputMemory->commit();
+
+        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
+    }
+
+    return requests;
+}
+
+void ValidationTest::validateRequests(const V1_1::Model& model,
+                                      const std::vector<Request>& requests) {
+    // create IPreparedModel
+    sp<V1_0::IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    // validate each request
+    for (const Request& request : requests) {
+        removeInputTest(preparedModel, request);
+        removeOutputTest(preparedModel, request);
+    }
+}
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_1
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
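The two offset loops in createRequests pack every argument back to back into a single shared-memory pool, skipping the offset assignment for omitted arguments while still advancing by their (zero) length. A hedged sketch of that arithmetic with a stand-in struct (Arg is not a real HAL type):

#include <cassert>
#include <cstddef>
#include <vector>

struct Arg {
    bool hasNoValue;
    std::size_t offset;
    std::size_t length;
};

int main() {
    // an 8-byte input, an omitted argument, and a 4-byte input
    std::vector<Arg> args = {{false, 0, 8}, {true, 0, 0}, {false, 0, 4}};
    std::size_t offset = 0;
    for (auto& a : args) {
        if (!a.hasNoValue) a.offset = offset;  // omitted args get no slot assigned
        offset += a.length;                    // but still advance by their length (0 here)
    }
    assert(args[0].offset == 0 && args[2].offset == 8);  // packed contiguously
    return 0;
}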
diff --git a/neuralnetworks/1.1/vts/functional/ValidationTests.cpp b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
new file mode 100644
index 0000000000..1c35ba842b
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "Models.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+// forward declarations
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName)                                           \
+    TEST_F(ValidationTest, TestName) {                                            \
+        const Model model = TestName::createTestModel();                          \
+        const std::vector<Request> requests = createRequests(TestName::examples); \
+        validateModel(model);                                                     \
+        validateRequests(model, requests);                                        \
+    }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_1
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
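For a hypothetical generated model named add_quant8, VTS_CURRENT_TEST_CASE would expand to roughly the following (the model name is illustrative; createTestModel() and examples come from the code generated into Models.h):

// Approximate expansion of VTS_CURRENT_TEST_CASE(add_quant8):
TEST_F(ValidationTest, add_quant8) {
    const Model model = add_quant8::createTestModel();
    const std::vector<Request> requests = createRequests(add_quant8::examples);
    validateModel(model);
    validateRequests(model, requests);
}

FOR_EACH_TEST_MODEL then stamps out one such TEST_F per generated model, so every model gets both the model-mutation and request-mutation validation passes.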
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
similarity index 64%
rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp
rename to neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index b1d3be786c..62381e6796 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -16,16 +16,7 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworksV1_1.h"
-#include "Utils.h"
-
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-
-using ::android::hardware::hidl_memory;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
+#include "VtsHalNeuralnetworks.h"
 
 namespace android {
 namespace hardware {
@@ -34,11 +25,6 @@ namespace V1_1 {
 namespace vts {
 namespace functional {
 
-// allocator helper
-hidl_memory allocateSharedMemory(int64_t size) {
-    return nn::allocateSharedMemory(size);
-}
-
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -52,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
 }
 
 void NeuralnetworksHidlEnvironment::registerTestServices() {
-    registerTestService<V1_1::IDevice>();
+    registerTestService<IDevice>();
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
+NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
+
 NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
 
 void NeuralnetworksHidlTest::SetUp() {
-    device = ::testing::VtsHalHidlTargetTestBase::getService<V1_1::IDevice>(
+    ::testing::VtsHalHidlTargetTestBase::SetUp();
+    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
         NeuralnetworksHidlEnvironment::getInstance());
     ASSERT_NE(nullptr, device.get());
 }
 
-void NeuralnetworksHidlTest::TearDown() {}
+void NeuralnetworksHidlTest::TearDown() {
+    device = nullptr;
+    ::testing::VtsHalHidlTargetTestBase::TearDown();
+}
 
 }  // namespace functional
 }  // namespace vts
+
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+    return os << toString(errorStatus);
+}
+
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
+    return os << toString(deviceStatus);
+}
+
 }  // namespace V1_1
 }  // namespace neuralnetworks
 }  // namespace hardware
 }  // namespace android
+
+using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;
+
+int main(int argc, char** argv) {
+    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
+    ::testing::InitGoogleTest(&argc, argv);
+    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
+
+    int status = RUN_ALL_TESTS();
+    return status;
+}
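The ostream overloads added above exist so gtest failure messages print enum names instead of raw integers. A small illustrative use (the test name here is hypothetical, and toString() refers to the helpers generated for the HAL enums):

// With operator<<(ostream&, DeviceStatus) in scope, a failing EXPECT_EQ logs
// something like "AVAILABLE vs. BUSY" rather than "1 vs. 2".
TEST_F(NeuralnetworksHidlTest, StatusIsAvailable) {
    Return<DeviceStatus> status = device->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}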
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
similarity index 60%
rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h
rename to neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 426246ce76..0050e52d25 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -17,65 +17,71 @@
 #ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
 #define VTS_HAL_NEURALNETWORKS_V1_1_H
 
-#include <android/hardware/neuralnetworks/1.0/IDevice.h>
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <android/hidl/memory/1.0/IMemory.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.1/IDevice.h>
-#include <hidlmemory/mapping.h>
 #include <VtsHalHidlTargetTestBase.h>
 #include <VtsHalHidlTargetTestEnvBase.h>
+
+#include <android-base/macros.h>
 #include <gtest/gtest.h>
-#include <string>
+#include <iostream>
+#include <vector>
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
 namespace V1_1 {
+
+using V1_0::Request;
+using V1_0::DeviceStatus;
+using V1_0::ErrorStatus;
+
 namespace vts {
 namespace functional {
-hidl_memory allocateSharedMemory(int64_t size);
 
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment();
-    NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
+    ~NeuralnetworksHidlEnvironment() override;
 
    public:
-    ~NeuralnetworksHidlEnvironment() override;
     static NeuralnetworksHidlEnvironment* getInstance();
     void registerTestServices() override;
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
+
    public:
+    NeuralnetworksHidlTest();
     ~NeuralnetworksHidlTest() override;
     void SetUp() override;
    void TearDown() override;
 
-    sp<IDevice> device;
+   protected:
+    sp<IDevice> device;
 };
+
+// Tag for the validation tests
+class ValidationTest : public NeuralnetworksHidlTest {
+   protected:
+    void validateModel(const Model& model);
+    void validateRequests(const Model& model, const std::vector<Request>& request);
+};
+
+// Tag for the generated tests
+class GeneratedTest : public NeuralnetworksHidlTest {};
+
 }  // namespace functional
 }  // namespace vts
 
 // pretty-print values for error messages
-
-template <typename CharT, typename Traits>
-::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
-                                                V1_0::ErrorStatus errorStatus) {
-    return os << toString(errorStatus);
-}
-
-template <typename CharT, typename Traits>
-::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
-                                                V1_0::DeviceStatus deviceStatus) {
-    return os << toString(deviceStatus);
-}
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
 
 }  // namespace V1_1
 }  // namespace neuralnetworks
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
deleted file mode 100644
index 17f6744c2b..0000000000
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "VtsHalNeuralnetworksV1_1.h"
-
-#include "Callbacks.h"
-#include "Models.h"
-#include "TestHarness.h"
-
-#include <android-base/logging.h>
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-#include <string>
-
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using ::android::hardware::neuralnetworks::V1_0::Operand;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_0::OperandType;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_1::Capabilities;
-using ::android::hardware::neuralnetworks::V1_1::IDevice;
-using ::android::hardware::neuralnetworks::V1_1::Model;
-using ::android::hardware::neuralnetworks::V1_1::Operation;
-using ::android::hardware::neuralnetworks::V1_1::OperationType;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-
-static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-    Model model = createValidTestModel_1_1();
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations call returns a list of operations that are
-    // guaranteed not to fail if prepareModel is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Early termination of test because vendor service cannot "
-                     "prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
-
-// create device test
-TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
-
-// status test
-TEST_F(NeuralnetworksHidlTest, StatusTest) {
-    Return<DeviceStatus> status = device->getStatus();
-    ASSERT_TRUE(status.isOk());
-    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
-}
-
-// initialization
-TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
-    Return<void> ret =
-        device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// supported operations positive test
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
-    Model model = createValidTestModel_1_1();
-    Return<void> ret = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_EQ(model.operations.size(), supported.size());
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// supported operations negative test 1
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
-    Model model = createInvalidTestModel1_1_1();
-    Return<void> ret = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-            (void)supported;
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// supported operations negative test 2
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
-    Model model = createInvalidTestModel2_1_1();
-    Return<void> ret = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-            (void)supported;
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// prepare simple model positive test
-TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
-    sp<IPreparedModel> preparedModel;
-    doPrepareModelShortcut(device, &preparedModel);
-}
-
-// prepare simple model negative test 1
-TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
-    Model model = createInvalidTestModel1_1_1();
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    EXPECT_EQ(nullptr, preparedModel.get());
-}
-
-// prepare simple model negative test 2
-TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
-    Model model = createInvalidTestModel2_1_1();
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    EXPECT_EQ(nullptr, preparedModel.get());
-}
-
-// execute simple graph positive test
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
-    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
-    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
-    const uint32_t OUTPUT = 1;
-
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-    Request request = createValidTestRequest();
-
-    auto postWork = [&] {
-        sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
-        if (outputMemory == nullptr) {
-            return false;
-        }
-        float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
-        if (outputPtr == nullptr) {
-            return false;
-        }
-        outputMemory->read();
-        std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
-        outputMemory->commit();
-        return true;
-    };
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    executionCallback->on_finish(postWork);
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
-    EXPECT_EQ(expectedData, outputData);
-}
-
-// execute simple graph negative test 1
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-    Request request = createInvalidTestRequest1();
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
-}
-
-// execute simple graph negative test 2
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-    Request request = createInvalidTestRequest2();
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
-}
-
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
-
-using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;
-
-int main(int argc, char** argv) {
-    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
-    ::testing::InitGoogleTest(&argc, argv);
-    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
-
-    int status = RUN_ALL_TESTS();
-    return status;
-}