diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index 2c3287ab35..5af3255f42 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -44,6 +44,21 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
 // in frameworks/ml/nn/runtime/tests/generated/
 #include "all_generated_V1_2_vts_tests.cpp"
 
+// Generated from spec/strided_slice_invalid_output_dims.mod.py.
+// TODO(b/132155416): Make this part of all_generated_V1_2_vts_tests.cpp.
+namespace strided_slice_invalid_output_dims {
+#include "generated/strided_slice_invalid_output_dims.example.cpp"
+#include "generated/strided_slice_invalid_output_dims.model.cpp"
+}  // namespace strided_slice_invalid_output_dims
+
+// TODO(b/132155416): Make this part of all_generated_V1_2_vts_tests.cpp.
+TEST_F(ValidationTest, strided_slice_invalid_output_dims) {
+    const Model model = strided_slice_invalid_output_dims::createTestModel();
+    const std::vector<Request> requests =
+            createRequests(strided_slice_invalid_output_dims::get_examples());
+    validateFailure(model, requests);
+}
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_2
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 9703c2d765..e935aaa1fa 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -274,6 +274,22 @@ void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
     }
 }
 
+void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+                                            const std::vector<Request>& requests) {
+    for (const Request& request : requests) {
+        SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
+        Return<void> executeStatus = preparedModel->executeSynchronously(
+                request, MeasureTiming::NO,
+                [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+                   const Timing& timing) {
+                    ASSERT_NE(ErrorStatus::NONE, error);
+                    EXPECT_EQ(outputShapes.size(), 0);
+                    EXPECT_TRUE(badTiming(timing));
+                });
+        ASSERT_TRUE(executeStatus.isOk());
+    }
+}
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_2
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 4ddefe8134..666f9b5b00 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -140,6 +140,20 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+void ValidationTest::validateFailure(const Model& model, const std::vector<Request>& requests) {
+    // TODO: Should this always succeed?
+    //       What if the invalid input is part of the model (i.e., a parameter).
+    validateModel(model);
+
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequestFailure(preparedModel, requests);
+}
+
 sp<IPreparedModel> getPreparedModel_1_2(
         const sp<implementation::PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 8d1acbe03e..80e810a5f7 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -73,11 +73,14 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
 class ValidationTest : public NeuralnetworksHidlTest {
    protected:
     void validateEverything(const Model& model, const std::vector<Request>& requests);
+    void validateFailure(const Model& model, const std::vector<Request>& requests);
 
    private:
     void validateModel(const Model& model);
     void validateRequests(const sp<IPreparedModel>& preparedModel,
                           const std::vector<Request>& requests);
+    void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+                                const std::vector<Request>& requests);
     void validateBurst(const sp<IPreparedModel>& preparedModel,
                        const std::vector<Request>& requests);
 };
diff --git a/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.example.cpp b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.example.cpp
new file mode 100644
index 0000000000..064083340c
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.example.cpp
@@ -0,0 +1,116 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
diff --git a/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.model.cpp b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.model.cpp
new file mode 100644
index 0000000000..106655aa30
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/generated/strided_slice_invalid_output_dims.model.cpp
@@ -0,0 +1,216 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::STRIDED_SLICE,
+            .inputs = {0, 1, 2, 3, 4, 5, 6},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2, 3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::STRIDED_SLICE,
+            .inputs = {0, 1, 2, 3, 4, 5, 6},
+            .outputs = {7},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {7};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/neuralnetworks/1.2/vts/functional/spec/strided_slice_invalid_output_dims.mod.py b/neuralnetworks/1.2/vts/functional/spec/strided_slice_invalid_output_dims.mod.py
new file mode 100644
index 0000000000..e8d30f3fb2
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/spec/strided_slice_invalid_output_dims.mod.py
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This test makes sure that executing STRIDED_SLICE results in a failure when
+# the output dimensions do not match shrinkAxisMask.
+#
+# The test generator does not support generating tests resulting in execution
+# failure, so the gTest part of this test has been written by hand.
+# TODO(b/132155416): Move this under frameworks/ml/nn/runtime/test/specs/V1_2.
+#
+# Based on strided_slice_float_11.mod.py.
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+# The value "2" below makes the test invalid. See http://b/79856511#comment2.
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_FLOAT32", "{3}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+Example({
+    i1: [1, 2, 3, 4, 5, 6],
+    output: [1, 2, 3],
+})
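Note on why the spec above is invalid (this note and the sketch below are not part of the patch): STRIDED_SLICE may only shrink an axis whose slice selects exactly one element, and shrunk axes are dropped from the output shape. With shrinkAxisMask = 1, the valid form of this test would use ends[0] = 1 (per the comment in the spec), selecting a single row and yielding the declared {3} output; ends = [2, 3] instead selects two rows along axis 0, so axis 0 cannot be shrunk and execution must fail. The Python sketch below illustrates that rule; the helper name and error handling are illustrative assumptions, not the NNAPI runtime's actual validation code.

# Illustrative sketch only -- mirrors the shape rule this spec violates,
# not the NNAPI runtime's real implementation.
def strided_slice_output_shape(in_shape, begins, ends, strides, shrink_axis_mask):
    out = []
    for axis in range(len(in_shape)):
        # Number of elements selected along this axis.
        extent = len(range(begins[axis], ends[axis], strides[axis]))
        if shrink_axis_mask & (1 << axis):
            if extent != 1:
                raise ValueError("axis %d: cannot shrink a slice of %d elements"
                                 % (axis, extent))
            continue  # shrunk axes are dropped from the output shape
        out.append(extent)
    return out

# Valid counterpart (ends[0] = 1): one row is selected, output shape is [3].
print(strided_slice_output_shape([2, 3], [0, 0], [1, 3], [1, 1], shrink_axis_mask=1))
# This spec (ends = [2, 3]): two rows are selected, axis 0 cannot be shrunk,
# so the declared {3} output is impossible and execution is expected to fail.
try:
    strided_slice_output_shape([2, 3], [0, 0], [2, 3], [1, 1], shrink_axis_mask=1)
except ValueError as e:
    print("invalid:", e)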