diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 54dd14aba3..e28113bcdc 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -18,7 +18,6 @@ cc_library_static {
     name: "VtsHalNeuralnetworksTest_utils",
     srcs: [
         "Callbacks.cpp",
-        "Models.cpp",
         "GeneratedTestHarness.cpp",
     ],
     defaults: ["VtsHalTargetTestDefaults"],
@@ -41,14 +40,17 @@ cc_library_static {
 cc_test {
     name: "VtsHalNeuralnetworksV1_0TargetTest",
     srcs: [
-        "VtsHalNeuralnetworksV1_0.cpp",
-        "VtsHalNeuralnetworksV1_0BasicTest.cpp",
-        "VtsHalNeuralnetworksV1_0GeneratedTest.cpp",
+        "BasicTests.cpp",
+        "GeneratedTests.cpp",
+        "ValidateModel.cpp",
+        "ValidateRequest.cpp",
+        "ValidationTests.cpp",
+        "VtsHalNeuralnetworks.cpp",
     ],
     defaults: ["VtsHalTargetTestDefaults"],
     static_libs: [
-        "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.0",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libhidlmemory",
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
new file mode 100644
index 0000000000..945c4065e5
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+// create device test
+TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
+
+// status test
+TEST_F(NeuralnetworksHidlTest, StatusTest) {
+    Return<DeviceStatus> status = device->getStatus();
+    ASSERT_TRUE(status.isOk());
+    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
+}
+
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+    Return<void> ret =
+        device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+            EXPECT_EQ(ErrorStatus::NONE, status);
+            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
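The new BasicTests.cpp above, and the validation tests later in this patch, block on HIDL callback objects such as PreparedModelCallback via wait()/getStatus(). The real synchronization lives in CallbackBase (Callbacks.h, trimmed in the next hunk); the following is only a minimal standalone sketch of that wait/notify idiom — SimpleCallback is a hypothetical class, not code from this patch:

```cpp
#include <condition_variable>
#include <mutex>

// Minimal sketch of the wait/notify pattern the tests rely on
// (hypothetical class, not the real CallbackBase).
class SimpleCallback {
   public:
    // Called on the driver's binder thread when the asynchronous task finishes.
    void notify(int status) {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mStatus = status;
            mNotified = true;
        }
        mCondition.notify_all();
    }

    // Called on the test thread; blocks until notify() has run.
    int wait() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mNotified; });
        return mStatus;
    }

   private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    bool mNotified = false;
    int mStatus = 0;
};
```

Because prepareModel() and execute() only launch work and report completion through the callback, every test that checks a return status must pair the launch-status assertion with a wait() before reading the callback's status.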
 * synchronize between different threads. An asynchronous task is launched
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 8646a4cbb0..4f9d52837c 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -179,7 +179,7 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<boo
     }
 }
 
-void Execute(sp<IDevice>& device, std::function<V1_0::Model(void)> create_model,
+void Execute(const sp<IDevice>& device, std::function<V1_0::Model(void)> create_model,
              std::function<bool(int)> is_ignored, const std::vector<MixedTypedExampleType>& examples) {
     V1_0::Model model = create_model();
@@ -223,7 +223,7 @@
     EvaluatePreparedModel(preparedModel, is_ignored, examples);
 }
 
-void Execute(sp<IDevice>& device, std::function<V1_1::Model(void)> create_model,
+void Execute(const sp<IDevice>& device, std::function<V1_1::Model(void)> create_model,
              std::function<bool(int)> is_ignored, const std::vector<MixedTypedExampleType>& examples) {
     V1_1::Model model = create_model();
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
similarity index 61%
rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp
rename to neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index b99aef7fc0..2107333e26 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -16,47 +16,33 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworksV1_0.h"
+#include "VtsHalNeuralnetworks.h"
 
 #include "Callbacks.h"
 #include "TestHarness.h"
+#include "Utils.h"
 
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
-using ::android::hardware::neuralnetworks::V1_0::IDevice;
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Capabilities;
-using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
-using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using ::android::hardware::neuralnetworks::V1_0::Model;
-using ::android::hardware::neuralnetworks::V1_0::OperationType;
-using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
-
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
 
 namespace generated_tests {
 using ::generated_tests::MixedTypedExampleType;
-extern void Execute(sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>, std::function<bool(int)>,
-                    const std::vector<MixedTypedExampleType>&);
+extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>,
+                    std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
 }  // namespace generated_tests
 
 namespace V1_0 {
 namespace vts {
 namespace functional {
+
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
 
 // Mixed-typed examples
 typedef generated_tests::MixedTypedExampleType MixedTypedExample;
diff --git a/neuralnetworks/1.0/vts/functional/Models.cpp b/neuralnetworks/1.0/vts/functional/Models.cpp
deleted file mode 100644
index 180286a5b7..0000000000
--- a/neuralnetworks/1.0/vts/functional/Models.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
(the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "neuralnetworks_hidl_hal_test" - -#include "Models.h" -#include "Utils.h" - -#include -#include -#include -#include -#include - -using ::android::sp; - -namespace android { -namespace hardware { -namespace neuralnetworks { - -// create a valid model -V1_1::Model createValidTestModel_1_1() { - const std::vector operand2Data = {5.0f, 6.0f, 7.0f, 8.0f}; - const uint32_t size = operand2Data.size() * sizeof(float); - - const uint32_t operand1 = 0; - const uint32_t operand2 = 1; - const uint32_t operand3 = 2; - const uint32_t operand4 = 3; - - const std::vector operands = { - { - .type = OperandType::TENSOR_FLOAT32, - .dimensions = {1, 2, 2, 1}, - .numberOfConsumers = 1, - .scale = 0.0f, - .zeroPoint = 0, - .lifetime = OperandLifeTime::MODEL_INPUT, - .location = {.poolIndex = 0, .offset = 0, .length = 0}, - }, - { - .type = OperandType::TENSOR_FLOAT32, - .dimensions = {1, 2, 2, 1}, - .numberOfConsumers = 1, - .scale = 0.0f, - .zeroPoint = 0, - .lifetime = OperandLifeTime::CONSTANT_COPY, - .location = {.poolIndex = 0, .offset = 0, .length = size}, - }, - { - .type = OperandType::INT32, - .dimensions = {}, - .numberOfConsumers = 1, - .scale = 0.0f, - .zeroPoint = 0, - .lifetime = OperandLifeTime::CONSTANT_COPY, - .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)}, - }, - { - .type = OperandType::TENSOR_FLOAT32, - .dimensions = {1, 2, 2, 1}, - .numberOfConsumers = 0, - .scale = 0.0f, - .zeroPoint = 0, - .lifetime = OperandLifeTime::MODEL_OUTPUT, - .location = {.poolIndex = 0, .offset = 0, .length = 0}, - }, - }; - - const std::vector operations = {{ - .type = OperationType::ADD, .inputs = {operand1, operand2, operand3}, .outputs = {operand4}, - }}; - - const std::vector inputIndexes = {operand1}; - const std::vector outputIndexes = {operand4}; - std::vector operandValues( - reinterpret_cast(operand2Data.data()), - reinterpret_cast(operand2Data.data()) + size); - int32_t activation[1] = {static_cast(FusedActivationFunc::NONE)}; - operandValues.insert(operandValues.end(), reinterpret_cast(&activation[0]), - reinterpret_cast(&activation[1])); - - const std::vector pools = {}; - - return { - .operands = operands, - .operations = operations, - .inputIndexes = inputIndexes, - .outputIndexes = outputIndexes, - .operandValues = operandValues, - .pools = pools, - }; -} - -// create first invalid model -V1_1::Model createInvalidTestModel1_1_1() { - Model model = createValidTestModel_1_1(); - model.operations[0].type = static_cast(0xDEADBEEF); /* INVALID */ - return model; -} - -// create second invalid model -V1_1::Model createInvalidTestModel2_1_1() { - Model model = createValidTestModel_1_1(); - const uint32_t operand1 = 0; - const uint32_t operand5 = 4; // INVALID OPERAND - model.inputIndexes = std::vector({operand1, operand5 /* INVALID OPERAND */}); - return model; -} - -V1_0::Model createValidTestModel_1_0() { - V1_1::Model model = createValidTestModel_1_1(); - return nn::convertToV1_0(model); -} - -V1_0::Model createInvalidTestModel1_1_0() { - V1_1::Model 
-    return nn::convertToV1_0(model);
-}
-
-V1_0::Model createInvalidTestModel2_1_0() {
-    V1_1::Model model = createInvalidTestModel2_1_1();
-    return nn::convertToV1_0(model);
-}
-
-// create a valid request
-Request createValidTestRequest() {
-    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
-    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
-
-    // prepare inputs
-    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
-    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
-    std::vector<RequestArgument> inputs = {{
-        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
-    }};
-    std::vector<RequestArgument> outputs = {{
-        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
-    }};
-    std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                      nn::allocateSharedMemory(outputSize)};
-    if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
-        return {};
-    }
-
-    // load data
-    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
-    if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
-        return {};
-    }
-    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
-    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
-    if (inputPtr == nullptr || outputPtr == nullptr) {
-        return {};
-    }
-    inputMemory->update();
-    outputMemory->update();
-    std::copy(inputData.begin(), inputData.end(), inputPtr);
-    std::copy(outputData.begin(), outputData.end(), outputPtr);
-    inputMemory->commit();
-    outputMemory->commit();
-
-    return {.inputs = inputs, .outputs = outputs, .pools = pools};
-}
-
-// create first invalid request
-Request createInvalidTestRequest1() {
-    Request request = createValidTestRequest();
-    const uint32_t INVALID = 2;
-    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
-    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
-    request.inputs[0].location = {
-        .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
-    return request;
-}
-
-// create second invalid request
-Request createInvalidTestRequest2() {
-    Request request = createValidTestRequest();
-    request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
-    return request;
-}
-
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
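The deleted createValidTestRequest() above illustrates the HIDL shared-memory protocol that the new ValidateRequest.cpp continues to use: allocate an ashmem-backed hidl_memory pool, map it, and bracket CPU-side writes with update()/commit(). A condensed sketch of just that sequence — fillPool is a hypothetical helper, while mapMemory/update()/commit() are the same hidlmemory APIs the patch itself uses:

```cpp
#include <algorithm>
#include <vector>

#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::sp;
using ::android::hardware::hidl_memory;
using ::android::hidl::memory::V1_0::IMemory;

// Writes 'data' into a freshly mapped hidl_memory pool; returns false on failure.
static bool fillPool(const hidl_memory& pool, const std::vector<float>& data) {
    sp<IMemory> memory = android::hardware::mapMemory(pool);
    if (memory == nullptr) return false;
    float* ptr = reinterpret_cast<float*>(static_cast<void*>(memory->getPointer()));
    if (ptr == nullptr) return false;
    memory->update();                          // begin CPU-side modification
    std::copy(data.begin(), data.end(), ptr);  // write the payload
    memory->commit();                          // publish the writes
    return true;
}
```

Forgetting the update()/commit() pair can leave the service reading stale pool contents, which is why both the deleted and the new request builders call them around every copy.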
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h
index 93982351f4..a1fbe9278b 100644
--- a/neuralnetworks/1.0/vts/functional/Models.h
+++ b/neuralnetworks/1.0/vts/functional/Models.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,29 +14,187 @@
  * limitations under the License.
  */
 
+#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
+#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
+
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include <android/hardware/neuralnetworks/1.1/types.h>
+#include "TestHarness.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
 
-// create V1_1 model
-V1_1::Model createValidTestModel_1_1();
-V1_1::Model createInvalidTestModel1_1_1();
-V1_1::Model createInvalidTestModel2_1_1();
+using MixedTypedExample = generated_tests::MixedTypedExampleType;
 
-// create V1_0 model
-V1_0::Model createValidTestModel_1_0();
-V1_0::Model createInvalidTestModel1_1_0();
-V1_0::Model createInvalidTestModel2_1_0();
+#define FOR_EACH_TEST_MODEL(FN) \
+    FN(add_broadcast_quant8) \
+    FN(add) \
+    FN(add_quant8) \
+    FN(avg_pool_float_1) \
+    FN(avg_pool_float_2) \
+    FN(avg_pool_float_3) \
+    FN(avg_pool_float_4) \
+    FN(avg_pool_float_5) \
+    FN(avg_pool_quant8_1) \
+    FN(avg_pool_quant8_2) \
+    FN(avg_pool_quant8_3) \
+    FN(avg_pool_quant8_4) \
+    FN(avg_pool_quant8_5) \
+    FN(concat_float_1) \
+    FN(concat_float_2) \
+    FN(concat_float_3) \
+    FN(concat_quant8_1) \
+    FN(concat_quant8_2) \
+    FN(concat_quant8_3) \
+    FN(conv_1_h3_w2_SAME) \
+    FN(conv_1_h3_w2_VALID) \
+    FN(conv_3_h3_w2_SAME) \
+    FN(conv_3_h3_w2_VALID) \
+    FN(conv_float_2) \
+    FN(conv_float_channels) \
+    FN(conv_float_channels_weights_as_inputs) \
+    FN(conv_float_large) \
+    FN(conv_float_large_weights_as_inputs) \
+    FN(conv_float) \
+    FN(conv_float_weights_as_inputs) \
+    FN(conv_quant8_2) \
+    FN(conv_quant8_channels) \
+    FN(conv_quant8_channels_weights_as_inputs) \
+    FN(conv_quant8_large) \
+    FN(conv_quant8_large_weights_as_inputs) \
+    FN(conv_quant8) \
+    FN(conv_quant8_overflow) \
+    FN(conv_quant8_overflow_weights_as_inputs) \
+    FN(conv_quant8_weights_as_inputs) \
+    FN(depth_to_space_float_1) \
+    FN(depth_to_space_float_2) \
+    FN(depth_to_space_float_3) \
+    FN(depth_to_space_quant8_1) \
+    FN(depth_to_space_quant8_2) \
+    FN(depthwise_conv2d_float_2) \
+    FN(depthwise_conv2d_float_large_2) \
+    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
+    FN(depthwise_conv2d_float_large) \
+    FN(depthwise_conv2d_float_large_weights_as_inputs) \
+    FN(depthwise_conv2d_float) \
+    FN(depthwise_conv2d_float_weights_as_inputs) \
+    FN(depthwise_conv2d_quant8_2) \
+    FN(depthwise_conv2d_quant8_large) \
+    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
+    FN(depthwise_conv2d_quant8) \
+    FN(depthwise_conv2d_quant8_weights_as_inputs) \
+    FN(depthwise_conv) \
+    FN(dequantize) \
+    FN(embedding_lookup) \
+    FN(floor) \
+    FN(fully_connected_float_2) \
+    FN(fully_connected_float_large) \
+    FN(fully_connected_float_large_weights_as_inputs) \
+    FN(fully_connected_float) \
+    FN(fully_connected_float_weights_as_inputs) \
+    FN(fully_connected_quant8_2) \
+    FN(fully_connected_quant8_large) \
+    FN(fully_connected_quant8_large_weights_as_inputs) \
+    FN(fully_connected_quant8) \
+    FN(fully_connected_quant8_weights_as_inputs) \
+    FN(hashtable_lookup_float) \
+    FN(hashtable_lookup_quant8) \
+    FN(l2_normalization_2) \
+    FN(l2_normalization_large) \
+    FN(l2_normalization) \
+    FN(l2_pool_float_2) \
+    FN(l2_pool_float_large) \
+    FN(l2_pool_float) \
+    FN(local_response_norm_float_1) \
+    FN(local_response_norm_float_2) \
+    FN(local_response_norm_float_3) \
+    FN(local_response_norm_float_4) \
+    FN(logistic_float_1) \
+    FN(logistic_float_2) \
+    FN(logistic_quant8_1) \
+    FN(logistic_quant8_2) \
+    FN(lsh_projection_2) \
+    FN(lsh_projection) \
+    FN(lsh_projection_weights_as_inputs) \
+    FN(lstm2) \
+    FN(lstm2_state2) \
+    FN(lstm2_state) \
+    FN(lstm3) \
+    FN(lstm3_state2) \
+    FN(lstm3_state3) \
+    FN(lstm3_state) \
+    FN(lstm) \
+    FN(lstm_state2) \
+    FN(lstm_state) \
+    FN(max_pool_float_1) \
+    FN(max_pool_float_2) \
+    FN(max_pool_float_3) \
+    FN(max_pool_float_4) \
+    FN(max_pool_quant8_1) \
+    FN(max_pool_quant8_2) \
+    FN(max_pool_quant8_3) \
+    FN(max_pool_quant8_4) \
+    FN(mobilenet_224_gender_basic_fixed) \
+    FN(mobilenet_quantized) \
+    FN(mul_broadcast_quant8) \
+    FN(mul) \
+    FN(mul_quant8) \
+    FN(mul_relu) \
+    FN(relu1_float_1) \
+    FN(relu1_float_2) \
+    FN(relu1_quant8_1) \
+    FN(relu1_quant8_2) \
+    FN(relu6_float_1) \
+    FN(relu6_float_2) \
+    FN(relu6_quant8_1) \
+    FN(relu6_quant8_2) \
+    FN(relu_float_1) \
+    FN(relu_float_2) \
+    FN(relu_quant8_1) \
+    FN(relu_quant8_2) \
+    FN(reshape) \
+    FN(reshape_quant8) \
+    FN(reshape_quant8_weights_as_inputs) \
+    FN(reshape_weights_as_inputs) \
+    FN(resize_bilinear_2) \
+    FN(resize_bilinear) \
+    FN(rnn) \
+    FN(rnn_state) \
+    FN(softmax_float_1) \
+    FN(softmax_float_2) \
+    FN(softmax_quant8_1) \
+    FN(softmax_quant8_2) \
+    FN(space_to_depth_float_1) \
+    FN(space_to_depth_float_2) \
+    FN(space_to_depth_float_3) \
+    FN(space_to_depth_quant8_1) \
+    FN(space_to_depth_quant8_2) \
+    FN(svdf2) \
+    FN(svdf) \
+    FN(svdf_state) \
+    FN(tanh)
 
-// create the request
-V1_0::Request createValidTestRequest();
-V1_0::Request createInvalidTestRequest1();
-V1_0::Request createInvalidTestRequest2();
+#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
+    namespace function {                            \
+    extern std::vector<MixedTypedExample> examples; \
+    Model createTestModel();                        \
+    }
+
+FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
+
+#undef FORWARD_DECLARE_GENERATED_OBJECTS
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_0
 }  // namespace neuralnetworks
 }  // namespace hardware
 }  // namespace android
+
+#endif  // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
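The new Models.h is built around an X-macro: FOR_EACH_TEST_MODEL expands a caller-supplied macro once per generated model — here to forward-declare each model's createTestModel() and examples, and again in ValidationTests.cpp to stamp out one TEST_F per model. A self-contained illustration of the technique, with toy names unrelated to this patch:

```cpp
#include <iostream>

// The list is defined once...
#define FOR_EACH_SHAPE(FN) \
    FN(circle)             \
    FN(square)             \
    FN(triangle)

// ...and expanded as many times as needed with different "FN" macros.
#define DECLARE_AREA(shape) double shape##_area();
FOR_EACH_SHAPE(DECLARE_AREA)
#undef DECLARE_AREA

double circle_area() { return 3.14159; }
double square_area() { return 1.0; }
double triangle_area() { return 0.5; }

int main() {
#define PRINT_AREA(shape) std::cout << #shape << ": " << shape##_area() << '\n';
    FOR_EACH_SHAPE(PRINT_AREA)
#undef PRINT_AREA
}
```

The payoff is that adding one FN(...) line to the list automatically declares the model's symbols and generates its validation test, with no further per-model boilerplate.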
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
new file mode 100644
index 0000000000..4f0697e931
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
+                                           const V1_0::Model& model) {
+    SCOPED_TRACE(message + " [getSupportedOperations]");
+
+    Return<void> ret =
+        device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
+                                 const V1_0::Model& model) {
+    SCOPED_TRACE(message + " [prepareModel]");
+
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
+// Primary validation function. This function will take a valid model, apply a
+// mutation to it to invalidate the model, then pass it to interface calls that
+// use the model. Note that the model here is passed by value, and any mutation
+// to the model does not leave this function.
+static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model,
+                     const std::function<void(Model*)>& mutation) {
+    mutation(&model);
+    validateGetSupportedOperations(device, message, model);
+    validatePrepareModel(device, message, model);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+static uint32_t addOperand(Model* model) {
+    return hidl_vec_push_back(&model->operands,
+                              {
+                                  .type = OperandType::INT32,
+                                  .dimensions = {},
+                                  .numberOfConsumers = 0,
+                                  .scale = 0.0f,
+                                  .zeroPoint = 0,
+                                  .lifetime = OperandLifeTime::MODEL_INPUT,
+                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                              });
+}
+
+static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
+    uint32_t index = addOperand(model);
+    model->operands[index].numberOfConsumers = 1;
+    model->operands[index].lifetime = lifetime;
+    return index;
+}
+
+///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
+
+static const int32_t invalidOperandTypes[] = {
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
+};
+
+static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        for (int32_t invalidOperandType : invalidOperandTypes) {
+            const std::string message = "mutateOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to value " +
+                                        std::to_string(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE OPERAND RANK /////////////////////////
+
+static uint32_t getInvalidRank(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            return 1;
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0;
+        default:
+            return 0;
+    }
+}
+
+static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
+                                    " has rank of " + std::to_string(invalidRank);
+        validate(device, message, model, [operand, invalidRank](Model* model) {
+            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
+
+static float getInvalidScale(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+            return 1.0f;
+        case OperandType::TENSOR_INT32:
+            return -1.0f;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0.0f;
+        default:
+            return 0.0f;
+    }
+}
+
+static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const float invalidScale = getInvalidScale(model.operands[operand].type);
+        const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
+                                    " has scale of " + std::to_string(invalidScale);
+        validate(device, message, model, [operand, invalidScale](Model* model) {
+            model->operands[operand].scale = invalidScale;
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
+
+static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+            return {1};
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return {-1, 256};
+        default:
+            return {};
+    }
+}
+
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::vector<int32_t> invalidZeroPoints =
+            getInvalidZeroPoints(model.operands[operand].type);
+        for (int32_t invalidZeroPoint : invalidZeroPoints) {
+            const std::string message = "mutateOperandZeroPointTest: operand " +
+                                        std::to_string(operand) + " has zero point of " +
+                                        std::to_string(invalidZeroPoint);
+            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
+                model->operands[operand].zeroPoint = invalidZeroPoint;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE EXTRA ??? /////////////////////////
+
+// TODO: Operand::lifetime
+// TODO: Operand::location
+
+///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
+
+static void mutateOperand(Operand* operand, OperandType type) {
+    Operand newOperand = *operand;
+    newOperand.type = type;
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            newOperand.dimensions = hidl_vec<uint32_t>();
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_FLOAT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_INT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
+            break;
+        case OperandType::OEM:
+        case OperandType::TENSOR_OEM_BYTE:
+        default:
+            break;
+    }
+    *operand = newOperand;
+}
+
+static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) {
+    // LSH_PROJECTION's second argument is allowed to have any type. This is the
+    // only operation that currently has a type that can be anything independent
+    // from any other type. Changing the operand type to any other type will
+    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
+    // test.
+    for (const Operation& operation : model.operations) {
+        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        if (mutateOperationOperandTypeSkip(operand, model)) {
+            continue;
+        }
+        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
+            // Do not test OEM types
+            if (invalidOperandType == model.operands[operand].type ||
+                invalidOperandType == OperandType::OEM ||
+                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
+                continue;
+            }
+            const std::string message = "mutateOperationOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to type " +
+                                        toString(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                mutateOperand(&model->operands[operand], invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
+
+static const int32_t invalidOperationTypes[] = {
+    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
+    static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
+    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
+    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+};
+
+static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (int32_t invalidOperationType : invalidOperationTypes) {
+            const std::string message = "mutateOperationTypeTest: operation " +
+                                        std::to_string(operation) + " set to value " +
+                                        std::to_string(invalidOperationType);
+            validate(device, message, model, [operation, invalidOperationType](Model* model) {
+                model->operations[operation].type =
+                    static_cast<OperationType>(invalidOperationType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
+                                                 const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
+                model->operations[operation].inputs[input] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
+                                                  const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
+                model->operations[operation].outputs[output] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
+
+static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
+    if (vec) {
+        // remove elements matching "value"
+        auto last = std::remove(vec->begin(), vec->end(), value);
+        vec->resize(std::distance(vec->begin(), last));
+
+        // decrement elements exceeding "value"
+        std::transform(vec->begin(), vec->end(), vec->begin(),
+                       [value](uint32_t v) { return v > value ? v - 1 : v; });
+    }
+}
+
+static void removeOperand(Model* model, uint32_t index) {
+    hidl_vec_removeAt(&model->operands, index);
+    for (Operation& operation : model->operations) {
+        removeValueAndDecrementGreaterValues(&operation.inputs, index);
+        removeValueAndDecrementGreaterValues(&operation.outputs, index);
+    }
+    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+}
+
+static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
+        validate(device, message, model,
+                 [operand](Model* model) { removeOperand(model, operand); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION /////////////////////////
+
+static void removeOperation(Model* model, uint32_t index) {
+    for (uint32_t operand : model->operations[index].inputs) {
+        model->operands[operand].numberOfConsumers--;
+    }
+    hidl_vec_removeAt(&model->operations, index);
+}
+
+static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
+        validate(device, message, model,
+                 [operation](Model* model) { removeOperation(model, operation); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION INPUT /////////////////////////
+
+static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const V1_0::Operation& op = model.operations[operation];
+            // CONCATENATION has at least 2 inputs, with the last element being
+            // INT32. Skip this test if removing one of CONCATENATION's
+            // inputs still produces a valid model.
+            if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 &&
+                input != op.inputs.size() - 1) {
+                continue;
+            }
+            const std::string message = "removeOperationInputTest: operation " +
+                                        std::to_string(operation) + ", input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input](Model* model) {
+                uint32_t operand = model->operations[operation].inputs[input];
+                model->operands[operand].numberOfConsumers--;
+                hidl_vec_removeAt(&model->operations[operation].inputs, input);
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
+
+static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "removeOperationOutputTest: operation " +
+                                        std::to_string(operation) + ", output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output](Model* model) {
+                hidl_vec_removeAt(&model->operations[operation].outputs, output);
+            });
+        }
+    }
+}
+
+///////////////////////// MODEL VALIDATION /////////////////////////
+
+// TODO: remove model input
+// TODO: remove model output
+// TODO: add unused operation
+
+///////////////////////// ADD OPERATION INPUT /////////////////////////
+
+static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
+            hidl_vec_push_back(&model->operations[operation].inputs, index);
+            hidl_vec_push_back(&model->inputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// ADD OPERATION OUTPUT /////////////////////////
+
+static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message =
+            "addOperationOutputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
+            hidl_vec_push_back(&model->operations[operation].outputs, index);
+            hidl_vec_push_back(&model->outputIndexes, index);
+        });
+    }
+}
+
+////////////////////////// ENTRY POINT //////////////////////////////
+
+void ValidationTest::validateModel(const V1_0::Model& model) {
+    mutateOperandTypeTest(device, model);
+    mutateOperandRankTest(device, model);
+    mutateOperandScaleTest(device, model);
+    mutateOperandZeroPointTest(device, model);
+    mutateOperationOperandTypeTest(device, model);
+    mutateOperationTypeTest(device, model);
+    mutateOperationInputOperandIndexTest(device, model);
+    mutateOperationOutputOperandIndexTest(device, model);
+    removeOperandTest(device, model);
+    removeOperationTest(device, model);
+    removeOperationInputTest(device, model);
+    removeOperationOutputTest(device, model);
+    addOperationInputTest(device, model);
+    addOperationOutputTest(device, model);
+}
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
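ValidateModel.cpp deletes an element from a hidl_vec by rotating it to the back and shrinking the vector, then renumbers every operand index greater than the removed one. The same two idioms with std::vector, as a quick standalone check of the index math (hypothetical helper names):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Remove vec[index] by rotating it to the end and shrinking by one.
static void removeAt(std::vector<uint32_t>* vec, uint32_t index) {
    std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
    vec->resize(vec->size() - 1);
}

// After deleting operand 'value', every larger index shifts down by one.
static void removeValueAndDecrement(std::vector<uint32_t>* vec, uint32_t value) {
    vec->erase(std::remove(vec->begin(), vec->end(), value), vec->end());
    std::transform(vec->begin(), vec->end(), vec->begin(),
                   [value](uint32_t v) { return v > value ? v - 1 : v; });
}

int main() {
    std::vector<uint32_t> v = {0, 1, 2, 3};
    removeAt(&v, 1);                 // {0, 2, 3}
    removeValueAndDecrement(&v, 2);  // {0, 2}: the old index 3 renumbers to 2
    assert((v == std::vector<uint32_t>{0, 2}));
}
```

Note that the transform lambda must return v - 1 rather than applying -- to the by-value parameter, since decrementing a copy would leave the stored element unchanged.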
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
new file mode 100644
index 0000000000..08f2613c99
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
+using generated_tests::MixedTyped;
+using generated_tests::MixedTypedExampleType;
+using generated_tests::for_all;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            ASSERT_NE(0ul, supported.size());
+            fullySupportsModel =
+                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+        });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations call returns a list of operations that are
+    // guaranteed not to fail if prepareModel is called, and
+    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ] Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
+// Primary validation function. This function will take a valid request, apply a
+// mutation to it to invalidate the request, then pass it to interface calls
+// that use the request. Note that the request here is passed by value, and any
+// mutation to the request does not leave this function.
+static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
+                     Request request, const std::function<void(Request*)>& mutation) {
+    mutation(&request);
+    SCOPED_TRACE(message + " [execute]");
+
+    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+    ASSERT_NE(nullptr, executionCallback.get());
+    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+    ASSERT_TRUE(executeLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+    executionCallback->wait();
+    ErrorStatus executionReturnStatus = executionCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t input = 0; input < request.inputs.size(); ++input) {
+        const std::string message = "removeInput: removed input " + std::to_string(input);
+        validate(preparedModel, message, request,
+                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+    }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t output = 0; output < request.outputs.size(); ++output) {
+        const std::string message = "removeOutput: removed Output " + std::to_string(output);
+        validate(preparedModel, message, request,
+                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+    }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    std::vector<Request> requests;
+
+    for (auto& example : examples) {
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& outputs = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // This function only partially specifies the metadata (vector of RequestArguments).
+        // The contents are copied over below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            RequestArgument arg_empty = {
+                .hasNoValue = true,
+            };
+            inputs_info[index] = s ? arg : arg_empty;
+            inputSize += s;
+        });
+        // Compute offset for inputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                if (!i.hasNoValue) i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Compute offset for outputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+                                          nn::allocateSharedMemory(outputSize)};
+        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
+            return {};
+        }
+
+        // map pool
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        if (inputMemory == nullptr) {
+            return {};
+        }
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        if (inputPtr == nullptr) {
+            return {};
+        }
+
+        // initialize pool
+        inputMemory->update();
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+        inputMemory->commit();
+
+        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
+    }
+
+    return requests;
+}
+
+void ValidationTest::validateRequests(const V1_0::Model& model,
+                                      const std::vector<Request>& requests) {
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    // validate each request
+    for (const Request& request : requests) {
+        removeInputTest(preparedModel, request);
+        removeOutputTest(preparedModel, request);
+    }
+}
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
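createRequests() above packs all of an example's inputs into a single pool by first recording each argument's length and then assigning offsets with a running sum — an exclusive prefix sum over the lengths. The core of that bookkeeping in isolation (Loc and packOffsets are hypothetical names):

```cpp
#include <cstdint>
#include <vector>

struct Loc {
    uint32_t offset;
    uint32_t length;
};

// Assign consecutive, non-overlapping offsets within a single memory pool.
static std::vector<Loc> packOffsets(const std::vector<uint32_t>& lengths) {
    std::vector<Loc> locs;
    uint32_t offset = 0;
    for (uint32_t length : lengths) {
        locs.push_back({offset, length});
        offset += length;  // running (exclusive) prefix sum
    }
    return locs;
}

// packOffsets({16, 4, 64}) -> {0,16}, {16,4}, {20,64}; total pool size 84 bytes.
```

The final running total doubles as the pool size handed to allocateSharedMemory, which is why the patch accumulates inputSize/outputSize in the same pass that fills in the RequestArgument metadata.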
diff --git a/neuralnetworks/1.0/vts/functional/ValidationTests.cpp b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
new file mode 100644
index 0000000000..98fc1c59f4
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "Models.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+
+// forward declarations
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName)                                           \
+    TEST_F(ValidationTest, TestName) {                                            \
+        const Model model = TestName::createTestModel();                          \
+        const std::vector<Request> requests = createRequests(TestName::examples); \
+        validateModel(model);                                                     \
+        validateRequests(model, requests);                                        \
+    }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
similarity index 64%
rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp
rename to neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index b14fb2c4c8..1ff3b66808 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -16,15 +16,7 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworksV1_0.h"
-#include "Utils.h"
-
-#include <android-base/logging.h>
-
-using ::android::hardware::hidl_memory;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
+#include "VtsHalNeuralnetworks.h"
 
 namespace android {
 namespace hardware {
@@ -33,11 +25,6 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-// allocator helper
-hidl_memory allocateSharedMemory(int64_t size) {
-    return nn::allocateSharedMemory(size);
-}
-
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -51,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
 }
 
 void NeuralnetworksHidlEnvironment::registerTestServices() {
-    registerTestService<V1_0::IDevice>();
+    registerTestService<IDevice>();
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
+NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
+
 NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
 
 void NeuralnetworksHidlTest::SetUp() {
-    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+    ::testing::VtsHalHidlTargetTestBase::SetUp();
+    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
         NeuralnetworksHidlEnvironment::getInstance());
     ASSERT_NE(nullptr, device.get());
 }
 
-void NeuralnetworksHidlTest::TearDown() {}
+void NeuralnetworksHidlTest::TearDown() {
+    device = nullptr;
+    ::testing::VtsHalHidlTargetTestBase::TearDown();
+}
 
 }  // namespace functional
 }  // namespace vts
+
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+    return os << toString(errorStatus);
+}
+
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
+    return os << toString(deviceStatus);
+}
+
 }  // namespace V1_0
 }  // namespace neuralnetworks
 }  // namespace hardware
 }  // namespace android
+
+using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
+
+int main(int argc, char** argv) {
+    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
+    ::testing::InitGoogleTest(&argc, argv);
+    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
+
+    int status = RUN_ALL_TESTS();
+    return status;
+}
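The renamed VtsHalNeuralnetworks.cpp now owns a local main() that registers the VTS environment before RUN_ALL_TESTS(). Stripped of the VTS specifics, the general GoogleTest wiring looks like this — MyEnv is a hypothetical stand-in for NeuralnetworksHidlEnvironment:

```cpp
#include <gtest/gtest.h>

class MyEnv : public ::testing::Environment {
   public:
    void SetUp() override { /* acquire process-wide resources once */ }
    void TearDown() override { /* release them after all tests run */ }
};

int main(int argc, char** argv) {
    // gtest takes ownership of the environment pointer.
    ::testing::AddGlobalTestEnvironment(new MyEnv());
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
```

The VTS variant differs only in that the environment is a singleton (getInstance()) and also consumes command-line arguments via init(), which is why the patch calls it after InitGoogleTest.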
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
similarity index 60%
rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h
rename to neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index fbb1607478..e79129b09f 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -18,16 +18,15 @@
 #define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
 
 #include <android/hardware/neuralnetworks/1.0/IDevice.h>
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
-#include <android-base/logging.h>
 #include <VtsHalHidlTargetTestBase.h>
 #include <VtsHalHidlTargetTestEnvBase.h>
+
+#include <android-base/macros.h>
 #include <gtest/gtest.h>
-#include <string>
+#include <iostream>
+#include <vector>
 
 namespace android {
 namespace hardware {
@@ -36,47 +35,47 @@ namespace V1_0 {
 namespace vts {
 namespace functional {
 
-hidl_memory allocateSharedMemory(int64_t size);
-
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment();
-    NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
+    ~NeuralnetworksHidlEnvironment() override;
 
    public:
-    ~NeuralnetworksHidlEnvironment() override;
     static NeuralnetworksHidlEnvironment* getInstance();
     void registerTestServices() override;
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
+
    public:
+    NeuralnetworksHidlTest();
     ~NeuralnetworksHidlTest() override;
     void SetUp() override;
     void TearDown() override;
 
-    sp<IDevice> device;
+   protected:
+    sp<IDevice> device;
 };
+
+// Tag for the validation tests
+class ValidationTest : public NeuralnetworksHidlTest {
+   protected:
+    void validateModel(const Model& model);
+    void validateRequests(const Model& model, const std::vector<Request>& request);
+};
+
+// Tag for the generated tests
+class GeneratedTest : public NeuralnetworksHidlTest {};
+
 }  // namespace functional
 }  // namespace vts
 
 // pretty-print values for error messages
-
-template <typename CharT, typename Traits>
-::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
-                                                V1_0::ErrorStatus errorStatus) {
-    return os << toString(errorStatus);
-}
-
-template <typename CharT, typename Traits>
-::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
-                                                V1_0::DeviceStatus deviceStatus) {
-    return os << toString(deviceStatus);
-}
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
 
 }  // namespace V1_0
 }  // namespace neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
deleted file mode 100644
index 59e5b80612..0000000000
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "VtsHalNeuralnetworksV1_0.h"
-
-#include "Callbacks.h"
-#include "Models.h"
-#include "TestHarness.h"
-
-#include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
-using ::android::hardware::neuralnetworks::V1_0::IDevice;
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Capabilities;
-using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
-using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using ::android::hardware::neuralnetworks::V1_0::Model;
-using ::android::hardware::neuralnetworks::V1_0::OperationType;
-using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-
-static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-    Model model = createValidTestModel_1_0();
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations call returns a list of operations that are
-    // guaranteed not to fail if prepareModel is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ] Early termination of test because vendor service cannot "
-                     "prepare model that it does not support."
- << std::endl; - return; - } - ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus); - ASSERT_NE(nullptr, preparedModel->get()); -} - -// create device test -TEST_F(NeuralnetworksHidlTest, CreateDevice) {} - -// status test -TEST_F(NeuralnetworksHidlTest, StatusTest) { - Return status = device->getStatus(); - ASSERT_TRUE(status.isOk()); - EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast(status)); -} - -// initialization -TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { - Return ret = - device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_LT(0.0f, capabilities.float32Performance.execTime); - EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); - EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); - EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); - }); - EXPECT_TRUE(ret.isOk()); -} - -// supported operations positive test -TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) { - Model model = createValidTestModel_1_0(); - Return ret = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - EXPECT_EQ(ErrorStatus::NONE, status); - EXPECT_EQ(model.operations.size(), supported.size()); - }); - EXPECT_TRUE(ret.isOk()); -} - -// supported operations negative test 1 -TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) { - Model model = createInvalidTestModel1_1_0(); - Return ret = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); - (void)supported; - }); - EXPECT_TRUE(ret.isOk()); -} - -// supported operations negative test 2 -TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) { - Model model = createInvalidTestModel2_1_0(); - Return ret = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); - (void)supported; - }); - EXPECT_TRUE(ret.isOk()); -} - -// prepare simple model positive test -TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) { - sp preparedModel; - doPrepareModelShortcut(device, &preparedModel); -} - -// prepare simple model negative test 1 -TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) { - Model model = createInvalidTestModel1_1_0(); - sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); - Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); - - preparedModelCallback->wait(); - ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); - sp preparedModel = preparedModelCallback->getPreparedModel(); - EXPECT_EQ(nullptr, preparedModel.get()); -} - -// prepare simple model negative test 2 -TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) { - Model model = createInvalidTestModel2_1_0(); - sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); - Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); - - preparedModelCallback->wait(); - ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); 
- EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); - sp preparedModel = preparedModelCallback->getPreparedModel(); - EXPECT_EQ(nullptr, preparedModel.get()); -} - -// execute simple graph positive test -TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) { - std::vector outputData = {-1.0f, -1.0f, -1.0f, -1.0f}; - std::vector expectedData = {6.0f, 8.0f, 10.0f, 12.0f}; - const uint32_t OUTPUT = 1; - - sp preparedModel; - ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); - if (preparedModel == nullptr) { - return; - } - Request request = createValidTestRequest(); - - auto postWork = [&] { - sp outputMemory = mapMemory(request.pools[OUTPUT]); - if (outputMemory == nullptr) { - return false; - } - float* outputPtr = reinterpret_cast(static_cast(outputMemory->getPointer())); - if (outputPtr == nullptr) { - return false; - } - outputMemory->read(); - std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin()); - outputMemory->commit(); - return true; - }; - - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - executionCallback->on_finish(postWork); - Return executeLaunchStatus = preparedModel->execute(request, executionCallback); - ASSERT_TRUE(executeLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::NONE, static_cast(executeLaunchStatus)); - - executionCallback->wait(); - ErrorStatus executionReturnStatus = executionCallback->getStatus(); - EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus); - EXPECT_EQ(expectedData, outputData); -} - -// execute simple graph negative test 1 -TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) { - sp preparedModel; - ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); - if (preparedModel == nullptr) { - return; - } - Request request = createInvalidTestRequest1(); - - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - Return executeLaunchStatus = preparedModel->execute(request, executionCallback); - ASSERT_TRUE(executeLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(executeLaunchStatus)); - - executionCallback->wait(); - ErrorStatus executionReturnStatus = executionCallback->getStatus(); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); -} - -// execute simple graph negative test 2 -TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) { - sp preparedModel; - ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); - if (preparedModel == nullptr) { - return; - } - Request request = createInvalidTestRequest2(); - - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - Return executeLaunchStatus = preparedModel->execute(request, executionCallback); - ASSERT_TRUE(executeLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(executeLaunchStatus)); - - executionCallback->wait(); - ErrorStatus executionReturnStatus = executionCallback->getStatus(); - EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); -} - -} // namespace functional -} // namespace vts -} // namespace V1_0 -} // namespace neuralnetworks -} // namespace hardware -} // namespace android - -using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment; - -int main(int argc, char** argv) { - ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); - ::testing::InitGoogleTest(&argc, argv); - 
NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
-
-    int status = RUN_ALL_TESTS();
-    return status;
-}
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index 947ca2ca4d..f755c20be5 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -17,9 +17,12 @@
 cc_test {
     name: "VtsHalNeuralnetworksV1_1TargetTest",
     srcs: [
-        "VtsHalNeuralnetworksV1_1.cpp",
-        "VtsHalNeuralnetworksV1_1BasicTest.cpp",
-        "VtsHalNeuralnetworksV1_1GeneratedTest.cpp",
+        "BasicTests.cpp",
+        "GeneratedTests.cpp",
+        "ValidateModel.cpp",
+        "ValidateRequest.cpp",
+        "ValidationTests.cpp",
+        "VtsHalNeuralnetworks.cpp",
     ],
     defaults: ["VtsHalTargetTestDefaults"],
     static_libs: [
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
new file mode 100644
index 0000000000..ed59a2dd8c
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+// create device test
+TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
+
+// status test
+TEST_F(NeuralnetworksHidlTest, StatusTest) {
+    Return<DeviceStatus> status = device->getStatus();
+    ASSERT_TRUE(status.isOk());
+    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
+}
+
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+    Return<void> ret =
+        device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
+            EXPECT_EQ(ErrorStatus::NONE, status);
+            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
+            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_1
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
similarity index 53%
rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp
rename to neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 025d9feda3..1f1cc7af9d 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -16,54 +16,33 @@
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworksV1_1.h"
+#include "VtsHalNeuralnetworks.h" #include "Callbacks.h" #include "TestHarness.h" +#include "Utils.h" #include -#include -#include #include #include -using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; -using ::android::hardware::neuralnetworks::V1_0::Capabilities; -using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; -using ::android::hardware::neuralnetworks::V1_0::ErrorStatus; -using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; -using ::android::hardware::neuralnetworks::V1_0::Operand; -using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; -using ::android::hardware::neuralnetworks::V1_0::OperandType; -using ::android::hardware::neuralnetworks::V1_0::Request; -using ::android::hardware::neuralnetworks::V1_1::IDevice; -using ::android::hardware::neuralnetworks::V1_1::Model; -using ::android::hardware::neuralnetworks::V1_1::Operation; -using ::android::hardware::neuralnetworks::V1_1::OperationType; -using ::android::hardware::Return; -using ::android::hardware::Void; -using ::android::hardware::hidl_memory; -using ::android::hardware::hidl_string; -using ::android::hardware::hidl_vec; -using ::android::hidl::allocator::V1_0::IAllocator; -using ::android::hidl::memory::V1_0::IMemory; -using ::android::sp; - namespace android { namespace hardware { namespace neuralnetworks { namespace generated_tests { using ::generated_tests::MixedTypedExampleType; -extern void Execute(sp&, std::function, std::function, - const std::vector&); +extern void Execute(const sp&, std::function, + std::function, const std::vector&); } // namespace generated_tests namespace V1_1 { namespace vts { namespace functional { + using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; +using ::android::nn::allocateSharedMemory; // Mixed-typed examples typedef generated_tests::MixedTypedExampleType MixedTypedExample; diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h new file mode 100644 index 0000000000..c3cadb5fe2 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/Models.h @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H +#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "TestHarness.h" + +#include +#include + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { + +using MixedTypedExample = generated_tests::MixedTypedExampleType; + +#define FOR_EACH_TEST_MODEL(FN) \ + FN(add) \ + FN(add_broadcast_quant8) \ + FN(add_quant8) \ + FN(add_relaxed) \ + FN(avg_pool_float_1) \ + FN(avg_pool_float_1_relaxed) \ + FN(avg_pool_float_2) \ + FN(avg_pool_float_2_relaxed) \ + FN(avg_pool_float_3) \ + FN(avg_pool_float_3_relaxed) \ + FN(avg_pool_float_4) \ + FN(avg_pool_float_4_relaxed) \ + FN(avg_pool_float_5) \ + FN(avg_pool_quant8_1) \ + FN(avg_pool_quant8_2) \ + FN(avg_pool_quant8_3) \ + FN(avg_pool_quant8_4) \ + FN(avg_pool_quant8_5) \ + FN(batch_to_space) \ + FN(batch_to_space_float_1) \ + FN(batch_to_space_quant8_1) \ + FN(concat_float_1) \ + FN(concat_float_1_relaxed) \ + FN(concat_float_2) \ + FN(concat_float_2_relaxed) \ + FN(concat_float_3) \ + FN(concat_float_3_relaxed) \ + FN(concat_quant8_1) \ + FN(concat_quant8_2) \ + FN(concat_quant8_3) \ + FN(conv_1_h3_w2_SAME) \ + FN(conv_1_h3_w2_SAME_relaxed) \ + FN(conv_1_h3_w2_VALID) \ + FN(conv_1_h3_w2_VALID_relaxed) \ + FN(conv_3_h3_w2_SAME) \ + FN(conv_3_h3_w2_SAME_relaxed) \ + FN(conv_3_h3_w2_VALID) \ + FN(conv_3_h3_w2_VALID_relaxed) \ + FN(conv_float) \ + FN(conv_float_2) \ + FN(conv_float_channels) \ + FN(conv_float_channels_relaxed) \ + FN(conv_float_channels_weights_as_inputs) \ + FN(conv_float_channels_weights_as_inputs_relaxed) \ + FN(conv_float_large) \ + FN(conv_float_large_relaxed) \ + FN(conv_float_large_weights_as_inputs) \ + FN(conv_float_large_weights_as_inputs_relaxed) \ + FN(conv_float_relaxed) \ + FN(conv_float_weights_as_inputs) \ + FN(conv_float_weights_as_inputs_relaxed) \ + FN(conv_quant8) \ + FN(conv_quant8_2) \ + FN(conv_quant8_channels) \ + FN(conv_quant8_channels_weights_as_inputs) \ + FN(conv_quant8_large) \ + FN(conv_quant8_large_weights_as_inputs) \ + FN(conv_quant8_overflow) \ + FN(conv_quant8_overflow_weights_as_inputs) \ + FN(conv_quant8_weights_as_inputs) \ + FN(depth_to_space_float_1) \ + FN(depth_to_space_float_1_relaxed) \ + FN(depth_to_space_float_2) \ + FN(depth_to_space_float_2_relaxed) \ + FN(depth_to_space_float_3) \ + FN(depth_to_space_float_3_relaxed) \ + FN(depth_to_space_quant8_1) \ + FN(depth_to_space_quant8_2) \ + FN(depthwise_conv) \ + FN(depthwise_conv2d_float) \ + FN(depthwise_conv2d_float_2) \ + FN(depthwise_conv2d_float_large) \ + FN(depthwise_conv2d_float_large_2) \ + FN(depthwise_conv2d_float_large_2_weights_as_inputs) \ + FN(depthwise_conv2d_float_large_relaxed) \ + FN(depthwise_conv2d_float_large_weights_as_inputs) \ + FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \ + FN(depthwise_conv2d_float_weights_as_inputs) \ + FN(depthwise_conv2d_quant8) \ + FN(depthwise_conv2d_quant8_2) \ + FN(depthwise_conv2d_quant8_large) \ + FN(depthwise_conv2d_quant8_large_weights_as_inputs) \ + FN(depthwise_conv2d_quant8_weights_as_inputs) \ + FN(depthwise_conv_relaxed) \ + FN(dequantize) \ + FN(div) \ + FN(embedding_lookup) \ + FN(embedding_lookup_relaxed) \ + FN(floor) \ + FN(floor_relaxed) \ + FN(fully_connected_float) \ + FN(fully_connected_float_2) \ + FN(fully_connected_float_large) \ + FN(fully_connected_float_large_weights_as_inputs) \ + FN(fully_connected_float_relaxed) \ + 
FN(fully_connected_float_weights_as_inputs) \ + FN(fully_connected_float_weights_as_inputs_relaxed) \ + FN(fully_connected_quant8) \ + FN(fully_connected_quant8_2) \ + FN(fully_connected_quant8_large) \ + FN(fully_connected_quant8_large_weights_as_inputs) \ + FN(fully_connected_quant8_weights_as_inputs) \ + FN(hashtable_lookup_float) \ + FN(hashtable_lookup_float_relaxed) \ + FN(hashtable_lookup_quant8) \ + FN(l2_normalization) \ + FN(l2_normalization_2) \ + FN(l2_normalization_large) \ + FN(l2_normalization_large_relaxed) \ + FN(l2_normalization_relaxed) \ + FN(l2_pool_float) \ + FN(l2_pool_float_2) \ + FN(l2_pool_float_large) \ + FN(l2_pool_float_relaxed) \ + FN(local_response_norm_float_1) \ + FN(local_response_norm_float_1_relaxed) \ + FN(local_response_norm_float_2) \ + FN(local_response_norm_float_2_relaxed) \ + FN(local_response_norm_float_3) \ + FN(local_response_norm_float_3_relaxed) \ + FN(local_response_norm_float_4) \ + FN(local_response_norm_float_4_relaxed) \ + FN(logistic_float_1) \ + FN(logistic_float_1_relaxed) \ + FN(logistic_float_2) \ + FN(logistic_float_2_relaxed) \ + FN(logistic_quant8_1) \ + FN(logistic_quant8_2) \ + FN(lsh_projection) \ + FN(lsh_projection_2) \ + FN(lsh_projection_2_relaxed) \ + FN(lsh_projection_relaxed) \ + FN(lsh_projection_weights_as_inputs) \ + FN(lsh_projection_weights_as_inputs_relaxed) \ + FN(lstm) \ + FN(lstm2) \ + FN(lstm2_relaxed) \ + FN(lstm2_state) \ + FN(lstm2_state2) \ + FN(lstm2_state2_relaxed) \ + FN(lstm2_state_relaxed) \ + FN(lstm3) \ + FN(lstm3_relaxed) \ + FN(lstm3_state) \ + FN(lstm3_state2) \ + FN(lstm3_state2_relaxed) \ + FN(lstm3_state3) \ + FN(lstm3_state3_relaxed) \ + FN(lstm3_state_relaxed) \ + FN(lstm_relaxed) \ + FN(lstm_state) \ + FN(lstm_state2) \ + FN(lstm_state2_relaxed) \ + FN(lstm_state_relaxed) \ + FN(max_pool_float_1) \ + FN(max_pool_float_1_relaxed) \ + FN(max_pool_float_2) \ + FN(max_pool_float_2_relaxed) \ + FN(max_pool_float_3) \ + FN(max_pool_float_3_relaxed) \ + FN(max_pool_float_4) \ + FN(max_pool_quant8_1) \ + FN(max_pool_quant8_2) \ + FN(max_pool_quant8_3) \ + FN(max_pool_quant8_4) \ + FN(mean) \ + FN(mean_float_1) \ + FN(mean_float_2) \ + FN(mean_quant8_1) \ + FN(mean_quant8_2) \ + FN(mobilenet_224_gender_basic_fixed) \ + FN(mobilenet_224_gender_basic_fixed_relaxed) \ + FN(mobilenet_quantized) \ + FN(mul) \ + FN(mul_broadcast_quant8) \ + FN(mul_quant8) \ + FN(mul_relaxed) \ + FN(mul_relu) \ + FN(mul_relu_relaxed) \ + FN(pad) \ + FN(pad_float_1) \ + FN(relu1_float_1) \ + FN(relu1_float_1_relaxed) \ + FN(relu1_float_2) \ + FN(relu1_float_2_relaxed) \ + FN(relu1_quant8_1) \ + FN(relu1_quant8_2) \ + FN(relu6_float_1) \ + FN(relu6_float_1_relaxed) \ + FN(relu6_float_2) \ + FN(relu6_float_2_relaxed) \ + FN(relu6_quant8_1) \ + FN(relu6_quant8_2) \ + FN(relu_float_1) \ + FN(relu_float_1_relaxed) \ + FN(relu_float_2) \ + FN(relu_quant8_1) \ + FN(relu_quant8_2) \ + FN(reshape) \ + FN(reshape_quant8) \ + FN(reshape_quant8_weights_as_inputs) \ + FN(reshape_relaxed) \ + FN(reshape_weights_as_inputs) \ + FN(reshape_weights_as_inputs_relaxed) \ + FN(resize_bilinear) \ + FN(resize_bilinear_2) \ + FN(resize_bilinear_relaxed) \ + FN(rnn) \ + FN(rnn_relaxed) \ + FN(rnn_state) \ + FN(rnn_state_relaxed) \ + FN(softmax_float_1) \ + FN(softmax_float_1_relaxed) \ + FN(softmax_float_2) \ + FN(softmax_float_2_relaxed) \ + FN(softmax_quant8_1) \ + FN(softmax_quant8_2) \ + FN(space_to_batch) \ + FN(space_to_batch_float_1) \ + FN(space_to_batch_float_2) \ + FN(space_to_batch_float_3) \ + FN(space_to_batch_quant8_1) \ + 
FN(space_to_batch_quant8_2) \ + FN(space_to_batch_quant8_3) \ + FN(space_to_depth_float_1) \ + FN(space_to_depth_float_1_relaxed) \ + FN(space_to_depth_float_2) \ + FN(space_to_depth_float_2_relaxed) \ + FN(space_to_depth_float_3) \ + FN(space_to_depth_float_3_relaxed) \ + FN(space_to_depth_quant8_1) \ + FN(space_to_depth_quant8_2) \ + FN(squeeze) \ + FN(squeeze_float_1) \ + FN(squeeze_quant8_1) \ + FN(strided_slice) \ + FN(strided_slice_float_1) \ + FN(strided_slice_float_10) \ + FN(strided_slice_float_2) \ + FN(strided_slice_float_3) \ + FN(strided_slice_float_4) \ + FN(strided_slice_float_5) \ + FN(strided_slice_float_6) \ + FN(strided_slice_float_7) \ + FN(strided_slice_float_8) \ + FN(strided_slice_float_9) \ + FN(strided_slice_qaunt8_10) \ + FN(strided_slice_quant8_1) \ + FN(strided_slice_quant8_2) \ + FN(strided_slice_quant8_3) \ + FN(strided_slice_quant8_4) \ + FN(strided_slice_quant8_5) \ + FN(strided_slice_quant8_6) \ + FN(strided_slice_quant8_7) \ + FN(strided_slice_quant8_8) \ + FN(strided_slice_quant8_9) \ + FN(sub) \ + FN(svdf) \ + FN(svdf2) \ + FN(svdf2_relaxed) \ + FN(svdf_relaxed) \ + FN(svdf_state) \ + FN(svdf_state_relaxed) \ + FN(tanh) \ + FN(tanh_relaxed) \ + FN(transpose) \ + FN(transpose_float_1) \ + FN(transpose_quant8_1) + +#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \ + namespace function { \ + extern std::vector examples; \ + Model createTestModel(); \ + } + +FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS) + +#undef FORWARD_DECLARE_GENERATED_OBJECTS + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +#endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp new file mode 100644 index 0000000000..7a20e26fe2 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp @@ -0,0 +1,513 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+
+using V1_0::IPreparedModel;
+using V1_0::Operand;
+using V1_0::OperandLifeTime;
+using V1_0::OperandType;
+
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
+                                           const V1_1::Model& model) {
+    SCOPED_TRACE(message + " [getSupportedOperations_1_1]");
+
+    Return<void> ret =
+        device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+        });
+    EXPECT_TRUE(ret.isOk());
+}
+
+static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
+                                 const V1_1::Model& model) {
+    SCOPED_TRACE(message + " [prepareModel_1_1]");
+
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus =
+        device->prepareModel_1_1(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
+// Primary validation function. This function will take a valid model, apply a
+// mutation to it to invalidate the model, then pass it to interface calls that
+// use the model. Note that the model here is passed by value, and any mutation
+// to the model does not leave this function.
+static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model,
+                     const std::function<void(Model*)>& mutation) {
+    mutation(&model);
+    validateGetSupportedOperations(device, message, model);
+    validatePrepareModel(device, message, model);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
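+// A minimal illustration of the idiom below (hypothetical values, not part of
+// the tests): removing index 1 from {10, 20, 30} first rotates the vector to
+// {10, 30, 20}, then the resize drops the trailing element, leaving {10, 30}
+// -- equivalent to an erase, without needing hidl_vec support for it:
+//
+//     hidl_vec_removeAt(&vec, 1);  // {10, 20, 30} -> {10, 30, 20} -> {10, 30}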
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+static uint32_t addOperand(Model* model) {
+    return hidl_vec_push_back(&model->operands,
+                              {
+                                  .type = OperandType::INT32,
+                                  .dimensions = {},
+                                  .numberOfConsumers = 0,
+                                  .scale = 0.0f,
+                                  .zeroPoint = 0,
+                                  .lifetime = OperandLifeTime::MODEL_INPUT,
+                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                              });
+}
+
+static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
+    uint32_t index = addOperand(model);
+    model->operands[index].numberOfConsumers = 1;
+    model->operands[index].lifetime = lifetime;
+    return index;
+}
+
+///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
+
+static const int32_t invalidOperandTypes[] = {
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
+};
+
+static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        for (int32_t invalidOperandType : invalidOperandTypes) {
+            const std::string message = "mutateOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to value " +
+                                        std::to_string(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE OPERAND RANK /////////////////////////
+
+static uint32_t getInvalidRank(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            return 1;
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0;
+        default:
+            return 0;
+    }
+}
+
+static void mutateOperandRankTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
+                                    " has rank of " + std::to_string(invalidRank);
+        validate(device, message, model, [operand, invalidRank](Model* model) {
+            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
+
+static float getInvalidScale(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+            return 1.0f;
+        case OperandType::TENSOR_INT32:
+            return -1.0f;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return 0.0f;
+        default:
+            return 0.0f;
+    }
+}
+
+static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const float invalidScale = getInvalidScale(model.operands[operand].type);
+        const std::string message = "mutateOperandScaleTest: operand " +
+                                    std::to_string(operand) + " has scale of " +
+                                    std::to_string(invalidScale);
+        validate(device, message, model, [operand, invalidScale](Model* model) {
+            model->operands[operand].scale = invalidScale;
+        });
+    }
+}
+
+///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
+
+static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+        case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_INT32:
+            return {1};
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return {-1, 256};
+        default:
+            return {};
+    }
+}
+
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::vector<int32_t> invalidZeroPoints =
+            getInvalidZeroPoints(model.operands[operand].type);
+        for (int32_t invalidZeroPoint : invalidZeroPoints) {
+            const std::string message = "mutateOperandZeroPointTest: operand " +
+                                        std::to_string(operand) + " has zero point of " +
+                                        std::to_string(invalidZeroPoint);
+            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
+                model->operands[operand].zeroPoint = invalidZeroPoint;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE EXTRA ??? /////////////////////////
+
+// TODO: Operand::lifetime
+// TODO: Operand::location
+
+///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
+
+static void mutateOperand(Operand* operand, OperandType type) {
+    Operand newOperand = *operand;
+    newOperand.type = type;
+    switch (type) {
+        case OperandType::FLOAT32:
+        case OperandType::INT32:
+        case OperandType::UINT32:
+            newOperand.dimensions = hidl_vec<uint32_t>();
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_FLOAT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_INT32:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.zeroPoint = 0;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            newOperand.dimensions =
+                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
+            break;
+        case OperandType::OEM:
+        case OperandType::TENSOR_OEM_BYTE:
+        default:
+            break;
+    }
+    *operand = newOperand;
+}
+
+static bool mutateOperationOperandTypeSkip(size_t operand, const V1_1::Model& model) {
+    // LSH_PROJECTION's second argument is allowed to have any type. This is the
+    // only operation that currently has a type that can be anything independent
+    // from any other type. Changing the operand type to any other type will
+    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
+    // test.
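+    // For example, with a hypothetical LSH_PROJECTION whose inputs are
+    // {hash, input, weight, type}, mutating the type of the operand at
+    // inputs[1] from TENSOR_FLOAT32 to TENSOR_QUANT8_ASYMM still yields a
+    // valid model, so the driver cannot be required to return
+    // INVALID_ARGUMENT for it.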
+    for (const Operation& operation : model.operations) {
+        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        if (mutateOperationOperandTypeSkip(operand, model)) {
+            continue;
+        }
+        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
+            // Do not test OEM types
+            if (invalidOperandType == model.operands[operand].type ||
+                invalidOperandType == OperandType::OEM ||
+                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
+                continue;
+            }
+            const std::string message = "mutateOperationOperandTypeTest: operand " +
+                                        std::to_string(operand) + " set to type " +
+                                        toString(invalidOperandType);
+            validate(device, message, model, [operand, invalidOperandType](Model* model) {
+                mutateOperand(&model->operands[operand], invalidOperandType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
+
+static const int32_t invalidOperationTypes[] = {
+    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
+    static_cast<int32_t>(OperationType::TRANSPOSE) + 1,      // upper bound fundamental
+    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
+    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+};
+
+static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (int32_t invalidOperationType : invalidOperationTypes) {
+            const std::string message = "mutateOperationTypeTest: operation " +
+                                        std::to_string(operation) + " set to value " +
+                                        std::to_string(invalidOperationType);
+            validate(device, message, model, [operation, invalidOperationType](Model* model) {
+                model->operations[operation].type =
+                    static_cast<OperationType>(invalidOperationType);
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
+                                                 const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
+                model->operations[operation].inputs[input] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
+                                                  const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const uint32_t invalidOperand = model.operands.size();
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
+                                        std::to_string(operation) + " output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
+                model->operations[operation].outputs[output] = invalidOperand;
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
+
+static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
+    if (vec) {
+        // remove elements matching "value"
+        auto last = std::remove(vec->begin(), vec->end(), value);
+        vec->resize(std::distance(vec->begin(), last));
+
+        // decrement elements exceeding "value"
+        std::transform(vec->begin(), vec->end(), vec->begin(),
+                       [value](uint32_t v) { return v > value ? v - 1 : v; });
+    }
+}
+
+static void removeOperand(Model* model, uint32_t index) {
+    hidl_vec_removeAt(&model->operands, index);
+    for (Operation& operation : model->operations) {
+        removeValueAndDecrementGreaterValues(&operation.inputs, index);
+        removeValueAndDecrementGreaterValues(&operation.outputs, index);
+    }
+    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
+    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+}
+
+static void removeOperandTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
+        validate(device, message, model,
+                 [operand](Model* model) { removeOperand(model, operand); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION /////////////////////////
+
+static void removeOperation(Model* model, uint32_t index) {
+    for (uint32_t operand : model->operations[index].inputs) {
+        model->operands[operand].numberOfConsumers--;
+    }
+    hidl_vec_removeAt(&model->operations, index);
+}
+
+static void removeOperationTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
+        validate(device, message, model,
+                 [operation](Model* model) { removeOperation(model, operation); });
+    }
+}
+
+///////////////////////// REMOVE OPERATION INPUT /////////////////////////
+
+static void removeOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+            const V1_1::Operation& op = model.operations[operation];
+            // CONCATENATION has at least 2 inputs, with the last element being
+            // INT32. Skip this test if removing one of CONCATENATION's
+            // inputs still produces a valid model.
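+            // For example, removing one tensor from a hypothetical
+            // CONCATENATION with inputs {t0, t1, t2, axis} leaves
+            // {t0, t2, axis}, which is still a well-formed two-input
+            // CONCATENATION, so no error can be required of the driver.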
+            if (op.type == V1_1::OperationType::CONCATENATION && op.inputs.size() > 2 &&
+                input != op.inputs.size() - 1) {
+                continue;
+            }
+            const std::string message = "removeOperationInputTest: operation " +
+                                        std::to_string(operation) + ", input " +
+                                        std::to_string(input);
+            validate(device, message, model, [operation, input](Model* model) {
+                uint32_t operand = model->operations[operation].inputs[input];
+                model->operands[operand].numberOfConsumers--;
+                hidl_vec_removeAt(&model->operations[operation].inputs, input);
+            });
+        }
+    }
+}
+
+///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
+
+static void removeOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+            const std::string message = "removeOperationOutputTest: operation " +
+                                        std::to_string(operation) + ", output " +
+                                        std::to_string(output);
+            validate(device, message, model, [operation, output](Model* model) {
+                hidl_vec_removeAt(&model->operations[operation].outputs, output);
+            });
+        }
+    }
+}
+
+///////////////////////// MODEL VALIDATION /////////////////////////
+
+// TODO: remove model input
+// TODO: remove model output
+// TODO: add unused operation
+
+///////////////////////// ADD OPERATION INPUT /////////////////////////
+
+static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
+            hidl_vec_push_back(&model->operations[operation].inputs, index);
+            hidl_vec_push_back(&model->inputIndexes, index);
+        });
+    }
+}
+
+///////////////////////// ADD OPERATION OUTPUT /////////////////////////
+
+static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+        const std::string message =
+            "addOperationOutputTest: operation " + std::to_string(operation);
+        validate(device, message, model, [operation](Model* model) {
+            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
+            hidl_vec_push_back(&model->operations[operation].outputs, index);
+            hidl_vec_push_back(&model->outputIndexes, index);
+        });
+    }
+}
+
+////////////////////////// ENTRY POINT //////////////////////////////
+
+void ValidationTest::validateModel(const V1_1::Model& model) {
+    mutateOperandTypeTest(device, model);
+    mutateOperandRankTest(device, model);
+    mutateOperandScaleTest(device, model);
+    mutateOperandZeroPointTest(device, model);
+    mutateOperationOperandTypeTest(device, model);
+    mutateOperationTypeTest(device, model);
+    mutateOperationInputOperandIndexTest(device, model);
+    mutateOperationOutputOperandIndexTest(device, model);
+    removeOperandTest(device, model);
+    removeOperationTest(device, model);
+    removeOperationInputTest(device, model);
+    removeOperationOutputTest(device, model);
+    addOperationInputTest(device, model);
+    addOperationOutputTest(device, model);
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_1
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
new file mode 100644
index 0000000000..bd966144f6
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include
+#include
+#include
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
+using generated_tests::MixedTyped;
+using generated_tests::MixedTypedExampleType;
+using generated_tests::for_all;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            ASSERT_NE(0ul, supported.size());
+            fullySupportsModel =
+                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+        });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus =
+        device->prepareModel_1_1(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations_1_1 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_1 is called, and
+    // 'fullySupportsModel' is true iff the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ] Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
+// Primary validation function. This function will take a valid request, apply a
+// mutation to it to invalidate the request, then pass it to interface calls
+// that use the request. Note that the request here is passed by value, and any
+// mutation to the request does not leave this function.
+static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
+                     Request request, const std::function<void(Request*)>& mutation) {
+    mutation(&request);
+    SCOPED_TRACE(message + " [execute]");
+
+    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+    ASSERT_NE(nullptr, executionCallback.get());
+    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+    ASSERT_TRUE(executeLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+    executionCallback->wait();
+    ErrorStatus executionReturnStatus = executionCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+}
+
+// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
+// so this is efficiently accomplished by moving the element to the end and
+// resizing the hidl_vec to one less.
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t input = 0; input < request.inputs.size(); ++input) {
+        const std::string message = "removeInput: removed input " + std::to_string(input);
+        validate(preparedModel, message, request,
+                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+    }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t output = 0; output < request.outputs.size(); ++output) {
+        const std::string message = "removeOutput: removed Output " + std::to_string(output);
+        validate(preparedModel, message, request,
+                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+    }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    std::vector<Request> requests;
+
+    for (auto& example : examples) {
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& outputs = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // This function only partially specifies the metadata (vector of RequestArguments).
+        // The contents are copied over below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            RequestArgument arg_empty = {
+                .hasNoValue = true,
+            };
+            inputs_info[index] = s ? arg : arg_empty;
+            inputSize += s;
+        });
+        // Compute offset for inputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                if (!i.hasNoValue) i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Compute offset for outputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+                                          nn::allocateSharedMemory(outputSize)};
+        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
+            return {};
+        }
+
+        // map pool
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        if (inputMemory == nullptr) {
+            return {};
+        }
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        if (inputPtr == nullptr) {
+            return {};
+        }
+
+        // initialize pool
+        inputMemory->update();
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+        inputMemory->commit();
+
+        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
+    }
+
+    return requests;
+}
+
+void ValidationTest::validateRequests(const V1_1::Model& model,
+                                      const std::vector<Request>& requests) {
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    // validate each request
+    for (const Request& request : requests) {
+        removeInputTest(preparedModel, request);
+        removeOutputTest(preparedModel, request);
+    }
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_1
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/ValidationTests.cpp b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
new file mode 100644
index 0000000000..1c35ba842b
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "Models.h" +#include "VtsHalNeuralnetworks.h" + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { + +// forward declarations +std::vector createRequests(const std::vector& examples); + +// generate validation tests +#define VTS_CURRENT_TEST_CASE(TestName) \ + TEST_F(ValidationTest, TestName) { \ + const Model model = TestName::createTestModel(); \ + const std::vector requests = createRequests(TestName::examples); \ + validateModel(model); \ + validateRequests(model, requests); \ + } + +FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE) + +#undef VTS_CURRENT_TEST_CASE + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp similarity index 64% rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp rename to neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp index b1d3be786c..62381e6796 100644 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp @@ -16,16 +16,7 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" -#include "VtsHalNeuralnetworksV1_1.h" -#include "Utils.h" - -#include -#include - -using ::android::hardware::hidl_memory; -using ::android::hidl::allocator::V1_0::IAllocator; -using ::android::hidl::memory::V1_0::IMemory; -using ::android::sp; +#include "VtsHalNeuralnetworks.h" namespace android { namespace hardware { @@ -34,11 +25,6 @@ namespace V1_1 { namespace vts { namespace functional { -// allocator helper -hidl_memory allocateSharedMemory(int64_t size) { - return nn::allocateSharedMemory(size); -} - // A class for test environment setup NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} @@ -52,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() { } void NeuralnetworksHidlEnvironment::registerTestServices() { - registerTestService(); + registerTestService(); } // The main test class for NEURALNETWORK HIDL HAL. 
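+// Each TEST_F below runs on a fresh fixture: gtest constructs the test
+// object, calls SetUp(), runs the test body, then calls TearDown(). A rough
+// sketch of the equivalent flow (illustrative only, not how gtest is driven
+// here):
+//
+//     NeuralnetworksHidlTest test;  // device is still null
+//     test.SetUp();                 // binds the IDevice service
+//     /* ... test body uses test.device ... */
+//     test.TearDown();              // drops the strong pointer to the service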
+NeuralnetworksHidlTest::NeuralnetworksHidlTest() {} + NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} void NeuralnetworksHidlTest::SetUp() { - device = ::testing::VtsHalHidlTargetTestBase::getService( + ::testing::VtsHalHidlTargetTestBase::SetUp(); + device = ::testing::VtsHalHidlTargetTestBase::getService( NeuralnetworksHidlEnvironment::getInstance()); ASSERT_NE(nullptr, device.get()); } -void NeuralnetworksHidlTest::TearDown() {} +void NeuralnetworksHidlTest::TearDown() { + device = nullptr; + ::testing::VtsHalHidlTargetTestBase::TearDown(); +} } // namespace functional } // namespace vts + +::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) { + return os << toString(errorStatus); +} + +::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) { + return os << toString(deviceStatus); +} + } // namespace V1_1 } // namespace neuralnetworks } // namespace hardware } // namespace android + +using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment; + +int main(int argc, char** argv) { + ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); + ::testing::InitGoogleTest(&argc, argv); + NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); + + int status = RUN_ALL_TESTS(); + return status; +} diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h similarity index 60% rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h rename to neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h index 426246ce76..0050e52d25 100644 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h @@ -17,65 +17,71 @@ #ifndef VTS_HAL_NEURALNETWORKS_V1_1_H #define VTS_HAL_NEURALNETWORKS_V1_1_H -#include -#include -#include +#include #include #include -#include #include #include + +#include #include -#include +#include +#include namespace android { namespace hardware { namespace neuralnetworks { namespace V1_1 { + +using V1_0::Request; +using V1_0::DeviceStatus; +using V1_0::ErrorStatus; + namespace vts { namespace functional { -hidl_memory allocateSharedMemory(int64_t size); // A class for test environment setup class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { + DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment); NeuralnetworksHidlEnvironment(); - NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete; - NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete; - NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete; - NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete; + ~NeuralnetworksHidlEnvironment() override; public: - ~NeuralnetworksHidlEnvironment() override; static NeuralnetworksHidlEnvironment* getInstance(); void registerTestServices() override; }; // The main test class for NEURALNETWORKS HIDL HAL. 
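+// DISALLOW_COPY_AND_ASSIGN comes from android-base/macros.h and expands
+// roughly to deleted copy operations:
+//
+//     NeuralnetworksHidlTest(const NeuralnetworksHidlTest&) = delete;
+//     void operator=(const NeuralnetworksHidlTest&) = delete;
+//
+// replacing the hand-written deleted constructors and assignment operators
+// that the environment class above used before this change.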
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
similarity index 60%
rename from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h
rename to neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 426246ce76..0050e52d25 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -17,65 +17,71 @@
 #ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
 #define VTS_HAL_NEURALNETWORKS_V1_1_H
 
-#include
-#include
-#include
+#include
 #include
 #include
-#include
 #include
 #include
+
+#include
 #include
-#include
+#include
+#include
 
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
 namespace V1_1 {
+
+using V1_0::Request;
+using V1_0::DeviceStatus;
+using V1_0::ErrorStatus;
+
 namespace vts {
 namespace functional {
-hidl_memory allocateSharedMemory(int64_t size);
 
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment();
-    NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
-    NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
+    ~NeuralnetworksHidlEnvironment() override;
 
   public:
-    ~NeuralnetworksHidlEnvironment() override;
     static NeuralnetworksHidlEnvironment* getInstance();
    void registerTestServices() override;
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
+
   public:
+    NeuralnetworksHidlTest();
     ~NeuralnetworksHidlTest() override;
     void SetUp() override;
     void TearDown() override;
 
-    sp<V1_1::IDevice> device;
+   protected:
+    sp<IDevice> device;
 };
+
+// Tag for the validation tests
+class ValidationTest : public NeuralnetworksHidlTest {
+   protected:
+    void validateModel(const Model& model);
+    void validateRequests(const Model& model, const std::vector<Request>& request);
+};
+
+// Tag for the generated tests
+class GeneratedTest : public NeuralnetworksHidlTest {};
+
 } // namespace functional
 } // namespace vts
 
 // pretty-print values for error messages
-
-template <typename CharT, typename Traits>
-::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
-                                                V1_0::ErrorStatus errorStatus) {
-    return os << toString(errorStatus);
-}
-
-template <typename CharT, typename Traits>
-::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
-                                                V1_0::DeviceStatus deviceStatus) {
-    return os << toString(deviceStatus);
-}
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
 
 } // namespace V1_1
 } // namespace neuralnetworks
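The header only declares ValidationTest::validateModel and ValidationTest::validateRequests; their definitions live in companion ValidateModel.cpp/ValidateRequest.cpp sources that are not part of this excerpt. As a hedged sketch of the mutation style such a suite might use (the helper and the specific mutation below are illustrative assumptions, not the actual implementation):

    // Hypothetical mutation-style check: corrupt one field of an otherwise
    // valid model and require the service to reject it.
    static void checkInvalidOperandType(const sp<IDevice>& device, Model model) {
        model.operands[0].type = static_cast<OperandType>(-1);  // invalid enum value
        Return<void> ret = device->getSupportedOperations_1_1(
            model, [](ErrorStatus status, const hidl_vec<bool>&) {
                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);  // must be rejected
            });
        EXPECT_TRUE(ret.isOk());
    }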
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
deleted file mode 100644
index 10591dcb20..0000000000
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "neuralnetworks_hidl_hal_test"
-
-#include "VtsHalNeuralnetworksV1_1.h"
-
-#include "Callbacks.h"
-#include "Models.h"
-#include "TestHarness.h"
-
-#include
-#include
-#include
-#include
-#include
-
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using ::android::hardware::neuralnetworks::V1_0::Operand;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_0::OperandType;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_1::Capabilities;
-using ::android::hardware::neuralnetworks::V1_1::IDevice;
-using ::android::hardware::neuralnetworks::V1_1::Model;
-using ::android::hardware::neuralnetworks::V1_1::Operation;
-using ::android::hardware::neuralnetworks::V1_1::OperationType;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hidl::allocator::V1_0::IAllocator;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::sp;
-
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-
-static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-    Model model = createValidTestModel_1_1();
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations call returns a list of operations that are
-    // guaranteed not to fail if prepareModel is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ] Early termination of test because vendor service cannot "
-                     "prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
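The comment block above is the crux of the early return: getSupportedOperations_1_1 guarantees successful preparation only when every operation in the model is claimed as supported. Restated as the conformance predicate the helper enforces (a sketch; the helper name is illustrative):

    // A driver that claims full support for a model must also prepare it
    // successfully; with only partial support, preparation failure is allowed
    // and the calling test ends early instead of failing.
    static bool isConformingPrepareResult(bool fullySupportsModel, ErrorStatus prepareStatus) {
        return !fullySupportsModel || prepareStatus == ErrorStatus::NONE;
    }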
-
-// create device test
-TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
-
-// status test
-TEST_F(NeuralnetworksHidlTest, StatusTest) {
-    Return<DeviceStatus> status = device->getStatus();
-    ASSERT_TRUE(status.isOk());
-    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
-}
-
-// initialization
-TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
-    Return<void> ret =
-        device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// supported operations positive test
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
-    Model model = createValidTestModel_1_1();
-    Return<void> ret = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_EQ(model.operations.size(), supported.size());
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// supported operations negative test 1
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
-    Model model = createInvalidTestModel1_1_1();
-    Return<void> ret = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-            (void)supported;
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// supported operations negative test 2
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
-    Model model = createInvalidTestModel2_1_1();
-    Return<void> ret = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-            (void)supported;
-        });
-    EXPECT_TRUE(ret.isOk());
-}
-
-// prepare simple model positive test
-TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
-    sp<IPreparedModel> preparedModel;
-    doPrepareModelShortcut(device, &preparedModel);
-}
-
-// prepare simple model negative test 1
-TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
-    Model model = createInvalidTestModel1_1_1();
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    EXPECT_EQ(nullptr, preparedModel.get());
-}
-
-// prepare simple model negative test 2
-TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
-    Model model = createInvalidTestModel2_1_1();
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus =
-        device->prepareModel_1_1(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    EXPECT_EQ(nullptr, preparedModel.get());
-}
-
-// execute simple graph positive test
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
-    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
-    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
-    const uint32_t OUTPUT = 1;
-
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-    Request request = createValidTestRequest();
-
-    auto postWork = [&] {
-        sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
-        if (outputMemory == nullptr) {
-            return false;
-        }
-        float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
-        if (outputPtr == nullptr) {
-            return false;
-        }
-        outputMemory->read();
-        std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
-        outputMemory->commit();
-        return true;
-    };
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    executionCallback->on_finish(postWork);
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
-    EXPECT_EQ(expectedData, outputData);
-}
-
-// execute simple graph negative test 1
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-    Request request = createInvalidTestRequest1();
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
-}
-
-// execute simple graph negative test 2
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-    Request request = createInvalidTestRequest2();
-
-    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
-    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
-    ASSERT_TRUE(executeLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
-
-    executionCallback->wait();
-    ErrorStatus executionReturnStatus = executionCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
-}
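The prepare and execute tests above all share a two-phase status idiom: the HIDL call returns a synchronous launch status, while the callback's wait()/getStatus() pair delivers the asynchronous return status. Condensed into one helper (a sketch; the helper name is illustrative, not part of the HAL):

    // Launch an execution, block on the callback, and surface both statuses.
    static ErrorStatus executeAndWait(const sp<IPreparedModel>& preparedModel,
                                      const Request& request) {
        sp<ExecutionCallback> callback = new ExecutionCallback();
        Return<ErrorStatus> launchStatus = preparedModel->execute(request, callback);
        EXPECT_TRUE(launchStatus.isOk());  // transport-level (binder) success
        callback->wait();                  // blocks until the service notifies
        return callback->getStatus();      // asynchronous execution result
    }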
-
-class NeuralnetworksInputsOutputsTest
-    : public NeuralnetworksHidlTest,
-      public ::testing::WithParamInterface<std::tuple<bool, bool>> {
-   protected:
-    virtual void SetUp() { NeuralnetworksHidlTest::SetUp(); }
-    virtual void TearDown() { NeuralnetworksHidlTest::TearDown(); }
-    V1_1::Model createModel(const std::vector<uint32_t>& inputs,
-                            const std::vector<uint32_t>& outputs) {
-        // We set up the operands as floating-point with no designated
-        // model inputs and outputs, and then patch type and lifetime
-        // later on in this function.
-
-        std::vector<Operand> operands = {
-            {
-                .type = OperandType::TENSOR_FLOAT32,
-                .dimensions = {1},
-                .numberOfConsumers = 1,
-                .scale = 0.0f,
-                .zeroPoint = 0,
-                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
-                .location = {.poolIndex = 0, .offset = 0, .length = 0},
-            },
-            {
-                .type = OperandType::TENSOR_FLOAT32,
-                .dimensions = {1},
-                .numberOfConsumers = 1,
-                .scale = 0.0f,
-                .zeroPoint = 0,
-                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
-                .location = {.poolIndex = 0, .offset = 0, .length = 0},
-            },
-            {
-                .type = OperandType::INT32,
-                .dimensions = {},
-                .numberOfConsumers = 1,
-                .scale = 0.0f,
-                .zeroPoint = 0,
-                .lifetime = OperandLifeTime::CONSTANT_COPY,
-                .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
-            },
-            {
-                .type = OperandType::TENSOR_FLOAT32,
-                .dimensions = {1},
-                .numberOfConsumers = 0,
-                .scale = 0.0f,
-                .zeroPoint = 0,
-                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
-                .location = {.poolIndex = 0, .offset = 0, .length = 0},
-            },
-        };
-
-        const std::vector<Operation> operations = {{
-            .type = OperationType::ADD, .inputs = {0, 1, 2}, .outputs = {3},
-        }};
-
-        std::vector<uint8_t> operandValues;
-        int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
-        operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
-                             reinterpret_cast<const uint8_t*>(&activation[1]));
-
-        if (kQuantized) {
-            for (auto& operand : operands) {
-                if (operand.type == OperandType::TENSOR_FLOAT32) {
-                    operand.type = OperandType::TENSOR_QUANT8_ASYMM;
-                    operand.scale = 1.0f;
-                    operand.zeroPoint = 0;
-                }
-            }
-        }
-
-        auto patchLifetime = [&operands](const std::vector<uint32_t>& operandIndexes,
-                                         OperandLifeTime lifetime) {
-            for (uint32_t index : operandIndexes) {
-                operands[index].lifetime = lifetime;
-            }
-        };
-        if (kInputHasPrecedence) {
-            patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
-            patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
-        } else {
-            patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
-            patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
-        }
-
-        return {
-            .operands = operands,
-            .operations = operations,
-            .inputIndexes = inputs,
-            .outputIndexes = outputs,
-            .operandValues = operandValues,
-            .pools = {},
-        };
-    }
-    void check(const std::string& name,
-               bool expectation,  // true = success
-               const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) {
-        SCOPED_TRACE(name + " (HAL calls should " + (expectation ? "succeed" : "fail") + ", " +
-                     (kInputHasPrecedence ? "input" : "output") + " precedence, " +
-                     (kQuantized ? "quantized" : "float"));
"quantized" : "float")); - - V1_1::Model model = createModel(inputs, outputs); - - // ensure that getSupportedOperations_1_1() checks model validity - ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE; - Return supportedOpsReturn = device->getSupportedOperations_1_1( - model, [&model, &supportedOpsErrorStatus](ErrorStatus status, - const hidl_vec& supported) { - supportedOpsErrorStatus = status; - if (status == ErrorStatus::NONE) { - ASSERT_EQ(supported.size(), model.operations.size()); - } - }); - ASSERT_TRUE(supportedOpsReturn.isOk()); - ASSERT_EQ(supportedOpsErrorStatus, - (expectation ? ErrorStatus::NONE : ErrorStatus::INVALID_ARGUMENT)); - - // ensure that prepareModel_1_1() checks model validity - sp preparedModelCallback = new PreparedModelCallback; - ASSERT_NE(preparedModelCallback.get(), nullptr); - Return prepareLaunchReturn = - device->prepareModel_1_1(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchReturn.isOk()); - ASSERT_TRUE(prepareLaunchReturn == ErrorStatus::NONE || - prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT); - bool preparationOk = (prepareLaunchReturn == ErrorStatus::NONE); - if (preparationOk) { - preparedModelCallback->wait(); - preparationOk = (preparedModelCallback->getStatus() == ErrorStatus::NONE); - } - - if (preparationOk) { - ASSERT_TRUE(expectation); - } else { - // Preparation can fail for reasons other than an invalid model -- - // for example, perhaps not all operations are supported, or perhaps - // the device hit some kind of capacity limit. - bool invalid = prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT || - preparedModelCallback->getStatus() == ErrorStatus::INVALID_ARGUMENT; - ASSERT_NE(expectation, invalid); - } - } - - // Indicates whether an operand that appears in both the inputs - // and outputs vector should have lifetime appropriate for input - // rather than for output. - const bool kInputHasPrecedence = std::get<0>(GetParam()); - - // Indicates whether we should test TENSOR_QUANT8_ASYMM rather - // than TENSOR_FLOAT32. - const bool kQuantized = std::get<1>(GetParam()); -}; - -TEST_P(NeuralnetworksInputsOutputsTest, Validate) { - check("Ok", true, {0, 1}, {3}); - check("InputIsOutput", false, {0, 1}, {3, 0}); - check("OutputIsInput", false, {0, 1, 3}, {3}); - check("DuplicateInputs", false, {0, 1, 0}, {3}); - check("DuplicateOutputs", false, {0, 1}, {3, 3}); -} - -INSTANTIATE_TEST_CASE_P(Flavor, NeuralnetworksInputsOutputsTest, - ::testing::Combine(::testing::Bool(), ::testing::Bool())); - -} // namespace functional -} // namespace vts -} // namespace V1_1 -} // namespace neuralnetworks -} // namespace hardware -} // namespace android - -using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment; - -int main(int argc, char** argv) { - ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); - ::testing::InitGoogleTest(&argc, argv); - NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); - - int status = RUN_ALL_TESTS(); - return status; -}