mirror of
https://github.com/Evolution-X/hardware_interfaces
synced 2026-02-01 11:36:00 +00:00
Merge changes from topics "replace_asymm", "fp16-op-add"

* changes:
  Replace TENSOR_QUANT16_ASYMM with TENSOR_QUANT16_SYMM
  Fix VTS ValidationTest for 1.2 ops.
  Adds float16 support to generated tests.
  Autogenerates VTS ValidationTest tests.
  Fix VTS ValidationTest for 1.2 ops.
  Separates VTS tests by HAL version.
@@ -39,17 +39,14 @@ cc_library_static {
    ],
}

cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
cc_defaults {
    name: "VtsHalNeuralNetworksTargetTestDefaults",
    defaults: ["VtsHalTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
@@ -66,4 +63,22 @@ cc_test {
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
    // error in ld.gold.
    arch: {
        arm: {
            sanitize: {
                never: true,
            },
        },
    },
}

cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
    ],
}

@@ -45,6 +45,7 @@ using ::test_helper::for_each;
using ::test_helper::Int32Operands;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::MixedTypedIndex;
using ::test_helper::Quant8Operands;
using ::test_helper::resize_accordingly;

@@ -63,14 +64,16 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr
    copy_back_<int32_t>(dst, ra, src);
    copy_back_<uint8_t>(dst, ra, src);
    copy_back_<int16_t>(dst, ra, src);
    static_assert(4 == std::tuple_size<MixedTyped>::value,
    copy_back_<_Float16>(dst, ra, src);
    static_assert(5 == std::tuple_size<MixedTyped>::value,
                  "Number of types in MixedTyped changed, but copy_back function wasn't updated");
}
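
Note: the static_assert bump from 4 to 5 is what keeps helpers like copy_back honest. MixedTyped is a tuple with one container per operand type, and adding _Float16 grows it by one element. A minimal sketch of the idiom follows; the type list here is an assumption for illustration, the real MixedTyped is defined in TestHarness.h:

    #include <cstdint>
    #include <map>
    #include <tuple>
    #include <vector>

    // Hypothetical stand-in for test_helper::MixedTyped: one map per operand
    // type, keyed by operand index. _Float16 is the Clang half-precision type.
    using MixedTyped = std::tuple<std::map<int, std::vector<float>>,
                                  std::map<int, std::vector<int32_t>>,
                                  std::map<int, std::vector<uint8_t>>,
                                  std::map<int, std::vector<int16_t>>,
                                  std::map<int, std::vector<_Float16>>>;

    void copy_back(MixedTyped* dst, char* src) {
        // ... one copy_back_<T>(dst, src) call per element of the tuple ...
        // Compilation fails here if a sixth type is added without updating the
        // calls above, which is exactly the guard the diff extends from 4 to 5.
        static_assert(5 == std::tuple_size<MixedTyped>::value,
                      "Number of types in MixedTyped changed, but copy_back wasn't updated");
    }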

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExample>& examples, float fpAtol = 1e-5f,
                           const std::vector<MixedTypedExample>& examples,
                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                           float fpRtol = 1e-5f) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;
@@ -78,13 +81,20 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
    int example_no = 1;
    for (auto& example : examples) {
        SCOPED_TRACE(example_no++);

        const MixedTyped& inputs = example.operands.first;
        const MixedTyped& golden = example.operands.second;

        const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
            // TODO: Adjust the error limit based on testing.
            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
            fpAtol = 5.0f * 0.0009765625f;
            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
            fpRtol = 5.0f * 0.0009765625f;
        }

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
@@ -228,7 +238,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
    ASSERT_NE(nullptr, preparedModel.get());

    float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
}
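
The tolerance constants are unit-in-the-last-place (ULP) sizes at 1.0: 2^-23 = 1.1920928955078125e-7 for FP32 (23 fraction bits) and 2^-10 = 0.0009765625 for FP16 (10 fraction bits), so the relaxed-mode bound of 5 ULP of FP16 works out to 0.0048828125. A small self-check of that arithmetic:

    #include <cassert>
    #include <cmath>

    int main() {
        // ULP of 1.0 in FP32: distance to the next representable float above 1.0f.
        const float ulp32 = std::nextafterf(1.0f, 2.0f) - 1.0f;  // 2^-23
        assert(ulp32 == 1.1920928955078125e-7f);

        // ULP of 1.0 in FP16 is 2^-10 (FP16 has a 10-bit fraction).
        const float ulp16 = std::ldexp(1.0f, -10);                // 0.0009765625
        assert(ulp16 == 0.0009765625f);

        // Default vs. relaxed-mode bounds, matching the constants in the diff.
        const float fpRtolDefault = 5.0f * ulp32;  // ~5.96e-7
        const float fpAtolRelaxed = 5.0f * ulp16;  // 0.0048828125
        return fpAtolRelaxed > fpRtolDefault ? 0 : 1;
    }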

void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -272,13 +283,8 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());

    // TODO: Adjust the error limit based on testing.
    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
                                                           : 5.0f * 0.0009765625f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16);
}

// TODO: Reduce code duplication.
@@ -323,13 +329,8 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());

    // TODO: Adjust the error limit based on testing.
    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
                                                           : 5.0f * 0.0009765625f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
    EvaluatePreparedModel(preparedModel, is_ignored, examples,
                          model.relaxComputationFloat32toFloat16);
}

} // namespace generated_tests

@@ -45,6 +45,8 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCa
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"

@@ -1,200 +0,0 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

using MixedTypedExample = test_helper::MixedTypedExample;

#define FOR_EACH_TEST_MODEL(FN) \
    FN(add_broadcast_quant8) \
    FN(add) \
    FN(add_quant8) \
    FN(avg_pool_float_1) \
    FN(avg_pool_float_2) \
    FN(avg_pool_float_3) \
    FN(avg_pool_float_4) \
    FN(avg_pool_float_5) \
    FN(avg_pool_quant8_1) \
    FN(avg_pool_quant8_2) \
    FN(avg_pool_quant8_3) \
    FN(avg_pool_quant8_4) \
    FN(avg_pool_quant8_5) \
    FN(concat_float_1) \
    FN(concat_float_2) \
    FN(concat_float_3) \
    FN(concat_quant8_1) \
    FN(concat_quant8_2) \
    FN(concat_quant8_3) \
    FN(conv_1_h3_w2_SAME) \
    FN(conv_1_h3_w2_VALID) \
    FN(conv_3_h3_w2_SAME) \
    FN(conv_3_h3_w2_VALID) \
    FN(conv_float_2) \
    FN(conv_float_channels) \
    FN(conv_float_channels_weights_as_inputs) \
    FN(conv_float_large) \
    FN(conv_float_large_weights_as_inputs) \
    FN(conv_float) \
    FN(conv_float_weights_as_inputs) \
    FN(conv_quant8_2) \
    FN(conv_quant8_channels) \
    FN(conv_quant8_channels_weights_as_inputs) \
    FN(conv_quant8_large) \
    FN(conv_quant8_large_weights_as_inputs) \
    FN(conv_quant8) \
    FN(conv_quant8_overflow) \
    FN(conv_quant8_overflow_weights_as_inputs) \
    FN(conv_quant8_weights_as_inputs) \
    FN(depth_to_space_float_1) \
    FN(depth_to_space_float_2) \
    FN(depth_to_space_float_3) \
    FN(depth_to_space_quant8_1) \
    FN(depth_to_space_quant8_2) \
    FN(depthwise_conv2d_float_2) \
    FN(depthwise_conv2d_float_large_2) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large) \
    FN(depthwise_conv2d_float_large_weights_as_inputs) \
    FN(depthwise_conv2d_float) \
    FN(depthwise_conv2d_float_weights_as_inputs) \
    FN(depthwise_conv2d_quant8_2) \
    FN(depthwise_conv2d_quant8_large) \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
    FN(depthwise_conv2d_quant8) \
    FN(depthwise_conv2d_quant8_weights_as_inputs) \
    FN(depthwise_conv) \
    FN(dequantize) \
    FN(embedding_lookup) \
    FN(floor) \
    FN(fully_connected_float_2) \
    FN(fully_connected_float_large) \
    FN(fully_connected_float_large_weights_as_inputs) \
    FN(fully_connected_float) \
    FN(fully_connected_float_weights_as_inputs) \
    FN(fully_connected_quant8_2) \
    FN(fully_connected_quant8_large) \
    FN(fully_connected_quant8_large_weights_as_inputs) \
    FN(fully_connected_quant8) \
    FN(fully_connected_quant8_weights_as_inputs) \
    FN(hashtable_lookup_float) \
    FN(hashtable_lookup_quant8) \
    FN(l2_normalization_2) \
    FN(l2_normalization_large) \
    FN(l2_normalization) \
    FN(l2_pool_float_2) \
    FN(l2_pool_float_large) \
    FN(l2_pool_float) \
    FN(local_response_norm_float_1) \
    FN(local_response_norm_float_2) \
    FN(local_response_norm_float_3) \
    FN(local_response_norm_float_4) \
    FN(logistic_float_1) \
    FN(logistic_float_2) \
    FN(logistic_quant8_1) \
    FN(logistic_quant8_2) \
    FN(lsh_projection_2) \
    FN(lsh_projection) \
    FN(lsh_projection_weights_as_inputs) \
    FN(lstm2) \
    FN(lstm2_state2) \
    FN(lstm2_state) \
    FN(lstm3) \
    FN(lstm3_state2) \
    FN(lstm3_state3) \
    FN(lstm3_state) \
    FN(lstm) \
    FN(lstm_state2) \
    FN(lstm_state) \
    FN(max_pool_float_1) \
    FN(max_pool_float_2) \
    FN(max_pool_float_3) \
    FN(max_pool_float_4) \
    FN(max_pool_quant8_1) \
    FN(max_pool_quant8_2) \
    FN(max_pool_quant8_3) \
    FN(max_pool_quant8_4) \
    FN(mobilenet_224_gender_basic_fixed) \
    FN(mobilenet_quantized) \
    FN(mul_broadcast_quant8) \
    FN(mul) \
    FN(mul_quant8) \
    FN(mul_relu) \
    FN(relu1_float_1) \
    FN(relu1_float_2) \
    FN(relu1_quant8_1) \
    FN(relu1_quant8_2) \
    FN(relu6_float_1) \
    FN(relu6_float_2) \
    FN(relu6_quant8_1) \
    FN(relu6_quant8_2) \
    FN(relu_float_1) \
    FN(relu_float_2) \
    FN(relu_quant8_1) \
    FN(relu_quant8_2) \
    FN(reshape) \
    FN(reshape_quant8) \
    FN(reshape_quant8_weights_as_inputs) \
    FN(reshape_weights_as_inputs) \
    FN(resize_bilinear_2) \
    FN(resize_bilinear) \
    FN(rnn) \
    FN(rnn_state) \
    FN(softmax_float_1) \
    FN(softmax_float_2) \
    FN(softmax_quant8_1) \
    FN(softmax_quant8_2) \
    FN(space_to_depth_float_1) \
    FN(space_to_depth_float_2) \
    FN(space_to_depth_float_3) \
    FN(space_to_depth_quant8_1) \
    FN(space_to_depth_quant8_2) \
    FN(svdf2) \
    FN(svdf) \
    FN(svdf_state) \
    FN(tanh)

#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function { \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel(); \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

#endif // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
@@ -14,40 +14,21 @@
// limitations under the License.
//

// Tests for V1_0 models using the V1_1 HAL.
cc_test {
    name: "VtsHalNeuralnetworksV1_1CompatV1_0TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "GeneratedTestsV1_0.cpp",
    ],
}

// Tests for V1_1 models.
cc_test {
    name: "VtsHalNeuralnetworksV1_1TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_utils",
        "VtsHalNeuralnetworksTest_utils",
    ],
    header_libs: [
        "libneuralnetworks_headers",
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
    // error in ld.gold.
    arch: {
        arm: {
            sanitize: {
                never: true,
            },
        },
    },
}

@@ -45,8 +45,9 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCa
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"
#include "all_generated_V1_1_vts_tests.cpp"

} // namespace functional

@@ -16,31 +16,39 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::test_helper::MixedTypedExample;
extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
} // namespace generated_tests

namespace V1_1 {
namespace vts {
namespace functional {

// forward declarations
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
#define VTS_CURRENT_TEST_CASE(TestName) \
    TEST_F(ValidationTest, TestName) { \
        const Model model = TestName::createTestModel(); \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model); \
        validateRequests(model, requests); \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
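
For reference, each expansion of the macro above produces one concrete gtest case; VTS_CURRENT_TEST_CASE(add), for instance, expands to roughly:

    TEST_F(ValidationTest, add) {
        const Model model = add::createTestModel();
        const std::vector<Request> requests = createRequests(add::examples);
        validateModel(model);
        validateRequests(model, requests);
    }

so FOR_EACH_TEST_MODEL stamps out one such ValidationTest per generated model.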
// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"

} // namespace functional
} // namespace vts
@@ -1,377 +0,0 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

using MixedTypedExample = test_helper::MixedTypedExample;

#define FOR_EACH_TEST_MODEL(FN) \
    FN(add) \
    FN(add_broadcast_quant8) \
    FN(add_quant8) \
    FN(add_relaxed) \
    FN(avg_pool_float_1) \
    FN(avg_pool_float_1_relaxed) \
    FN(avg_pool_float_2) \
    FN(avg_pool_float_2_relaxed) \
    FN(avg_pool_float_3) \
    FN(avg_pool_float_3_relaxed) \
    FN(avg_pool_float_4) \
    FN(avg_pool_float_4_relaxed) \
    FN(avg_pool_float_5) \
    FN(avg_pool_float_5_relaxed) \
    FN(avg_pool_quant8_1) \
    FN(avg_pool_quant8_2) \
    FN(avg_pool_quant8_3) \
    FN(avg_pool_quant8_4) \
    FN(avg_pool_quant8_5) \
    FN(batch_to_space) \
    FN(batch_to_space_float_1) \
    FN(batch_to_space_float_1_relaxed) \
    FN(batch_to_space_quant8_1) \
    FN(batch_to_space_relaxed) \
    FN(concat_float_1) \
    FN(concat_float_1_relaxed) \
    FN(concat_float_2) \
    FN(concat_float_2_relaxed) \
    FN(concat_float_3) \
    FN(concat_float_3_relaxed) \
    FN(concat_quant8_1) \
    FN(concat_quant8_2) \
    FN(concat_quant8_3) \
    FN(conv_1_h3_w2_SAME) \
    FN(conv_1_h3_w2_SAME_relaxed) \
    FN(conv_1_h3_w2_VALID) \
    FN(conv_1_h3_w2_VALID_relaxed) \
    FN(conv_3_h3_w2_SAME) \
    FN(conv_3_h3_w2_SAME_relaxed) \
    FN(conv_3_h3_w2_VALID) \
    FN(conv_3_h3_w2_VALID_relaxed) \
    FN(conv_float) \
    FN(conv_float_2) \
    FN(conv_float_2_relaxed) \
    FN(conv_float_channels) \
    FN(conv_float_channels_relaxed) \
    FN(conv_float_channels_weights_as_inputs) \
    FN(conv_float_channels_weights_as_inputs_relaxed) \
    FN(conv_float_large) \
    FN(conv_float_large_relaxed) \
    FN(conv_float_large_weights_as_inputs) \
    FN(conv_float_large_weights_as_inputs_relaxed) \
    FN(conv_float_relaxed) \
    FN(conv_float_weights_as_inputs) \
    FN(conv_float_weights_as_inputs_relaxed) \
    FN(conv_quant8) \
    FN(conv_quant8_2) \
    FN(conv_quant8_channels) \
    FN(conv_quant8_channels_weights_as_inputs) \
    FN(conv_quant8_large) \
    FN(conv_quant8_large_weights_as_inputs) \
    FN(conv_quant8_overflow) \
    FN(conv_quant8_overflow_weights_as_inputs) \
    FN(conv_quant8_weights_as_inputs) \
    FN(depth_to_space_float_1) \
    FN(depth_to_space_float_1_relaxed) \
    FN(depth_to_space_float_2) \
    FN(depth_to_space_float_2_relaxed) \
    FN(depth_to_space_float_3) \
    FN(depth_to_space_float_3_relaxed) \
    FN(depth_to_space_quant8_1) \
    FN(depth_to_space_quant8_2) \
    FN(depthwise_conv) \
    FN(depthwise_conv2d_float) \
    FN(depthwise_conv2d_float_2) \
    FN(depthwise_conv2d_float_2_relaxed) \
    FN(depthwise_conv2d_float_large) \
    FN(depthwise_conv2d_float_large_2) \
    FN(depthwise_conv2d_float_large_2_relaxed) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_float_large_relaxed) \
    FN(depthwise_conv2d_float_large_weights_as_inputs) \
    FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_float_relaxed) \
    FN(depthwise_conv2d_float_weights_as_inputs) \
    FN(depthwise_conv2d_float_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_quant8) \
    FN(depthwise_conv2d_quant8_2) \
    FN(depthwise_conv2d_quant8_large) \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
    FN(depthwise_conv2d_quant8_weights_as_inputs) \
    FN(depthwise_conv_relaxed) \
    FN(dequantize) \
    FN(dequantize_relaxed) \
    FN(div) \
    FN(div_broadcast_float) \
    FN(div_broadcast_float_relaxed) \
    FN(div_relaxed) \
    FN(embedding_lookup) \
    FN(embedding_lookup_relaxed) \
    FN(floor) \
    FN(floor_relaxed) \
    FN(fully_connected_float) \
    FN(fully_connected_float_2) \
    FN(fully_connected_float_2_relaxed) \
    FN(fully_connected_float_4d_simple) \
    FN(fully_connected_float_4d_simple_relaxed) \
    FN(fully_connected_float_large) \
    FN(fully_connected_float_large_relaxed) \
    FN(fully_connected_float_large_weights_as_inputs) \
    FN(fully_connected_float_large_weights_as_inputs_relaxed) \
    FN(fully_connected_float_relaxed) \
    FN(fully_connected_float_weights_as_inputs) \
    FN(fully_connected_float_weights_as_inputs_relaxed) \
    FN(fully_connected_quant8) \
    FN(fully_connected_quant8_2) \
    FN(fully_connected_quant8_large) \
    FN(fully_connected_quant8_large_weights_as_inputs) \
    FN(fully_connected_quant8_weights_as_inputs) \
    FN(hashtable_lookup_float) \
    FN(hashtable_lookup_float_relaxed) \
    FN(hashtable_lookup_quant8) \
    FN(l2_normalization) \
    FN(l2_normalization_2) \
    FN(l2_normalization_2_relaxed) \
    FN(l2_normalization_large) \
    FN(l2_normalization_large_relaxed) \
    FN(l2_normalization_relaxed) \
    FN(l2_pool_float) \
    FN(l2_pool_float_2) \
    FN(l2_pool_float_2_relaxed) \
    FN(l2_pool_float_large) \
    FN(l2_pool_float_large_relaxed) \
    FN(l2_pool_float_relaxed) \
    FN(local_response_norm_float_1) \
    FN(local_response_norm_float_1_relaxed) \
    FN(local_response_norm_float_2) \
    FN(local_response_norm_float_2_relaxed) \
    FN(local_response_norm_float_3) \
    FN(local_response_norm_float_3_relaxed) \
    FN(local_response_norm_float_4) \
    FN(local_response_norm_float_4_relaxed) \
    FN(logistic_float_1) \
    FN(logistic_float_1_relaxed) \
    FN(logistic_float_2) \
    FN(logistic_float_2_relaxed) \
    FN(logistic_quant8_1) \
    FN(logistic_quant8_2) \
    FN(lsh_projection) \
    FN(lsh_projection_2) \
    FN(lsh_projection_2_relaxed) \
    FN(lsh_projection_relaxed) \
    FN(lsh_projection_weights_as_inputs) \
    FN(lsh_projection_weights_as_inputs_relaxed) \
    FN(lstm) \
    FN(lstm2) \
    FN(lstm2_relaxed) \
    FN(lstm2_state) \
    FN(lstm2_state2) \
    FN(lstm2_state2_relaxed) \
    FN(lstm2_state_relaxed) \
    FN(lstm3) \
    FN(lstm3_relaxed) \
    FN(lstm3_state) \
    FN(lstm3_state2) \
    FN(lstm3_state2_relaxed) \
    FN(lstm3_state3) \
    FN(lstm3_state3_relaxed) \
    FN(lstm3_state_relaxed) \
    FN(lstm_relaxed) \
    FN(lstm_state) \
    FN(lstm_state2) \
    FN(lstm_state2_relaxed) \
    FN(lstm_state_relaxed) \
    FN(max_pool_float_1) \
    FN(max_pool_float_1_relaxed) \
    FN(max_pool_float_2) \
    FN(max_pool_float_2_relaxed) \
    FN(max_pool_float_3) \
    FN(max_pool_float_3_relaxed) \
    FN(max_pool_float_4) \
    FN(max_pool_float_4_relaxed) \
    FN(max_pool_quant8_1) \
    FN(max_pool_quant8_2) \
    FN(max_pool_quant8_3) \
    FN(max_pool_quant8_4) \
    FN(mean) \
    FN(mean_float_1) \
    FN(mean_float_1_relaxed) \
    FN(mean_float_2) \
    FN(mean_float_2_relaxed) \
    FN(mean_quant8_1) \
    FN(mean_quant8_2) \
    FN(mean_relaxed) \
    FN(mobilenet_224_gender_basic_fixed) \
    FN(mobilenet_224_gender_basic_fixed_relaxed) \
    FN(mobilenet_quantized) \
    FN(mul) \
    FN(mul_broadcast_quant8) \
    FN(mul_quant8) \
    FN(mul_relaxed) \
    FN(mul_relu) \
    FN(mul_relu_relaxed) \
    FN(pad) \
    FN(pad_float_1) \
    FN(pad_float_1_relaxed) \
    FN(pad_relaxed) \
    FN(relu1_float_1) \
    FN(relu1_float_1_relaxed) \
    FN(relu1_float_2) \
    FN(relu1_float_2_relaxed) \
    FN(relu1_quant8_1) \
    FN(relu1_quant8_2) \
    FN(relu6_float_1) \
    FN(relu6_float_1_relaxed) \
    FN(relu6_float_2) \
    FN(relu6_float_2_relaxed) \
    FN(relu6_quant8_1) \
    FN(relu6_quant8_2) \
    FN(relu_float_1) \
    FN(relu_float_1_relaxed) \
    FN(relu_float_2) \
    FN(relu_float_2_relaxed) \
    FN(relu_quant8_1) \
    FN(relu_quant8_2) \
    FN(reshape) \
    FN(reshape_quant8) \
    FN(reshape_quant8_weights_as_inputs) \
    FN(reshape_relaxed) \
    FN(reshape_weights_as_inputs) \
    FN(reshape_weights_as_inputs_relaxed) \
    FN(resize_bilinear) \
    FN(resize_bilinear_2) \
    FN(resize_bilinear_2_relaxed) \
    FN(resize_bilinear_relaxed) \
    FN(rnn) \
    FN(rnn_relaxed) \
    FN(rnn_state) \
    FN(rnn_state_relaxed) \
    FN(softmax_float_1) \
    FN(softmax_float_1_relaxed) \
    FN(softmax_float_2) \
    FN(softmax_float_2_relaxed) \
    FN(softmax_quant8_1) \
    FN(softmax_quant8_2) \
    FN(space_to_batch) \
    FN(space_to_batch_float_1) \
    FN(space_to_batch_float_1_relaxed) \
    FN(space_to_batch_float_2) \
    FN(space_to_batch_float_2_relaxed) \
    FN(space_to_batch_float_3) \
    FN(space_to_batch_float_3_relaxed) \
    FN(space_to_batch_quant8_1) \
    FN(space_to_batch_quant8_2) \
    FN(space_to_batch_quant8_3) \
    FN(space_to_batch_relaxed) \
    FN(space_to_depth_float_1) \
    FN(space_to_depth_float_1_relaxed) \
    FN(space_to_depth_float_2) \
    FN(space_to_depth_float_2_relaxed) \
    FN(space_to_depth_float_3) \
    FN(space_to_depth_float_3_relaxed) \
    FN(space_to_depth_quant8_1) \
    FN(space_to_depth_quant8_2) \
    FN(squeeze) \
    FN(squeeze_float_1) \
    FN(squeeze_float_1_relaxed) \
    FN(squeeze_quant8_1) \
    FN(squeeze_relaxed) \
    FN(strided_slice) \
    FN(strided_slice_float_1) \
    FN(strided_slice_float_10) \
    FN(strided_slice_float_10_relaxed) \
    FN(strided_slice_float_11) \
    FN(strided_slice_float_11_relaxed) \
    FN(strided_slice_float_1_relaxed) \
    FN(strided_slice_float_2) \
    FN(strided_slice_float_2_relaxed) \
    FN(strided_slice_float_3) \
    FN(strided_slice_float_3_relaxed) \
    FN(strided_slice_float_4) \
    FN(strided_slice_float_4_relaxed) \
    FN(strided_slice_float_5) \
    FN(strided_slice_float_5_relaxed) \
    FN(strided_slice_float_6) \
    FN(strided_slice_float_6_relaxed) \
    FN(strided_slice_float_7) \
    FN(strided_slice_float_7_relaxed) \
    FN(strided_slice_float_8) \
    FN(strided_slice_float_8_relaxed) \
    FN(strided_slice_float_9) \
    FN(strided_slice_float_9_relaxed) \
    FN(strided_slice_qaunt8_10) \
    FN(strided_slice_qaunt8_11) \
    FN(strided_slice_quant8_1) \
    FN(strided_slice_quant8_2) \
    FN(strided_slice_quant8_3) \
    FN(strided_slice_quant8_4) \
    FN(strided_slice_quant8_5) \
    FN(strided_slice_quant8_6) \
    FN(strided_slice_quant8_7) \
    FN(strided_slice_quant8_8) \
    FN(strided_slice_quant8_9) \
    FN(strided_slice_relaxed) \
    FN(sub) \
    FN(sub_broadcast_float) \
    FN(sub_broadcast_float_relaxed) \
    FN(sub_relaxed) \
    FN(svdf) \
    FN(svdf2) \
    FN(svdf2_relaxed) \
    FN(svdf_relaxed) \
    FN(svdf_state) \
    FN(svdf_state_relaxed) \
    FN(tanh) \
    FN(tanh_relaxed) \
    FN(transpose) \
    FN(transpose_float_1) \
    FN(transpose_float_1_relaxed) \
    FN(transpose_quant8_1) \
    FN(transpose_relaxed)

#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function { \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel(); \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

} // namespace functional
} // namespace vts
} // namespace V1_1
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

#endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
@@ -33,15 +33,13 @@ enum OperandType : @1.0::OperandType {
    /**
     * A tensor of 16 bit signed integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that are used to convert the 16
     * bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [-32768, 32767].
     * Attached to this tensor is a number representing real value scale that is
     * used to convert the 16 bit number to a real value in the following way:
     * realValue = integerValue * scale.
     *
     * The formula is:
     * realValue = (integerValue - zeroPoint) * scale.
     * scale is a 32 bit floating point with value greater than zero.
     */
    TENSOR_QUANT16_ASYMM = 7,
    TENSOR_QUANT16_SYMM = 7,
    /** A tensor of 16 bit floating point values. */
    TENSOR_FLOAT16 = 8,
};
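
The semantic change here is the dropped zero point: a symmetric quant16 tensor is centered on zero, so dequantization needs only the scale, while the removed asymmetric type also carried a zeroPoint offset. A hedged sketch of the two conversions side by side:

    #include <cstdint>

    // TENSOR_QUANT16_SYMM: the zero point is implicitly 0.
    float dequantSymm(int16_t q, float scale) {
        return q * scale;  // realValue = integerValue * scale
    }

    // The removed TENSOR_QUANT16_ASYMM form, for comparison.
    float dequantAsymm(int16_t q, float scale, int32_t zeroPoint) {
        return (q - zeroPoint) * scale;  // realValue = (integerValue - zeroPoint) * scale
    }

For example, with scale = 1.0f / 32768.0f the symmetric form maps the full int16 range onto approximately [-1.0, 1.0).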

@@ -14,40 +14,30 @@
// limitations under the License.
//

// Tests for V1_0 models using the V1_2 HAL.
cc_test {
    name: "VtsHalNeuralnetworksV1_2CompatV1_0TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "GeneratedTestsV1_0.cpp",
    ]
}

// Tests for V1_1 models using the V1_2 HAL.
cc_test {
    name: "VtsHalNeuralnetworksV1_2CompatV1_1TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "GeneratedTestsV1_1.cpp",
    ],
}

// Tests for V1_2 models.
cc_test {
    name: "VtsHalNeuralnetworksV1_2TargetTest",
    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
    srcs: [
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_utils",
        "VtsHalNeuralnetworksTest_utils",
    ],
    header_libs: [
        "libneuralnetworks_headers",
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
    // error in ld.gold.
    arch: {
        arm: {
            sanitize: {
                never: true,
            },
        },
    },
}

@@ -45,9 +45,9 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCa
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"
#include "all_generated_V1_1_vts_tests.cpp"
#include "all_generated_V1_2_vts_tests.cpp"

} // namespace functional

@@ -16,31 +16,39 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::test_helper::MixedTypedExample;
extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
} // namespace generated_tests

namespace V1_2 {
namespace vts {
namespace functional {

// forward declarations
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
#define VTS_CURRENT_TEST_CASE(TestName) \
    TEST_F(ValidationTest, TestName) { \
        const Model model = TestName::createTestModel(); \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model); \
        validateRequests(model, requests); \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"

} // namespace functional
} // namespace vts
@@ -16,35 +16,43 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {

namespace generated_tests {
using ::test_helper::MixedTypedExample;
extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
                    std::function<bool(int)>, const std::vector<MixedTypedExample>&);
} // namespace generated_tests

namespace V1_2 {
namespace vts {
namespace functional {

// forward declarations
std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

// generate validation tests
#define VTS_CURRENT_TEST_CASE(TestName) \
    TEST_F(ValidationTest, TestName) { \
        const Model model = TestName::createTestModel(); \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model); \
        validateRequests(model, requests); \
    }
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_1_vts_tests.cpp"

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -1,379 +0,0 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_2 {
namespace vts {
namespace functional {

using MixedTypedExample = test_helper::MixedTypedExample;

#define FOR_EACH_TEST_MODEL(FN) \
    FN(add) \
    FN(add_broadcast_quant8) \
    FN(add_quant8) \
    FN(add_relaxed) \
    FN(avg_pool_float_1) \
    FN(avg_pool_float_1_relaxed) \
    FN(avg_pool_float_2) \
    FN(avg_pool_float_2_relaxed) \
    FN(avg_pool_float_3) \
    FN(avg_pool_float_3_relaxed) \
    FN(avg_pool_float_4) \
    FN(avg_pool_float_4_relaxed) \
    FN(avg_pool_float_5) \
    FN(avg_pool_float_5_relaxed) \
    FN(avg_pool_quant8_1) \
    FN(avg_pool_quant8_2) \
    FN(avg_pool_quant8_3) \
    FN(avg_pool_quant8_4) \
    FN(avg_pool_quant8_5) \
    FN(batch_to_space) \
    FN(batch_to_space_float_1) \
    FN(batch_to_space_float_1_relaxed) \
    FN(batch_to_space_quant8_1) \
    FN(batch_to_space_relaxed) \
    FN(concat_float_1) \
    FN(concat_float_1_relaxed) \
    FN(concat_float_2) \
    FN(concat_float_2_relaxed) \
    FN(concat_float_3) \
    FN(concat_float_3_relaxed) \
    FN(concat_quant8_1) \
    FN(concat_quant8_2) \
    FN(concat_quant8_3) \
    FN(conv_1_h3_w2_SAME) \
    FN(conv_1_h3_w2_SAME_relaxed) \
    FN(conv_1_h3_w2_VALID) \
    FN(conv_1_h3_w2_VALID_relaxed) \
    FN(conv_3_h3_w2_SAME) \
    FN(conv_3_h3_w2_SAME_relaxed) \
    FN(conv_3_h3_w2_VALID) \
    FN(conv_3_h3_w2_VALID_relaxed) \
    FN(conv_float) \
    FN(conv_float_2) \
    FN(conv_float_2_relaxed) \
    FN(conv_float_channels) \
    FN(conv_float_channels_relaxed) \
    FN(conv_float_channels_weights_as_inputs) \
    FN(conv_float_channels_weights_as_inputs_relaxed) \
    FN(conv_float_large) \
    FN(conv_float_large_relaxed) \
    FN(conv_float_large_weights_as_inputs) \
    FN(conv_float_large_weights_as_inputs_relaxed) \
    FN(conv_float_relaxed) \
    FN(conv_float_weights_as_inputs) \
    FN(conv_float_weights_as_inputs_relaxed) \
    FN(conv_quant8) \
    FN(conv_quant8_2) \
    FN(conv_quant8_channels) \
    FN(conv_quant8_channels_weights_as_inputs) \
    FN(conv_quant8_large) \
    FN(conv_quant8_large_weights_as_inputs) \
    FN(conv_quant8_overflow) \
    FN(conv_quant8_overflow_weights_as_inputs) \
    FN(conv_quant8_weights_as_inputs) \
    FN(depth_to_space_float_1) \
    FN(depth_to_space_float_1_relaxed) \
    FN(depth_to_space_float_2) \
    FN(depth_to_space_float_2_relaxed) \
    FN(depth_to_space_float_3) \
    FN(depth_to_space_float_3_relaxed) \
    FN(depth_to_space_quant8_1) \
    FN(depth_to_space_quant8_2) \
    FN(depthwise_conv) \
    FN(depthwise_conv2d_float) \
    FN(depthwise_conv2d_float_2) \
    FN(depthwise_conv2d_float_2_relaxed) \
    FN(depthwise_conv2d_float_large) \
    FN(depthwise_conv2d_float_large_2) \
    FN(depthwise_conv2d_float_large_2_relaxed) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_float_large_relaxed) \
    FN(depthwise_conv2d_float_large_weights_as_inputs) \
    FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_float_relaxed) \
    FN(depthwise_conv2d_float_weights_as_inputs) \
    FN(depthwise_conv2d_float_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_quant8) \
    FN(depthwise_conv2d_quant8_2) \
    FN(depthwise_conv2d_quant8_large) \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
    FN(depthwise_conv2d_quant8_weights_as_inputs) \
    FN(depthwise_conv_relaxed) \
    FN(dequantize) \
    FN(dequantize_relaxed) \
    FN(div) \
    FN(div_broadcast_float) \
    FN(div_broadcast_float_relaxed) \
    FN(div_relaxed) \
    FN(embedding_lookup) \
    FN(embedding_lookup_relaxed) \
    FN(floor) \
    FN(floor_relaxed) \
    FN(fully_connected_float) \
    FN(fully_connected_float_2) \
    FN(fully_connected_float_2_relaxed) \
    FN(fully_connected_float_4d_simple) \
    FN(fully_connected_float_4d_simple_relaxed) \
    FN(fully_connected_float_large) \
    FN(fully_connected_float_large_relaxed) \
    FN(fully_connected_float_large_weights_as_inputs) \
    FN(fully_connected_float_large_weights_as_inputs_relaxed) \
    FN(fully_connected_float_relaxed) \
    FN(fully_connected_float_weights_as_inputs) \
    FN(fully_connected_float_weights_as_inputs_relaxed) \
    FN(fully_connected_quant8) \
    FN(fully_connected_quant8_2) \
    FN(fully_connected_quant8_large) \
    FN(fully_connected_quant8_large_weights_as_inputs) \
    FN(fully_connected_quant8_weights_as_inputs) \
    FN(hashtable_lookup_float) \
    FN(hashtable_lookup_float_relaxed) \
    FN(hashtable_lookup_quant8) \
    FN(l2_normalization) \
    FN(l2_normalization_2) \
    FN(l2_normalization_2_relaxed) \
    FN(l2_normalization_large) \
    FN(l2_normalization_large_relaxed) \
    FN(l2_normalization_relaxed) \
    FN(l2_pool_float) \
    FN(l2_pool_float_2) \
    FN(l2_pool_float_2_relaxed) \
    FN(l2_pool_float_large) \
    FN(l2_pool_float_large_relaxed) \
    FN(l2_pool_float_relaxed) \
    FN(local_response_norm_float_1) \
    FN(local_response_norm_float_1_relaxed) \
    FN(local_response_norm_float_2) \
    FN(local_response_norm_float_2_relaxed) \
    FN(local_response_norm_float_3) \
    FN(local_response_norm_float_3_relaxed) \
    FN(local_response_norm_float_4) \
    FN(local_response_norm_float_4_relaxed) \
    FN(logistic_float_1) \
    FN(logistic_float_1_relaxed) \
    FN(logistic_float_2) \
    FN(logistic_float_2_relaxed) \
    FN(logistic_quant8_1) \
    FN(logistic_quant8_2) \
    FN(lsh_projection) \
    FN(lsh_projection_2) \
    FN(lsh_projection_2_relaxed) \
    FN(lsh_projection_relaxed) \
    FN(lsh_projection_weights_as_inputs) \
    FN(lsh_projection_weights_as_inputs_relaxed) \
    FN(lstm) \
    FN(lstm2) \
    FN(lstm2_relaxed) \
    FN(lstm2_state) \
    FN(lstm2_state2) \
    FN(lstm2_state2_relaxed) \
    FN(lstm2_state_relaxed) \
    FN(lstm3) \
    FN(lstm3_relaxed) \
    FN(lstm3_state) \
    FN(lstm3_state2) \
    FN(lstm3_state2_relaxed) \
    FN(lstm3_state3) \
    FN(lstm3_state3_relaxed) \
    FN(lstm3_state_relaxed) \
    FN(lstm_relaxed) \
    FN(lstm_state) \
    FN(lstm_state2) \
    FN(lstm_state2_relaxed) \
    FN(lstm_state_relaxed) \
    FN(max_pool_float_1) \
    FN(max_pool_float_1_relaxed) \
    FN(max_pool_float_2) \
    FN(max_pool_float_2_relaxed) \
    FN(max_pool_float_3) \
    FN(max_pool_float_3_relaxed) \
    FN(max_pool_float_4) \
    FN(max_pool_float_4_relaxed) \
    FN(max_pool_quant8_1) \
    FN(max_pool_quant8_2) \
    FN(max_pool_quant8_3) \
    FN(max_pool_quant8_4) \
    FN(mean) \
    FN(mean_float_1) \
    FN(mean_float_1_relaxed) \
    FN(mean_float_2) \
    FN(mean_float_2_relaxed) \
    FN(mean_quant8_1) \
    FN(mean_quant8_2) \
    FN(mean_relaxed) \
    FN(mobilenet_224_gender_basic_fixed) \
    FN(mobilenet_224_gender_basic_fixed_relaxed) \
    FN(mobilenet_quantized) \
    FN(mul) \
    FN(mul_broadcast_quant8) \
    FN(mul_quant8) \
    FN(mul_relaxed) \
    FN(mul_relu) \
    FN(mul_relu_relaxed) \
    FN(pad) \
    FN(pad_float_1) \
    FN(pad_float_1_relaxed) \
    FN(pad_relaxed) \
    FN(random_multinomial) \
    FN(relu1_float_1) \
    FN(relu1_float_1_relaxed) \
    FN(relu1_float_2) \
    FN(relu1_float_2_relaxed) \
    FN(relu1_quant8_1) \
    FN(relu1_quant8_2) \
    FN(relu6_float_1) \
    FN(relu6_float_1_relaxed) \
    FN(relu6_float_2) \
    FN(relu6_float_2_relaxed) \
    FN(relu6_quant8_1) \
    FN(relu6_quant8_2) \
    FN(relu_float_1) \
    FN(relu_float_1_relaxed) \
    FN(relu_float_2) \
    FN(relu_float_2_relaxed) \
    FN(relu_quant8_1) \
    FN(relu_quant8_2) \
    FN(reshape) \
    FN(reshape_quant8) \
    FN(reshape_quant8_weights_as_inputs) \
    FN(reshape_relaxed) \
    FN(reshape_weights_as_inputs) \
    FN(reshape_weights_as_inputs_relaxed) \
    FN(resize_bilinear) \
    FN(resize_bilinear_2) \
    FN(resize_bilinear_2_relaxed) \
    FN(resize_bilinear_relaxed) \
    FN(rnn) \
    FN(rnn_relaxed) \
    FN(rnn_state) \
    FN(rnn_state_relaxed) \
    FN(softmax_float_1) \
    FN(softmax_float_1_relaxed) \
    FN(softmax_float_2) \
    FN(softmax_float_2_relaxed) \
    FN(softmax_quant8_1) \
    FN(softmax_quant8_2) \
    FN(space_to_batch) \
    FN(space_to_batch_float_1) \
    FN(space_to_batch_float_1_relaxed) \
    FN(space_to_batch_float_2) \
    FN(space_to_batch_float_2_relaxed) \
    FN(space_to_batch_float_3) \
    FN(space_to_batch_float_3_relaxed) \
    FN(space_to_batch_quant8_1) \
    FN(space_to_batch_quant8_2) \
    FN(space_to_batch_quant8_3) \
    FN(space_to_batch_relaxed) \
    FN(space_to_depth_float_1) \
    FN(space_to_depth_float_1_relaxed) \
    FN(space_to_depth_float_2) \
    FN(space_to_depth_float_2_relaxed) \
    FN(space_to_depth_float_3) \
    FN(space_to_depth_float_3_relaxed) \
    FN(space_to_depth_quant8_1) \
    FN(space_to_depth_quant8_2) \
    FN(squeeze) \
    FN(squeeze_float_1) \
    FN(squeeze_float_1_relaxed) \
    FN(squeeze_quant8_1) \
    FN(squeeze_relaxed) \
    FN(strided_slice) \
    FN(strided_slice_float_1) \
    FN(strided_slice_float_10) \
    FN(strided_slice_float_10_relaxed) \
    FN(strided_slice_float_11) \
    FN(strided_slice_float_11_relaxed) \
    FN(strided_slice_float_1_relaxed) \
    FN(strided_slice_float_2) \
    FN(strided_slice_float_2_relaxed) \
    FN(strided_slice_float_3) \
    FN(strided_slice_float_3_relaxed) \
    FN(strided_slice_float_4) \
    FN(strided_slice_float_4_relaxed) \
    FN(strided_slice_float_5) \
    FN(strided_slice_float_5_relaxed) \
    FN(strided_slice_float_6) \
    FN(strided_slice_float_6_relaxed) \
    FN(strided_slice_float_7) \
    FN(strided_slice_float_7_relaxed) \
    FN(strided_slice_float_8) \
    FN(strided_slice_float_8_relaxed) \
    FN(strided_slice_float_9) \
    FN(strided_slice_float_9_relaxed) \
    FN(strided_slice_qaunt8_10) \
    FN(strided_slice_qaunt8_11) \
    FN(strided_slice_quant8_1) \
    FN(strided_slice_quant8_2) \
    FN(strided_slice_quant8_3) \
    FN(strided_slice_quant8_4) \
    FN(strided_slice_quant8_5) \
    FN(strided_slice_quant8_6) \
    FN(strided_slice_quant8_7) \
    FN(strided_slice_quant8_8) \
    FN(strided_slice_quant8_9) \
    FN(strided_slice_relaxed) \
    FN(sub) \
    FN(sub_broadcast_float) \
    FN(sub_broadcast_float_relaxed) \
    FN(sub_relaxed) \
    FN(svdf) \
    FN(svdf2) \
    FN(svdf2_relaxed) \
    FN(svdf_relaxed) \
    FN(svdf_state) \
    FN(svdf_state_relaxed) \
    FN(tanh) \
    FN(tanh_relaxed) \
    FN(transpose) \
    FN(transpose_float_1) \
    FN(transpose_float_1_relaxed) \
    FN(transpose_quant8_1) \
    FN(transpose_relaxed)

#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function { \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel(); \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

} // namespace functional
} // namespace vts
} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

#endif // VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H
@@ -161,7 +161,7 @@ static uint32_t getInvalidRank(OperandType type) {
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            return 0;
        default:
            return 0;
@@ -193,7 +193,7 @@ static float getInvalidScale(OperandType type) {
        case OperandType::TENSOR_INT32:
            return -1.0f;
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            return 0.0f;
        default:
            return 0.0f;
@@ -224,8 +224,9 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
        case OperandType::TENSOR_INT32:
            return {1};
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
            return {-1, 256};
        case OperandType::TENSOR_QUANT16_SYMM:
            return {-32769, -1, 1, 32768};
        default:
            return {};
    }
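
The new TENSOR_QUANT16_SYMM case follows from the type's definition: its only legal zero point is 0, so the probes straddle that constraint — -1 and 1 are minimally nonzero, while -32769 and 32768 also fall just outside the int16 range. A sketch of the predicate these values are chosen to violate (a hypothetical helper for illustration, not the actual validation code):

    #include <cstdint>

    // A TENSOR_QUANT16_SYMM operand is valid only with zeroPoint == 0;
    // all four probe values {-32769, -1, 1, 32768} must be rejected.
    bool isValidQuant16SymmZeroPoint(int32_t zeroPoint) {
        return zeroPoint == 0;
    }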
@@ -278,7 +279,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
@@ -291,15 +292,33 @@ static void mutateOperand(Operand* operand, OperandType type) {
    *operand = newOperand;
}

static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) {
    // LSH_PROJECTION's second argument is allowed to have any type. This is the
    // only operation that currently has a type that can be anything independent
    // from any other type. Changing the operand type to any other type will
    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
    // test.
static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
    // Do not test OEM types
    if (type == model.operands[operand].type || type == OperandType::OEM ||
        type == OperandType::TENSOR_OEM_BYTE) {
        return true;
    }
    for (const Operation& operation : model.operations) {
        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
            return true;
        // Skip mutateOperationOperandTypeTest for the following operations.
        // - LSH_PROJECTION's second argument is allowed to have any type.
        // - ARGMIN and ARGMAX's first argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM).
        // - CAST's argument can be any of TENSOR_(FLOAT32|INT32|QUANT8_ASYMM).
        switch (operation.type) {
            case OperationType::LSH_PROJECTION: {
                if (operand == operation.inputs[1]) {
                    return true;
                }
            } break;
            case OperationType::CAST:
            case OperationType::ARGMAX:
            case OperationType::ARGMIN: {
                if (type == OperandType::TENSOR_FLOAT32 || type == OperandType::TENSOR_INT32 ||
                    type == OperandType::TENSOR_QUANT8_ASYMM) {
                    return true;
                }
            } break;
            default:
                break;
        }
    }
    return false;
@@ -307,14 +326,8 @@ static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) {

static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        if (mutateOperationOperandTypeSkip(operand, model)) {
            continue;
        }
        for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
            // Do not test OEM types
            if (invalidOperandType == model.operands[operand].type ||
                invalidOperandType == OperandType::OEM ||
                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
            if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
                continue;
            }
            const std::string message = "mutateOperationOperandTypeTest: operand " +
@@ -406,8 +419,26 @@ static void removeOperand(Model* model, uint32_t index) {
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}

static bool removeOperandSkip(size_t operand, const Model& model) {
    for (const Operation& operation : model.operations) {
        // Skip removeOperandTest for the following operations.
        // - SPLIT's outputs are not checked during prepareModel.
        if (operation.type == OperationType::SPLIT) {
            for (const size_t outOprand : operation.outputs) {
                if (operand == outOprand) {
                    return true;
                }
            }
        }
    }
    return false;
}

static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        if (removeOperandSkip(operand, model)) {
            continue;
        }
        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
        validate(device, message, model,
                 [operand](Model* model) { removeOperand(model, operand); });
@@ -433,15 +464,76 @@ static void removeOperationTest(const sp<IDevice>& device, const Model& model) {

///////////////////////// REMOVE OPERATION INPUT /////////////////////////

static bool removeOperationInputSkip(const Operation& op, size_t input) {
    // Skip removeOperationInputTest for the following operations.
    // - CONCATENATION has at least 2 inputs, with the last element being INT32.
    // - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
    //   SPACE_TO_DEPTH, DEPTH_TO_SPACE, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
    //   layout parameter.
    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
    //   parameter.
    switch (op.type) {
        case OperationType::CONCATENATION: {
            if (op.inputs.size() > 2 && input != op.inputs.size() - 1) {
                return true;
            }
        } break;
        case OperationType::DEPTHWISE_CONV_2D: {
            if ((op.inputs.size() == 12 && input == 11) || (op.inputs.size() == 9 && input == 8)) {
                return true;
            }
        } break;
        case OperationType::CONV_2D:
        case OperationType::AVERAGE_POOL_2D:
        case OperationType::MAX_POOL_2D:
        case OperationType::L2_POOL_2D: {
            if ((op.inputs.size() == 11 && input == 10) || (op.inputs.size() == 8 && input == 7)) {
                return true;
            }
        } break;
        case OperationType::RESIZE_BILINEAR: {
            if (op.inputs.size() == 4 && input == 3) {
                return true;
            }
        } break;
        case OperationType::SPACE_TO_DEPTH:
        case OperationType::DEPTH_TO_SPACE:
        case OperationType::BATCH_TO_SPACE_ND: {
            if (op.inputs.size() == 3 && input == 2) {
                return true;
            }
        } break;
        case OperationType::SPACE_TO_BATCH_ND: {
            if (op.inputs.size() == 4 && input == 3) {
                return true;
            }
        } break;
        case OperationType::L2_NORMALIZATION: {
            if (op.inputs.size() == 2 && input == 1) {
                return true;
            }
        } break;
        case OperationType::LOCAL_RESPONSE_NORMALIZATION: {
            if (op.inputs.size() == 6 && input == 5) {
                return true;
            }
        } break;
        case OperationType::SOFTMAX: {
            if (op.inputs.size() == 3 && input == 2) {
                return true;
            }
        } break;
        default:
            break;
    }
    return false;
}

static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const Operation& op = model.operations[operation];
            // CONCATENATION has at least 2 inputs, with the last element being
            // INT32. Skip this test if removing one of CONCATENATION's
            // inputs still produces a valid model.
            if (op.type == OperationType::CONCATENATION && op.inputs.size() > 2 &&
                input != op.inputs.size() - 1) {
            if (removeOperationInputSkip(op, input)) {
                continue;
            }
            const std::string message = "removeOperationInputTest: operation " +
@@ -479,8 +571,23 @@ static void removeOperationOutputTest(const sp<IDevice>& device, const Model& mo

///////////////////////// ADD OPERATION INPUT /////////////////////////

static bool addOperationInputSkip(const Operation& op) {
    // Skip addOperationInputTest for the following operations.
    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional INT32 axis
    //   parameter.
    if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
        (op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
        (op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
        return true;
    }
    return false;
}

static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        if (addOperationInputSkip(model.operations[operation])) {
            continue;
        }
        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);