From 4862d612add0bb302bbd2651df3cbb94f15df7ae Mon Sep 17 00:00:00 2001 From: Miao Wang Date: Mon, 5 Feb 2018 17:26:54 -0800 Subject: [PATCH] Refactor NN API VTS tests and add v1.1 tests - Create VtsHalNeuralnetworksTest_utils module to be shared between v1.x tests. - Split the existing tests into two categories: basic, and generated. - Created v1.1 VTS tests ensuring no regression in existing ML models. Bug: 63911257 Test: mm Test: NNAPI VTS tests pass on v1.0 and v1.1 sample drivers Merged-In: Ic77c90a3a5bbd96b0ce2acd03764dde4b3034cc9 Change-Id: Ic77c90a3a5bbd96b0ce2acd03764dde4b3034cc9 (cherry picked from commit a2d04c828e98bdadc6dd44c6235556451e4e2a88) --- neuralnetworks/1.0/vts/functional/Android.bp | 35 ++- .../vts/functional/GeneratedTestHarness.cpp | 167 +++++++---- neuralnetworks/1.0/vts/functional/Models.cpp | 53 ++-- neuralnetworks/1.0/vts/functional/Models.h | 27 +- .../functional/VtsHalNeuralnetworksV1_0.cpp | 73 +++++ ...argetTest.h => VtsHalNeuralnetworksV1_0.h} | 34 +-- ... => VtsHalNeuralnetworksV1_0BasicTest.cpp} | 87 ++---- .../VtsHalNeuralnetworksV1_0GeneratedTest.cpp | 72 +++++ neuralnetworks/1.1/vts/OWNERS | 10 + neuralnetworks/1.1/vts/functional/Android.bp | 39 +++ .../functional/VtsHalNeuralnetworksV1_1.cpp | 74 +++++ .../vts/functional/VtsHalNeuralnetworksV1_1.h | 85 ++++++ .../VtsHalNeuralnetworksV1_1BasicTest.cpp | 280 ++++++++++++++++++ .../VtsHalNeuralnetworksV1_1GeneratedTest.cpp | 80 +++++ 14 files changed, 929 insertions(+), 187 deletions(-) create mode 100644 neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp rename neuralnetworks/1.0/vts/functional/{VtsHalNeuralnetworksV1_0TargetTest.h => VtsHalNeuralnetworksV1_0.h} (69%) rename neuralnetworks/1.0/vts/functional/{VtsHalNeuralnetworksV1_0TargetTest.cpp => VtsHalNeuralnetworksV1_0BasicTest.cpp} (80%) create mode 100644 neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp create mode 100644 neuralnetworks/1.1/vts/OWNERS create mode 100644 neuralnetworks/1.1/vts/functional/Android.bp create mode 100644 neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp create mode 100644 neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h create mode 100644 neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp create mode 100644 neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp index e33ee77f12..54dd14aba3 100644 --- a/neuralnetworks/1.0/vts/functional/Android.bp +++ b/neuralnetworks/1.0/vts/functional/Android.bp @@ -14,22 +14,49 @@ // limitations under the License. 
// -cc_test { - name: "VtsHalNeuralnetworksV1_0TargetTest", +cc_library_static { + name: "VtsHalNeuralnetworksTest_utils", srcs: [ "Callbacks.cpp", - "GeneratedTestHarness.cpp", "Models.cpp", - "VtsHalNeuralnetworksV1_0TargetTest.cpp", + "GeneratedTestHarness.cpp", ], defaults: ["VtsHalTargetTestDefaults"], + export_include_dirs: ["."], static_libs: [ "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", "android.hidl.allocator@1.0", "android.hidl.memory@1.0", "libhidlmemory", + "libneuralnetworks_utils", ], header_libs: [ + "libneuralnetworks_headers", + "libneuralnetworks_generated_test_harness_headers", + "libneuralnetworks_generated_tests", + ], +} + +cc_test { + name: "VtsHalNeuralnetworksV1_0TargetTest", + srcs: [ + "VtsHalNeuralnetworksV1_0.cpp", + "VtsHalNeuralnetworksV1_0BasicTest.cpp", + "VtsHalNeuralnetworksV1_0GeneratedTest.cpp", + ], + defaults: ["VtsHalTargetTestDefaults"], + static_libs: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hidl.allocator@1.0", + "android.hidl.memory@1.0", + "libhidlmemory", + "libneuralnetworks_utils", + "VtsHalNeuralnetworksTest_utils", + ], + header_libs: [ + "libneuralnetworks_headers", "libneuralnetworks_generated_test_harness_headers", "libneuralnetworks_generated_tests", ], diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp index d740b5f53c..5fe8415147 100644 --- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp @@ -16,9 +16,15 @@ #include "Callbacks.h" #include "TestHarness.h" -#include "VtsHalNeuralnetworksV1_0TargetTest.h" +#include "Utils.h" #include +#include +#include +#include +#include +#include +#include #include #include #include @@ -26,11 +32,6 @@ namespace android { namespace hardware { namespace neuralnetworks { -namespace V1_0 { -namespace vts { -namespace functional { -// allocator helper -hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem"); namespace generated_tests { using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; @@ -64,54 +65,10 @@ void copy_back(MixedTyped* dst, const std::vector& ra, char* sr // Top level driver for models and examples generated by test_generator.py // Test driver for those generated from ml/nn/runtime/test/spec -void Execute(const sp& device, std::function create_model, - std::function is_ignored, - const std::vector& examples) { +void EvaluatePreparedModel(sp& preparedModel, std::function is_ignored, + const std::vector& examples) { const uint32_t INPUT = 0; const uint32_t OUTPUT = 1; - Model model = create_model(); - - // see if service can handle model - ErrorStatus supportedStatus; - bool fullySupportsModel = false; - Return supportedCall = device->getSupportedOperations( - model, [&](ErrorStatus status, const hidl_vec& supported) { - supportedStatus = status; - ASSERT_NE(0ul, supported.size()); - fullySupportsModel = - std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); - }); - ASSERT_TRUE(supportedCall.isOk()); - ASSERT_EQ(ErrorStatus::NONE, supportedStatus); - - // launch prepare model - sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); - Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); - ASSERT_TRUE(prepareLaunchStatus.isOk()); - - // retrieve prepared model - 
preparedModelCallback->wait(); - ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); - sp preparedModel = preparedModelCallback->getPreparedModel(); - if (fullySupportsModel) { - EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus); - } else { - EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE || - prepareReturnStatus == ErrorStatus::GENERAL_FAILURE); - } - - // early termination if vendor service cannot fully prepare model - if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(nullptr, preparedModel.get()); - LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " - "prepare model that it does not support."; - std::cout << "[ ] Early termination of test because vendor service cannot " - "prepare model that it does not support." - << std::endl; - return; - } - ASSERT_NE(nullptr, preparedModel.get()); int example_no = 1; for (auto& example : examples) { @@ -167,8 +124,8 @@ void Execute(const sp& device, std::function create_model, offset += i.location.length; } } - std::vector pools = {allocateSharedMemory(inputSize), - allocateSharedMemory(outputSize)}; + std::vector pools = {nn::allocateSharedMemory(inputSize), + nn::allocateSharedMemory(outputSize)}; ASSERT_NE(0ull, pools[INPUT].size()); ASSERT_NE(0ull, pools[OUTPUT].size()); @@ -221,11 +178,107 @@ void Execute(const sp& device, std::function create_model, } } +void Execute(sp& device, std::function create_model, + std::function is_ignored, + const std::vector& examples) { + V1_0::Model model = create_model(); + + // see if service can handle model + bool fullySupportsModel = false; + ErrorStatus supportedStatus; + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + + Return supportedCall = device->getSupportedOperations( + model, [&](ErrorStatus status, const hidl_vec& supported) { + supportedStatus = status; + ASSERT_NE(0ul, supported.size()); + fullySupportsModel = + std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedCall.isOk()); + ASSERT_EQ(ErrorStatus::NONE, supportedStatus); + Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + + // retrieve prepared model + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + sp preparedModel = preparedModelCallback->getPreparedModel(); + if (fullySupportsModel) { + EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus); + } else { + EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE || + prepareReturnStatus == ErrorStatus::GENERAL_FAILURE); + } + + // early termination if vendor service cannot fully prepare model + if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(nullptr, preparedModel.get()); + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "prepare model that it does not support." 
+ << std::endl; + return; + } + ASSERT_NE(nullptr, preparedModel.get()); + + EvaluatePreparedModel(preparedModel, is_ignored, examples); +} + +void Execute(sp& device, std::function create_model, + std::function is_ignored, + const std::vector& examples) { + V1_1::Model model = create_model(); + + // see if service can handle model + bool fullySupportsModel = false; + ErrorStatus supportedStatus; + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + + Return supportedCall = device->getSupportedOperations_1_1( + model, [&](ErrorStatus status, const hidl_vec& supported) { + supportedStatus = status; + ASSERT_NE(0ul, supported.size()); + fullySupportsModel = + std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedCall.isOk()); + ASSERT_EQ(ErrorStatus::NONE, supportedStatus); + Return prepareLaunchStatus = + device->prepareModel_1_1(model, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + + // retrieve prepared model + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + sp preparedModel = preparedModelCallback->getPreparedModel(); + if (fullySupportsModel) { + EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus); + } else { + EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE || + prepareReturnStatus == ErrorStatus::GENERAL_FAILURE); + } + + // early termination if vendor service cannot fully prepare model + if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(nullptr, preparedModel.get()); + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "prepare model that it does not support." 
+ << std::endl; + return; + } + ASSERT_NE(nullptr, preparedModel.get()); + + EvaluatePreparedModel(preparedModel, is_ignored, examples); +} + } // namespace generated_tests -} // namespace functional -} // namespace vts -} // namespace V1_0 } // namespace neuralnetworks } // namespace hardware } // namespace android diff --git a/neuralnetworks/1.0/vts/functional/Models.cpp b/neuralnetworks/1.0/vts/functional/Models.cpp index 8ce4f25938..180286a5b7 100644 --- a/neuralnetworks/1.0/vts/functional/Models.cpp +++ b/neuralnetworks/1.0/vts/functional/Models.cpp @@ -17,19 +17,22 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" #include "Models.h" +#include "Utils.h" + +#include +#include #include #include #include +using ::android::sp; + namespace android { namespace hardware { namespace neuralnetworks { -namespace V1_0 { -namespace vts { -namespace functional { // create a valid model -Model createValidTestModel() { +V1_1::Model createValidTestModel_1_1() { const std::vector operand2Data = {5.0f, 6.0f, 7.0f, 8.0f}; const uint32_t size = operand2Data.size() * sizeof(float); @@ -103,39 +106,34 @@ Model createValidTestModel() { } // create first invalid model -Model createInvalidTestModel1() { - Model model = createValidTestModel(); +V1_1::Model createInvalidTestModel1_1_1() { + Model model = createValidTestModel_1_1(); model.operations[0].type = static_cast(0xDEADBEEF); /* INVALID */ return model; } // create second invalid model -Model createInvalidTestModel2() { - Model model = createValidTestModel(); +V1_1::Model createInvalidTestModel2_1_1() { + Model model = createValidTestModel_1_1(); const uint32_t operand1 = 0; const uint32_t operand5 = 4; // INVALID OPERAND model.inputIndexes = std::vector({operand1, operand5 /* INVALID OPERAND */}); return model; } -// allocator helper -hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") { - hidl_memory memory; +V1_0::Model createValidTestModel_1_0() { + V1_1::Model model = createValidTestModel_1_1(); + return nn::convertToV1_0(model); +} - sp allocator = IAllocator::getService(type); - if (!allocator.get()) { - return {}; - } +V1_0::Model createInvalidTestModel1_1_0() { + V1_1::Model model = createInvalidTestModel1_1_1(); + return nn::convertToV1_0(model); +} - Return ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) { - ASSERT_TRUE(success); - memory = mem; - }); - if (!ret.isOk()) { - return {}; - } - - return memory; +V1_0::Model createInvalidTestModel2_1_0() { + V1_1::Model model = createInvalidTestModel2_1_1(); + return nn::convertToV1_0(model); } // create a valid request @@ -154,8 +152,8 @@ Request createValidTestRequest() { std::vector outputs = {{ .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {}, }}; - std::vector pools = {allocateSharedMemory(inputSize), - allocateSharedMemory(outputSize)}; + std::vector pools = {nn::allocateSharedMemory(inputSize), + nn::allocateSharedMemory(outputSize)}; if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) { return {}; } @@ -199,9 +197,6 @@ Request createInvalidTestRequest2() { return request; } -} // namespace functional -} // namespace vts -} // namespace V1_0 } // namespace neuralnetworks } // namespace hardware } // namespace android diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h index e0d57d533b..93982351f4 100644 --- a/neuralnetworks/1.0/vts/functional/Models.h +++ b/neuralnetworks/1.0/vts/functional/Models.h @@ -16,28 +16,27 @@ #define LOG_TAG 
"neuralnetworks_hidl_hal_test" -#include "VtsHalNeuralnetworksV1_0TargetTest.h" +#include namespace android { namespace hardware { namespace neuralnetworks { -namespace V1_0 { -namespace vts { -namespace functional { -// create the model -Model createValidTestModel(); -Model createInvalidTestModel1(); -Model createInvalidTestModel2(); +// create V1_1 model +V1_1::Model createValidTestModel_1_1(); +V1_1::Model createInvalidTestModel1_1_1(); +V1_1::Model createInvalidTestModel2_1_1(); + +// create V1_0 model +V1_0::Model createValidTestModel_1_0(); +V1_0::Model createInvalidTestModel1_1_0(); +V1_0::Model createInvalidTestModel2_1_0(); // create the request -Request createValidTestRequest(); -Request createInvalidTestRequest1(); -Request createInvalidTestRequest2(); +V1_0::Request createValidTestRequest(); +V1_0::Request createInvalidTestRequest1(); +V1_0::Request createInvalidTestRequest2(); -} // namespace functional -} // namespace vts -} // namespace V1_0 } // namespace neuralnetworks } // namespace hardware } // namespace android diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp new file mode 100644 index 0000000000..b14fb2c4c8 --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworksV1_0.h" +#include "Utils.h" + +#include + +using ::android::hardware::hidl_memory; +using ::android::hidl::allocator::V1_0::IAllocator; +using ::android::hidl::memory::V1_0::IMemory; +using ::android::sp; + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_0 { +namespace vts { +namespace functional { + +// allocator helper +hidl_memory allocateSharedMemory(int64_t size) { + return nn::allocateSharedMemory(size); +} + +// A class for test environment setup +NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} + +NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {} + +NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() { + // This has to return a "new" object because it is freed inside + // ::testing::AddGlobalTestEnvironment when the gtest is being torn down + static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment(); + return instance; +} + +void NeuralnetworksHidlEnvironment::registerTestServices() { + registerTestService(); +} + +// The main test class for NEURALNETWORK HIDL HAL. 
+NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} + +void NeuralnetworksHidlTest::SetUp() { + device = ::testing::VtsHalHidlTargetTestBase::getService( + NeuralnetworksHidlEnvironment::getInstance()); + ASSERT_NE(nullptr, device.get()); +} + +void NeuralnetworksHidlTest::TearDown() {} + +} // namespace functional +} // namespace vts +} // namespace V1_0 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h similarity index 69% rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h rename to neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h index 5cd209ae62..fbb1607478 100644 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h +++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 The Android Open Source Project + * Copyright (C) 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,23 +29,6 @@ #include #include -using ::android::hardware::neuralnetworks::V1_0::IDevice; -using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; -using ::android::hardware::neuralnetworks::V1_0::Capabilities; -using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; -using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; -using ::android::hardware::neuralnetworks::V1_0::Model; -using ::android::hardware::neuralnetworks::V1_0::OperationType; -using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo; -using ::android::hardware::Return; -using ::android::hardware::Void; -using ::android::hardware::hidl_memory; -using ::android::hardware::hidl_string; -using ::android::hardware::hidl_vec; -using ::android::hidl::allocator::V1_0::IAllocator; -using ::android::hidl::memory::V1_0::IMemory; -using ::android::sp; - namespace android { namespace hardware { namespace neuralnetworks { @@ -53,6 +36,8 @@ namespace V1_0 { namespace vts { namespace functional { +hidl_memory allocateSharedMemory(int64_t size); + // A class for test environment setup class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { NeuralnetworksHidlEnvironment(); @@ -74,25 +59,22 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { void SetUp() override; void TearDown() override; - sp doPrepareModelShortcut(); - - sp device; + sp device; }; - } // namespace functional } // namespace vts // pretty-print values for error messages -template +template ::std::basic_ostream& operator<<(::std::basic_ostream& os, - ErrorStatus errorStatus) { + V1_0::ErrorStatus errorStatus) { return os << toString(errorStatus); } -template +template ::std::basic_ostream& operator<<(::std::basic_ostream& os, - DeviceStatus deviceStatus) { + V1_0::DeviceStatus deviceStatus) { return os << toString(deviceStatus); } diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp similarity index 80% rename from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp rename to neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp index b99e20e3b4..e838997a8d 100644 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp +++ 
b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 The Android Open Source Project + * Copyright (C) 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" -#include "VtsHalNeuralnetworksV1_0TargetTest.h" +#include "VtsHalNeuralnetworksV1_0.h" #include "Callbacks.h" #include "Models.h" @@ -26,51 +26,34 @@ #include #include +using ::android::hardware::neuralnetworks::V1_0::IDevice; +using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; +using ::android::hardware::neuralnetworks::V1_0::Capabilities; +using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; +using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; +using ::android::hardware::neuralnetworks::V1_0::Model; +using ::android::hardware::neuralnetworks::V1_0::OperationType; +using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo; +using ::android::hardware::Return; +using ::android::hardware::Void; +using ::android::hardware::hidl_memory; +using ::android::hardware::hidl_string; +using ::android::hardware::hidl_vec; +using ::android::hidl::allocator::V1_0::IAllocator; +using ::android::hidl::memory::V1_0::IMemory; +using ::android::sp; + namespace android { namespace hardware { namespace neuralnetworks { namespace V1_0 { namespace vts { namespace functional { - using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; -using ::generated_tests::MixedTypedExampleType; -namespace generated_tests { -extern void Execute(const sp&, std::function, std::function, - const std::vector&); -} - -// A class for test environment setup -NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} - -NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {} - -NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() { - // This has to return a "new" object because it is freed inside - // ::testing::AddGlobalTestEnvironment when the gtest is being torn down - static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment(); - return instance; -} - -void NeuralnetworksHidlEnvironment::registerTestServices() { - registerTestService(); -} - -// The main test class for NEURALNETWORK HIDL HAL. 
-NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} - -void NeuralnetworksHidlTest::SetUp() { - device = ::testing::VtsHalHidlTargetTestBase::getService( - NeuralnetworksHidlEnvironment::getInstance()); - ASSERT_NE(nullptr, device.get()); -} - -void NeuralnetworksHidlTest::TearDown() {} - -sp NeuralnetworksHidlTest::doPrepareModelShortcut() { - Model model = createValidTestModel(); +inline sp doPrepareModelShortcut(sp& device) { + Model model = createValidTestModel_1_0(); sp preparedModelCallback = new PreparedModelCallback(); if (preparedModelCallback == nullptr) { @@ -116,7 +99,7 @@ TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { // supported operations positive test TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) { - Model model = createValidTestModel(); + Model model = createValidTestModel_1_0(); Return ret = device->getSupportedOperations( model, [&](ErrorStatus status, const hidl_vec& supported) { EXPECT_EQ(ErrorStatus::NONE, status); @@ -127,7 +110,7 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) { // supported operations negative test 1 TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) { - Model model = createInvalidTestModel1(); + Model model = createInvalidTestModel1_1_0(); Return ret = device->getSupportedOperations( model, [&](ErrorStatus status, const hidl_vec& supported) { EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); @@ -138,7 +121,7 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) { // supported operations negative test 2 TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) { - Model model = createInvalidTestModel2(); + Model model = createInvalidTestModel2_1_0(); Return ret = device->getSupportedOperations( model, [&](ErrorStatus status, const hidl_vec& supported) { EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); @@ -149,7 +132,7 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) { // prepare simple model positive test TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) { - Model model = createValidTestModel(); + Model model = createValidTestModel_1_0(); sp preparedModelCallback = new PreparedModelCallback(); ASSERT_NE(nullptr, preparedModelCallback.get()); Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); @@ -165,7 +148,7 @@ TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) { // prepare simple model negative test 1 TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) { - Model model = createInvalidTestModel1(); + Model model = createInvalidTestModel1_1_0(); sp preparedModelCallback = new PreparedModelCallback(); ASSERT_NE(nullptr, preparedModelCallback.get()); Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); @@ -181,7 +164,7 @@ TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) { // prepare simple model negative test 2 TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) { - Model model = createInvalidTestModel2(); + Model model = createInvalidTestModel2_1_0(); sp preparedModelCallback = new PreparedModelCallback(); ASSERT_NE(nullptr, preparedModelCallback.get()); Return prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); @@ -201,7 +184,7 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) { std::vector expectedData = {6.0f, 8.0f, 10.0f, 12.0f}; const uint32_t OUTPUT = 1; - sp preparedModel = doPrepareModelShortcut(); + sp preparedModel = doPrepareModelShortcut(device); ASSERT_NE(nullptr, 
preparedModel.get()); Request request = createValidTestRequest(); @@ -235,7 +218,7 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) { // execute simple graph negative test 1 TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) { - sp preparedModel = doPrepareModelShortcut(); + sp preparedModel = doPrepareModelShortcut(device); ASSERT_NE(nullptr, preparedModel.get()); Request request = createInvalidTestRequest1(); @@ -252,7 +235,7 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) { // execute simple graph negative test 2 TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) { - sp preparedModel = doPrepareModelShortcut(); + sp preparedModel = doPrepareModelShortcut(device); ASSERT_NE(nullptr, preparedModel.get()); Request request = createInvalidTestRequest2(); @@ -267,16 +250,6 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) { EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); } -// Mixed-typed examples -typedef MixedTypedExampleType MixedTypedExample; - -// in frameworks/ml/nn/runtime/tests/generated/ -#include "all_generated_vts_tests.cpp" - -// TODO: Add tests for execution failure, or wait_for/wait_until timeout. -// Discussion: -// https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222 - } // namespace functional } // namespace vts } // namespace V1_0 diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp new file mode 100644 index 0000000000..b99aef7fc0 --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworksV1_0.h" + +#include "Callbacks.h" +#include "TestHarness.h" + +#include +#include +#include + +using ::android::hardware::neuralnetworks::V1_0::IDevice; +using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; +using ::android::hardware::neuralnetworks::V1_0::Capabilities; +using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; +using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; +using ::android::hardware::neuralnetworks::V1_0::Model; +using ::android::hardware::neuralnetworks::V1_0::OperationType; +using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo; +using ::android::hardware::Return; +using ::android::hardware::Void; +using ::android::hardware::hidl_memory; +using ::android::hardware::hidl_string; +using ::android::hardware::hidl_vec; +using ::android::hidl::allocator::V1_0::IAllocator; +using ::android::hidl::memory::V1_0::IMemory; +using ::android::sp; + +namespace android { +namespace hardware { +namespace neuralnetworks { + +namespace generated_tests { +using ::generated_tests::MixedTypedExampleType; +extern void Execute(sp&, std::function, std::function, + const std::vector&); +} // namespace generated_tests + +namespace V1_0 { +namespace vts { +namespace functional { +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; + +// Mixed-typed examples +typedef generated_tests::MixedTypedExampleType MixedTypedExample; + +// in frameworks/ml/nn/runtime/tests/generated/ +#include "all_generated_V1_0_vts_tests.cpp" + +} // namespace functional +} // namespace vts +} // namespace V1_0 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.1/vts/OWNERS b/neuralnetworks/1.1/vts/OWNERS new file mode 100644 index 0000000000..7f75ab30c6 --- /dev/null +++ b/neuralnetworks/1.1/vts/OWNERS @@ -0,0 +1,10 @@ +# Neuralnetworks team +butlermichael@google.com +dgross@google.com +jeanluc@google.com +miaowang@google.com +yangni@google.com + +# VTS team +yim@google.com +yuexima@google.com diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp new file mode 100644 index 0000000000..623b44103a --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/Android.bp @@ -0,0 +1,39 @@ +// +// Copyright (C) 2018 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +cc_test { + name: "VtsHalNeuralnetworksV1_1TargetTest", + srcs: [ + "VtsHalNeuralnetworksV1_1.cpp", + "VtsHalNeuralnetworksV1_1BasicTest.cpp", + "VtsHalNeuralnetworksV1_1GeneratedTest.cpp", + ], + defaults: ["VtsHalTargetTestDefaults"], + static_libs: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hidl.allocator@1.0", + "android.hidl.memory@1.0", + "libhidlmemory", + "libneuralnetworks_utils", + "VtsHalNeuralnetworksTest_utils", + ], + header_libs: [ + "libneuralnetworks_headers", + "libneuralnetworks_generated_test_harness_headers", + "libneuralnetworks_generated_tests", + ], +} diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp new file mode 100644 index 0000000000..b1d3be786c --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworksV1_1.h" +#include "Utils.h" + +#include +#include + +using ::android::hardware::hidl_memory; +using ::android::hidl::allocator::V1_0::IAllocator; +using ::android::hidl::memory::V1_0::IMemory; +using ::android::sp; + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { + +// allocator helper +hidl_memory allocateSharedMemory(int64_t size) { + return nn::allocateSharedMemory(size); +} + +// A class for test environment setup +NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} + +NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {} + +NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() { + // This has to return a "new" object because it is freed inside + // ::testing::AddGlobalTestEnvironment when the gtest is being torn down + static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment(); + return instance; +} + +void NeuralnetworksHidlEnvironment::registerTestServices() { + registerTestService(); +} + +// The main test class for NEURALNETWORK HIDL HAL. 
+NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} + +void NeuralnetworksHidlTest::SetUp() { + device = ::testing::VtsHalHidlTargetTestBase::getService( + NeuralnetworksHidlEnvironment::getInstance()); + ASSERT_NE(nullptr, device.get()); +} + +void NeuralnetworksHidlTest::TearDown() {} + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h new file mode 100644 index 0000000000..426246ce76 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H +#define VTS_HAL_NEURALNETWORKS_V1_1_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { +hidl_memory allocateSharedMemory(int64_t size); + +// A class for test environment setup +class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { + NeuralnetworksHidlEnvironment(); + NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete; + NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete; + NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete; + NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete; + + public: + ~NeuralnetworksHidlEnvironment() override; + static NeuralnetworksHidlEnvironment* getInstance(); + void registerTestServices() override; +}; + +// The main test class for NEURALNETWORKS HIDL HAL. 
+class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { + public: + ~NeuralnetworksHidlTest() override; + void SetUp() override; + void TearDown() override; + + sp device; +}; +} // namespace functional +} // namespace vts + +// pretty-print values for error messages + +template +::std::basic_ostream& operator<<(::std::basic_ostream& os, + V1_0::ErrorStatus errorStatus) { + return os << toString(errorStatus); +} + +template +::std::basic_ostream& operator<<(::std::basic_ostream& os, + V1_0::DeviceStatus deviceStatus) { + return os << toString(deviceStatus); +} + +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +#endif // VTS_HAL_NEURALNETWORKS_V1_1_H diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp new file mode 100644 index 0000000000..51eff2a019 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp @@ -0,0 +1,280 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworksV1_1.h" + +#include "Callbacks.h" +#include "Models.h" +#include "TestHarness.h" + +#include +#include +#include +#include +#include + +using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; +using ::android::hardware::neuralnetworks::V1_0::Capabilities; +using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; +using ::android::hardware::neuralnetworks::V1_0::ErrorStatus; +using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; +using ::android::hardware::neuralnetworks::V1_0::Operand; +using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; +using ::android::hardware::neuralnetworks::V1_0::OperandType; +using ::android::hardware::neuralnetworks::V1_0::Request; +using ::android::hardware::neuralnetworks::V1_1::IDevice; +using ::android::hardware::neuralnetworks::V1_1::Model; +using ::android::hardware::neuralnetworks::V1_1::Operation; +using ::android::hardware::neuralnetworks::V1_1::OperationType; +using ::android::hardware::Return; +using ::android::hardware::Void; +using ::android::hardware::hidl_memory; +using ::android::hardware::hidl_string; +using ::android::hardware::hidl_vec; +using ::android::hidl::allocator::V1_0::IAllocator; +using ::android::hidl::memory::V1_0::IMemory; +using ::android::sp; + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_1 { +namespace vts { +namespace functional { +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; + +inline sp doPrepareModelShortcut(sp& device) { + Model model = createValidTestModel_1_1(); + + sp preparedModelCallback = new PreparedModelCallback(); + if (preparedModelCallback == nullptr) { + return nullptr; + } 
+ Return prepareLaunchStatus = + device->prepareModel_1_1(model, preparedModelCallback); + if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) { + return nullptr; + } + + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + sp preparedModel = preparedModelCallback->getPreparedModel(); + if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) { + return nullptr; + } + + return preparedModel; +} + +// create device test +TEST_F(NeuralnetworksHidlTest, CreateDevice) {} + +// status test +TEST_F(NeuralnetworksHidlTest, StatusTest) { + Return status = device->getStatus(); + ASSERT_TRUE(status.isOk()); + EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast(status)); +} + +// initialization +TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { + Return ret = + device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_LT(0.0f, capabilities.float32Performance.execTime); + EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); + EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); + EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); + }); + EXPECT_TRUE(ret.isOk()); +} + +// supported operations positive test +TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) { + Model model = createValidTestModel_1_1(); + Return ret = device->getSupportedOperations_1_1( + model, [&](ErrorStatus status, const hidl_vec& supported) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_EQ(model.operations.size(), supported.size()); + }); + EXPECT_TRUE(ret.isOk()); +} + +// supported operations negative test 1 +TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) { + Model model = createInvalidTestModel1_1_1(); + Return ret = device->getSupportedOperations_1_1( + model, [&](ErrorStatus status, const hidl_vec& supported) { + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); + (void)supported; + }); + EXPECT_TRUE(ret.isOk()); +} + +// supported operations negative test 2 +TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) { + Model model = createInvalidTestModel2_1_1(); + Return ret = device->getSupportedOperations_1_1( + model, [&](ErrorStatus status, const hidl_vec& supported) { + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); + (void)supported; + }); + EXPECT_TRUE(ret.isOk()); +} + +// prepare simple model positive test +TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) { + Model model = createValidTestModel_1_1(); + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return prepareLaunchStatus = + device->prepareModel_1_1(model, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + EXPECT_EQ(ErrorStatus::NONE, static_cast(prepareLaunchStatus)); + + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus); + sp preparedModel = preparedModelCallback->getPreparedModel(); + EXPECT_NE(nullptr, preparedModel.get()); +} + +// prepare simple model negative test 1 +TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) { + Model model = createInvalidTestModel1_1_1(); + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return prepareLaunchStatus = + device->prepareModel_1_1(model, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + 
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); + + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); + sp preparedModel = preparedModelCallback->getPreparedModel(); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +// prepare simple model negative test 2 +TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) { + Model model = createInvalidTestModel2_1_1(); + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return prepareLaunchStatus = + device->prepareModel_1_1(model, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(prepareLaunchStatus)); + + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); + sp preparedModel = preparedModelCallback->getPreparedModel(); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +// execute simple graph positive test +TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) { + std::vector outputData = {-1.0f, -1.0f, -1.0f, -1.0f}; + std::vector expectedData = {6.0f, 8.0f, 10.0f, 12.0f}; + const uint32_t OUTPUT = 1; + + sp preparedModel = doPrepareModelShortcut(device); + ASSERT_NE(nullptr, preparedModel.get()); + Request request = createValidTestRequest(); + + auto postWork = [&] { + sp outputMemory = mapMemory(request.pools[OUTPUT]); + if (outputMemory == nullptr) { + return false; + } + float* outputPtr = reinterpret_cast(static_cast(outputMemory->getPointer())); + if (outputPtr == nullptr) { + return false; + } + outputMemory->read(); + std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin()); + outputMemory->commit(); + return true; + }; + + sp executionCallback = new ExecutionCallback(); + ASSERT_NE(nullptr, executionCallback.get()); + executionCallback->on_finish(postWork); + Return executeLaunchStatus = preparedModel->execute(request, executionCallback); + ASSERT_TRUE(executeLaunchStatus.isOk()); + EXPECT_EQ(ErrorStatus::NONE, static_cast(executeLaunchStatus)); + + executionCallback->wait(); + ErrorStatus executionReturnStatus = executionCallback->getStatus(); + EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus); + EXPECT_EQ(expectedData, outputData); +} + +// execute simple graph negative test 1 +TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) { + sp preparedModel = doPrepareModelShortcut(device); + ASSERT_NE(nullptr, preparedModel.get()); + Request request = createInvalidTestRequest1(); + + sp executionCallback = new ExecutionCallback(); + ASSERT_NE(nullptr, executionCallback.get()); + Return executeLaunchStatus = preparedModel->execute(request, executionCallback); + ASSERT_TRUE(executeLaunchStatus.isOk()); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(executeLaunchStatus)); + + executionCallback->wait(); + ErrorStatus executionReturnStatus = executionCallback->getStatus(); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); +} + +// execute simple graph negative test 2 +TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) { + sp preparedModel = doPrepareModelShortcut(device); + ASSERT_NE(nullptr, preparedModel.get()); + Request request = createInvalidTestRequest2(); + + sp executionCallback = new ExecutionCallback(); + ASSERT_NE(nullptr, executionCallback.get()); + Return 
executeLaunchStatus = preparedModel->execute(request, executionCallback); + ASSERT_TRUE(executeLaunchStatus.isOk()); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast(executeLaunchStatus)); + + executionCallback->wait(); + ErrorStatus executionReturnStatus = executionCallback->getStatus(); + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); +} + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment; + +int main(int argc, char** argv) { + ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); + ::testing::InitGoogleTest(&argc, argv); + NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); + + int status = RUN_ALL_TESTS(); + return status; +} diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp new file mode 100644 index 0000000000..025d9feda3 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworksV1_1.h" + +#include "Callbacks.h" +#include "TestHarness.h" + +#include +#include +#include +#include +#include + +using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; +using ::android::hardware::neuralnetworks::V1_0::Capabilities; +using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; +using ::android::hardware::neuralnetworks::V1_0::ErrorStatus; +using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; +using ::android::hardware::neuralnetworks::V1_0::Operand; +using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; +using ::android::hardware::neuralnetworks::V1_0::OperandType; +using ::android::hardware::neuralnetworks::V1_0::Request; +using ::android::hardware::neuralnetworks::V1_1::IDevice; +using ::android::hardware::neuralnetworks::V1_1::Model; +using ::android::hardware::neuralnetworks::V1_1::Operation; +using ::android::hardware::neuralnetworks::V1_1::OperationType; +using ::android::hardware::Return; +using ::android::hardware::Void; +using ::android::hardware::hidl_memory; +using ::android::hardware::hidl_string; +using ::android::hardware::hidl_vec; +using ::android::hidl::allocator::V1_0::IAllocator; +using ::android::hidl::memory::V1_0::IMemory; +using ::android::sp; + +namespace android { +namespace hardware { +namespace neuralnetworks { + +namespace generated_tests { +using ::generated_tests::MixedTypedExampleType; +extern void Execute(sp&, std::function, std::function, + const std::vector&); +} // namespace generated_tests + +namespace V1_1 { +namespace vts { +namespace functional { +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; + +// Mixed-typed examples +typedef generated_tests::MixedTypedExampleType MixedTypedExample; + +// in frameworks/ml/nn/runtime/tests/generated/ +#include "all_generated_V1_0_vts_tests.cpp" +#include "all_generated_V1_1_vts_tests.cpp" + +} // namespace functional +} // namespace vts +} // namespace V1_1 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android
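For illustration only, the sketch below shows the shape of one test case that the textual #include of all_generated_V1_1_vts_tests.cpp expands to inside the functional namespace above: each generated case is a TEST_F that hands the shared generated_tests::Execute() overload a model factory, an is-ignored predicate, and the example data, so the V1_1 harness path (getSupportedOperations_1_1 / prepareModel_1_1, then EvaluatePreparedModel) is exercised without any per-model boilerplate. The test name and the createTestModel / is_ignored / get_examples symbols are hypothetical stand-ins for the identifiers that test_generator.py actually emits; this is not part of the patch.

// Hypothetical expansion of one generated V1_1 test case. The per-model
// namespace and symbol names are placeholders, not the real generated code.
namespace mobilenet {  // hypothetical per-model namespace
V1_1::Model createTestModel();                    // builds the model under test
bool is_ignored(int output_index);                // outputs to skip when comparing
std::vector<MixedTypedExample>& get_examples();   // input / expected-output pairs
}  // namespace mobilenet

TEST_F(NeuralnetworksHidlTest, mobilenet) {
    // Dispatches to the V1_1 overload of generated_tests::Execute() declared above,
    // which prepares the model on the service and runs every example through it.
    generated_tests::Execute(device, mobilenet::createTestModel, mobilenet::is_ignored,
                             mobilenet::get_examples());
}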