From 2c4e1368e1dacbe9fa3d8e696e713bb08f09953e Mon Sep 17 00:00:00 2001 From: "I-Jui (Ray) Sung" Date: Wed, 6 Sep 2017 02:15:54 -0700 Subject: [PATCH] Test harness for generated tests Created initial test harness for test models and examples from NNAPI test generator in VtsHalNeuralnetworksV1_0TargetTest. As an example, also added a test generated from test spec at frameworks/ml/nn/tools/test_generator/tests/P_vts_full/. Generated model setup code and examples are from: frameworks/ml/nn/runtime/test/generated/examples and frameworks/ml/nn/runtime/test/generated/vts_models respectively. Bug: 63905942 Bug: 63525563 Test: VtsHalNeuralnetworksV1_0TargetTest with sample driver enabled by cherry-pick Change-Id: Ief029eed9718c8724ef0b64fc6a7f6b9a7bc7b7b --- neuralnetworks/1.0/vts/functional/Android.bp | 5 + .../vts/functional/GeneratedTestHarness.cpp | 191 ++++++++++++++++++ .../VtsHalNeuralnetworksV1_0TargetTest.cpp | 37 ++-- 3 files changed, 217 insertions(+), 16 deletions(-) create mode 100644 neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp index 1efff0e0e5..2318430020 100644 --- a/neuralnetworks/1.0/vts/functional/Android.bp +++ b/neuralnetworks/1.0/vts/functional/Android.bp @@ -18,6 +18,7 @@ cc_test { name: "VtsHalNeuralnetworksV1_0TargetTest", srcs: [ "Event.cpp", + "GeneratedTestHarness.cpp", "VtsHalNeuralnetworksV1_0TargetTest.cpp", ], defaults: ["VtsHalTargetTestDefaults"], @@ -27,4 +28,8 @@ cc_test { "android.hidl.memory@1.0", "libhidlmemory", ], + header_libs: [ + "libneuralnetworks_generated_test_harness_headers", + "libneuralnetworks_generated_tests", + ], } diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp new file mode 100644 index 0000000000..2f557f8794 --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "Event.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworksV1_0TargetTest.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+// allocator helper
+hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");
+
+namespace generated_tests {
+using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::for_all;
+using ::generated_tests::for_each;
+using ::generated_tests::resize_accordingly;
+using ::generated_tests::MixedTyped;
+using ::generated_tests::MixedTypedExampleType;
+using ::generated_tests::Float32Operands;
+using ::generated_tests::Int32Operands;
+using ::generated_tests::Quant8Operands;
+// Top level driver for models and examples generated by test_generator.py
+// Test driver for those generated from ml/nn/runtime/test/spec
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+             const std::vector<MixedTypedExampleType>& examples) {
+    Model model = create_model();
+    sp<IPreparedModel> preparedModel;
+    sp<Event> preparationEvent = new Event();
+    ASSERT_NE(nullptr, preparationEvent.get());
+    Return<void> prepareRet = device->prepareModel(
+        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+            EXPECT_EQ(ErrorStatus::NONE, status);
+            preparedModel = prepared;
+        });
+    ASSERT_TRUE(prepareRet.isOk());
+    ASSERT_NE(nullptr, preparedModel.get());
+    Event::Status preparationStatus = preparationEvent->wait();
+    EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
+
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    int example_no = 1;
+    for (auto& example : examples) {
+        SCOPED_TRACE(example_no++);
+
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& golden = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // This function only partially specifies the metadata (vector of RequestArguments).
+        // The contents are copied over below.
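+        // Pool layout used by this request: all inputs are packed back to back into
+        // pool INPUT and all outputs into pool OUTPUT; each RequestArgument records
+        // the offset/length of its operand within that pool once the per-operand
+        // sizes reported by for_all() are known.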
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            inputs_info[index] = arg;
+            inputSize += s;
+        });
+        // Compute offset for inputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        MixedTyped test;  // holding test results
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        resize_accordingly<float>(golden, test);
+        resize_accordingly<int32_t>(golden, test);
+        resize_accordingly<uint8_t>(golden, test);
+        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Compute offset for outputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
+                                          allocateSharedMemory(outputSize)};
+        ASSERT_NE(0ull, pools[INPUT].size());
+        ASSERT_NE(0ull, pools[OUTPUT].size());
+
+        // load data
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+        ASSERT_NE(nullptr, inputMemory.get());
+        ASSERT_NE(nullptr, outputMemory.get());
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
+        ASSERT_NE(nullptr, inputPtr);
+        ASSERT_NE(nullptr, outputPtr);
+        inputMemory->update();
+        outputMemory->update();
+
+        // Go through all inputs, copy the values
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+
+        inputMemory->commit();
+        outputMemory->commit();
+        // execute request
+        sp<Event> executionEvent = new Event();
+        ASSERT_NE(nullptr, executionEvent.get());
+        Return<ErrorStatus> executeStatus = preparedModel->execute(
+            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionEvent);
+        ASSERT_TRUE(executeStatus.isOk());
+        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
+        Event::Status eventStatus = executionEvent->wait();
+        EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
+
+        // validate results
+        outputMemory->read();
+#define COPY_BACK(ty)                                                               \
+    for_each<ty>(test, [&outputs_info, outputPtr](int index, std::vector<ty>& m) {  \
+        RequestArgument& i = outputs_info[index];                                   \
+        ASSERT_EQ(m.size(), i.location.length / sizeof(ty));                        \
+        char* begin = outputPtr + i.location.offset;                                \
+        memcpy(m.data(), begin, i.location.length);                                 \
+    });
+        COPY_BACK(float);
+        COPY_BACK(int32_t);
+        COPY_BACK(uint8_t);
+#undef COPY_BACK
+        outputMemory->commit();
+        // We want "close-enough" results for float
+        for_each<float>(golden, [&test](int index, auto& golden_float) {
+            auto& test_float_operands = std::get<Float32Operands>(test);
+            auto& test_float = test_float_operands[index];
+            for (unsigned int i = 0; i < golden_float.size(); i++) {
+                SCOPED_TRACE(i);
+                EXPECT_FLOAT_EQ(golden_float[i], test_float[i]);
+            }
+        });
+
+        EXPECT_EQ(std::get<Int32Operands>(golden), std::get<Int32Operands>(test));
+        EXPECT_EQ(std::get<Quant8Operands>(golden), std::get<Quant8Operands>(test));
+    }
+}
+
+} // namespace generated_tests
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index cd8a527b26..5a20f44a60 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -16,12 +16,13 @@
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "Event.h"
 #include "VtsHalNeuralnetworksV1_0TargetTest.h"
+#include "Event.h"
+#include "TestHarness.h"
+
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
-#include 
 
 namespace android {
 namespace hardware {
@@ -31,6 +32,11 @@ namespace vts {
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::MixedTypedExampleType;
+namespace generated_tests {
+extern void Execute(const sp<IDevice>&, std::function<Model(void)>,
+                    const std::vector<MixedTypedExampleType>&);
+}
 
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
@@ -107,9 +113,7 @@ Model createTestModel() {
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0,
-                         .offset = 0,
-                         .length = 0},
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -118,9 +122,7 @@ Model createTestModel() {
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0,
-                         .offset = 0,
-                         .length = size},
+            .location = {.poolIndex = 0, .offset = 0, .length = size},
         },
         {
             .type = OperandType::INT32,
@@ -129,9 +131,7 @@ Model createTestModel() {
             .scale = 0.0f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0,
-                         .offset = size,
-                         .length = sizeof(int32_t)},
+            .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
@@ -140,9 +140,7 @@ Model createTestModel() {
             .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0,
-                         .offset = 0,
-                         .length = 0},
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
     };
@@ -172,6 +170,7 @@ Model createTestModel() {
         .pools = pools,
     };
 }
+} // anonymous namespace
 
 // allocator helper
 hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
@@ -192,7 +191,6 @@ hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem
     return memory;
 }
-} // anonymous namespace
 
 // supported subgraph test
 TEST_F(NeuralnetworksHidlTest, SupportedOperationsTest) {
@@ -275,8 +273,15 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
     EXPECT_EQ(expectedData, outputData);
 }
 
+// Mixed-typed examples
+typedef MixedTypedExampleType MixedTypedExample;
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_vts_tests.cpp"
+
 // TODO: Add tests for execution failure, or wait_for/wait_until timeout.
-// Discussion: https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
+// Discussion:
+// https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
 
 } // namespace functional
 } // namespace vts
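For context, each test emitted into all_generated_vts_tests.cpp boils down to a call into generated_tests::Execute() with a generated model factory and a vector of (inputs, golden) example pairs. The sketch below is illustrative only, not generated output: the helper, test name, and data values are placeholders, and it assumes `device` is the sp<IDevice> member held by NeuralnetworksHidlTest; the real definitions come from the generator output under frameworks/ml/nn/runtime/test/generated/ referenced in the commit message.

// Illustrative sketch of a generated test (placeholder names and values).
static std::vector<MixedTypedExample> sketchedExamples() {
    using ::generated_tests::MixedTyped;
    using ::generated_tests::Float32Operands;
    MixedTyped inputs;  // tuple of {Float32Operands, Int32Operands, Quant8Operands}
    MixedTyped golden;
    std::get<Float32Operands>(inputs)[0] = {1.0f, 2.0f, 3.0f, 4.0f};  // input operand 0
    std::get<Float32Operands>(golden)[0] = {2.0f, 4.0f, 6.0f, 8.0f};  // placeholder golden output
    return {{inputs, golden}};  // one (inputs, golden) example pair
}

TEST_F(NeuralnetworksHidlTest, GeneratedExampleSketch) {
    // "device" is assumed to be the IDevice handle owned by the test fixture;
    // createTestModel() is the local model factory defined above.
    generated_tests::Execute(device, createTestModel, sketchedExamples());
}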