diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index a8406de235..abff213847 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -32,12 +32,11 @@ cc_library_static {
         "android.hidl.memory@1.0",
         "libgmock",
         "libhidlmemory",
+        "libneuralnetworks_generated_test_harness",
         "libneuralnetworks_utils",
     ],
     header_libs: [
         "libneuralnetworks_headers",
-        "libneuralnetworks_generated_test_harness_headers",
-        "libneuralnetworks_generated_tests",
     ],
 }
 
@@ -60,13 +59,12 @@ cc_defaults {
         "android.hidl.memory@1.0",
         "libgmock",
         "libhidlmemory",
+        "libneuralnetworks_generated_test_harness",
         "libneuralnetworks_utils",
         "VtsHalNeuralNetworksV1_0_utils",
     ],
     header_libs: [
         "libneuralnetworks_headers",
-        "libneuralnetworks_generated_test_harness_headers",
-        "libneuralnetworks_generated_tests",
     ],
     test_suites: ["general-tests"],
 }
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 40d2f4ceb7..0fd9947ede 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -15,6 +15,7 @@
  */
 
 #include "GeneratedTestHarness.h"
+
 #include "1.0/Callbacks.h"
 #include "1.0/Utils.h"
 #include "MemoryUtils.h"
@@ -28,6 +29,7 @@
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
+#include <algorithm>
 #include <iostream>
 
 namespace android {
@@ -36,6 +38,7 @@ namespace neuralnetworks {
 namespace V1_0 {
 namespace generated_tests {
 
+using namespace test_helper;
 using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
 using ::android::hardware::neuralnetworks::V1_0::IDevice;
 using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
@@ -45,137 +48,111 @@ using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
-using ::test_helper::compare;
-using ::test_helper::filter;
-using ::test_helper::for_all;
-using ::test_helper::MixedTyped;
-using ::test_helper::MixedTypedExample;
-using ::test_helper::resize_accordingly;
+
+Model createModel(const TestModel& testModel) {
+    // Model operands.
+    hidl_vec<Operand> operands(testModel.operands.size());
+    size_t constCopySize = 0, constRefSize = 0;
+    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+        const auto& op = testModel.operands[i];
+
+        DataLocation loc = {};
+        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+            loc = {.poolIndex = 0,
+                   .offset = static_cast<uint32_t>(constCopySize),
+                   .length = static_cast<uint32_t>(op.data.size())};
+            constCopySize += op.data.alignedSize();
+        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+            loc = {.poolIndex = 0,
+                   .offset = static_cast<uint32_t>(constRefSize),
+                   .length = static_cast<uint32_t>(op.data.size())};
+            constRefSize += op.data.alignedSize();
+        }
+
+        operands[i] = {.type = static_cast<OperandType>(op.type),
+                       .dimensions = op.dimensions,
+                       .numberOfConsumers = op.numberOfConsumers,
+                       .scale = op.scale,
+                       .zeroPoint = op.zeroPoint,
+                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
+                       .location = loc};
+    }
+
+    // Model operations.
+    hidl_vec<Operation> operations(testModel.operations.size());
+    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
+                   [](const TestOperation& op) -> Operation {
+                       return {.type = static_cast<OperationType>(op.type),
+                               .inputs = op.inputs,
+                               .outputs = op.outputs};
+                   });
+
+    // Constant copies.
+    hidl_vec<uint8_t> operandValues(constCopySize);
+    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+        const auto& op = testModel.operands[i];
+        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+            const uint8_t* begin = op.data.get<uint8_t>();
+            const uint8_t* end = begin + op.data.size();
+            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
+        }
+    }
+
+    // Shared memory.
+    hidl_vec<hidl_memory> pools;
+    if (constRefSize > 0) {
+        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+        CHECK_NE(pools[0].size(), 0u);
+
+        // load data
+        sp<IMemory> mappedMemory = mapMemory(pools[0]);
+        CHECK(mappedMemory.get() != nullptr);
+        uint8_t* mappedPtr =
+                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+        CHECK(mappedPtr != nullptr);
+
+        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+            const auto& op = testModel.operands[i];
+            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+                const uint8_t* begin = op.data.get<uint8_t>();
+                const uint8_t* end = begin + op.data.size();
+                std::copy(begin, end, mappedPtr + operands[i].location.offset);
+            }
+        }
+    }
+
+    return {.operands = std::move(operands),
+            .operations = std::move(operations),
+            .inputIndexes = testModel.inputIndexes,
+            .outputIndexes = testModel.outputIndexes,
+            .operandValues = std::move(operandValues),
+            .pools = std::move(pools)};
+}
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples, float fpAtol,
-                           float fpRtol) {
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
+    const Request request = createRequest(testModel);
 
-    int example_no = 1;
-    for (auto& example : examples) {
-        SCOPED_TRACE(example_no++);
-        const MixedTyped& inputs = example.operands.first;
-        const MixedTyped& golden = example.operands.second;
+    // Launch execution.
+    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+    Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
+    ASSERT_TRUE(executionLaunchStatus.isOk());
+    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
-        CHECK(inputs.float16Operands.empty()) << "float16 is not supported in 1.0";
+    // Retrieve execution status.
+    executionCallback->wait();
+    ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
 
-        std::vector<RequestArgument> inputs_info, outputs_info;
-        uint32_t inputSize = 0, outputSize = 0;
-        // This function only partially specifies the metadata (vector of RequestArguments).
-        // The contents are copied over below.
-        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
-            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = INPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            RequestArgument arg_empty = {
-                    .hasNoValue = true,
-            };
-            inputs_info[index] = s ? arg : arg_empty;
-            inputSize += s;
-        });
-        // Compute offset for inputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : inputs_info) {
-                if (!i.hasNoValue) i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
+    // Retrieve execution results.
+    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
 
-        MixedTyped test;  // holding test results
-
-        // Go through all outputs, initialize RequestArgument descriptors
-        resize_accordingly(golden, test);
-        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
-            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = OUTPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            outputs_info[index] = arg;
-            outputSize += s;
-        });
-        // Compute offset for outputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : outputs_info) {
-                i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                          nn::allocateSharedMemory(outputSize)};
-        ASSERT_NE(0ull, pools[INPUT].size());
-        ASSERT_NE(0ull, pools[OUTPUT].size());
-
-        // load data
-        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
-        ASSERT_NE(nullptr, inputMemory.get());
-        ASSERT_NE(nullptr, outputMemory.get());
-        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
-        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
-        ASSERT_NE(nullptr, inputPtr);
-        ASSERT_NE(nullptr, outputPtr);
-        inputMemory->update();
-        outputMemory->update();
-
-        // Go through all inputs, copy the values
-        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
-            char* begin = (char*)p;
-            char* end = begin + s;
-            // TODO: handle more than one input
-            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
-        });
-
-        inputMemory->commit();
-        outputMemory->commit();
-
-        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
-
-        // launch execution
-        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-        ASSERT_NE(nullptr, executionCallback.get());
-        Return<ErrorStatus> executionLaunchStatus =
-                preparedModel->execute(request, executionCallback);
-        ASSERT_TRUE(executionLaunchStatus.isOk());
-        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
-
-        // retrieve execution status
-        executionCallback->wait();
-        ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
-
-        // validate results
-        outputMemory->read();
-        copy_back(&test, outputs_info, outputPtr);
-        outputMemory->commit();
-        // Filter out don't cares
-        MixedTyped filtered_golden = filter(golden, is_ignored);
-        MixedTyped filtered_test = filter(test, is_ignored);
-
-        // We want "close-enough" results for float
-        compare(filtered_golden, filtered_test, fpAtol, fpRtol);
-    }
+    // We want "close-enough" results.
+    checkResults(testModel, outputs);
 }
 
-void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
-             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
-    Model model = create_model();
+void Execute(const sp<IDevice>& device, const TestModel& testModel) {
+    Model model = createModel(testModel);
 
     // see if service can handle model
     bool fullySupportsModel = false;
@@ -190,7 +167,6 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
 
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -213,8 +189,7 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, testModel);
 }
 
 }  // namespace generated_tests
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index 337eb0f924..5d22158529 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -26,10 +26,9 @@ namespace neuralnetworks {
 namespace V1_0 {
 namespace generated_tests {
 
-using ::test_helper::MixedTypedExample;
+Model createModel(const ::test_helper::TestModel& testModel);
 
-void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
-             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
+void Execute(const sp<IDevice>& device, const ::test_helper::TestModel& testModel);
 
 }  // namespace generated_tests
 }  // namespace V1_0
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.h b/neuralnetworks/1.0/vts/functional/GeneratedTests.h
index 5cabf68c1d..9528905d61 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.h
@@ -14,20 +14,11 @@
  * limitations under the License.
  */
 
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
+#include "1.0/Utils.h"
 #include "GeneratedTestHarness.h"
-#include "MemoryUtils.h"
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android::hardware::neuralnetworks::V1_0::vts::functional {
-
-std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
-
-}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
-
 namespace android::hardware::neuralnetworks::V1_0::generated_tests {
 
 using namespace android::hardware::neuralnetworks::V1_0::vts::functional;
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 521e524687..5aa27516db 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -14,45 +14,108 @@
  * limitations under the License.
*/ -#include "GeneratedTestHarness.h" +#include "1.0/Utils.h" + +#include "MemoryUtils.h" #include "TestHarness.h" +#include #include +#include +#include +#include -#include -#include +#include #include namespace android { namespace hardware { namespace neuralnetworks { +using namespace test_helper; +using ::android::hardware::neuralnetworks::V1_0::DataLocation; +using ::android::hardware::neuralnetworks::V1_0::Request; using ::android::hardware::neuralnetworks::V1_0::RequestArgument; -using ::test_helper::for_each; -using ::test_helper::MixedTyped; +using ::android::hidl::memory::V1_0::IMemory; -template -void copy_back_(std::map>* dst, const std::vector& ra, - char* src) { - for_each(*dst, [&ra, src](int index, std::vector& m) { - ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T)); - char* begin = src + ra[index].location.offset; - memcpy(m.data(), begin, ra[index].location.length); - }); +constexpr uint32_t kInputPoolIndex = 0; +constexpr uint32_t kOutputPoolIndex = 1; + +Request createRequest(const TestModel& testModel) { + // Model inputs. + hidl_vec inputs(testModel.inputIndexes.size()); + size_t inputSize = 0; + for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) { + const auto& op = testModel.operands[testModel.inputIndexes[i]]; + if (op.data.size() == 0) { + // Omitted input. + inputs[i] = {.hasNoValue = true}; + } else { + DataLocation loc = {.poolIndex = kInputPoolIndex, + .offset = static_cast(inputSize), + .length = static_cast(op.data.size())}; + inputSize += op.data.alignedSize(); + inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}}; + } + } + + // Model outputs. + hidl_vec outputs(testModel.outputIndexes.size()); + size_t outputSize = 0; + for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) { + const auto& op = testModel.operands[testModel.outputIndexes[i]]; + + // In the case of zero-sized output, we should at least provide a one-byte buffer. + // This is because zero-sized tensors are only supported internally to the driver, or + // reported in output shapes. It is illegal for the client to pre-specify a zero-sized + // tensor as model output. Otherwise, we will have two semantic conflicts: + // - "Zero dimension" conflicts with "unspecified dimension". + // - "Omitted operand buffer" conflicts with "zero-sized operand buffer". + size_t bufferSize = std::max(op.data.size(), 1); + + DataLocation loc = {.poolIndex = kOutputPoolIndex, + .offset = static_cast(outputSize), + .length = static_cast(bufferSize)}; + outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize(); + outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}}; + } + + // Allocate memory pools. + hidl_vec pools = {nn::allocateSharedMemory(inputSize), + nn::allocateSharedMemory(outputSize)}; + CHECK_NE(pools[kInputPoolIndex].size(), 0u); + CHECK_NE(pools[kOutputPoolIndex].size(), 0u); + sp inputMemory = mapMemory(pools[kInputPoolIndex]); + CHECK(inputMemory.get() != nullptr); + uint8_t* inputPtr = static_cast(static_cast(inputMemory->getPointer())); + CHECK(inputPtr != nullptr); + + // Copy input data to the memory pool. 
+    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
+        const auto& op = testModel.operands[testModel.inputIndexes[i]];
+        if (op.data.size() > 0) {
+            const uint8_t* begin = op.data.get<uint8_t>();
+            const uint8_t* end = begin + op.data.size();
+            std::copy(begin, end, inputPtr + inputs[i].location.offset);
+        }
+    }
+
+    return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
 }
 
-void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
-    copy_back_(&dst->float32Operands, ra, src);
-    copy_back_(&dst->int32Operands, ra, src);
-    copy_back_(&dst->quant8AsymmOperands, ra, src);
-    copy_back_(&dst->quant16SymmOperands, ra, src);
-    copy_back_(&dst->float16Operands, ra, src);
-    copy_back_(&dst->bool8Operands, ra, src);
-    copy_back_(&dst->quant8ChannelOperands, ra, src);
-    copy_back_(&dst->quant16AsymmOperands, ra, src);
-    copy_back_(&dst->quant8SymmOperands, ra, src);
-    static_assert(9 == MixedTyped::kNumTypes,
-                  "Number of types in MixedTyped changed, but copy_back function wasn't updated");
+std::vector<TestBuffer> getOutputBuffers(const Request& request) {
+    sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
+    CHECK(outputMemory.get() != nullptr);
+    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
+    CHECK(outputPtr != nullptr);
+
+    // Copy out output results.
+    std::vector<TestBuffer> outputBuffers;
+    for (const auto& output : request.outputs) {
+        outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
+    }
+
+    return outputBuffers;
 }
 
 }  // namespace neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 058eb25002..d62365cb6a 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -16,13 +16,7 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
 #include "1.0/Callbacks.h"
-#include "MemoryUtils.h"
-#include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android {
@@ -33,10 +27,6 @@ namespace vts {
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hidl::memory::V1_0::IMemory;
-using test_helper::for_all;
-using test_helper::MixedTyped;
-using test_helper::MixedTypedExample;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -102,103 +92,10 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
 
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
-
-    std::vector<Request> requests;
-
-    for (const MixedTypedExample& example : examples) {
-        const MixedTyped& inputs = example.operands.first;
-        const MixedTyped& outputs = example.operands.second;
-
-        std::vector<RequestArgument> inputs_info, outputs_info;
-        uint32_t inputSize = 0, outputSize = 0;
-
-        // This function only partially specifies the metadata (vector of RequestArguments).
-        // The contents are copied over below.
-        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
-            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = INPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            RequestArgument arg_empty = {
-                    .hasNoValue = true,
-            };
-            inputs_info[index] = s ? arg : arg_empty;
-            inputSize += s;
-        });
-        // Compute offset for inputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : inputs_info) {
-                if (!i.hasNoValue) i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-
-        // Go through all outputs, initialize RequestArgument descriptors
-        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
-            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = OUTPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            outputs_info[index] = arg;
-            outputSize += s;
-        });
-        // Compute offset for outputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : outputs_info) {
-                i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                          nn::allocateSharedMemory(outputSize)};
-        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
-            return {};
-        }
-
-        // map pool
-        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-        if (inputMemory == nullptr) {
-            return {};
-        }
-        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
-        if (inputPtr == nullptr) {
-            return {};
-        }
-
-        // initialize pool
-        inputMemory->update();
-        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
-            char* begin = (char*)p;
-            char* end = begin + s;
-            // TODO: handle more than one input
-            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
-        });
-        inputMemory->commit();
-
-        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
-    }
-
-    return requests;
-}
-
-void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
-                                      const std::vector<Request>& requests) {
-    // validate each request
-    for (const Request& request : requests) {
-        removeInputTest(preparedModel, request);
-        removeOutputTest(preparedModel, request);
-    }
+void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
+                                     const Request& request) {
+    removeInputTest(preparedModel, request);
+    removeOutputTest(preparedModel, request);
 }
 
 }  // namespace functional
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index 95b7ad3e09..626deac143 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -121,7 +121,7 @@ void NeuralnetworksHidlTest::TearDown() {
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+void ValidationTest::validateEverything(const Model& model, const Request& request) {
     validateModel(model);
 
     // create IPreparedModel
@@ -131,7 +131,7 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<R
 
-    validateRequests(preparedModel, requests);
+    validateRequest(preparedModel, request);
 }
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -60,12 +60,11 @@ class ValidationTest : public NeuralnetworksHidlTest {
-    void validateEverything(const Model& model, const std::vector<Request>& requests);
+    void validateEverything(const Model& model, const Request& request);
 
   private:
    void validateModel(const Model& model);
-    void validateRequests(const sp<IPreparedModel>& preparedModel,
-                          const std::vector<Request>& requests);
+    void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
 };
 
 // Tag for the generated tests
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index b270c20450..2955b6e35c 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -26,8 +26,11 @@
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
 
-void copy_back(::test_helper::MixedTyped* dst, const std::vector<V1_0::RequestArgument>& ra,
-               char* src);
+// Create HIDL Request from the TestModel struct.
+V1_0::Request createRequest(const ::test_helper::TestModel& testModel);
+
+// After execution, copy out output results from the output memory pool.
+std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
 
 // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
 // so this is efficiently accomplished by moving the element to the end and
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index 1b31008cbb..86002d21aa 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -34,13 +34,12 @@ cc_defaults {
         "android.hidl.memory@1.0",
         "libgmock",
         "libhidlmemory",
+        "libneuralnetworks_generated_test_harness",
         "libneuralnetworks_utils",
         "VtsHalNeuralNetworksV1_0_utils",
     ],
     header_libs: [
         "libneuralnetworks_headers",
-        "libneuralnetworks_generated_test_harness_headers",
-        "libneuralnetworks_generated_tests",
     ],
     test_suites: ["general-tests"],
 }
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index e7d59eca91..73eeb93a47 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -24,6 +24,7 @@
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
+#include <algorithm>
 #include <iostream>
 
 #include "1.0/Callbacks.h"
@@ -37,8 +38,13 @@ namespace neuralnetworks {
 namespace V1_1 {
 namespace generated_tests {
 
+using namespace test_helper;
+using ::android::hardware::neuralnetworks::V1_0::DataLocation;
 using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
 using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_0::Operand;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
+using ::android::hardware::neuralnetworks::V1_0::OperandType;
 using ::android::hardware::neuralnetworks::V1_0::Request;
 using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
@@ -47,144 +53,112 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
 using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
 using ::android::hardware::neuralnetworks::V1_1::IDevice;
 using ::android::hardware::neuralnetworks::V1_1::Model;
 using ::android::hidl::memory::V1_0::IMemory;
-using ::test_helper::compare;
-using ::test_helper::filter;
-using ::test_helper::for_all;
-using ::test_helper::MixedTyped;
-using ::test_helper::MixedTypedExample;
-using ::test_helper::resize_accordingly;
+
+Model createModel(const TestModel& testModel) {
+    // Model operands.
+    hidl_vec<Operand> operands(testModel.operands.size());
+    size_t constCopySize = 0, constRefSize = 0;
+    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+        const auto& op = testModel.operands[i];
+
+        DataLocation loc = {};
+        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+            loc = {.poolIndex = 0,
+                   .offset = static_cast<uint32_t>(constCopySize),
+                   .length = static_cast<uint32_t>(op.data.size())};
+            constCopySize += op.data.alignedSize();
+        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+            loc = {.poolIndex = 0,
+                   .offset = static_cast<uint32_t>(constRefSize),
+                   .length = static_cast<uint32_t>(op.data.size())};
+            constRefSize += op.data.alignedSize();
+        }
+
+        operands[i] = {.type = static_cast<OperandType>(op.type),
+                       .dimensions = op.dimensions,
+                       .numberOfConsumers = op.numberOfConsumers,
+                       .scale = op.scale,
+                       .zeroPoint = op.zeroPoint,
+                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
+                       .location = loc};
+    }
+
+    // Model operations.
+    hidl_vec<Operation> operations(testModel.operations.size());
+    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
+                   [](const TestOperation& op) -> Operation {
+                       return {.type = static_cast<OperationType>(op.type),
+                               .inputs = op.inputs,
+                               .outputs = op.outputs};
+                   });
+
+    // Constant copies.
+    hidl_vec<uint8_t> operandValues(constCopySize);
+    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+        const auto& op = testModel.operands[i];
+        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+            const uint8_t* begin = op.data.get<uint8_t>();
+            const uint8_t* end = begin + op.data.size();
+            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
+        }
+    }
+
+    // Shared memory.
+    hidl_vec<hidl_memory> pools;
+    if (constRefSize > 0) {
+        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+        CHECK_NE(pools[0].size(), 0u);
+
+        // load data
+        sp<IMemory> mappedMemory = mapMemory(pools[0]);
+        CHECK(mappedMemory.get() != nullptr);
+        uint8_t* mappedPtr =
+                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+        CHECK(mappedPtr != nullptr);
+
+        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+            const auto& op = testModel.operands[i];
+            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+                const uint8_t* begin = op.data.get<uint8_t>();
+                const uint8_t* end = begin + op.data.size();
+                std::copy(begin, end, mappedPtr + operands[i].location.offset);
+            }
+        }
+    }
+
+    return {.operands = std::move(operands),
+            .operations = std::move(operations),
+            .inputIndexes = testModel.inputIndexes,
+            .outputIndexes = testModel.outputIndexes,
+            .operandValues = std::move(operandValues),
+            .pools = std::move(pools),
+            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
+}
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol) {
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
+    const Request request = createRequest(testModel);
 
-    int example_no = 1;
-    for (auto& example : examples) {
-        SCOPED_TRACE(example_no++);
-        const MixedTyped& inputs = example.operands.first;
-        const MixedTyped& golden = example.operands.second;
+    // Launch execution.
+    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+    Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
+    ASSERT_TRUE(executionLaunchStatus.isOk());
+    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
-        const bool hasFloat16Inputs = !inputs.float16Operands.empty();
-        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
-            // TODO: Adjust the error limit based on testing.
-            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-            fpAtol = 5.0f * 0.0009765625f;
-            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-            fpRtol = 5.0f * 0.0009765625f;
-        }
+    // Retrieve execution status.
+    executionCallback->wait();
+    ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
 
-        std::vector<RequestArgument> inputs_info, outputs_info;
-        uint32_t inputSize = 0, outputSize = 0;
-        // This function only partially specifies the metadata (vector of RequestArguments).
-        // The contents are copied over below.
-        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
-            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = INPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            RequestArgument arg_empty = {
-                    .hasNoValue = true,
-            };
-            inputs_info[index] = s ? arg : arg_empty;
-            inputSize += s;
-        });
-        // Compute offset for inputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : inputs_info) {
-                if (!i.hasNoValue) i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
+    // Retrieve execution results.
+    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
 
-        MixedTyped test;  // holding test results
-
-        // Go through all outputs, initialize RequestArgument descriptors
-        resize_accordingly(golden, test);
-        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
-            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = OUTPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            outputs_info[index] = arg;
-            outputSize += s;
-        });
-        // Compute offset for outputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : outputs_info) {
-                i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                          nn::allocateSharedMemory(outputSize)};
-        ASSERT_NE(0ull, pools[INPUT].size());
-        ASSERT_NE(0ull, pools[OUTPUT].size());
-
-        // load data
-        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
-        ASSERT_NE(nullptr, inputMemory.get());
-        ASSERT_NE(nullptr, outputMemory.get());
-        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
-        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
-        ASSERT_NE(nullptr, inputPtr);
-        ASSERT_NE(nullptr, outputPtr);
-        inputMemory->update();
-        outputMemory->update();
-
-        // Go through all inputs, copy the values
-        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
-            char* begin = (char*)p;
-            char* end = begin + s;
-            // TODO: handle more than one input
-            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
-        });
-
-        inputMemory->commit();
-        outputMemory->commit();
-
-        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
-
-        // launch execution
-        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-        ASSERT_NE(nullptr, executionCallback.get());
-        Return<ErrorStatus> executionLaunchStatus =
-                preparedModel->execute(request, executionCallback);
-        ASSERT_TRUE(executionLaunchStatus.isOk());
-        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
-
-        // retrieve execution status
-        executionCallback->wait();
-        ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
-
-        // validate results
-        outputMemory->read();
-        copy_back(&test, outputs_info, outputPtr);
-        outputMemory->commit();
-        // Filter out don't cares
-        MixedTyped filtered_golden = filter(golden, is_ignored);
-        MixedTyped filtered_test = filter(test, is_ignored);
-
-        // We want "close-enough" results for float
-        compare(filtered_golden, filtered_test, fpAtol, fpRtol);
-    }
+    // We want "close-enough" results.
+    checkResults(testModel, outputs);
 }
 
-void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
-             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
-    Model model = create_model();
+void Execute(const sp<IDevice>& device, const TestModel& testModel) {
+    Model model = createModel(testModel);
 
     // see if service can handle model
     bool fullySupportsModel = false;
@@ -199,7 +173,6 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
 
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
             model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
@@ -223,8 +196,7 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f);
+    EvaluatePreparedModel(preparedModel, testModel);
 }
 
 }  // namespace generated_tests
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
index 64b88dd689..56fc8257bd 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
@@ -18,9 +18,6 @@ #define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
 
 #include <android/hardware/neuralnetworks/1.1/IDevice.h>
-#include <functional>
-#include <string>
-#include <vector>
 #include "TestHarness.h"
 
 namespace android {
@@ -29,9 +26,9 @@ namespace neuralnetworks {
 namespace V1_1 {
 namespace generated_tests {
 
-void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
-             std::function<bool(int)> is_ignored,
-             const std::vector<::test_helper::MixedTypedExample>& examples);
+Model createModel(const ::test_helper::TestModel& testModel);
+
+void Execute(const sp<IDevice>& device, const ::test_helper::TestModel& testModel);
 
 }  // namespace generated_tests
 }  // namespace V1_1
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.h b/neuralnetworks/1.1/vts/functional/GeneratedTests.h
index 80442bfece..a55213d2a6 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.h
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.h
@@ -14,20 +14,11 @@
  * limitations under the License.
  */
 
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
+#include "1.0/Utils.h"
 #include "GeneratedTestHarness.h"
-#include "MemoryUtils.h"
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android::hardware::neuralnetworks::V1_1::vts::functional {
-
-std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
-
-}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
-
 namespace android::hardware::neuralnetworks::V1_1::generated_tests {
 
 using namespace android::hardware::neuralnetworks::V1_1::vts::functional;
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index c54972887e..757bee9711 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -16,14 +16,8 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
 #include "1.0/Callbacks.h"
 #include "1.0/Utils.h"
-#include "MemoryUtils.h"
-#include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android {
@@ -35,13 +29,8 @@ namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
 using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_1::IPreparedModel;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::test_helper::for_all;
-using ::test_helper::MixedTyped;
-using ::test_helper::MixedTypedExample;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -87,103 +76,10 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
 
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
-
-    std::vector<Request> requests;
-
-    for (auto& example : examples) {
-        const MixedTyped& inputs = example.operands.first;
-        const MixedTyped& outputs = example.operands.second;
-
-        std::vector<RequestArgument> inputs_info, outputs_info;
-        uint32_t inputSize = 0, outputSize = 0;
-
-        // This function only partially specifies the metadata (vector of RequestArguments).
-        // The contents are copied over below.
-        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
-            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = INPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            RequestArgument arg_empty = {
-                    .hasNoValue = true,
-            };
-            inputs_info[index] = s ? arg : arg_empty;
-            inputSize += s;
-        });
-        // Compute offset for inputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : inputs_info) {
-                if (!i.hasNoValue) i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-
-        // Go through all outputs, initialize RequestArgument descriptors
-        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
-            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = OUTPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            outputs_info[index] = arg;
-            outputSize += s;
-        });
-        // Compute offset for outputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : outputs_info) {
-                i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                          nn::allocateSharedMemory(outputSize)};
-        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
-            return {};
-        }
-
-        // map pool
-        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-        if (inputMemory == nullptr) {
-            return {};
-        }
-        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
-        if (inputPtr == nullptr) {
-            return {};
-        }
-
-        // initialize pool
-        inputMemory->update();
-        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
-            char* begin = (char*)p;
-            char* end = begin + s;
-            // TODO: handle more than one input
-            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
-        });
-        inputMemory->commit();
-
-        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
-    }
-
-    return requests;
-}
-
-void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
-                                      const std::vector<Request>& requests) {
-    // validate each request
-    for (const Request& request : requests) {
-        removeInputTest(preparedModel, request);
-        removeOutputTest(preparedModel, request);
-    }
+void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
+                                     const Request& request) {
+    removeInputTest(preparedModel, request);
+    removeOutputTest(preparedModel, request);
 }
 
 }  // namespace functional
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 12bdd3ffb6..b3b15fa8e5 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -122,7 +122,7 @@ void NeuralnetworksHidlTest::TearDown() {
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+void ValidationTest::validateEverything(const Model& model, const Request& request) {
    validateModel(model);
 
     // create IPreparedModel
@@ -132,7 +132,7 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<R
 
-    validateRequests(preparedModel, requests);
+    validateRequest(preparedModel, request);
 }
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -61,12 +61,11 @@ class ValidationTest : public NeuralnetworksHidlTest {
-    void validateEverything(const Model& model, const std::vector<Request>& requests);
+    void validateEverything(const Model& model, const Request& request);
 
   private:
    void validateModel(const Model& model);
-    void validateRequests(const sp<IPreparedModel>& preparedModel,
-                          const std::vector<Request>& requests);
+    void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
 };
 
 // Tag for the generated tests
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 301ca5d065..e14430f45d 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -37,13 +37,12 @@ cc_defaults {
         "android.hidl.memory@1.0",
         "libgmock",
         "libhidlmemory",
+        "libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils", "VtsHalNeuralNetworksV1_0_utils", ], header_libs: [ "libneuralnetworks_headers", - "libneuralnetworks_generated_test_harness_headers", - "libneuralnetworks_generated_tests", ], test_suites: ["general-tests"], } @@ -75,8 +74,8 @@ cc_test { srcs: [ "BasicTests.cpp", ":VtsHalNeuralNetworksV1_2_all_generated_V1_2_tests", + ":VtsHalNeuralNetworksV1_2_mobilenets", "CompilationCachingTests.cpp", - ":VtsHalNeuralNetworksV1_2_mobilenets", // CompilationCachingTests depend on MobileNets. "ValidateBurst.cpp", ], } diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp index 590764635e..8747fb3bf5 100644 --- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp +++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp @@ -35,22 +35,14 @@ #include "Utils.h" #include "VtsHalNeuralnetworks.h" -namespace android::hardware::neuralnetworks::V1_2 { +// Forward declaration of the mobilenet generated test models in +// frameworks/ml/nn/runtime/test/generated/. namespace generated_tests::mobilenet_224_gender_basic_fixed { -Model createTestModel(); +const ::test_helper::TestModel& get_test_model(); } // namespace generated_tests::mobilenet_224_gender_basic_fixed -} // namespace android::hardware::neuralnetworks::V1_2 - -namespace generated_tests::mobilenet_224_gender_basic_fixed { -std::vector& get_examples(); -} // namespace generated_tests::mobilenet_224_gender_basic_fixed - -namespace android::hardware::neuralnetworks::V1_2::generated_tests::mobilenet_quantized { -Model createTestModel(); -} // namespace android::hardware::neuralnetworks::V1_2::generated_tests::mobilenet_quantized namespace generated_tests::mobilenet_quantized { -std::vector& get_examples(); +const ::test_helper::TestModel& get_test_model(); } // namespace generated_tests::mobilenet_quantized namespace android { @@ -60,49 +52,23 @@ namespace V1_2 { namespace vts { namespace functional { +using namespace test_helper; using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference; using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback; using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback; using ::android::hidl::memory::V1_0::IMemory; using ::android::nn::allocateSharedMemory; -using ::test_helper::MixedTypedExample; namespace float32_model { -constexpr auto createTestModel = ::android::hardware::neuralnetworks::V1_2::generated_tests:: - mobilenet_224_gender_basic_fixed::createTestModel; -constexpr auto get_examples = ::generated_tests::mobilenet_224_gender_basic_fixed::get_examples; - -// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h. -// This function assumes the operation is always ADD. 
-std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
-    float outputValue = 1.0f + static_cast<float>(len);
-    return {{.operands = {
-                     // Input
-                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
-                     // Output
-                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
-}
+constexpr auto get_test_model = ::generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
 
 }  // namespace float32_model
 
 namespace quant8_model {
 
-constexpr auto createTestModel = ::android::hardware::neuralnetworks::V1_2::generated_tests::
-        mobilenet_quantized::createTestModel;
-constexpr auto get_examples = ::generated_tests::mobilenet_quantized::get_examples;
-
-// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
-// This function assumes the operation is always ADD.
-std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
-    uint8_t outputValue = 1 + static_cast<uint8_t>(len);
-    return {{.operands = {// Input
-                          {.operandDimensions = {{0, {1}}}, .quant8AsymmOperands = {{0, {1}}}},
-                          // Output
-                          {.operandDimensions = {{0, {1}}},
-                           .quant8AsymmOperands = {{0, {outputValue}}}}}}};
-}
+constexpr auto get_test_model = ::generated_tests::mobilenet_quantized::get_test_model;
 
 }  // namespace quant8_model
 
@@ -155,39 +121,34 @@ void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups,
 // [1] [1] [1] [1]
 //
 // This function assumes the operation is either ADD or MUL.
-template <typename CppType, OperandType operandType>
-Model createLargeTestModelImpl(OperationType op, uint32_t len) {
-    EXPECT_TRUE(op == OperationType::ADD || op == OperationType::MUL);
+template <typename CppType, TestOperandType operandType>
+TestModel createLargeTestModelImpl(TestOperationType op, uint32_t len) {
+    EXPECT_TRUE(op == TestOperationType::ADD || op == TestOperationType::MUL);
 
     // Model operations and operands.
-    std::vector<Operation> operations(len);
-    std::vector<Operand> operands(len * 2 + 2);
-
-    // The constant buffer pool. This contains the activation scalar, followed by the
-    // per-operation constant operands.
-    std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(CppType));
+    std::vector<TestOperation> operations(len);
+    std::vector<TestOperand> operands(len * 2 + 2);
 
     // The activation scalar, value = 0.
     operands[0] = {
-            .type = OperandType::INT32,
+            .type = TestOperandType::INT32,
             .dimensions = {},
             .numberOfConsumers = len,
             .scale = 0.0f,
             .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
+            .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+            .data = TestBuffer::createFromVector<int32_t>({0}),
     };
-    memset(operandValues.data(), 0, sizeof(int32_t));
 
     // The buffer value of the constant second operand. The logical value is always 1.0f.
     CppType bufferValue;
     // The scale of the first and second operand.
     float scale1, scale2;
-    if (operandType == OperandType::TENSOR_FLOAT32) {
+    if (operandType == TestOperandType::TENSOR_FLOAT32) {
        bufferValue = 1.0f;
        scale1 = 0.0f;
        scale2 = 0.0f;
-    } else if (op == OperationType::ADD) {
+    } else if (op == TestOperationType::ADD) {
        bufferValue = 1;
        scale1 = 1.0f;
        scale2 = 1.0f;
@@ -211,9 +172,9 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
                 .numberOfConsumers = 1,
                 .scale = scale1,
                 .zeroPoint = 0,
-                .lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
-                                    : OperandLifeTime::TEMPORARY_VARIABLE),
-                .location = {},
+                .lifetime = (i == 0 ? TestOperandLifeTime::MODEL_INPUT
+                                    : TestOperandLifeTime::TEMPORARY_VARIABLE),
+                .data = (i == 0 ? TestBuffer::createFromVector<CppType>({1}) : TestBuffer()),
        };
 
        // The second operation input, value = 1.
@@ -223,13 +184,9 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
                 .numberOfConsumers = 1,
                 .scale = scale2,
                 .zeroPoint = 0,
-                .lifetime = OperandLifeTime::CONSTANT_COPY,
-                .location = {.poolIndex = 0,
-                             .offset = static_cast<uint32_t>(i * sizeof(CppType) + sizeof(int32_t)),
-                             .length = sizeof(CppType)},
+                .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+                .data = TestBuffer::createFromVector<CppType>({bufferValue}),
        };
-        memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(CppType), &bufferValue,
-               sizeof(CppType));
 
        // The operation. All operations share the same activation scalar.
        // The output operand is created as an input in the next iteration of the loop, in the case
@@ -242,6 +199,10 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
        };
    }
 
+    // For TestOperationType::ADD, output = 1 + 1 * len = len + 1
+    // For TestOperationType::MUL, output = 1 * 1 ^ len = 1
+    CppType outputResult = static_cast<CppType>(op == TestOperationType::ADD ? len + 1u : 1u);
+
    // The model output.
    operands.back() = {
            .type = operandType,
@@ -249,21 +210,16 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
            .numberOfConsumers = 0,
            .scale = scale1,
            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {},
+            .lifetime = TestOperandLifeTime::MODEL_OUTPUT,
+            .data = TestBuffer::createFromVector<CppType>({outputResult}),
    };
 
-    const std::vector<uint32_t> inputIndexes = {1};
-    const std::vector<uint32_t> outputIndexes = {len * 2 + 1};
-    const std::vector<hidl_memory> pools = {};
-
    return {
-            .operands = operands,
-            .operations = operations,
-            .inputIndexes = inputIndexes,
-            .outputIndexes = outputIndexes,
-            .operandValues = operandValues,
-            .pools = pools,
+            .operands = std::move(operands),
+            .operations = std::move(operations),
+            .inputIndexes = {1},
+            .outputIndexes = {len * 2 + 1},
+            .isRelaxed = false,
    };
 }
 
@@ -332,35 +288,21 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
 
    // Model and examples creators. According to kOperandType, the following methods will return
    // either float32 model/examples or the quant8 variant.
-    Model createTestModel() {
+    TestModel createTestModel() {
        if (kOperandType == OperandType::TENSOR_FLOAT32) {
-            return float32_model::createTestModel();
+            return float32_model::get_test_model();
        } else {
-            return quant8_model::createTestModel();
+            return quant8_model::get_test_model();
        }
    }
 
-    std::vector<MixedTypedExample> get_examples() {
+    TestModel createLargeTestModel(OperationType op, uint32_t len) {
        if (kOperandType == OperandType::TENSOR_FLOAT32) {
-            return float32_model::get_examples();
+            return createLargeTestModelImpl<float, TestOperandType::TENSOR_FLOAT32>(
+                    static_cast<TestOperationType>(op), len);
        } else {
-            return quant8_model::get_examples();
-        }
-    }
-
-    Model createLargeTestModel(OperationType op, uint32_t len) {
-        if (kOperandType == OperandType::TENSOR_FLOAT32) {
-            return createLargeTestModelImpl<float, OperandType::TENSOR_FLOAT32>(op, len);
-        } else {
-            return createLargeTestModelImpl<uint8_t, OperandType::TENSOR_QUANT8_ASYMM>(op, len);
-        }
-    }
-
-    std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
-        if (kOperandType == OperandType::TENSOR_FLOAT32) {
-            return float32_model::getLargeModelExamples(len);
-        } else {
-            return quant8_model::getLargeModelExamples(len);
+            return createLargeTestModelImpl<uint8_t, TestOperandType::TENSOR_QUANT8_ASYMM>(
+                    static_cast<TestOperationType>(op), len);
        }
    }
 
@@ -482,8 +424,9 @@ class CompilationCachingTest : public CompilationCachingTestBase,
 
 TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
    // Create test HIDL model and compile.
-    const Model testModel = createTestModel();
-    if (checkEarlyTermination(testModel)) return;
+    const TestModel& testModel = createTestModel();
+    const Model model = generated_tests::createModel(testModel);
+    if (checkEarlyTermination(model)) return;
    sp<IPreparedModel> preparedModel = nullptr;
 
    // Save the compilation to cache.
    {
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache);
+        saveModelToCache(model, modelCache, dataCache);
    }
 
    // Retrieve preparedModel from cache.
@@ -516,15 +459,15 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
    }
 
    // Execute and verify results.
-    generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
-                                           testModel.relaxComputationFloat32toFloat16,
+    generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                            /*testDynamicOutputShape=*/false);
 }
 
 TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
    // Create test HIDL model and compile.
-    const Model testModel = createTestModel();
-    if (checkEarlyTermination(testModel)) return;
+    const TestModel& testModel = createTestModel();
+    const Model model = generated_tests::createModel(testModel);
+    if (checkEarlyTermination(model)) return;
    sp<IPreparedModel> preparedModel = nullptr;
 
    // Save the compilation to cache.
@@ -545,7 +488,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
                      write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
                      sizeof(dummyBytes));
        }
-        saveModelToCache(testModel, modelCache, dataCache);
+        saveModelToCache(model, modelCache, dataCache);
    }
 
    // Retrieve preparedModel from cache.
@@ -579,15 +522,15 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
    }
 
    // Execute and verify results.
-    generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
-                                           testModel.relaxComputationFloat32toFloat16,
+    generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                            /*testDynamicOutputShape=*/false);
 }
 
 TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
    // Create test HIDL model and compile.
-    const Model testModel = createTestModel();
-    if (checkEarlyTermination(testModel)) return;
+    const TestModel& testModel = createTestModel();
+    const Model model = generated_tests::createModel(testModel);
+    if (checkEarlyTermination(model)) return;
 
    // Test with number of model cache files greater than mNumModelCache.
    {
@@ -598,12 +541,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.pop_back();
        sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
-        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
-                                               get_examples(),
-                                               testModel.relaxComputationFloat32toFloat16,
+        generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
@@ -625,12 +566,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mModelCache.push_back(tmp);
        sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
-        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
-                                               get_examples(),
-                                               testModel.relaxComputationFloat32toFloat16,
+        generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
@@ -651,12 +590,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.pop_back();
        sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
-        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
-                                               get_examples(),
-                                               testModel.relaxComputationFloat32toFloat16,
+        generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
@@ -678,12 +615,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
        mDataCache.push_back(tmp);
        sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
+        saveModelToCache(model, modelCache, dataCache, &preparedModel);
        ASSERT_NE(preparedModel, nullptr);
        // Execute and verify results.
-        generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
-                                               get_examples(),
-                                               testModel.relaxComputationFloat32toFloat16,
+        generated_tests::EvaluatePreparedModel(preparedModel, testModel,
                                               /*testDynamicOutputShape=*/false);
        // Check if prepareModelFromCache fails.
        preparedModel = nullptr;
@@ -698,15 +633,16 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
    // Create test HIDL model and compile.
-    const Model testModel = createTestModel();
-    if (checkEarlyTermination(testModel)) return;
+    const TestModel& testModel = createTestModel();
+    const Model model = generated_tests::createModel(testModel);
+    if (checkEarlyTermination(model)) return;
 
    // Save the compilation to cache.
    {
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache);
+        saveModelToCache(model, modelCache, dataCache);
    }
 
    // Test with number of model cache files greater than mNumModelCache.
@@ -778,8 +714,9 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
 
 TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
    // Create test HIDL model and compile.
-    const Model testModel = createTestModel();
-    if (checkEarlyTermination(testModel)) return;
+    const TestModel& testModel = createTestModel();
+    const Model model = generated_tests::createModel(testModel);
+    if (checkEarlyTermination(model)) return;
 
    // Go through each handle in model cache, test with NumFd greater than 1.
for (uint32_t i = 0; i < mNumModelCache; i++) { @@ -790,12 +727,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) { createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); mModelCache[i].pop_back(); sp preparedModel = nullptr; - saveModelToCache(testModel, modelCache, dataCache, &preparedModel); + saveModelToCache(model, modelCache, dataCache, &preparedModel); ASSERT_NE(preparedModel, nullptr); // Execute and verify results. - generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, - get_examples(), - testModel.relaxComputationFloat32toFloat16, + generated_tests::EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false); // Check if prepareModelFromCache fails. preparedModel = nullptr; @@ -817,12 +752,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) { createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); mModelCache[i].push_back(tmp); sp preparedModel = nullptr; - saveModelToCache(testModel, modelCache, dataCache, &preparedModel); + saveModelToCache(model, modelCache, dataCache, &preparedModel); ASSERT_NE(preparedModel, nullptr); // Execute and verify results. - generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, - get_examples(), - testModel.relaxComputationFloat32toFloat16, + generated_tests::EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false); // Check if prepareModelFromCache fails. preparedModel = nullptr; @@ -843,12 +776,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) { createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); mDataCache[i].pop_back(); sp preparedModel = nullptr; - saveModelToCache(testModel, modelCache, dataCache, &preparedModel); + saveModelToCache(model, modelCache, dataCache, &preparedModel); ASSERT_NE(preparedModel, nullptr); // Execute and verify results. - generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, - get_examples(), - testModel.relaxComputationFloat32toFloat16, + generated_tests::EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false); // Check if prepareModelFromCache fails. preparedModel = nullptr; @@ -870,12 +801,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) { createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); mDataCache[i].push_back(tmp); sp preparedModel = nullptr; - saveModelToCache(testModel, modelCache, dataCache, &preparedModel); + saveModelToCache(model, modelCache, dataCache, &preparedModel); ASSERT_NE(preparedModel, nullptr); // Execute and verify results. - generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, - get_examples(), - testModel.relaxComputationFloat32toFloat16, + generated_tests::EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false); // Check if prepareModelFromCache fails. preparedModel = nullptr; @@ -890,15 +819,16 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) { TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) { // Create test HIDL model and compile. - const Model testModel = createTestModel(); - if (checkEarlyTermination(testModel)) return; + const TestModel& testModel = createTestModel(); + const Model model = generated_tests::createModel(testModel); + if (checkEarlyTermination(model)) return; // Save the compilation to cache. 
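    // A well-formed save must succeed first so that the prepareModelFromCache calls below
    // fail because of the malformed handles rather than an empty cache.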
{ hidl_vec modelCache, dataCache; createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); - saveModelToCache(testModel, modelCache, dataCache); + saveModelToCache(model, modelCache, dataCache); } // Go through each handle in model cache, test with NumFd greater than 1. @@ -970,8 +900,9 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) { TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) { // Create test HIDL model and compile. - const Model testModel = createTestModel(); - if (checkEarlyTermination(testModel)) return; + const TestModel& testModel = createTestModel(); + const Model model = generated_tests::createModel(testModel); + if (checkEarlyTermination(model)) return; std::vector modelCacheMode(mNumModelCache, AccessMode::READ_WRITE); std::vector dataCacheMode(mNumDataCache, AccessMode::READ_WRITE); @@ -983,12 +914,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) { createCacheHandles(mDataCache, dataCacheMode, &dataCache); modelCacheMode[i] = AccessMode::READ_WRITE; sp preparedModel = nullptr; - saveModelToCache(testModel, modelCache, dataCache, &preparedModel); + saveModelToCache(model, modelCache, dataCache, &preparedModel); ASSERT_NE(preparedModel, nullptr); // Execute and verify results. - generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, - get_examples(), - testModel.relaxComputationFloat32toFloat16, + generated_tests::EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false); // Check if prepareModelFromCache fails. preparedModel = nullptr; @@ -1008,12 +937,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) { createCacheHandles(mDataCache, dataCacheMode, &dataCache); dataCacheMode[i] = AccessMode::READ_WRITE; sp preparedModel = nullptr; - saveModelToCache(testModel, modelCache, dataCache, &preparedModel); + saveModelToCache(model, modelCache, dataCache, &preparedModel); ASSERT_NE(preparedModel, nullptr); // Execute and verify results. - generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, - get_examples(), - testModel.relaxComputationFloat32toFloat16, + generated_tests::EvaluatePreparedModel(preparedModel, testModel, /*testDynamicOutputShape=*/false); // Check if prepareModelFromCache fails. preparedModel = nullptr; @@ -1028,8 +955,9 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) { TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) { // Create test HIDL model and compile. - const Model testModel = createTestModel(); - if (checkEarlyTermination(testModel)) return; + const TestModel& testModel = createTestModel(); + const Model model = generated_tests::createModel(testModel); + if (checkEarlyTermination(model)) return; std::vector modelCacheMode(mNumModelCache, AccessMode::READ_WRITE); std::vector dataCacheMode(mNumDataCache, AccessMode::READ_WRITE); @@ -1038,7 +966,7 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) { hidl_vec modelCache, dataCache; createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); - saveModelToCache(testModel, modelCache, dataCache); + saveModelToCache(model, modelCache, dataCache); } // Go through each handle in model cache, test with invalid access mode. 
@@ -1106,12 +1034,14 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) { if (!mIsCachingSupported) return; // Create test models and check if fully supported by the service. - const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); - if (checkEarlyTermination(testModelMul)) return; - const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); - if (checkEarlyTermination(testModelAdd)) return; + const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); + const Model modelMul = generated_tests::createModel(testModelMul); + if (checkEarlyTermination(modelMul)) return; + const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); + const Model modelAdd = generated_tests::createModel(testModelAdd); + if (checkEarlyTermination(modelAdd)) return; - // Save the testModelMul compilation to cache. + // Save the modelMul compilation to cache. auto modelCacheMul = mModelCache; for (auto& cache : modelCacheMul) { cache[0].append("_mul"); @@ -1120,15 +1050,15 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) { hidl_vec modelCache, dataCache; createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache); createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); - saveModelToCache(testModelMul, modelCache, dataCache); + saveModelToCache(modelMul, modelCache, dataCache); } - // Use a different token for testModelAdd. + // Use a different token for modelAdd. mToken[0]++; // This test is probabilistic, so we run it multiple times. for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) { - // Save the testModelAdd compilation to cache. + // Save the modelAdd compilation to cache. { hidl_vec modelCache, dataCache; createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); @@ -1136,7 +1066,7 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) { // Spawn a thread to copy the cache content concurrently while saving to cache. std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache)); - saveModelToCache(testModelAdd, modelCache, dataCache); + saveModelToCache(modelAdd, modelCache, dataCache); thread.join(); } @@ -1155,11 +1085,8 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) { ASSERT_EQ(preparedModel, nullptr); } else { ASSERT_NE(preparedModel, nullptr); - generated_tests::EvaluatePreparedModel( - preparedModel, [](int) { return false; }, - getLargeModelExamples(kLargeModelSize), - testModelAdd.relaxComputationFloat32toFloat16, - /*testDynamicOutputShape=*/false); + generated_tests::EvaluatePreparedModel(preparedModel, testModelAdd, + /*testDynamicOutputShape=*/false); } } } @@ -1169,12 +1096,14 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) { if (!mIsCachingSupported) return; // Create test models and check if fully supported by the service. 
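    // Two models (MUL and ADD) are cached under different tokens; the test then copies the
    // MUL cache files over the ADD cache files while the driver prepares from the cache, and
    // expects the driver to either reject the tampered cache or still compute correct ADD
    // results.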
- const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); - if (checkEarlyTermination(testModelMul)) return; - const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); - if (checkEarlyTermination(testModelAdd)) return; + const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); + const Model modelMul = generated_tests::createModel(testModelMul); + if (checkEarlyTermination(modelMul)) return; + const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); + const Model modelAdd = generated_tests::createModel(testModelAdd); + if (checkEarlyTermination(modelAdd)) return; - // Save the testModelMul compilation to cache. + // Save the modelMul compilation to cache. auto modelCacheMul = mModelCache; for (auto& cache : modelCacheMul) { cache[0].append("_mul"); @@ -1183,20 +1112,20 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) { hidl_vec modelCache, dataCache; createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache); createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); - saveModelToCache(testModelMul, modelCache, dataCache); + saveModelToCache(modelMul, modelCache, dataCache); } - // Use a different token for testModelAdd. + // Use a different token for modelAdd. mToken[0]++; // This test is probabilistic, so we run it multiple times. for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) { - // Save the testModelAdd compilation to cache. + // Save the modelAdd compilation to cache. { hidl_vec modelCache, dataCache; createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); - saveModelToCache(testModelAdd, modelCache, dataCache); + saveModelToCache(modelAdd, modelCache, dataCache); } // Retrieve preparedModel from cache. @@ -1218,11 +1147,8 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) { ASSERT_EQ(preparedModel, nullptr); } else { ASSERT_NE(preparedModel, nullptr); - generated_tests::EvaluatePreparedModel( - preparedModel, [](int) { return false; }, - getLargeModelExamples(kLargeModelSize), - testModelAdd.relaxComputationFloat32toFloat16, - /*testDynamicOutputShape=*/false); + generated_tests::EvaluatePreparedModel(preparedModel, testModelAdd, + /*testDynamicOutputShape=*/false); } } } @@ -1232,12 +1158,14 @@ TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) { if (!mIsCachingSupported) return; // Create test models and check if fully supported by the service. - const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); - if (checkEarlyTermination(testModelMul)) return; - const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); - if (checkEarlyTermination(testModelAdd)) return; + const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); + const Model modelMul = generated_tests::createModel(testModelMul); + if (checkEarlyTermination(modelMul)) return; + const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); + const Model modelAdd = generated_tests::createModel(testModelAdd); + if (checkEarlyTermination(modelAdd)) return; - // Save the testModelMul compilation to cache. + // Save the modelMul compilation to cache. 
        auto modelCacheMul = mModelCache;
        for (auto& cache : modelCacheMul) {
            cache[0].append("_mul");
        }
@@ -1246,21 +1174,21 @@ TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModelMul, modelCache, dataCache);
+        saveModelToCache(modelMul, modelCache, dataCache);
    }

-    // Use a different token for testModelAdd.
+    // Use a different token for modelAdd.
    mToken[0]++;

-    // Save the testModelAdd compilation to cache.
+    // Save the modelAdd compilation to cache.
    {
        hidl_vec<hidl_handle> modelCache, dataCache;
        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModelAdd, modelCache, dataCache);
+        saveModelToCache(modelAdd, modelCache, dataCache);
    }

-    // Replace the model cache of testModelAdd with testModelMul.
+    // Replace the model cache of modelAdd with modelMul.
    copyCacheFiles(modelCacheMul, mModelCache);

    // Retrieve the preparedModel from cache, expect failure.
@@ -1336,15 +1264,16 @@ class CompilationCachingSecurityTest
    // The modifier accepts one pointer argument "skip" as the return value, indicating
    // whether the test should be skipped or not.
    void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
-        const Model testModel = createTestModel();
-        if (checkEarlyTermination(testModel)) return;
+        const TestModel& testModel = createTestModel();
+        const Model model = generated_tests::createModel(testModel);
+        if (checkEarlyTermination(model)) return;

        // Save the compilation to cache.
        {
            hidl_vec<hidl_handle> modelCache, dataCache;
            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-            saveModelToCache(testModel, modelCache, dataCache);
+            saveModelToCache(model, modelCache, dataCache);
        }

        bool skip = false;
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 82cc73db95..1dcebbe39c 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -31,7 +31,10 @@
 #include
 #include
+#include
+#include
 #include
+#include

 #include "1.0/Utils.h"
 #include "1.2/Callbacks.h"
@@ -46,7 +49,10 @@ namespace neuralnetworks {
 namespace V1_2 {
 namespace generated_tests {

+using namespace test_helper;
+using ::android::hardware::neuralnetworks::V1_0::DataLocation;
 using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
 using ::android::hardware::neuralnetworks::V1_0::Request;
 using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
 using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
@@ -60,29 +66,122 @@ using ::android::hardware::neuralnetworks::V1_2::Timing;
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
-using ::test_helper::compare;
-using ::test_helper::expectMultinomialDistributionWithinTolerance;
-using ::test_helper::filter;
-using ::test_helper::for_all;
-using ::test_helper::for_each;
-using ::test_helper::MixedTyped;
-using ::test_helper::MixedTypedExample;
-using ::test_helper::resize_accordingly;
using HidlToken = hidl_array(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; -static bool isZeroSized(const MixedTyped& example, uint32_t index) { - for (auto i : example.operandDimensions.at(index)) { - if (i == 0) return true; +enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT }; + +Model createModel(const TestModel& testModel) { + // Model operands. + hidl_vec operands(testModel.operands.size()); + size_t constCopySize = 0, constRefSize = 0; + for (uint32_t i = 0; i < testModel.operands.size(); i++) { + const auto& op = testModel.operands[i]; + + DataLocation loc = {}; + if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) { + loc = {.poolIndex = 0, + .offset = static_cast(constCopySize), + .length = static_cast(op.data.size())}; + constCopySize += op.data.alignedSize(); + } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) { + loc = {.poolIndex = 0, + .offset = static_cast(constRefSize), + .length = static_cast(op.data.size())}; + constRefSize += op.data.alignedSize(); + } + + Operand::ExtraParams extraParams; + if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { + extraParams.channelQuant(SymmPerChannelQuantParams{ + .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim}); + } + + operands[i] = {.type = static_cast(op.type), + .dimensions = op.dimensions, + .numberOfConsumers = op.numberOfConsumers, + .scale = op.scale, + .zeroPoint = op.zeroPoint, + .lifetime = static_cast(op.lifetime), + .location = loc, + .extraParams = std::move(extraParams)}; } - return false; + + // Model operations. + hidl_vec operations(testModel.operations.size()); + std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(), + [](const TestOperation& op) -> Operation { + return {.type = static_cast(op.type), + .inputs = op.inputs, + .outputs = op.outputs}; + }); + + // Constant copies. + hidl_vec operandValues(constCopySize); + for (uint32_t i = 0; i < testModel.operands.size(); i++) { + const auto& op = testModel.operands[i]; + if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) { + const uint8_t* begin = op.data.get(); + const uint8_t* end = begin + op.data.size(); + std::copy(begin, end, operandValues.data() + operands[i].location.offset); + } + } + + // Shared memory. 
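+    // All CONSTANT_REFERENCE operand data is packed into a single shared memory pool
+    // (pool index 0); the DataLocation offsets computed above index into this pool.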
+    hidl_vec<hidl_memory> pools = {};
+    if (constRefSize > 0) {
+        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+        CHECK_NE(pools[0].size(), 0u);
+
+        // load data
+        sp<IMemory> mappedMemory = mapMemory(pools[0]);
+        CHECK(mappedMemory.get() != nullptr);
+        uint8_t* mappedPtr =
+                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+        CHECK(mappedPtr != nullptr);
+
+        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+            const auto& op = testModel.operands[i];
+            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+                const uint8_t* begin = op.data.get<uint8_t>();
+                const uint8_t* end = begin + op.data.size();
+                std::copy(begin, end, mappedPtr + operands[i].location.offset);
+            }
+        }
+    }
+
+    return {.operands = std::move(operands),
+            .operations = std::move(operations),
+            .inputIndexes = testModel.inputIndexes,
+            .outputIndexes = testModel.outputIndexes,
+            .operandValues = std::move(operandValues),
+            .pools = std::move(pools),
+            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
 }

-static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
+    const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
+    return byteSize > 1u;
+}
+
+static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
+    auto& length = request->outputs[outputIndex].location.length;
+    ASSERT_GT(length, 1u);
+    length -= 1u;
+}
+
+static void makeOutputDimensionsUnspecified(Model* model) {
+    for (auto i : model->outputIndexes) {
+        auto& dims = model->operands[i].dimensions;
+        std::fill(dims.begin(), dims.end(), 0);
+    }
+}
+
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
                                                 sp<ExecutionCallback>& callback) {
    return preparedModel->execute_1_2(request, measure, callback);
 }
-static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
                                                 hidl_vec<OutputShape>* outputShapes,
                                                 Timing* timing) {
@@ -105,294 +204,168 @@ static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
    return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
 }
 enum class Executor { ASYNC, SYNC, BURST };
-enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
-const float kDefaultAtol = 1e-5f;
-const float kDefaultRtol = 1e-5f;
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                            Executor executor, MeasureTiming measure, OutputType outputType) {
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
+    // If output0 does not have a size larger than one byte, we cannot test with an insufficient buffer.
+    if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+        return;
+    }

-    int example_no = 1;
-    for (auto& example : examples) {
-        SCOPED_TRACE(example_no++);
-        const MixedTyped& inputs = example.operands.first;
-        const MixedTyped& golden = example.operands.second;
+    Request request = createRequest(testModel);
+    if (outputType == OutputType::INSUFFICIENT) {
+        makeOutputInsufficientSize(/*outputIndex=*/0, &request);
+    }

-        const bool hasFloat16Inputs = !inputs.float16Operands.empty();
-        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
-            // TODO: Adjust the error limit based on testing.
- // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16. - fpAtol = 5.0f * 0.0009765625f; - // Set the relative tolerance to be 5ULP of the corresponding FP precision. - fpRtol = 5.0f * 0.0009765625f; + ErrorStatus executionStatus; + hidl_vec outputShapes; + Timing timing; + switch (executor) { + case Executor::ASYNC: { + SCOPED_TRACE("asynchronous"); + + // launch execution + sp executionCallback = new ExecutionCallback(); + Return executionLaunchStatus = + ExecutePreparedModel(preparedModel, request, measure, executionCallback); + ASSERT_TRUE(executionLaunchStatus.isOk()); + EXPECT_EQ(ErrorStatus::NONE, static_cast(executionLaunchStatus)); + + // retrieve execution status + executionCallback->wait(); + executionStatus = executionCallback->getStatus(); + outputShapes = executionCallback->getOutputShapes(); + timing = executionCallback->getTiming(); + + break; } + case Executor::SYNC: { + SCOPED_TRACE("synchronous"); - std::vector inputs_info, outputs_info; - uint32_t inputSize = 0, outputSize = 0; - // This function only partially specifies the metadata (vector of RequestArguments). - // The contents are copied over below. - for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) { - if (inputs_info.size() <= static_cast(index)) inputs_info.resize(index + 1); - RequestArgument arg = { - .location = {.poolIndex = INPUT, - .offset = 0, - .length = static_cast(s)}, - .dimensions = {}, - }; - RequestArgument arg_empty = { - .hasNoValue = true, - }; - inputs_info[index] = s ? arg : arg_empty; - inputSize += s; - }); - // Compute offset for inputs 1 and so on - { - size_t offset = 0; - for (auto& i : inputs_info) { - if (!i.hasNoValue) i.location.offset = offset; - offset += i.location.length; + // execute + Return executionReturnStatus = + ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing); + ASSERT_TRUE(executionReturnStatus.isOk()); + executionStatus = static_cast(executionReturnStatus); + + break; + } + case Executor::BURST: { + SCOPED_TRACE("burst"); + + // create burst + const std::shared_ptr<::android::nn::ExecutionBurstController> controller = + CreateBurst(preparedModel); + ASSERT_NE(nullptr, controller.get()); + + // create memory keys + std::vector keys(request.pools.size()); + for (size_t i = 0; i < keys.size(); ++i) { + keys[i] = reinterpret_cast(&request.pools[i]); } - } - MixedTyped test; // holding test results + // execute burst + std::tie(executionStatus, outputShapes, timing) = + controller->compute(request, measure, keys); - // Go through all outputs, initialize RequestArgument descriptors - resize_accordingly(golden, test); - bool sizeLargerThanOne = true; - for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne]( - int index, auto, auto s) { - if (outputs_info.size() <= static_cast(index)) outputs_info.resize(index + 1); - if (index == 0) { - // On OutputType::INSUFFICIENT, set the output operand with index 0 with - // buffer size one byte less than needed. 
- if (outputType == OutputType::INSUFFICIENT) { - if (s > 1 && !isZeroSized(golden, index)) { - s -= 1; - } else { - sizeLargerThanOne = false; - } - } - } - RequestArgument arg = { - .location = {.poolIndex = OUTPUT, - .offset = 0, - .length = static_cast(s)}, - .dimensions = {}, - }; - outputs_info[index] = arg; - outputSize += s; - }); - // If output0 does not have size larger than one byte, - // we can not provide an insufficient buffer - if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return; - // Compute offset for outputs 1 and so on - { - size_t offset = 0; - for (auto& i : outputs_info) { - i.location.offset = offset; - offset += i.location.length; - } - } - std::vector pools = {nn::allocateSharedMemory(inputSize), - nn::allocateSharedMemory(outputSize)}; - ASSERT_NE(0ull, pools[INPUT].size()); - ASSERT_NE(0ull, pools[OUTPUT].size()); - - // load data - sp inputMemory = mapMemory(pools[INPUT]); - sp outputMemory = mapMemory(pools[OUTPUT]); - ASSERT_NE(nullptr, inputMemory.get()); - ASSERT_NE(nullptr, outputMemory.get()); - char* inputPtr = reinterpret_cast(static_cast(inputMemory->getPointer())); - char* outputPtr = reinterpret_cast(static_cast(outputMemory->getPointer())); - ASSERT_NE(nullptr, inputPtr); - ASSERT_NE(nullptr, outputPtr); - inputMemory->update(); - outputMemory->update(); - - // Go through all inputs, copy the values - for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) { - char* begin = (char*)p; - char* end = begin + s; - // TODO: handle more than one input - std::copy(begin, end, inputPtr + inputs_info[index].location.offset); - }); - - inputMemory->commit(); - outputMemory->commit(); - - const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}; - - ErrorStatus executionStatus; - hidl_vec outputShapes; - Timing timing; - switch (executor) { - case Executor::ASYNC: { - SCOPED_TRACE("asynchronous"); - - // launch execution - sp executionCallback = new ExecutionCallback(); - ASSERT_NE(nullptr, executionCallback.get()); - Return executionLaunchStatus = - ExecutePreparedModel(preparedModel, request, measure, executionCallback); - ASSERT_TRUE(executionLaunchStatus.isOk()); - EXPECT_EQ(ErrorStatus::NONE, static_cast(executionLaunchStatus)); - - // retrieve execution status - executionCallback->wait(); - executionStatus = executionCallback->getStatus(); - outputShapes = executionCallback->getOutputShapes(); - timing = executionCallback->getTiming(); - - break; - } - case Executor::SYNC: { - SCOPED_TRACE("synchronous"); - - // execute - Return executionReturnStatus = ExecutePreparedModel( - preparedModel, request, measure, &outputShapes, &timing); - ASSERT_TRUE(executionReturnStatus.isOk()); - executionStatus = static_cast(executionReturnStatus); - - break; - } - case Executor::BURST: { - SCOPED_TRACE("burst"); - - // create burst - const std::shared_ptr<::android::nn::ExecutionBurstController> controller = - CreateBurst(preparedModel); - ASSERT_NE(nullptr, controller.get()); - - // create memory keys - std::vector keys(request.pools.size()); - for (size_t i = 0; i < keys.size(); ++i) { - keys[i] = reinterpret_cast(&request.pools[i]); - } - - // execute burst - std::tie(executionStatus, outputShapes, timing) = - controller->compute(request, measure, keys); - - break; - } - } - - if (outputType != OutputType::FULLY_SPECIFIED && - executionStatus == ErrorStatus::GENERAL_FAILURE) { - LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " - "execute model that it does not support."; - 
std::cout << "[ ] Early termination of test because vendor service cannot " - "execute model that it does not support." - << std::endl; - GTEST_SKIP(); - } - if (measure == MeasureTiming::NO) { - EXPECT_EQ(UINT64_MAX, timing.timeOnDevice); - EXPECT_EQ(UINT64_MAX, timing.timeInDriver); - } else { - if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) { - EXPECT_LE(timing.timeOnDevice, timing.timeInDriver); - } - } - - switch (outputType) { - case OutputType::FULLY_SPECIFIED: - // If the model output operands are fully specified, outputShapes must be either - // either empty, or have the same number of elements as the number of outputs. - ASSERT_EQ(ErrorStatus::NONE, executionStatus); - ASSERT_TRUE(outputShapes.size() == 0 || - outputShapes.size() == test.operandDimensions.size()); - break; - case OutputType::UNSPECIFIED: - // If the model output operands are not fully specified, outputShapes must have - // the same number of elements as the number of outputs. - ASSERT_EQ(ErrorStatus::NONE, executionStatus); - ASSERT_EQ(outputShapes.size(), test.operandDimensions.size()); - break; - case OutputType::INSUFFICIENT: - ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus); - ASSERT_EQ(outputShapes.size(), test.operandDimensions.size()); - ASSERT_FALSE(outputShapes[0].isSufficient); - return; - } - // Go through all outputs, overwrite output dimensions with returned output shapes - if (outputShapes.size() > 0) { - for_each(test.operandDimensions, - [&outputShapes](int idx, std::vector& dim) { - dim = outputShapes[idx].dimensions; - }); - } - - // validate results - outputMemory->read(); - copy_back(&test, outputs_info, outputPtr); - outputMemory->commit(); - // Filter out don't cares - MixedTyped filtered_golden = filter(golden, is_ignored); - MixedTyped filtered_test = filter(test, is_ignored); - - // We want "close-enough" results for float - compare(filtered_golden, filtered_test, fpAtol, fpRtol); - - if (example.expectedMultinomialDistributionTolerance > 0) { - expectMultinomialDistributionWithinTolerance(test, example); + break; } } -} -void EvaluatePreparedModel(sp& preparedModel, std::function is_ignored, - const std::vector& examples, - bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure, - OutputType outputType) { - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol, - kDefaultRtol, executor, measure, outputType); + + if (outputType != OutputType::FULLY_SPECIFIED && + executionStatus == ErrorStatus::GENERAL_FAILURE) { + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "execute model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "execute model that it does not support." + << std::endl; + GTEST_SKIP(); + } + if (measure == MeasureTiming::NO) { + EXPECT_EQ(UINT64_MAX, timing.timeOnDevice); + EXPECT_EQ(UINT64_MAX, timing.timeInDriver); + } else { + if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) { + EXPECT_LE(timing.timeOnDevice, timing.timeInDriver); + } + } + + switch (outputType) { + case OutputType::FULLY_SPECIFIED: + // If the model output operands are fully specified, outputShapes must be either + // either empty, or have the same number of elements as the number of outputs. 
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus); + ASSERT_TRUE(outputShapes.size() == 0 || + outputShapes.size() == testModel.outputIndexes.size()); + break; + case OutputType::UNSPECIFIED: + // If the model output operands are not fully specified, outputShapes must have + // the same number of elements as the number of outputs. + ASSERT_EQ(ErrorStatus::NONE, executionStatus); + ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size()); + break; + case OutputType::INSUFFICIENT: + ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus); + ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size()); + ASSERT_FALSE(outputShapes[0].isSufficient); + return; + } + + // Go through all outputs, check returned output shapes. + for (uint32_t i = 0; i < outputShapes.size(); i++) { + EXPECT_TRUE(outputShapes[i].isSufficient); + const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions; + const std::vector actual = outputShapes[i].dimensions; + EXPECT_EQ(expect, actual); + } + + // Retrieve execution results. + const std::vector outputs = getOutputBuffers(request); + + // We want "close-enough" results. + checkResults(testModel, outputs); } -void EvaluatePreparedModel(sp& preparedModel, std::function is_ignored, - const std::vector& examples, - bool hasRelaxedFloat32Model, bool testDynamicOutputShape) { +void EvaluatePreparedModel(const sp& preparedModel, const TestModel& testModel, + bool testDynamicOutputShape) { if (testDynamicOutputShape) { - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT); + EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO, + OutputType::UNSPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO, + OutputType::UNSPECIFIED); + 
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO, + OutputType::UNSPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES, + OutputType::UNSPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES, + OutputType::UNSPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES, + OutputType::UNSPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO, + OutputType::INSUFFICIENT); + EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO, + OutputType::INSUFFICIENT); + EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO, + OutputType::INSUFFICIENT); + EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES, + OutputType::INSUFFICIENT); + EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES, + OutputType::INSUFFICIENT); + EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES, + OutputType::INSUFFICIENT); } else { - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED); - EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, - Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO, + OutputType::FULLY_SPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO, + OutputType::FULLY_SPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO, + OutputType::FULLY_SPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES, + OutputType::FULLY_SPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES, + OutputType::FULLY_SPECIFIED); + EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES, + OutputType::FULLY_SPECIFIED); } } @@ -411,7 +384,6 @@ void PrepareModel(const sp& device, const Model& model, // launch prepare model sp preparedModelCallback = new PreparedModelCallback(); - ASSERT_NE(nullptr, preparedModelCallback.get()); Return prepareLaunchStatus = device->prepareModel_1_2( model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec(), hidl_vec(), HidlToken(), preparedModelCallback); @@ -438,17 +410,18 @@ void PrepareModel(const sp& device, const Model& model, ASSERT_NE(nullptr, preparedModel->get()); } -void Execute(const sp& device, std::function create_model, - std::function is_ignored, const std::vector& examples, - bool testDynamicOutputShape) { - Model model = create_model(); +void Execute(const sp& device, const TestModel& testModel, bool testDynamicOutputShape) { 
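+    // Convert the test specification into a HIDL model. When testing dynamic output
+    // shapes, the known output dimensions are zeroed out so that the driver has to
+    // deduce them at execution time.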
+ Model model = createModel(testModel); + if (testDynamicOutputShape) { + makeOutputDimensionsUnspecified(&model); + } + sp preparedModel = nullptr; PrepareModel(device, model, &preparedModel); if (preparedModel == nullptr) { GTEST_SKIP(); } - EvaluatePreparedModel(preparedModel, is_ignored, examples, - model.relaxComputationFloat32toFloat16, testDynamicOutputShape); + EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape); } } // namespace generated_tests diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h index 0ecbe7e76a..de45242ac7 100644 --- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h +++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h @@ -30,18 +30,15 @@ namespace neuralnetworks { namespace V1_2 { namespace generated_tests { -using ::test_helper::MixedTypedExample; +Model createModel(const ::test_helper::TestModel& testModel); void PrepareModel(const sp& device, const V1_2::Model& model, sp* preparedModel); -void EvaluatePreparedModel(sp& preparedModel, - std::function is_ignored, - const std::vector& examples, - bool hasRelaxedFloat32Model, bool testDynamicOutputShape); +void EvaluatePreparedModel(const sp& preparedModel, + const ::test_helper::TestModel& testModel, bool testDynamicOutputShape); -void Execute(const sp& device, std::function create_model, - std::function is_ignored, const std::vector& examples, +void Execute(const sp& device, const ::test_helper::TestModel& testModel, bool testDynamicOutputShape = false); } // namespace generated_tests diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.h b/neuralnetworks/1.2/vts/functional/GeneratedTests.h index 98420364b2..a72360941e 100644 --- a/neuralnetworks/1.2/vts/functional/GeneratedTests.h +++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.h @@ -14,21 +14,11 @@ * limitations under the License. 
 */

-#include
-#include
-
+#include "1.0/Utils.h"
 #include "GeneratedTestHarness.h"
-#include "MemoryUtils.h"
 #include "TestHarness.h"
-#include "Utils.h"
 #include "VtsHalNeuralnetworks.h"

-namespace android::hardware::neuralnetworks::V1_2::vts::functional {
-
-std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
-
-}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
-
 namespace android::hardware::neuralnetworks::V1_2::generated_tests {

 using namespace ::android::hardware::neuralnetworks::V1_2::vts::functional;
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 06103bc570..816f861129 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -238,7 +238,7 @@ static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver*

 ///////////////////////// BURST VALIDATION TESTS ////////////////////////////////////

 static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
-                                       const std::vector<Request>& requests) {
+                                       const Request& request) {
    // create burst
    std::unique_ptr<RequestChannelSender> sender;
    std::unique_ptr<ResultChannelReceiver> receiver;
@@ -249,35 +249,32 @@ static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
    ASSERT_NE(nullptr, receiver.get());
    ASSERT_NE(nullptr, context.get());

-    // validate each request
-    for (const Request& request : requests) {
-        // load memory into callback slots
-        std::vector<intptr_t> keys;
-        keys.reserve(request.pools.size());
-        std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
-                       [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
-        const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+    // load memory into callback slots
+    std::vector<intptr_t> keys;
+    keys.reserve(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                   [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+    const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);

-        // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
-        // subsequent slot validation testing)
-        ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
-            return slot != std::numeric_limits<int32_t>::max();
-        }));
+    // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
+    // subsequent slot validation testing)
+    ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+        return slot != std::numeric_limits<int32_t>::max();
+    }));

-        // serialize the request
-        const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
+    // serialize the request
+    const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);

-        // validations
-        removeDatumTest(sender.get(), receiver.get(), serialized);
-        addDatumTest(sender.get(), receiver.get(), serialized);
-        mutateDatumTest(sender.get(), receiver.get(), serialized);
-    }
+    // validations
+    removeDatumTest(sender.get(), receiver.get(), serialized);
+    addDatumTest(sender.get(), receiver.get(), serialized);
+    mutateDatumTest(sender.get(), receiver.get(), serialized);
 }

 // This test validates that when the Result message size exceeds the length of the
 // result FMQ, the service instance gracefully fails and returns an error.
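// It first runs the request on a burst with a regular-sized result channel to learn the
// serialized result size, then replays it on a burst whose result FMQ is deliberately too
// small to hold that result.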
static void validateBurstFmqLength(const sp& preparedModel, - const std::vector& requests) { + const Request& request) { // create regular burst std::shared_ptr controllerRegular; ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength( @@ -290,43 +287,40 @@ static void validateBurstFmqLength(const sp& preparedModel, preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall)); ASSERT_NE(nullptr, controllerSmall.get()); - // validate each request - for (const Request& request : requests) { - // load memory into callback slots - std::vector keys(request.pools.size()); - for (size_t i = 0; i < keys.size(); ++i) { - keys[i] = reinterpret_cast(&request.pools[i]); - } - - // collect serialized result by running regular burst - const auto [statusRegular, outputShapesRegular, timingRegular] = - controllerRegular->compute(request, MeasureTiming::NO, keys); - - // skip test if regular burst output isn't useful for testing a failure - // caused by having too small of a length for the result FMQ - const std::vector serialized = - ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular); - if (statusRegular != ErrorStatus::NONE || - serialized.size() <= kExecutionBurstChannelSmallLength) { - continue; - } - - // by this point, execution should fail because the result channel isn't - // large enough to return the serialized result - const auto [statusSmall, outputShapesSmall, timingSmall] = - controllerSmall->compute(request, MeasureTiming::NO, keys); - EXPECT_NE(ErrorStatus::NONE, statusSmall); - EXPECT_EQ(0u, outputShapesSmall.size()); - EXPECT_TRUE(badTiming(timingSmall)); + // load memory into callback slots + std::vector keys(request.pools.size()); + for (size_t i = 0; i < keys.size(); ++i) { + keys[i] = reinterpret_cast(&request.pools[i]); } + + // collect serialized result by running regular burst + const auto [statusRegular, outputShapesRegular, timingRegular] = + controllerRegular->compute(request, MeasureTiming::NO, keys); + + // skip test if regular burst output isn't useful for testing a failure + // caused by having too small of a length for the result FMQ + const std::vector serialized = + ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular); + if (statusRegular != ErrorStatus::NONE || + serialized.size() <= kExecutionBurstChannelSmallLength) { + return; + } + + // by this point, execution should fail because the result channel isn't + // large enough to return the serialized result + const auto [statusSmall, outputShapesSmall, timingSmall] = + controllerSmall->compute(request, MeasureTiming::NO, keys); + EXPECT_NE(ErrorStatus::NONE, statusSmall); + EXPECT_EQ(0u, outputShapesSmall.size()); + EXPECT_TRUE(badTiming(timingSmall)); } ///////////////////////////// ENTRY POINT ////////////////////////////////// void ValidationTest::validateBurst(const sp& preparedModel, - const std::vector& requests) { - ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, requests)); - ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, requests)); + const Request& request) { + ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, request)); + ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request)); } } // namespace functional diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp index cf5905f688..13d45e4a1a 100644 --- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp +++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp @@ 
-16,14 +16,9 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" -#include -#include -#include - #include "1.0/Utils.h" #include "1.2/Callbacks.h" #include "ExecutionBurstController.h" -#include "MemoryUtils.h" #include "TestHarness.h" #include "Utils.h" #include "VtsHalNeuralnetworks.h" @@ -35,12 +30,7 @@ namespace V1_2 { namespace vts { namespace functional { -using ::android::hardware::neuralnetworks::V1_0::RequestArgument; using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback; -using ::android::hidl::memory::V1_0::IMemory; -using test_helper::for_all; -using test_helper::MixedTyped; -using test_helper::MixedTypedExample; ///////////////////////// UTILITY FUNCTIONS ///////////////////////// @@ -161,119 +151,23 @@ static void removeOutputTest(const sp& preparedModel, const Requ ///////////////////////////// ENTRY POINT ////////////////////////////////// -std::vector createRequests(const std::vector& examples) { - const uint32_t INPUT = 0; - const uint32_t OUTPUT = 1; - - std::vector requests; - - for (auto& example : examples) { - const MixedTyped& inputs = example.operands.first; - const MixedTyped& outputs = example.operands.second; - - std::vector inputs_info, outputs_info; - uint32_t inputSize = 0, outputSize = 0; - - // This function only partially specifies the metadata (vector of RequestArguments). - // The contents are copied over below. - for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) { - if (inputs_info.size() <= static_cast(index)) inputs_info.resize(index + 1); - RequestArgument arg = { - .location = {.poolIndex = INPUT, - .offset = 0, - .length = static_cast(s)}, - .dimensions = {}, - }; - RequestArgument arg_empty = { - .hasNoValue = true, - }; - inputs_info[index] = s ? arg : arg_empty; - inputSize += s; - }); - // Compute offset for inputs 1 and so on - { - size_t offset = 0; - for (auto& i : inputs_info) { - if (!i.hasNoValue) i.location.offset = offset; - offset += i.location.length; - } - } - - // Go through all outputs, initialize RequestArgument descriptors - for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) { - if (outputs_info.size() <= static_cast(index)) outputs_info.resize(index + 1); - RequestArgument arg = { - .location = {.poolIndex = OUTPUT, - .offset = 0, - .length = static_cast(s)}, - .dimensions = {}, - }; - outputs_info[index] = arg; - outputSize += s; - }); - // Compute offset for outputs 1 and so on - { - size_t offset = 0; - for (auto& i : outputs_info) { - i.location.offset = offset; - offset += i.location.length; - } - } - std::vector pools = {nn::allocateSharedMemory(inputSize), - nn::allocateSharedMemory(outputSize)}; - if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) { - return {}; - } - - // map pool - sp inputMemory = mapMemory(pools[INPUT]); - if (inputMemory == nullptr) { - return {}; - } - char* inputPtr = reinterpret_cast(static_cast(inputMemory->getPointer())); - if (inputPtr == nullptr) { - return {}; - } - - // initialize pool - inputMemory->update(); - for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) { - char* begin = (char*)p; - char* end = begin + s; - // TODO: handle more than one input - std::copy(begin, end, inputPtr + inputs_info[index].location.offset); - }); - inputMemory->commit(); - - requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools}); - } - - return requests; -} - -void ValidationTest::validateRequests(const sp& preparedModel, - const std::vector& requests) { - // validate each request - for 
(const Request& request : requests) { - removeInputTest(preparedModel, request); - removeOutputTest(preparedModel, request); - } +void ValidationTest::validateRequest(const sp& preparedModel, + const Request& request) { + removeInputTest(preparedModel, request); + removeOutputTest(preparedModel, request); } void ValidationTest::validateRequestFailure(const sp& preparedModel, - const std::vector& requests) { - for (const Request& request : requests) { - SCOPED_TRACE("Expecting request to fail [executeSynchronously]"); - Return executeStatus = preparedModel->executeSynchronously( - request, MeasureTiming::NO, - [](ErrorStatus error, const hidl_vec& outputShapes, - const Timing& timing) { - ASSERT_NE(ErrorStatus::NONE, error); - EXPECT_EQ(outputShapes.size(), 0); - EXPECT_TRUE(badTiming(timing)); - }); - ASSERT_TRUE(executeStatus.isOk()); - } + const Request& request) { + SCOPED_TRACE("Expecting request to fail [executeSynchronously]"); + Return executeStatus = preparedModel->executeSynchronously( + request, MeasureTiming::NO, + [](ErrorStatus error, const hidl_vec& outputShapes, const Timing& timing) { + ASSERT_NE(ErrorStatus::NONE, error); + EXPECT_EQ(outputShapes.size(), 0); + EXPECT_TRUE(badTiming(timing)); + }); + ASSERT_TRUE(executeStatus.isOk()); } } // namespace functional diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp index bd24edc249..eb52110548 100644 --- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp +++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp @@ -126,7 +126,7 @@ void NeuralnetworksHidlTest::TearDown() { ::testing::VtsHalHidlTargetTestBase::TearDown(); } -void ValidationTest::validateEverything(const Model& model, const std::vector& requests) { +void ValidationTest::validateEverything(const Model& model, const Request& request) { validateModel(model); // create IPreparedModel @@ -136,11 +136,11 @@ void ValidationTest::validateEverything(const Model& model, const std::vector& requests) { +void ValidationTest::validateFailure(const Model& model, const Request& request) { // TODO: Should this always succeed? // What if the invalid input is part of the model (i.e., a parameter). 
validateModel(model); @@ -151,7 +151,7 @@ void ValidationTest::validateFailure(const Model& model, const std::vector getPreparedModel_1_2( diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h index 90dfe25312..e76ad7bc64 100644 --- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h +++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h @@ -68,20 +68,16 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { sp device; }; -// Tag for the validation tests class ValidationTest : public NeuralnetworksHidlTest { protected: - void validateEverything(const Model& model, const std::vector& requests); - void validateFailure(const Model& model, const std::vector& requests); + void validateEverything(const Model& model, const Request& request); + void validateFailure(const Model& model, const Request& request); private: void validateModel(const Model& model); - void validateRequests(const sp& preparedModel, - const std::vector& requests); - void validateRequestFailure(const sp& preparedModel, - const std::vector& requests); - void validateBurst(const sp& preparedModel, - const std::vector& requests); + void validateRequest(const sp& preparedModel, const Request& request); + void validateRequestFailure(const sp& preparedModel, const Request& request); + void validateBurst(const sp& preparedModel, const Request& request); }; // Tag for the generated tests