From 616701d3cdb4108a0482cf4539ce7eaff270fc2e Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Tue, 7 Jan 2020 14:52:44 -0800
Subject: [PATCH] Create VTS tests for QoS in NNAPI

Bug: 136739795
Bug: 142902514
Bug: 145300530
Test: mma
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: If3ab91cfb3158e4c33e809ff3b149dff47cda76f
---
 neuralnetworks/1.3/vts/functional/Android.bp  |   1 +
 .../vts/functional/GeneratedTestHarness.cpp   |  10 +-
 .../1.3/vts/functional/GeneratedTestHarness.h |   1 +
 .../vts/functional/QualityOfServiceTests.cpp  | 299 ++++++++++++++++++
 .../1.3/vts/functional/ValidateModel.cpp      |  32 +-
 .../1.3/vts/functional/ValidateRequest.cpp    |  28 +-
 .../vts/functional/VtsHalNeuralnetworks.cpp   |  25 +-
 7 files changed, 375 insertions(+), 21 deletions(-)
 create mode 100644 neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp

diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index e7a9fd34c3..ce2d3a917a 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -40,6 +40,7 @@ cc_test {
         "BasicTests.cpp",
         "CompilationCachingTests.cpp",
         "GeneratedTestHarness.cpp",
+        "QualityOfServiceTests.cpp",
         "TestAssertions.cpp",
         "ValidateBurst.cpp",
         "ValidateModel.cpp",
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 82e63ac546..a2c0c4efa0 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -45,6 +45,7 @@
 
 #include "1.0/Utils.h"
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "ExecutionBurstController.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
@@ -714,7 +715,8 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
         } break;
         case TestKind::QUANTIZATION_COUPLING: {
             ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
-            createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
+            createPreparedModel(device, model, &preparedModel,
+                                /*reportSkipping*/ false);
             TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
             sp<IPreparedModel> preparedCoupledModel;
             createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
@@ -743,6 +745,12 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
 void GeneratedTestBase::SetUp() {
     testing::TestWithParam<GeneratedTestParam>::SetUp();
     ASSERT_NE(kDevice, nullptr);
+
+    const Return<void> ret =
+            kDevice->supportsDeadlines([this](bool prepareModelDeadline, bool executionDeadline) {
+                mSupportsDeadlines = {prepareModelDeadline, executionDeadline};
+            });
+    ASSERT_TRUE(ret.isOk());
 }
 
 std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 2273e3bfe4..fe695b471d 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -36,6 +36,7 @@ class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
     void SetUp() override;
     const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
     const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
+    std::pair<bool, bool> mSupportsDeadlines;
 };
 
 using FilterFn = std::function<bool(const test_helper::TestModel&)>;
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
new file mode 100644
index 0000000000..62ffcda036
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "1.0/Utils.h"
+#include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
+#include "GeneratedTestHarness.h"
+#include "Utils.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using implementation::ExecutionCallback;
+using implementation::PreparedModelCallback;
+using test_helper::TestBuffer;
+using test_helper::TestModel;
+using V1_1::ExecutionPreference;
+using V1_2::MeasureTiming;
+using V1_2::OutputShape;
+using V1_2::Timing;
+
+using HidlToken =
+        hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+enum class DeadlineBoundType { NOW, UNLIMITED };
+constexpr std::array deadlineBounds = {DeadlineBoundType::NOW, DeadlineBoundType::UNLIMITED};
+std::string toString(DeadlineBoundType type) {
+    switch (type) {
+        case DeadlineBoundType::NOW:
+            return "NOW";
+        case DeadlineBoundType::UNLIMITED:
+            return "UNLIMITED";
+    }
+    LOG(FATAL) << "Unrecognized DeadlineBoundType: " << static_cast<int>(type);
+    return {};
+}
+
+using Results = std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>;
+using MaybeResults = std::optional<Results>;
+
+using ExecutionFunction =
+        std::function<MaybeResults(const sp<IPreparedModel>& preparedModel, const Request& request,
+                                   DeadlineBoundType deadlineBound)>;
+
+static OptionalTimePoint makeOptionalTimePoint(DeadlineBoundType deadlineBoundType) {
+    OptionalTimePoint deadline;
+    switch (deadlineBoundType) {
+        case DeadlineBoundType::NOW: {
+            const auto currentTime = std::chrono::steady_clock::now();
+            const auto currentTimeInNanoseconds =
+                    std::chrono::time_point_cast<std::chrono::nanoseconds>(currentTime);
+            const uint64_t nanosecondsSinceEpoch =
+                    currentTimeInNanoseconds.time_since_epoch().count();
+            deadline.nanoseconds(nanosecondsSinceEpoch);
+        } break;
+        case DeadlineBoundType::UNLIMITED: {
+            uint64_t unlimited = std::numeric_limits<uint64_t>::max();
+            deadline.nanoseconds(unlimited);
+        } break;
+    }
+    return deadline;
+}
+
+void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority priority,
+                         std::optional<DeadlineBoundType> deadlineBound) {
+    OptionalTimePoint deadline;
+    if (deadlineBound.has_value()) {
+        deadline = makeOptionalTimePoint(deadlineBound.value());
+    }
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    const Return<void> supportedCall = device->getSupportedOperations_1_3(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedCall.isOk());
+
+    // launch prepare model
+    const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, priority, deadline,
+            hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    const sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
+    const sp<IPreparedModel> preparedModel =
+            IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+
+    // The getSupportedOperations_1_3 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_3 is called, and
+    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel.get());
+        return;
+    }
+
+    // verify return status
+    if (!deadlineBound.has_value()) {
+        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    } else {
+        switch (deadlineBound.value()) {
+            case DeadlineBoundType::NOW:
+                // If the execution was launched with a deadline of NOW, the
+                // deadline has already passed when the driver would launch the
+                // execution. In this case, the driver must return
+                // MISSED_DEADLINE_*.
+                EXPECT_TRUE(prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                            prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+                break;
+            case DeadlineBoundType::UNLIMITED:
+                // If an unlimited deadline is supplied, we expect the execution to
+                // proceed normally. In this case, check it normally by breaking out
+                // of the switch statement.
+                EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+                break;
+        }
+    }
+    ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);
+}
+
+void runPrepareModelTests(const sp<IDevice>& device, const Model& model,
+                          bool supportsPrepareModelDeadline) {
+    // test priority
+    for (auto priority : hidl_enum_range<Priority>{}) {
+        SCOPED_TRACE("priority: " + toString(priority));
+        if (priority == kDefaultPriority) continue;
+        runPrepareModelTest(device, model, priority, {});
+    }
+
+    // test deadline
+    if (supportsPrepareModelDeadline) {
+        for (auto deadlineBound : deadlineBounds) {
+            SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
+            runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
+        }
+    }
+}
+
+static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedModel,
+                                          const Request& request, DeadlineBoundType deadlineBound) {
+    SCOPED_TRACE("asynchronous");
+    const MeasureTiming measure = MeasureTiming::NO;
+    const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
+
+    // launch execution
+    const sp<ExecutionCallback> callback = new ExecutionCallback();
+    Return<ErrorStatus> ret = preparedModel->execute_1_3(request, measure, deadline, callback);
+    EXPECT_TRUE(ret.isOk());
+    EXPECT_EQ(ErrorStatus::NONE, ret.withDefault(ErrorStatus::GENERAL_FAILURE));
+    if (!ret.isOk() || ret != ErrorStatus::NONE) return std::nullopt;
+
+    // retrieve execution results
+    callback->wait();
+    const ErrorStatus status = callback->getStatus();
+    hidl_vec<OutputShape> outputShapes = callback->getOutputShapes();
+    const Timing timing = callback->getTiming();
+
+    // return results
+    return Results{status, std::move(outputShapes), timing};
+}
+
+static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel,
+                                         const Request& request, DeadlineBoundType deadlineBound) {
+    SCOPED_TRACE("synchronous");
+    const MeasureTiming measure = MeasureTiming::NO;
+    const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
+
+    // configure results callback
+    MaybeResults results;
+    const auto cb = [&results](const auto&... args) { *results = {args...}; };
+
+    // run execution
+    const Return<void> ret =
+            preparedModel->executeSynchronously_1_3(request, measure, deadline, cb);
+    EXPECT_TRUE(ret.isOk());
+    if (!ret.isOk()) return std::nullopt;
+
+    // return results
+    return results;
+}
+
+void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+                      const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
+    const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
+
+    // Perform execution and unpack results.
+    const auto results = execute(preparedModel, request, deadlineBound);
+    if (!results.has_value()) return;
+    const auto& [status, outputShapes, timing] = results.value();
+
+    // Verify no timing information was returned
+    EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+    EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+
+    // Validate deadline information if applicable.
+    switch (deadlineBound) {
+        case DeadlineBoundType::NOW:
+            // If the execution was launched with a deadline of NOW, the
+            // deadline has already passed when the driver would launch the
+            // execution. In this case, the driver must return
+            // MISSED_DEADLINE_*.
+            ASSERT_TRUE(status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                        status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+            return;
+        case DeadlineBoundType::UNLIMITED:
+            // If an unlimited deadline is supplied, we expect the execution to
+            // proceed normally. In this case, check it normally by breaking out
+            // of the switch statement.
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            break;
+    }
+
+    // If the model output operands are fully specified, outputShapes must be
+    // either empty, or have the same number of elements as the number of outputs.
+    ASSERT_TRUE(outputShapes.size() == 0 || outputShapes.size() == testModel.outputIndexes.size());
+
+    // Go through all outputs, check returned output shapes.
+    for (uint32_t i = 0; i < outputShapes.size(); i++) {
+        EXPECT_TRUE(outputShapes[i].isSufficient);
+        const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
+        const std::vector<uint32_t> actual = outputShapes[i].dimensions;
+        EXPECT_EQ(expect, actual);
+    }
+
+    // Retrieve execution results.
+    ASSERT_TRUE(nn::compliantWithV1_0(request));
+    const V1_0::Request request10 = nn::convertToV1_0(request);
+    const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
+
+    // We want "close-enough" results.
+    checkResults(testModel, outputs);
+}
+
+void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+                       const Request& request) {
+    for (bool synchronous : {false, true}) {
+        for (auto deadlineBound : deadlineBounds) {
+            runExecutionTest(preparedModel, testModel, request, synchronous, deadlineBound);
+        }
+    }
+}
+
+void runTests(const sp<IDevice>& device, const TestModel& testModel,
+              std::pair<bool, bool> supportsDeadlines) {
+    // setup
+    const auto [supportsPrepareModelDeadline, supportsExecutionDeadline] = supportsDeadlines;
+    if (!supportsPrepareModelDeadline && !supportsExecutionDeadline) return;
+    const Model model = createModel(testModel);
+
+    // run prepare model tests
+    runPrepareModelTests(device, model, supportsPrepareModelDeadline);
+
+    if (supportsExecutionDeadline) {
+        // prepare model
+        sp<IPreparedModel> preparedModel;
+        createPreparedModel(device, model, &preparedModel);
+        if (preparedModel == nullptr) return;
+
+        // run execution tests
+        const Request request = nn::convertToV1_3(createRequest(testModel));
+        runExecutionTests(preparedModel, testModel, request);
+    }
+}
+
+class DeadlineTest : public GeneratedTestBase {};
+
+TEST_P(DeadlineTest, Test) {
+    runTests(kDevice, kTestModel, mSupportsDeadlines);
+}
+
+INSTANTIATE_GENERATED_TEST(DeadlineTest,
+                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+
+}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 43e53ef55d..a21142880e 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -44,12 +44,18 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
 }
 
 static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
-                                 const Model& model, ExecutionPreference preference) {
+                                 const Model& model, ExecutionPreference preference,
+                                 bool testDeadline) {
     SCOPED_TRACE(message + " [prepareModel_1_3]");
 
+    OptionalTimePoint deadline;
+    if (testDeadline) {
+        deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
+    }
+
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
-            model, preference, kDefaultPriority, {}, hidl_vec<hidl_handle>(),
+            model, preference, kDefaultPriority, deadline, hidl_vec<hidl_handle>(),
             hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -73,12 +79,13 @@ static bool validExecutionPreference(ExecutionPreference preference) {
 // to the model does not leave this function.
 static void validate(const sp<IDevice>& device, const std::string& message, Model model,
                      const std::function<void(Model*)>& mutation,
-                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
+                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER,
+                     bool testDeadline = false) {
     mutation(&model);
-    if (validExecutionPreference(preference)) {
+    if (validExecutionPreference(preference) && !testDeadline) {
         validateGetSupportedOperations(device, message, model);
     }
-    validatePrepareModel(device, message, model, preference);
+    validatePrepareModel(device, message, model, preference, testDeadline);
 }
 
 static uint32_t addOperand(Model* model) {
@@ -714,9 +721,19 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model
     }
 }
 
+///////////////////////// DEADLINE /////////////////////////
+
+static void deadlineTest(const sp<IDevice>& device, const Model& model) {
+    const std::string message = "deadlineTest: deadline not supported";
+    const auto noop = [](Model*) {};
+    validate(device, message, model, noop, ExecutionPreference::FAST_SINGLE_ANSWER,
+             /*testDeadline=*/true);
+}
+
 ////////////////////////// ENTRY POINT //////////////////////////////
 
-void validateModel(const sp<IDevice>& device, const Model& model) {
+void validateModel(const sp<IDevice>& device, const Model& model,
+                   bool prepareModelDeadlineSupported) {
     mutateOperandTypeTest(device, model);
     mutateOperandRankTest(device, model);
     mutateOperandScaleTest(device, model);
@@ -732,6 +749,9 @@ void validateModel(const sp<IDevice>& device, const Model& model) {
     addOperationInputTest(device, model);
     addOperationOutputTest(device, model);
     mutateExecutionPreferenceTest(device, model);
+    if (!prepareModelDeadlineSupported) {
+        deadlineTest(device, model);
+    }
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 9fb4c6e55b..be4112ac2d 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -43,7 +43,8 @@ static bool badTiming(Timing timing) {
 // that use the request. Note that the request here is passed by value, and any
 // mutation to the request does not leave this function.
 static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
-                     Request request, const std::function<void(Request*)>& mutation) {
+                     Request request, const std::function<void(Request*)>& mutation,
+                     bool testDeadline = false) {
     mutation(&request);
 
     // We'd like to test both with timing requested and without timing
@@ -56,13 +57,18 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     };
     MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
 
+    OptionalTimePoint deadline;
+    if (testDeadline) {
+        deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
+    }
+
     // asynchronous
     {
         SCOPED_TRACE(message + " [execute_1_3]");
 
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         Return<ErrorStatus> executeLaunchStatus =
-                preparedModel->execute_1_3(request, measure, {}, executionCallback);
+                preparedModel->execute_1_3(request, measure, deadline, executionCallback);
         ASSERT_TRUE(executeLaunchStatus.isOk());
         ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -80,7 +86,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
         SCOPED_TRACE(message + " [executeSynchronously_1_3]");
 
         Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
-                request, measure, {},
+                request, measure, deadline,
                 [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                    const Timing& timing) {
                     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -92,7 +98,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
 
     // burst
     // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
-    {
+    if (!testDeadline) {
         SCOPED_TRACE(message + " [burst]");
 
         ASSERT_TRUE(nn::compliantWithV1_0(request));
@@ -152,11 +158,23 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
     }
 }
 
+///////////////////////// DEADLINE ////////////////////////////////////
+
+static void deadlineTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    const std::string message = "deadlineTest: deadline not supported";
+    const auto noop = [](Request*) {};
+    validate(preparedModel, message, request, noop, /*testDeadline=*/true);
+}
+
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
+                     bool executionDeadlineSupported) {
     removeInputTest(preparedModel, request);
     removeOutputTest(preparedModel, request);
+    if (!executionDeadlineSupported) {
+        deadlineTest(preparedModel, request);
+    }
 }
 
 void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 7a32b0441c..93c8f13c17 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -84,6 +84,7 @@ void createPreparedModel(const sp<IDevice>& device, const Model& model,
                   << std::endl;
         GTEST_SKIP();
     }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel->get());
 }
 
@@ -122,23 +123,27 @@ std::string printNeuralnetworksHidlTest(
 INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
 
 // Forward declaration from ValidateModel.cpp
-void validateModel(const sp<IDevice>& device, const Model& model);
+void validateModel(const sp<IDevice>& device, const Model& model,
+                   bool prepareModelDeadlineSupported);
 // Forward declaration from ValidateRequest.cpp
-void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
+                     bool executionDeadlineSupported);
 // Forward declaration from ValidateRequest.cpp
 void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
 // Forward declaration from ValidateBurst.cpp
 void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
 
-void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
-    validateModel(device, model);
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request,
+                        std::pair<bool, bool> supportsDeadlines) {
+    const auto [prepareModelDeadlineSupported, executionDeadlineSupported] = supportsDeadlines;
+    validateModel(device, model, prepareModelDeadlineSupported);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
     createPreparedModel(device, model, &preparedModel);
     if (preparedModel == nullptr) return;
 
-    validateRequest(preparedModel, request);
+    validateRequest(preparedModel, request, executionDeadlineSupported);
 
     // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
     ASSERT_TRUE(nn::compliantWithV1_0(request));
@@ -146,10 +151,12 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
     validateBurst(preparedModel, request10);
 }
 
-void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
+void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request,
+                     std::pair<bool, bool> supportsDeadlines) {
+    const bool prepareModelDeadlineSupported = supportsDeadlines.first;
     // TODO: Should this always succeed?
     //       What if the invalid input is part of the model (i.e., a parameter).
-    validateModel(device, model);
+    validateModel(device, model, prepareModelDeadlineSupported);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
@@ -163,9 +170,9 @@ TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
     const Request request = nn::convertToV1_3(createRequest(kTestModel));
     if (kTestModel.expectFailure) {
-        validateFailure(kDevice, model, request);
+        validateFailure(kDevice, model, request, mSupportsDeadlines);
     } else {
-        validateEverything(kDevice, model, request);
+        validateEverything(kDevice, model, request, mSupportsDeadlines);
     }
 }
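
Note (not part of the patch): a minimal standalone sketch of the deadline semantics the DeadlineTest cases above enforce on a driver. It assumes only what the tests show -- deadlines are encoded as nanoseconds since the steady_clock epoch (see makeOptionalTimePoint), an unlimited deadline is uint64 max, and an already-expired deadline must produce MISSED_DEADLINE_TRANSIENT or MISSED_DEADLINE_PERSISTENT. The helper names (hasDeadlinePassed, checkDeadline) and the local ErrorStatus subset are illustrative, not taken from the NNAPI headers or any sample driver.

    // Illustrative sketch only -- mirrors what the VTS deadline tests expect,
    // not an actual NNAPI driver implementation.
    #include <chrono>
    #include <cstdint>
    #include <limits>
    #include <optional>

    enum class ErrorStatus { NONE, MISSED_DEADLINE_TRANSIENT, MISSED_DEADLINE_PERSISTENT };

    // Deadline encoded as nanoseconds since the steady_clock epoch, as in
    // makeOptionalTimePoint(); std::nullopt means no deadline was supplied.
    inline bool hasDeadlinePassed(std::optional<uint64_t> deadlineNs) {
        if (!deadlineNs.has_value()) return false;  // no deadline: never late
        if (*deadlineNs == std::numeric_limits<uint64_t>::max()) return false;  // UNLIMITED bound
        const auto now = std::chrono::steady_clock::now().time_since_epoch();
        const uint64_t nowNs =
                std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
        return nowNs >= *deadlineNs;
    }

    // A driver could run this check at the start of prepareModel_1_3 or
    // execute_1_3: the NOW bound must be rejected with MISSED_DEADLINE_*
    // (either transient or persistent is accepted by the tests), while the
    // UNLIMITED bound must let the call proceed normally.
    inline ErrorStatus checkDeadline(std::optional<uint64_t> deadlineNs) {
        return hasDeadlinePassed(deadlineNs) ? ErrorStatus::MISSED_DEADLINE_TRANSIENT
                                             : ErrorStatus::NONE;
    }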