diff --git a/current.txt b/current.txt
index 8d14fa3609..8dfe34e685 100644
--- a/current.txt
+++ b/current.txt
@@ -673,11 +673,11 @@ ddcf89cd8ee2df0d32aee55050826446fb64f7aafde0a7cd946c64f61b1a364c android.hardwar
 df9c79c4fdde2821550c6d5c3d07f5ec0adfb1b702561ce543c906ddef698703 android.hardware.media.c2@1.1::IComponent
 a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardware.media.c2@1.1::IComponentStore
 65c16331e57f6dd68b3971f06f78fe9e3209afb60630c31705aa355f9a52bf0d android.hardware.neuralnetworks@1.3::IBuffer
-9db064ee44268a876be0367ff771e618362d39ec603b6ecab17e1575725fcd87 android.hardware.neuralnetworks@1.3::IDevice
-4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
-2fa3679ad7c94b5e88724adcd560c561041068a4ca565c63830e68101988746a android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
-43088ffc71945b463a7279262cfe2e290f6ed2f15d3fd6032798a3be299fb08f android.hardware.neuralnetworks@1.3::IPreparedModel
-0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
+278817920bfd5292a7713f97f1832cca53de3de640f7670e413d97c6e7fd581c android.hardware.neuralnetworks@1.3::IDevice
+127ba11efb8220dc3aec9a8f441b59eaf1c68d7f03f577833e1824de75a36b17 android.hardware.neuralnetworks@1.3::IExecutionCallback
+6e904be0ddca5ae1de8eba020e6c38ed935ea7d80cd08f47787f137a0ca58555 android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
+2b0b10d2ea7a18a4048cd0eb83d35c19a817aeee95f65807fc31f4ef21381397 android.hardware.neuralnetworks@1.3::IPreparedModel
+eee3430cc86c97c7b407495863d8fb61da6f1a64b7721e77b9b4909b11b174e9 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
 dd39887aa4fb60ce60ea9cc043edeadbbae6e922d09d3946311b0b410024ae14 android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 c67aaf26a7a40d14ea61e70e20afacbd0bb906df1704d585ac8599fbb69dd44b android.hardware.wifi.hostapd@1.2::IHostapd
diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal
index 79f9c325ac..e0b04a8b62 100644
--- a/neuralnetworks/1.3/IDevice.hal
+++ b/neuralnetworks/1.3/IDevice.hal
@@ -47,19 +47,6 @@ interface IDevice extends @1.2::IDevice {
      */
     getCapabilities_1_3() generates (ErrorStatus status, Capabilities capabilities);
 
-    /**
-     * Returns whether the device is able to complete or abort a task within a
-     * specified duration.
-     *
-     * @return prepareModelDeadline 'true' if the device supports completing or
-     *     aborting model preparation by the deadline when the deadline is supplied,
-     *     'false' otherwise.
-     * @return executionDeadline 'true' if the device supports completing or
-     *     aborting an execution by the deadline when the deadline is supplied,
-     *     'false' otherwise.
-     */
-    supportsDeadlines() generates (bool prepareModelDeadline, bool executionDeadline);
-
     /**
      * Gets the supported operations in a model.
      *
@@ -140,14 +127,10 @@ interface IDevice extends @1.2::IDevice {
      *
      * prepareModel_1_3 can be called with an optional deadline. If the model
      * is not able to be prepared before the provided deadline, the model
-     * preparation must be aborted, and either {@link
+     * preparation may be aborted, and either {@link
      * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
-     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
+     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
      * to an abort must be sent the same way as other errors, described above.
-     * If the service reports that it does not support preparation deadlines via
-     * IDevice::supportsDeadlines, and prepareModel_1_3 is called with a
-     * deadline, then the argument is invalid, and {@link
-     * ErrorStatus::INVALID_ARGUMENT} must be returned.
      *
      * Optionally, the driver may save the prepared model to cache during the
      * asynchronous preparation. Any error that occurs when saving to cache must
@@ -172,9 +155,9 @@ interface IDevice extends @1.2::IDevice {
      *     model.
      * @param priority The priority of the prepared model relative to other
      *     prepared models owned by the client.
-     * @param deadline The time by which the model must be prepared. If the
-     *     model cannot be prepared by the deadline, the preparation must be
-     *     aborted.
+     * @param deadline The time by which the model is expected to be prepared.
+     *     If the model cannot be prepared by the deadline, the preparation may
+     *     be aborted.
      * @param modelCache A vector of handles with each entry holding exactly one
      *     cache file descriptor for the security-sensitive cache. The length of
      *     the vector must either be 0 indicating that caching information is
@@ -209,8 +192,8 @@ interface IDevice extends @1.2::IDevice {
      *     - GENERAL_FAILURE if there is an unspecified error
      *     - INVALID_ARGUMENT if one of the input arguments related to preparing
      *       the model is invalid
-     *     - MISSED_DEADLINE_* if the deadline for preparing a model cannot be
-     *       met
+     *     - MISSED_DEADLINE_* if the preparation is aborted because the model
+     *       cannot be prepared by the deadline
      *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
      */
     prepareModel_1_3(Model model, ExecutionPreference preference,
@@ -262,14 +245,11 @@ interface IDevice extends @1.2::IDevice {
      *
      * prepareModelFromCache_1_3 can be called with an optional deadline. If the
      * model is not able to prepared before the provided deadline, the model
-     * preparation must be aborted, and either {@link
+     * preparation may be aborted, and either {@link
      * ErrorStatus::MISSED_DEADLINE_TRANSIENT}
-     * or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The
+     * or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The
      * error due to an abort must be sent the same way as other errors,
-     * described above. If the service reports that it does not support
-     * preparation deadlines via IDevice::supportsDeadlines, and
-     * prepareModelFromCache_1_3 is called with a deadline, then the argument is
-     * invalid, and {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
+     * described above.
      *
      * The only information that may be unknown to the model at this stage is
      * the shape of the tensors, which may only be known at execution time. As
@@ -279,9 +259,9 @@ interface IDevice extends @1.2::IDevice {
      * used with different shapes of inputs on different (possibly concurrent)
      * executions.
      *
-     * @param deadline The time by which the model must be prepared. If the
-     *     model cannot be prepared by the deadline, the preparation must be
-     *     aborted.
+     * @param deadline The time by which the model is expected to be prepared.
+     *     If the model cannot be prepared by the deadline, the preparation may
+     *     be aborted.
      * @param modelCache A vector of handles with each entry holding exactly one
      *     cache file descriptor for the security-sensitive cache. The length of
      *     the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
@@ -307,8 +287,8 @@ interface IDevice extends @1.2::IDevice {
      *     - GENERAL_FAILURE if caching is not supported or if there is an
      *       unspecified error
      *     - INVALID_ARGUMENT if one of the input arguments is invalid
-     *     - MISSED_DEADLINE_* if the deadline for preparing a model cannot be
-     *       met
+     *     - MISSED_DEADLINE_* if the preparation is aborted because the model
+     *       cannot be prepared by the deadline
      *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
      */
     prepareModelFromCache_1_3(OptionalTimePoint deadline,
diff --git a/neuralnetworks/1.3/IExecutionCallback.hal b/neuralnetworks/1.3/IExecutionCallback.hal
index 439428a5aa..ea11b17c49 100644
--- a/neuralnetworks/1.3/IExecutionCallback.hal
+++ b/neuralnetworks/1.3/IExecutionCallback.hal
@@ -47,7 +47,8 @@ interface IExecutionCallback extends @1.2::IExecutionCallback {
      *                    corresponding output
      *                - INVALID_ARGUMENT if one of the input arguments to
      *                    prepareModel is invalid
-     *                - MISSED_DEADLINE_* if the deadline could not be met
+     *                - MISSED_DEADLINE_* if the execution is aborted because it
+     *                    cannot be completed by the deadline
      *                - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
      * @param outputShapes A list of shape information of model output operands.
      *                     The index into "outputShapes" corresponds with to index
diff --git a/neuralnetworks/1.3/IFencedExecutionCallback.hal b/neuralnetworks/1.3/IFencedExecutionCallback.hal
index 6030809406..949438e148 100644
--- a/neuralnetworks/1.3/IFencedExecutionCallback.hal
+++ b/neuralnetworks/1.3/IFencedExecutionCallback.hal
@@ -38,8 +38,8 @@ interface IFencedExecutionCallback {
      *                - DEVICE_UNAVAILABLE if driver is offline or busy
      *                - GENERAL_FAILURE if the asynchronous task resulted in an
      *                  unspecified error
-     *                - MISSED_DEADLINE_* if the deadline for executing a model
-     *                  cannot be met
+     *                - MISSED_DEADLINE_* if the execution is aborted because it
+     *                  cannot be completed by the deadline
      *                - RESOURCE_EXHAUSTED_* if the task was aborted by the
      *                  driver
      * @return timingLaunched The duration starts when executeFenced is called and ends when
diff --git a/neuralnetworks/1.3/IPreparedModel.hal b/neuralnetworks/1.3/IPreparedModel.hal
index 4ce3691d14..a1814b5156 100644
--- a/neuralnetworks/1.3/IPreparedModel.hal
+++ b/neuralnetworks/1.3/IPreparedModel.hal
@@ -70,14 +70,10 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      *
      * execute_1_3 can be called with an optional deadline. If the execution
      * is not able to be completed before the provided deadline, the execution
-     * must be aborted, and either {@link
+     * may be aborted, and either {@link
      * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
-     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
+     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
      * to an abort must be sent the same way as other errors, described above.
-     * If the service reports that it does not support execution deadlines via
-     * IDevice::supportsDeadlines, and execute_1_3 is called with a deadline,
-     * then the argument is invalid, and {@link ErrorStatus::INVALID_ARGUMENT}
-     * must be returned.
      *
      * Any number of calls to the execute* and executeSynchronously* functions,
      * in any combination, may be made concurrently, even on the same
@@ -89,9 +85,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      *                The duration runs from the time the driver sees the call
      *                to the execute_1_3 function to the time the driver invokes
      *                the callback.
-     * @param deadline The time by which the execution must complete. If the
-     *                 execution cannot be finished by the deadline, the
-     *                 execution must be aborted.
+     * @param deadline The time by which the execution is expected to complete.
+     *                 If the execution cannot be completed by the deadline, the
+     *                 execution may be aborted.
      * @param loopTimeoutDuration The maximum amount of time that should be spent
      *                            executing a {@link OperationType::WHILE}
      *                            operation. If a loop condition model does not
@@ -116,8 +112,8 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      *                not large enough to store the resultant values
      *              - INVALID_ARGUMENT if one of the input arguments is
      *                invalid
-     *              - MISSED_DEADLINE_* if the deadline for executing a model
-     *                cannot be met
+     *              - MISSED_DEADLINE_* if the execution is aborted because it
+     *                cannot be completed by the deadline
      *              - RESOURCE_EXHAUSTED_* if the task was aborted by the
      *                driver
      */
@@ -150,16 +146,12 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      * (ErrorStatus::NONE): There must be no failure unless the device itself is
      * in a bad state.
      *
-     * executeSynchronously_1_3 can be called with an optional deadline. If the
+     * executeSynchronously_1_3 may be called with an optional deadline. If the
      * execution is not able to be completed before the provided deadline, the
-     * execution must be aborted, and either {@link
+     * execution may be aborted, and either {@link
      * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
-     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
+     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
      * to an abort must be sent the same way as other errors, described above.
-     * If the service reports that it does not support execution deadlines via
-     * IDevice::supportsDeadlines, and executeSynchronously_1_3 is called with a
-     * deadline, then the argument is invalid, and
-     * {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
      *
      * Any number of calls to the execute* and executeSynchronously* functions,
      * in any combination, may be made concurrently, even on the same
@@ -171,9 +163,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      *                The duration runs from the time the driver sees the call
      *                to the executeSynchronously_1_3 function to the time the driver
      *                returns from the function.
-     * @param deadline The time by which the execution must complete. If the
-     *                 execution cannot be finished by the deadline, the
-     *                 execution must be aborted.
+     * @param deadline The time by which the execution is expected to complete.
+     *                 If the execution cannot be finished by the deadline, the
+     *                 execution may be aborted.
      * @param loopTimeoutDuration The maximum amount of time that should be spent
      *                            executing a {@link OperationType::WHILE}
      *                            operation. If a loop condition model does not
@@ -194,8 +186,8 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      *                  corresponding output
      *                - INVALID_ARGUMENT if one of the input arguments is
      *                  invalid
-     *                - MISSED_DEADLINE_* if the deadline for executing a model
-     *                  cannot be met
+     *                - MISSED_DEADLINE_* if the execution is aborted because it
+     *                  cannot be completed by the deadline
      *                - RESOURCE_EXHAUSTED_* if the task was aborted by the
      *                  driver
      * @return outputShapes A list of shape information of model output operands.
@@ -236,17 +228,13 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      * any data object referenced by 'request' (described by the
      * {@link @1.0::DataLocation} of a {@link @1.0::RequestArgument}).
      *
-     * executeFenced can be called with an optional deadline and an optional duration.
+     * executeFenced may be called with an optional deadline and an optional duration.
      * If the execution is not able to be completed before the provided deadline or
      * within the timeout duration (measured from when all sync fences in waitFor are
-     * signaled), whichever comes earlier, the execution must be aborted, and either
+     * signaled), whichever comes earlier, the execution may be aborted, and either
      * {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link
-     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The error due
+     * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due
      * to an abort must be sent the same way as other errors, described above.
-     * If the service reports that it does not support execution deadlines via
-     * IDevice::supportsDeadlines, and executeFenced is called with a
-     * deadline or duration, then the argument is invalid, and
-     * {@link ErrorStatus::INVALID_ARGUMENT} must be returned.
      *
      * If any of the sync fences in waitFor changes to error status after the executeFenced
      * call succeeds, or the execution is aborted because it cannot finish before the deadline
@@ -263,9 +251,9 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      * @param waitFor A vector of sync fence file descriptors.
      *                Execution must not start until all sync fences have been signaled.
      * @param measure Specifies whether or not to measure duration of the execution.
-     * @param deadline The time by which the execution must complete. If the
-     *                 execution cannot be finished by the deadline, the
-     *                 execution must be aborted.
+     * @param deadline The time by which the execution is expected to complete.
+     *                 If the execution cannot be finished by the deadline, the
+     *                 execution may be aborted.
      * @param loopTimeoutDuration The maximum amount of time that should be spent
      *                            executing a {@link OperationType::WHILE}
      *                            operation. If a loop condition model does not
@@ -277,18 +265,18 @@ interface IPreparedModel extends @1.2::IPreparedModel {
      *                            LoopTimeoutDurationNs::DEFAULT}. When
      *                            provided, the duration must not exceed {@link
      *                            LoopTimeoutDurationNs::MAXIMUM}.
-     * @param duration The length of time within which the execution must
-     *                 complete after all sync fences in waitFor are signaled. If the
-     *                 execution cannot be finished within the duration, the execution
-     *                 must be aborted.
+     * @param duration The length of time within which the execution is expected
+     *                 to complete after all sync fences in waitFor are signaled.
+     *                 If the execution cannot be finished within the duration,
+     *                 the execution may be aborted.
      * @return status Error status of the call, must be:
      *                - NONE if task is successfully launched
      *                - DEVICE_UNAVAILABLE if driver is offline or busy
      *                - GENERAL_FAILURE if there is an unspecified error
      *                - INVALID_ARGUMENT if one of the input arguments is invalid, including
      *                  fences in error states.
-     *                - MISSED_DEADLINE_* if the deadline for executing a model
-     *                  cannot be met
+     *                - MISSED_DEADLINE_* if the execution is aborted because it
+     *                  cannot be completed by the deadline
      *                - RESOURCE_EXHAUSTED_* if the task was aborted by the
      *                  driver
      * @return syncFence The sync fence that will be signaled when the task is completed.
diff --git a/neuralnetworks/1.3/IPreparedModelCallback.hal b/neuralnetworks/1.3/IPreparedModelCallback.hal
index 11ebbf4ab3..c0d3416439 100644
--- a/neuralnetworks/1.3/IPreparedModelCallback.hal
+++ b/neuralnetworks/1.3/IPreparedModelCallback.hal
@@ -47,8 +47,8 @@ interface IPreparedModelCallback extends @1.2::IPreparedModelCallback {
      *                    unspecified error
      *                - INVALID_ARGUMENT if one of the input arguments to
      *                    prepareModel is invalid
-     *                - MISSED_DEADLINE_* if the deadline for executing a model
-     *                    cannot be met
+     *                - MISSED_DEADLINE_* if the preparation is aborted because
+     *                    the model cannot be prepared by the deadline
      *                - RESOURCE_EXHAUSTED_* if the task was aborted by the
      *                    driver
      * @param preparedModel A model that has been asynchronously prepared for
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index b04abe219b..8c9393b030 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -858,12 +858,6 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind tes
 void GeneratedTestBase::SetUp() {
     testing::TestWithParam<GeneratedTestParam>::SetUp();
     ASSERT_NE(kDevice, nullptr);
-
-    const Return<void> ret =
-            kDevice->supportsDeadlines([this](bool prepareModelDeadline, bool executionDeadline) {
-                mSupportsDeadlines = {prepareModelDeadline, executionDeadline};
-            });
-    ASSERT_TRUE(ret.isOk());
 }
 
 std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index a8db5155b5..834d335f50 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -36,7 +36,6 @@ class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
     void SetUp() override;
     const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
     const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
-    std::pair<bool, bool> mSupportsDeadlines;
 };
 
 using FilterFn = std::function<bool(const test_helper::TestModel&)>;
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index fccc612574..2663500708 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -34,45 +34,52 @@ using V1_2::Timing;
 using HidlToken =
         hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
-enum class DeadlineBoundType { NOW, UNLIMITED };
-constexpr std::array<DeadlineBoundType, 2> deadlineBounds = {DeadlineBoundType::NOW,
-                                                             DeadlineBoundType::UNLIMITED};
+enum class DeadlineBoundType { NOW, UNLIMITED, SHORT };
+constexpr std::array<DeadlineBoundType, 3> deadlineBounds = {
+        DeadlineBoundType::NOW, DeadlineBoundType::UNLIMITED, DeadlineBoundType::SHORT};
 std::string toString(DeadlineBoundType type) {
     switch (type) {
         case DeadlineBoundType::NOW:
             return "NOW";
         case DeadlineBoundType::UNLIMITED:
             return "UNLIMITED";
+        case DeadlineBoundType::SHORT:
+            return "SHORT";
     }
     LOG(FATAL) << "Unrecognized DeadlineBoundType: " << static_cast<int>(type);
     return {};
 }
 
+constexpr auto kShortDuration = std::chrono::milliseconds{5};
+
 using Results = std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>;
 using MaybeResults = std::optional<Results>;
 
 using ExecutionFunction =
         std::function<MaybeResults(const sp<IPreparedModel>& preparedModel, const Request& request,
-                                   DeadlineBoundType deadlineBound)>;
+                                   const OptionalTimePoint& deadline)>;
 
-static OptionalTimePoint makeOptionalTimePoint(DeadlineBoundType deadlineBoundType) {
-    OptionalTimePoint deadline;
+static OptionalTimePoint makeDeadline(DeadlineBoundType deadlineBoundType) {
+    const auto getNanosecondsSinceEpoch = [](const auto& time) -> uint64_t {
+        const auto timeSinceEpoch = time.time_since_epoch();
+        return std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
+    };
+
+    std::chrono::steady_clock::time_point timePoint;
     switch (deadlineBoundType) {
-        case DeadlineBoundType::NOW: {
-            const auto currentTime = std::chrono::steady_clock::now();
-            const auto currentTimeInNanoseconds =
-                    std::chrono::time_point_cast<std::chrono::nanoseconds>(currentTime);
-            const uint64_t nanosecondsSinceEpoch =
-                    currentTimeInNanoseconds.time_since_epoch().count();
-            deadline.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
-        } break;
-        case DeadlineBoundType::UNLIMITED: {
-            const auto maxTime = std::chrono::time_point::max();
-            const uint64_t nanosecondsSinceEpoch = maxTime.time_since_epoch().count();
-            deadline.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
-        } break;
+        case DeadlineBoundType::NOW:
+            timePoint = std::chrono::steady_clock::now();
+            break;
+        case DeadlineBoundType::UNLIMITED:
+            timePoint = std::chrono::steady_clock::time_point::max();
+            break;
+        case DeadlineBoundType::SHORT:
+            timePoint = std::chrono::steady_clock::now() + kShortDuration;
+            break;
     }
+
+    OptionalTimePoint deadline;
+    deadline.nanosecondsSinceEpoch(getNanosecondsSinceEpoch(timePoint));
     return deadline;
 }
 
@@ -80,7 +87,7 @@ void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority
                          std::optional<DeadlineBoundType> deadlineBound) {
     OptionalTimePoint deadline;
     if (deadlineBound.has_value()) {
-        deadline = makeOptionalTimePoint(deadlineBound.value());
+        deadline = makeDeadline(deadlineBound.value());
     }
 
     // see if service can handle model
@@ -131,7 +138,8 @@ void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority
                 // deadline has already passed when the driver would launch the
                 // execution. In this case, the driver must return
                 // MISSED_DEADLINE_*.
-                EXPECT_TRUE(prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
+                            prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                             prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
                 break;
             case DeadlineBoundType::UNLIMITED:
@@ -140,13 +148,19 @@ void runPrepareModelTest(const sp<IDevice>& device, const Model& model, Priority
                 // of the switch statement.
                 EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
                 break;
+            case DeadlineBoundType::SHORT:
+                // Either the driver successfully completed the task in time or
+                // it aborted within the compliance time.
+                EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
+                            prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                            prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+                break;
         }
     }
     ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);
 }
 
-void runPrepareModelTests(const sp<IDevice>& device, const Model& model,
-                          bool supportsPrepareModelDeadline) {
+void runPrepareModelTests(const sp<IDevice>& device, const Model& model) {
     // test priority
     for (auto priority : hidl_enum_range<Priority>{}) {
         SCOPED_TRACE("priority: " + toString(priority));
@@ -155,19 +169,17 @@ void runPrepareModelTests(const sp<IDevice>& device, const Model& model,
     }
 
     // test deadline
-    if (supportsPrepareModelDeadline) {
-        for (auto deadlineBound : deadlineBounds) {
-            SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
-            runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
-        }
+    for (auto deadlineBound : deadlineBounds) {
+        SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
+        runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
     }
 }
 
 static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedModel,
-                                          const Request& request, DeadlineBoundType deadlineBound) {
+                                          const Request& request,
+                                          const OptionalTimePoint& deadline) {
     SCOPED_TRACE("asynchronous");
     const MeasureTiming measure = MeasureTiming::NO;
-    const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
 
     // launch execution
     const sp<ExecutionCallback> callback = new ExecutionCallback();
@@ -187,10 +199,10 @@ static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedMode
 }
 
 static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel,
-                                         const Request& request, DeadlineBoundType deadlineBound) {
+                                         const Request& request,
+                                         const OptionalTimePoint& deadline) {
     SCOPED_TRACE("synchronous");
     const MeasureTiming measure = MeasureTiming::NO;
-    const OptionalTimePoint deadline = makeOptionalTimePoint(deadlineBound);
 
     // configure results callback
     MaybeResults results;
@@ -209,9 +221,10 @@ static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel
 void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
                       const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
     const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
+    const auto deadline = makeDeadline(deadlineBound);
 
     // Perform execution and unpack results.
-    const auto results = execute(preparedModel, request, deadlineBound);
+    const auto results = execute(preparedModel, request, deadline);
     if (!results.has_value()) return;
 
     const auto& [status, outputShapes, timing] = results.value();
@@ -226,7 +239,8 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
             // deadline has already passed when the driver would launch the
             // execution. In this case, the driver must return
             // MISSED_DEADLINE_*.
-            ASSERT_TRUE(status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+            ASSERT_TRUE(status == ErrorStatus::NONE ||
+                        status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                         status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
             return;
         case DeadlineBoundType::UNLIMITED:
@@ -235,6 +249,13 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
             // of the switch statement.
            ASSERT_EQ(ErrorStatus::NONE, status);
            break;
+        case DeadlineBoundType::SHORT:
+            // Either the driver successfully completed the task in time or
+            // it aborted within the compliance time.
+            EXPECT_TRUE(status == ErrorStatus::NONE ||
+                        status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                        status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+            break;
     }
 
     // If the model output operands are fully specified, outputShapes must be either
@@ -268,32 +289,27 @@ void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel&
     }
 }
 
-void runTests(const sp<IDevice>& device, const TestModel& testModel,
-              std::pair<bool, bool> supportsDeadlines) {
+void runTests(const sp<IDevice>& device, const TestModel& testModel) {
     // setup
-    const auto [supportsPrepareModelDeadline, supportsExecutionDeadline] = supportsDeadlines;
-    if (!supportsPrepareModelDeadline && !supportsExecutionDeadline) return;
     const Model model = createModel(testModel);
 
     // run prepare model tests
-    runPrepareModelTests(device, model, supportsPrepareModelDeadline);
+    runPrepareModelTests(device, model);
 
-    if (supportsExecutionDeadline) {
-        // prepare model
-        sp<IPreparedModel> preparedModel;
-        createPreparedModel(device, model, &preparedModel);
-        if (preparedModel == nullptr) return;
+    // prepare model
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
 
-        // run execution tests
-        const Request request = nn::convertToV1_3(createRequest(testModel));
-        runExecutionTests(preparedModel, testModel, request);
-    }
+    // run execution tests
+    const Request request = nn::convertToV1_3(createRequest(testModel));
+    runExecutionTests(preparedModel, testModel, request);
 }
 
 class DeadlineTest : public GeneratedTestBase {};
 
 TEST_P(DeadlineTest, Test) {
-    runTests(kDevice, kTestModel, mSupportsDeadlines);
+    runTests(kDevice, kTestModel);
 }
 
 INSTANTIATE_GENERATED_TEST(DeadlineTest,
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 09e9922a23..8f2d4b7f19 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -44,18 +44,12 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
 }
 
 static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
-                                 const Model& model, ExecutionPreference preference,
-                                 bool testDeadline) {
+                                 const Model& model, ExecutionPreference preference) {
     SCOPED_TRACE(message + " [prepareModel_1_3]");
 
-    OptionalTimePoint deadline;
-    if (testDeadline) {
-        deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
-    }
-
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
-            model, preference, kDefaultPriority, deadline, hidl_vec<hidl_handle>(),
+            model, preference, kDefaultPriority, {}, hidl_vec<hidl_handle>(),
             hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -79,13 +73,12 @@ static bool validExecutionPreference(ExecutionPreference preference) {
 // to the model does not leave this function.
 static void validate(const sp<IDevice>& device, const std::string& message, Model model,
                      const std::function<void(Model*)>& mutation,
-                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER,
-                     bool testDeadline = false) {
+                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
     mutation(&model);
-    if (validExecutionPreference(preference) && !testDeadline) {
+    if (validExecutionPreference(preference)) {
         validateGetSupportedOperations(device, message, model);
     }
-    validatePrepareModel(device, message, model, preference, testDeadline);
+    validatePrepareModel(device, message, model, preference);
 }
 
 static uint32_t addOperand(Model* model) {
@@ -744,19 +737,9 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model
     }
 }
 
-///////////////////////// DEADLINE /////////////////////////
-
-static void deadlineTest(const sp<IDevice>& device, const Model& model) {
-    const std::string message = "deadlineTest: deadline not supported";
-    const auto noop = [](Model*) {};
-    validate(device, message, model, noop, ExecutionPreference::FAST_SINGLE_ANSWER,
-             /*testDeadline=*/true);
-}
-
 ////////////////////////// ENTRY POINT //////////////////////////////
 
-void validateModel(const sp<IDevice>& device, const Model& model,
-                   bool prepareModelDeadlineSupported) {
+void validateModel(const sp<IDevice>& device, const Model& model) {
     mutateOperandTypeTest(device, model);
     mutateOperandRankTest(device, model);
     mutateOperandScaleTest(device, model);
@@ -772,9 +755,6 @@ void validateModel(const sp<IDevice>& device, const Model& model,
     addOperationInputTest(device, model);
     addOperationOutputTest(device, model);
     mutateExecutionPreferenceTest(device, model);
-    if (!prepareModelDeadlineSupported) {
-        deadlineTest(device, model);
-    }
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 20f4fe2c00..5e806e5c9b 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -45,8 +45,7 @@ static bool badTiming(Timing timing) {
 // that use the request. Note that the request here is passed by value, and any
 // mutation to the request does not leave this function.
 static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
-                     Request request, const std::function<void(Request*)>& mutation,
-                     bool testDeadline = false) {
+                     Request request, const std::function<void(Request*)>& mutation) {
     mutation(&request);
 
     // We'd like to test both with timing requested and without timing
@@ -59,18 +58,13 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     };
     MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
 
-    OptionalTimePoint deadline;
-    if (testDeadline) {
-        deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
-    }
-
     // asynchronous
     {
         SCOPED_TRACE(message + " [execute_1_3]");
 
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         Return<ErrorStatus> executeLaunchStatus =
-                preparedModel->execute_1_3(request, measure, deadline, {}, executionCallback);
+                preparedModel->execute_1_3(request, measure, {}, {}, executionCallback);
         ASSERT_TRUE(executeLaunchStatus.isOk());
         ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -88,7 +82,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
         SCOPED_TRACE(message + " [executeSynchronously_1_3]");
 
         Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
-                request, measure, deadline, {},
+                request, measure, {}, {},
                 [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                    const Timing& timing) {
                     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -100,7 +94,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
 
     // burst
     // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
-    if (!testDeadline) {
+    {
         SCOPED_TRACE(message + " [burst]");
 
         ASSERT_TRUE(nn::compliantWithV1_0(request));
@@ -143,7 +137,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
     {
         SCOPED_TRACE(message + " [executeFenced]");
         Return<void> ret =
-                preparedModel->executeFenced(request, {}, MeasureTiming::NO, deadline, {}, {},
+                preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},
                                              [](ErrorStatus error, const hidl_handle& handle,
                                                 const sp<IFencedExecutionCallback>& callback) {
                                                  ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -174,23 +168,11 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
     }
 }
 
-///////////////////////// DEADLINE ////////////////////////////////////
-
-static void deadlineTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
-    const std::string message = "deadlineTest: deadline not supported";
-    const auto noop = [](Request*) {};
-    validate(preparedModel, message, request, noop, /*testDeadline=*/true);
-}
-
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
-                     bool executionDeadlineSupported) {
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
     removeInputTest(preparedModel, request);
     removeOutputTest(preparedModel, request);
-    if (!executionDeadlineSupported) {
-        deadlineTest(preparedModel, request);
-    }
 }
 
 void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 16341dafc7..5b07034296 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -123,11 +123,9 @@ std::string printNeuralnetworksHidlTest(
 INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
 
 // Forward declaration from ValidateModel.cpp
-void validateModel(const sp<IDevice>& device, const Model& model,
-                   bool prepareModelDeadlineSupported);
+void validateModel(const sp<IDevice>& device, const Model& model);
 // Forward declaration from ValidateRequest.cpp
-void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request,
-                     bool executionDeadlineSupported);
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
 // Forward declaration from ValidateRequest.cpp
 void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
 // Forward declaration from ValidateBurst.cpp
@@ -147,17 +145,15 @@ void validateExecuteFenced(const sp<IPreparedModel>& preparedModel, const Reques
     ASSERT_TRUE(ret_null.isOk());
 }
 
-void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request,
-                        std::pair<bool, bool> supportsDeadlines) {
-    const auto [prepareModelDeadlineSupported, executionDeadlineSupported] = supportsDeadlines;
-    validateModel(device, model, prepareModelDeadlineSupported);
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
+    validateModel(device, model);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
     createPreparedModel(device, model, &preparedModel);
     if (preparedModel == nullptr) return;
 
-    validateRequest(preparedModel, request, executionDeadlineSupported);
+    validateRequest(preparedModel, request);
     validateExecuteFenced(preparedModel, request);
 
     // TODO(butlermichael): Check if we need to test burst in V1_3 if the interface remains V1_2.
@@ -166,12 +162,10 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
     validateBurst(preparedModel, request10);
 }
 
-void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request,
-                     std::pair<bool, bool> supportsDeadlines) {
-    const bool prepareModelDeadlineSupported = supportsDeadlines.first;
+void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
     // TODO: Should this always succeed?
     // What if the invalid input is part of the model (i.e., a parameter).
-    validateModel(device, model, prepareModelDeadlineSupported);
+    validateModel(device, model);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
@@ -185,9 +179,9 @@ TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
     const Request request = nn::convertToV1_3(createRequest(kTestModel));
     if (kTestModel.expectFailure) {
-        validateFailure(kDevice, model, request, mSupportsDeadlines);
+        validateFailure(kDevice, model, request);
     } else {
-        validateEverything(kDevice, model, request, mSupportsDeadlines);
+        validateEverything(kDevice, model, request);
     }
 }
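
The net effect of this patch is that the deadline passed to prepareModel_1_3, prepareModelFromCache_1_3, execute_1_3, executeSynchronously_1_3, and executeFenced becomes a hint rather than a hard cut-off: a driver may abort work it cannot finish in time and return MISSED_DEADLINE_TRANSIENT or MISSED_DEADLINE_PERSISTENT, but it may also keep running and return NONE, which is why the VTS NOW and SHORT deadline bounds now accept NONE as well. The sketch below illustrates one way a driver could consume the deadline under the relaxed contract; it is not part of the patch, the helper names are hypothetical, and it assumes the generated OptionalTimePoint safe_union accessors (getDiscriminator(), nanosecondsSinceEpoch()) and the steady_clock interpretation that the makeDeadline helper in the VTS change above also uses.

    // Hypothetical driver-side helpers; illustrative only, not part of the patch.
    #include <android/hardware/neuralnetworks/1.3/types.h>
    #include <chrono>
    #include <optional>

    using ::android::hardware::neuralnetworks::V1_3::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_3::OptionalTimePoint;
    using TimePoint = std::chrono::steady_clock::time_point;

    // Convert the optional HIDL deadline to a std::chrono time point, if one was provided.
    std::optional<TimePoint> toTimePoint(const OptionalTimePoint& deadline) {
        if (deadline.getDiscriminator() ==
            OptionalTimePoint::hidl_discriminator::nanosecondsSinceEpoch) {
            const std::chrono::nanoseconds sinceEpoch(deadline.nanosecondsSinceEpoch());
            return TimePoint(std::chrono::duration_cast<TimePoint::duration>(sinceEpoch));
        }
        return std::nullopt;  // no deadline supplied
    }

    // Checked between units of work. Returning a status means "abort and report it";
    // returning std::nullopt means keep running, and finishing late with NONE is also valid.
    std::optional<ErrorStatus> checkDeadline(const std::optional<TimePoint>& deadline) {
        if (deadline.has_value() && std::chrono::steady_clock::now() > *deadline) {
            // Return MISSED_DEADLINE_PERSISTENT instead if retrying the same call cannot succeed.
            return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
        }
        return std::nullopt;
    }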