From 4024d8f4d826f305c97e0e178e9323b03d03d9d8 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Fri, 4 Dec 2020 17:38:20 -0800 Subject: [PATCH 1/4] Change NN canonical timings to nanoseconds -- hal A sibling CL to this CL changes the definition of nn::TimePoint to the same type as std::chrono::steady_clock::time_point but changes the underlying duration representation to uint64_t. That sibling CL also renames nn::OptionalTimeoutDuration to nn::OptionalDuration, and changes the definition to the same type as std::chrono::nanoseconds except the underlying duration representation now uses uint64_t. This CL makes changes to the NN HAL utility code in response to the changes in the sibling CL. Bug: 174297663 Test: mma Test: NeuralNetworksTest_static Change-Id: If44d9aefadb2c78b632ff289b5ff5a49f766525c Merged-In: If44d9aefadb2c78b632ff289b5ff5a49f766525c (cherry picked from commit ca11420785834a3ca2588bb9df12f7a83299a058) --- .../include/nnapi/hal/1.0/PreparedModel.h | 6 +-- .../1.0/utils/src/PreparedModel.cpp | 13 ++--- .../include/nnapi/hal/1.2/PreparedModel.h | 6 +-- neuralnetworks/1.2/utils/src/Conversions.cpp | 24 +++++++++- .../1.2/utils/src/PreparedModel.cpp | 13 ++--- .../utils/include/nnapi/hal/1.3/Conversions.h | 8 ++-- .../include/nnapi/hal/1.3/PreparedModel.h | 6 +-- neuralnetworks/1.3/utils/src/Conversions.cpp | 48 ++++--------------- .../1.3/utils/src/PreparedModel.cpp | 6 +-- .../include/nnapi/hal/InvalidPreparedModel.h | 6 +-- .../nnapi/hal/ResilientPreparedModel.h | 6 +-- .../utils/common/src/InvalidPreparedModel.cpp | 6 +-- .../common/src/ResilientPreparedModel.cpp | 13 ++--- 13 files changed, 76 insertions(+), 85 deletions(-) diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h index 31f366dadc..198cbc8e81 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h @@ -44,13 +44,13 @@ class PreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp index 46dd3f8254..add827567e 100644 --- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -55,7 +55,7 @@ PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp, nn::Timing>> PreparedModel::execute( const nn::Request& request, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/) const { // Ensure that request is ready for IPC.
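// (Descriptive note: convertRequestFromPointerToShared copies any pointer-backed request memory
// into shared memory so it can cross the HIDL boundary; maybeRequestInShared keeps that temporary
// copy alive for the duration of the call.)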
std::optional maybeRequestInShared; const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( @@ -81,11 +81,12 @@ nn::ExecutionResult, nn::Timing>> Prepare } nn::GeneralResult> -PreparedModel::executeFenced( - const nn::Request& /*request*/, const std::vector& /*waitFor*/, - nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/, - const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const { +PreparedModel::executeFenced(const nn::Request& /*request*/, + const std::vector& /*waitFor*/, + nn::MeasureTiming /*measure*/, + const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IPreparedModel::executeFenced is not supported on 1.0 HAL service"; } diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h index 65e1e8aa3f..53bd4d12ef 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h @@ -45,13 +45,13 @@ class PreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index f11474fd60..3790d1f61e 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -43,7 +43,9 @@ constexpr std::underlying_type_t underlyingType(Type value) { return static_cast>(value); } +using HalDuration = std::chrono::duration; constexpr auto kVersion = android::nn::Version::ANDROID_Q; +constexpr uint64_t kNoTiming = std::numeric_limits::max(); } // namespace @@ -270,7 +272,18 @@ GeneralResult unvalidatedConvert(const hal::V1_2::MeasureTiming& } GeneralResult unvalidatedConvert(const hal::V1_2::Timing& timing) { - return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver}; + constexpr uint64_t kMaxTiming = std::chrono::floor(Duration::max()).count(); + constexpr auto convertTiming = [](uint64_t halTiming) -> OptionalDuration { + if (halTiming == kNoTiming) { + return {}; + } + if (halTiming > kMaxTiming) { + return Duration::max(); + } + return HalDuration{halTiming}; + }; + return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice), + .timeInDriver = convertTiming(timing.timeInDriver)}; } GeneralResult unvalidatedConvert(const hal::V1_2::Extension& extension) { @@ -547,7 +560,14 @@ nn::GeneralResult unvalidatedConvert(const nn::MeasureTiming& mea } nn::GeneralResult unvalidatedConvert(const nn::Timing& timing) { - return Timing{.timeOnDevice = 
timing.timeOnDevice, .timeInDriver = timing.timeInDriver}; + constexpr auto convertTiming = [](nn::OptionalDuration canonicalTiming) -> uint64_t { + if (!canonicalTiming.has_value()) { + return kNoTiming; + } + return std::chrono::ceil(*canonicalTiming).count(); + }; + return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice), + .timeInDriver = convertTiming(timing.timeInDriver)}; } nn::GeneralResult unvalidatedConvert(const nn::Extension& extension) { diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index dad9a7e74b..32c2651950 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -106,7 +106,7 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming nn::ExecutionResult, nn::Timing>> PreparedModel::execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/) const { // Ensure that request is ready for IPC. std::optional maybeRequestInShared; const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( @@ -140,11 +140,12 @@ nn::ExecutionResult, nn::Timing>> Prepare } nn::GeneralResult> -PreparedModel::executeFenced( - const nn::Request& /*request*/, const std::vector& /*waitFor*/, - nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/, - const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const { +PreparedModel::executeFenced(const nn::Request& /*request*/, + const std::vector& /*waitFor*/, + nn::MeasureTiming /*measure*/, + const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IPreparedModel::executeFenced is not supported on 1.2 HAL service"; } diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h index 9653a05da7..477bb7b6e0 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h @@ -44,7 +44,7 @@ GeneralResult unvalidatedConvert( const hal::V1_3::Request::MemoryPool& memoryPool); GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimePoint& optionalTimePoint); -GeneralResult unvalidatedConvert( +GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration); GeneralResult unvalidatedConvert(const hal::V1_3::ErrorStatus& errorStatus); @@ -54,7 +54,7 @@ GeneralResult convert(const hal::V1_3::Model& model); GeneralResult convert(const hal::V1_3::BufferDesc& bufferDesc); GeneralResult convert(const hal::V1_3::Request& request); GeneralResult convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint); -GeneralResult convert( +GeneralResult convert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration); GeneralResult convert(const hal::V1_3::ErrorStatus& errorStatus); @@ -86,7 +86,7 @@ nn::GeneralResult unvalidatedConvert( nn::GeneralResult unvalidatedConvert( const nn::OptionalTimePoint& optionalTimePoint); nn::GeneralResult unvalidatedConvert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration); + const nn::OptionalDuration& optionalTimeoutDuration); 
nn::GeneralResult unvalidatedConvert(const nn::ErrorStatus& errorStatus); nn::GeneralResult convert(const nn::Priority& priority); @@ -96,7 +96,7 @@ nn::GeneralResult convert(const nn::BufferDesc& bufferDesc); nn::GeneralResult convert(const nn::Request& request); nn::GeneralResult convert(const nn::OptionalTimePoint& optionalTimePoint); nn::GeneralResult convert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration); + const nn::OptionalDuration& optionalTimeoutDuration); nn::GeneralResult convert(const nn::ErrorStatus& errorStatus); nn::GeneralResult convert(const nn::SharedHandle& handle); diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h index e0d69dd7c6..09360eceb8 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -44,13 +44,13 @@ class PreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index 949dd0d1ed..c89a69f28b 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -272,47 +272,26 @@ GeneralResult unvalidatedConvert( GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimePoint& optionalTimePoint) { - constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count(); - const auto makeTimePoint = [](uint64_t count) -> GeneralResult { - if (count > kTimePointMaxCount) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimePoint because the count exceeds " - "the max"; - } - const auto nanoseconds = std::chrono::nanoseconds{count}; - return TimePoint{nanoseconds}; - }; - using Discriminator = hal::V1_3::OptionalTimePoint::hidl_discriminator; switch (optionalTimePoint.getDiscriminator()) { case Discriminator::none: - return std::nullopt; + return {}; case Discriminator::nanosecondsSinceEpoch: - return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch()); + return TimePoint{Duration{optionalTimePoint.nanosecondsSinceEpoch()}}; } return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid OptionalTimePoint discriminator " << underlyingType(optionalTimePoint.getDiscriminator()); } -GeneralResult unvalidatedConvert( +GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) { - constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count(); - const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult { - if (count > kTimeoutDurationMaxCount) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimeoutDuration because the count " - 
"exceeds the max"; - } - return TimeoutDuration{count}; - }; - using Discriminator = hal::V1_3::OptionalTimeoutDuration::hidl_discriminator; switch (optionalTimeoutDuration.getDiscriminator()) { case Discriminator::none: - return std::nullopt; + return {}; case Discriminator::nanoseconds: - return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds()); + return Duration(optionalTimeoutDuration.nanoseconds()); } return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid OptionalTimeoutDuration discriminator " @@ -360,7 +339,7 @@ GeneralResult convert(const hal::V1_3::OptionalTimePoint& opt return validatedConvert(optionalTimePoint); } -GeneralResult convert( +GeneralResult convert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) { return validatedConvert(optionalTimeoutDuration); } @@ -629,27 +608,16 @@ nn::GeneralResult unvalidatedConvert( OptionalTimePoint ret; if (optionalTimePoint.has_value()) { const auto count = optionalTimePoint.value().time_since_epoch().count(); - if (count < 0) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimePoint because time since epoch " - "count is " - "negative"; - } ret.nanosecondsSinceEpoch(count); } return ret; } nn::GeneralResult unvalidatedConvert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration) { + const nn::OptionalDuration& optionalTimeoutDuration) { OptionalTimeoutDuration ret; if (optionalTimeoutDuration.has_value()) { const auto count = optionalTimeoutDuration.value().count(); - if (count < 0) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimeoutDuration because count is " - "negative"; - } ret.nanoseconds(count); } return ret; @@ -697,7 +665,7 @@ nn::GeneralResult convert(const nn::OptionalTimePoint& option } nn::GeneralResult convert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration) { + const nn::OptionalDuration& optionalTimeoutDuration) { return validatedConvert(optionalTimeoutDuration); } diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index 49b9b0bcc3..124a8db263 100644 --- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -159,7 +159,7 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming nn::ExecutionResult, nn::Timing>> PreparedModel::execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration) const { // Ensure that request is ready for IPC. std::optional maybeRequestInShared; const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( @@ -200,8 +200,8 @@ nn::ExecutionResult, nn::Timing>> Prepare nn::GeneralResult> PreparedModel::executeFenced(const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const { + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const { // Ensure that request is ready for IPC. 
std::optional maybeRequestInShared; const nn::Request& requestInShared = diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h index 4b32b4e3af..985cddb2c2 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h @@ -32,13 +32,13 @@ class InvalidPreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; }; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h index c2940d16bc..d86c88be32 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h @@ -49,13 +49,13 @@ class ResilientPreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp index 9ae7a63949..a46f4ac574 100644 --- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp +++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp @@ -29,7 +29,7 @@ namespace android::hardware::neuralnetworks::utils { nn::ExecutionResult, nn::Timing>> InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/) const { return NN_ERROR() << "InvalidPreparedModel"; } @@ -37,8 +37,8 @@ nn::GeneralResult> InvalidPreparedModel::executeFenced( const nn::Request& /*request*/, const std::vector& /*waitFor*/, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/, - const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const 
nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { return NN_ERROR() << "InvalidPreparedModel"; } diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp index 1c9ecba4f6..012a1dedc3 100644 --- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp +++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp @@ -64,16 +64,17 @@ nn::SharedPreparedModel ResilientPreparedModel::recover( nn::ExecutionResult, nn::Timing>> ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration) const { return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration); } nn::GeneralResult> -ResilientPreparedModel::executeFenced( - const nn::Request& request, const std::vector& waitFor, - nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const { +ResilientPreparedModel::executeFenced(const nn::Request& request, + const std::vector& waitFor, + nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const { return getPreparedModel()->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration, timeoutDurationAfterFence); } From aad934baa7894472c5b1e177fb101ce0b2f61f51 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Sun, 13 Dec 2020 23:06:06 -0800 Subject: [PATCH 2/4] Add HIDL lifetime and protecting callback info to NN README This CL copies information from packages/modules/NeuralNetworks/runtime/VersionedInterfaces.cpp and modifies the description to be more appropriate for the NN HAL utility code. 
Specific sections added to the README: * "HIDL Interface Lifetimes across Processes" * "Protecting Asynchronous Calls across HIDL" Bug: 170289677 Test: mma Change-Id: Id381895535d708b627f4746687b4d12e16560639 Merged-In: Id381895535d708b627f4746687b4d12e16560639 (cherry picked from commit 7a655bb3d4752e0c373ad3fdbcf4508eb7050afc) --- .../utils/include/nnapi/hal/1.0/Callbacks.h | 3 + .../1.0/utils/include/nnapi/hal/1.0/Device.h | 4 ++ .../include/nnapi/hal/1.0/PreparedModel.h | 4 ++ neuralnetworks/1.0/utils/src/Callbacks.cpp | 3 + neuralnetworks/1.0/utils/src/Device.cpp | 3 + .../1.0/utils/src/PreparedModel.cpp | 3 + .../1.1/utils/include/nnapi/hal/1.1/Device.h | 4 ++ neuralnetworks/1.1/utils/src/Device.cpp | 3 + .../utils/include/nnapi/hal/1.2/Callbacks.h | 3 + .../1.2/utils/include/nnapi/hal/1.2/Device.h | 4 ++ .../include/nnapi/hal/1.2/PreparedModel.h | 4 ++ neuralnetworks/1.2/utils/src/Callbacks.cpp | 3 + neuralnetworks/1.2/utils/src/Device.cpp | 3 + .../1.2/utils/src/PreparedModel.cpp | 3 + .../1.3/utils/include/nnapi/hal/1.3/Buffer.h | 4 ++ .../utils/include/nnapi/hal/1.3/Callbacks.h | 3 + .../1.3/utils/include/nnapi/hal/1.3/Device.h | 4 ++ .../include/nnapi/hal/1.3/PreparedModel.h | 4 ++ neuralnetworks/1.3/utils/src/Buffer.cpp | 3 + neuralnetworks/1.3/utils/src/Callbacks.cpp | 3 + neuralnetworks/1.3/utils/src/Device.cpp | 3 + .../1.3/utils/src/PreparedModel.cpp | 3 + neuralnetworks/utils/README.md | 57 +++++++++++++++++-- .../include/nnapi/hal/ProtectCallback.h | 3 + 24 files changed, 129 insertions(+), 5 deletions(-) diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h index 65b75e5d82..2e00fcecf3 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h @@ -27,6 +27,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_0::utils { class PreparedModelCallback final : public IPreparedModelCallback, diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h index ee103bacf5..db3b2ad44f 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h @@ -32,8 +32,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_0::utils { +// Class that adapts V1_0::IDevice to nn::IDevice. class Device final : public nn::IDevice { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h index 198cbc8e81..2de182871d 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h @@ -29,8 +29,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_0::utils { +// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel. 
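+// A minimal usage sketch of this adapter (hypothetical caller code; assumes `halPreparedModel`
+// came from IDevice::prepareModel and that the enclosing function returns a Result type so
+// NN_TRY applies):
+//     sp<V1_0::IPreparedModel> halPreparedModel = ...;
+//     const auto preparedModel = NN_TRY(PreparedModel::create(std::move(halPreparedModel)));
+//     const auto [outputShapes, timing] =
+//             NN_TRY(preparedModel->execute(request, measure, {}, {}));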
class PreparedModel final : public nn::IPreparedModel { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp index b1259c3c56..a0bdb3cd99 100644 --- a/neuralnetworks/1.0/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp @@ -32,6 +32,9 @@ #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_0::utils { namespace { diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp index 285c515c20..83e0015689 100644 --- a/neuralnetworks/1.0/utils/src/Device.cpp +++ b/neuralnetworks/1.0/utils/src/Device.cpp @@ -38,6 +38,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_0::utils { namespace { diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp index add827567e..c1dd1d9e70 100644 --- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -34,6 +34,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_0::utils { nn::GeneralResult> PreparedModel::create( diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h index c1e95fe1a5..5e224b5018 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h @@ -32,8 +32,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_1::utils { +// Class that adapts V1_1::IDevice to nn::IDevice. class Device final : public nn::IDevice { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp index f73d3f8253..b57c7f4c54 100644 --- a/neuralnetworks/1.1/utils/src/Device.cpp +++ b/neuralnetworks/1.1/utils/src/Device.cpp @@ -39,6 +39,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_1::utils { namespace { diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h index bc7d92ac83..1162bc33b3 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h @@ -31,6 +31,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. 
+ namespace android::hardware::neuralnetworks::V1_2::utils { class PreparedModelCallback final : public IPreparedModelCallback, diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h index a68830d86e..79c3b041ad 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h @@ -32,6 +32,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_2::utils { nn::GeneralResult initVersionString(V1_2::IDevice* device); @@ -40,6 +43,7 @@ nn::GeneralResult> initExtensions(V1_2::IDevice* devi nn::GeneralResult> initNumberOfCacheFilesNeeded( V1_2::IDevice* device); +// Class that adapts V1_2::IDevice to nn::IDevice. class Device final : public nn::IDevice { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h index 53bd4d12ef..8ed5ca7f97 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h @@ -30,8 +30,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_2::utils { +// Class that adapts V1_2::IPreparedModel to nn::IPreparedModel. class PreparedModel final : public nn::IPreparedModel { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp index 39f88c2c5e..ab3e0ca879 100644 --- a/neuralnetworks/1.2/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp @@ -36,6 +36,9 @@ #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_2::utils { namespace { diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp index 0061065f0b..6cca841aba 100644 --- a/neuralnetworks/1.2/utils/src/Device.cpp +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -41,6 +41,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_2::utils { namespace { diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index 32c2651950..b422cedefa 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -37,6 +37,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. 
+ namespace android::hardware::neuralnetworks::V1_2::utils { namespace { diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h index 637179de33..fda79c88c1 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h @@ -24,8 +24,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes. + namespace android::hardware::neuralnetworks::V1_3::utils { +// Class that adapts V1_3::IBuffer to nn::IBuffer. class Buffer final : public nn::IBuffer { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h index d46b111701..cb2a56a2e2 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h @@ -34,6 +34,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { class PreparedModelCallback final : public IPreparedModelCallback, diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h index 0f5234bd26..84f606a357 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h @@ -32,8 +32,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { +// Class that adapts V1_3::IDevice to nn::IDevice. class Device final : public nn::IDevice { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h index 09360eceb8..c4ba483463 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -29,8 +29,12 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { +// Class that adapts V1_3::IPreparedModel to nn::IPreparedModel. class PreparedModel final : public nn::IPreparedModel { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp index ffdeccdf62..4ef54a2c93 100644 --- a/neuralnetworks/1.3/utils/src/Buffer.cpp +++ b/neuralnetworks/1.3/utils/src/Buffer.cpp @@ -33,6 +33,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes. 
+ namespace android::hardware::neuralnetworks::V1_3::utils { nn::GeneralResult> Buffer::create( diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp index e3c6074549..17c20fba68 100644 --- a/neuralnetworks/1.3/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -39,6 +39,9 @@ #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { namespace { diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp index 82837bac73..60564985de 100644 --- a/neuralnetworks/1.3/utils/src/Device.cpp +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -47,6 +47,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { namespace { diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index 124a8db263..0bae95de87 100644 --- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -39,6 +39,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { namespace { diff --git a/neuralnetworks/utils/README.md b/neuralnetworks/utils/README.md index 0dee103811..45ca0b442f 100644 --- a/neuralnetworks/utils/README.md +++ b/neuralnetworks/utils/README.md @@ -1,11 +1,11 @@ # NNAPI Conversions `convert` fails if either the source type or the destination type is invalid, and it yields a valid -object if the conversion succeeds. For example, let's say that an enumeration in the current -version has fewer possible values than the "same" canonical enumeration, such as `OperationType`. -The new value of `HARD_SWISH` (introduced in Android R / NN HAL 1.3) does not map to any valid -existing value in `OperationType`, but an older value of `ADD` (introduced in Android OC-MR1 / NN -HAL 1.0) is valid. This can be seen in the following model conversions: +object if the conversion succeeds. For example, let's say that an enumeration in the current version +has fewer possible values than the "same" canonical enumeration, such as `OperationType`. The new +value of `HARD_SWISH` (introduced in Android R / NN HAL 1.3) does not map to any valid existing +value in `OperationType`, but an older value of `ADD` (introduced in Android OC-MR1 / NN HAL 1.0) is +valid. This can be seen in the following model conversions: ```cpp // Unsuccessful conversion @@ -48,3 +48,50 @@ The `convert` functions operate only on types that used in a HIDL method call di `unvalidatedConvert` functions operate on types that are either used in a HIDL method call directly (i.e., not as a nested class) or used in a subsequent version of the NN HAL. Prefer using `convert` over `unvalidatedConvert`. + +# HIDL Interface Lifetimes across Processes + +Some notes about HIDL interface objects and lifetimes across processes: + +All HIDL interface objects inherit from `IBase`, which itself inherits from `::android::RefBase`. 
As +such, all HIDL interface objects are reference counted and must be owned through `::android::sp` (or +referenced through `::android::wp`). Allocating `RefBase` objects on the stack will log errors and +may result in crashes, and deleting a `RefBase` object through another means (e.g., "delete", +"free", or RAII-cleanup through `std::unique_ptr` or some equivalent) will result in double-free +and/or use-after-free undefined behavior. + +HIDL/Binder manages the reference count of HIDL interface objects automatically across processes. If +a process that references (but did not create) the HIDL interface object dies, HIDL/Binder ensures +any reference count it held is properly released. (Caveat: it might be possible that HIDL/Binder +behave strangely with `::android::wp` references.) + +If the process which created the HIDL interface object dies, any call on this object from another +process will result in a HIDL transport error with the code `DEAD_OBJECT`. + +# Protecting Asynchronous Calls across HIDL + +Some notes about asynchronous calls across HIDL: + +For synchronous calls across HIDL, if an error occurs after the function was called but before it +returns, HIDL will return a transport error. For example, if the message cannot be delivered to the +server process or if the server process dies before returning a result, HIDL will return from the +function with the appropriate transport error in the `Return<>` object, which can be queried with +`Return<>::isOk()`, `Return<>::isDeadObject()`, `Return<>::description()`, etc. + +However, HIDL offers no such error management in the case of asynchronous calls. By default, if the +client launches an asynchronous task and the server fails to return a result through the callback, +the client will be left waiting indefinitely for a result it will never receive. + +In the NNAPI, `IDevice::prepareModel*` and `IPreparedModel::execute*` (but not +`IPreparedModel::executeSynchronously*`) are asynchronous calls across HIDL. Specifically, these +asynchronous functions are called with a HIDL interface callback object (`IPrepareModelCallback` for +`IDevice::prepareModel*` and `IExecutionCallback` for `IPreparedModel::execute*`) and are expected +to quickly return, and the results are returned at a later time through these callback objects. + +To protect against the case when the server dies after the asynchronous task was called successfully +but before the results could be returned, HIDL provides an object called a "`hidl_death_recipient`," +which can be used to detect when an interface object (and more generally, the server process) has +died. nnapi/hal/ProtectCallback.h's `DeathHandler` uses `hidl_death_recipient`s to detect when the +driver process has died, and `DeathHandler` will unblock any thread waiting on the results of an +`IProtectedCallback` callback object that may otherwise not be signaled. In order for this to work, +the `IProtectedCallback` object must have been registered via `DeathHandler::protectCallback()`. diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h index 85bd6137ee..c9218857ac 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h @@ -28,6 +28,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. 
+ namespace android::hardware::neuralnetworks::utils { class IProtectedCallback { From 7fd03c265edbfd345fb70c6ae76fa93a81a26741 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Sun, 6 Dec 2020 21:50:59 -0800 Subject: [PATCH 3/4] Cleanup NN callback error handling This CL introduces a new templated class CallbackValue to handle HIDL "return value" callbacks in a terser and more readable way. This CL also introduces a new macro HANDLE_HAL_STATUS to return from the current function when an error is present with the ability to append a more descriptive error message. Finally, this CL changes the behavior of synchronous executions. Prior to this CL, IPreparedModel fell back to an asynchronous execution if the synchronous execution was allowed and failed. This change instead returns a failure if synchronous execution is allowed and fails. Bug: 173084343 Test: mma Change-Id: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707 Merged-In: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707 (cherry picked from commit 98ed9baf5de85599847b2b2f53585243c3b7b776) --- .../utils/include/nnapi/hal/1.0/Callbacks.h | 25 ++- neuralnetworks/1.0/utils/src/Callbacks.cpp | 50 +++--- neuralnetworks/1.0/utils/src/Device.cpp | 48 ++---- .../1.0/utils/src/PreparedModel.cpp | 8 +- .../utils/include/nnapi/hal/1.1/Conversions.h | 4 + neuralnetworks/1.1/utils/src/Conversions.cpp | 12 ++ neuralnetworks/1.1/utils/src/Device.cpp | 48 ++---- .../utils/include/nnapi/hal/1.2/Callbacks.h | 18 +- .../utils/include/nnapi/hal/1.2/Conversions.h | 6 + .../1.2/utils/include/nnapi/hal/1.2/Device.h | 19 +- .../include/nnapi/hal/1.2/PreparedModel.h | 7 +- .../1.2/utils/include/nnapi/hal/1.2/Utils.h | 2 + neuralnetworks/1.2/utils/src/Callbacks.cpp | 78 +++------ neuralnetworks/1.2/utils/src/Conversions.cpp | 18 ++ neuralnetworks/1.2/utils/src/Device.cpp | 163 +++++++----------- .../1.2/utils/src/PreparedModel.cpp | 78 +++------ .../utils/include/nnapi/hal/1.3/Callbacks.h | 25 ++- .../utils/include/nnapi/hal/1.3/Conversions.h | 11 ++ .../include/nnapi/hal/1.3/PreparedModel.h | 7 +- neuralnetworks/1.3/utils/src/Buffer.cpp | 16 +- neuralnetworks/1.3/utils/src/Callbacks.cpp | 108 ++++-------- neuralnetworks/1.3/utils/src/Conversions.cpp | 34 ++++ neuralnetworks/1.3/utils/src/Device.cpp | 100 ++++------- .../1.3/utils/src/PreparedModel.cpp | 121 ++++--------- .../common/include/nnapi/hal/CommonUtils.h | 11 +- .../common/include/nnapi/hal/HandleError.h | 7 + .../common/include/nnapi/hal/TransferValue.h | 62 ++++++- 27 files changed, 508 insertions(+), 578 deletions(-) diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h index 2e00fcecf3..3b32e1dbf9 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h @@ -32,6 +32,26 @@ namespace android::hardware::neuralnetworks::V1_0::utils { +// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success, +// this function returns with the supported operations as indicated by a driver. On failure, this +// function returns with the appropriate nn::GeneralError. +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations); + +// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this +// function returns with a non-null nn::SharedPreparedModel with a feature level of +// nn::Version::ANDROID_OC_MR1. 
On failure, this function returns with the appropriate +// nn::GeneralError. +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel); + +// Converts the results of IDevice::execute* to the NN canonical format. On success, this function +// returns with an empty output shape vector and no timing information. On failure, this function +// returns with the appropriate nn::ExecutionError. +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status); + +// A HIDL callback class to receive the results of IDevice::prepareModel asynchronously. class PreparedModelCallback final : public IPreparedModelCallback, public hal::utils::IProtectedCallback { public: @@ -44,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback, Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; +// A HIDL callback class to receive the results of IDevice::execute asynchronously. class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -60,8 +79,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp index a0bdb3cd99..ea3ea56de6 100644 --- a/neuralnetworks/1.0/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -36,63 +37,52 @@ // lifetimes across processes and for protecting asynchronous calls across HIDL. namespace android::hardware::neuralnetworks::V1_0::utils { -namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations) { + HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + return supportedOperations; } -} // namespace +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status) { + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + return {}; +} Return PreparedModelCallback::notify(ErrorStatus status, const sp& preparedModel) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - 
mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(ErrorStatus status) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(executionCallback(status)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp index 83e0015689..93bd81a19c 100644 --- a/neuralnetworks/1.0/utils/src/Device.cpp +++ b/neuralnetworks/1.0/utils/src/Device.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -44,24 +45,21 @@ namespace android::hardware::neuralnetworks::V1_0::utils { namespace { -nn::GeneralResult initCapabilities(V1_0::IDevice* device) { +nn::GeneralResult capabilitiesCallback(ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); +} + +nn::GeneralResult getCapabilitiesFrom(V1_0::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace @@ -77,7 +75,7 @@ nn::GeneralResult> Device::create(std::string name << "V1_0::utils::Device::create must have non-null device"; } - auto capabilities = NN_TRY(initCapabilities(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared(PrivateConstructorTag{}, std::move(name), @@ -134,27 +132,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](ErrorStatus status, const hidl_vec& supportedOperations) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getSupportedOperations failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "getSupportedOperations returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = 
hal::utils::CallbackValue(supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -173,10 +156,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel(hidlModel, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp index c1dd1d9e70..c0c22fbd6a 100644 --- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -42,8 +42,7 @@ namespace android::hardware::neuralnetworks::V1_0::utils { nn::GeneralResult> PreparedModel::create( sp preparedModel) { if (preparedModel == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_0::utils::PreparedModel::create must have non-null preparedModel"; + return NN_ERROR() << "V1_0::utils::PreparedModel::create must have non-null preparedModel"; } auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); @@ -71,10 +70,7 @@ nn::ExecutionResult, nn::Timing>> Prepare const auto ret = kPreparedModel->execute(hidlRequest, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "execute failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); auto result = NN_TRY(cb->get()); NN_TRY(hal::utils::makeExecutionFailure( diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h index f64646257f..5d0769f14c 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h @@ -51,6 +51,10 @@ nn::GeneralResult convert(const nn::Capabilities& capabilities); nn::GeneralResult convert(const nn::Model& model); nn::GeneralResult convert(const nn::ExecutionPreference& executionPreference); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert(const nn::Request& request); +nn::GeneralResult convert(const nn::ErrorStatus& status); + } // namespace android::hardware::neuralnetworks::V1_1::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_CONVERSIONS_H diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp index 359f68ad4d..b47f25a68c 100644 --- a/neuralnetworks/1.1/utils/src/Conversions.cpp +++ b/neuralnetworks/1.1/utils/src/Conversions.cpp @@ -275,4 +275,16 @@ nn::GeneralResult convert(const nn::ExecutionPreference& ex return validatedConvert(executionPreference); } +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus) { + return V1_0::utils::convert(deviceStatus); +} + +nn::GeneralResult convert(const nn::Request& request) { + return V1_0::utils::convert(request); +} + +nn::GeneralResult convert(const nn::ErrorStatus& status) { + return V1_0::utils::convert(status); +} + } // namespace 
android::hardware::neuralnetworks::V1_1::utils diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp index b57c7f4c54..3197ef4ac3 100644 --- a/neuralnetworks/1.1/utils/src/Device.cpp +++ b/neuralnetworks/1.1/utils/src/Device.cpp @@ -45,24 +45,21 @@ namespace android::hardware::neuralnetworks::V1_1::utils { namespace { -nn::GeneralResult initCapabilities(V1_1::IDevice* device) { +nn::GeneralResult capabilitiesCallback(V1_0::ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); +} + +nn::GeneralResult getCapabilitiesFrom(V1_1::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_1(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace @@ -78,7 +75,7 @@ nn::GeneralResult> Device::create(std::string name << "V1_1::utils::Device::create must have non-null device"; } - auto capabilities = NN_TRY(initCapabilities(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared(PrivateConstructorTag{}, std::move(name), @@ -135,28 +132,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](V1_0::ErrorStatus status, - const hidl_vec& supportedOperations) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getSupportedOperations_1_1 failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "getSupportedOperations_1_1 returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -176,10 +157,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h 
b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h index 1162bc33b3..ba3c1ba1db 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h @@ -36,6 +36,19 @@ namespace android::hardware::neuralnetworks::V1_2::utils { +// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this +// function returns with a non-null nn::SharedPreparedModel with a feature level of +// nn::Version::ANDROID_Q. On failure, this function returns with the appropriate nn::GeneralError. +nn::GeneralResult prepareModelCallback( + V1_0::ErrorStatus status, const sp& preparedModel); + +// Converts the results of IDevice::execute* to the NN canonical format. On success, this function +// returns with the output shapes and the timing information. On failure, this function returns with +// the appropriate nn::ExecutionError. +nn::ExecutionResult, nn::Timing>> executionCallback( + V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing); + +// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously. class PreparedModelCallback final : public IPreparedModelCallback, public hal::utils::IProtectedCallback { public: @@ -51,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback, Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; +// A HIDL callback class to receive the results of IDevice::execute_1_2 asynchronously. class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -69,8 +81,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h index 5dcbc0bb79..6fd13379ef 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h @@ -97,6 +97,12 @@ nn::GeneralResult> convert(const std::vector& nn::GeneralResult> convert(const std::vector& handles); nn::GeneralResult> convert(const std::vector& outputShapes); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert(const nn::Request& request); +nn::GeneralResult convert(const nn::ErrorStatus& status); +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference); + } // namespace android::hardware::neuralnetworks::V1_2::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_CONVERSIONS_H diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h index 79c3b041ad..b4bef5ee0a 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h @@ -37,10 +37,21 @@ namespace android::hardware::neuralnetworks::V1_2::utils { -nn::GeneralResult initVersionString(V1_2::IDevice* device); -nn::GeneralResult initDeviceType(V1_2::IDevice* device); -nn::GeneralResult> initExtensions(V1_2::IDevice* device); -nn::GeneralResult> initNumberOfCacheFilesNeeded( +// Retrieves the version string from the provided device object. On failure, this function returns +// with the appropriate nn::GeneralError. 
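Each accessor declared here is implemented with the same shape as the callback adapters above: wrap a free conversion function in hal::utils::CallbackValue, hand it to the synchronous HIDL call, and take the stored result. A minimal sketch of that shape; getXFrom, getX, xCallback, and ResultType are placeholders for illustration, not names from this patch:

    nn::GeneralResult<ResultType> getXFrom(V1_2::IDevice* device) {
        CHECK(device != nullptr);
        // xCallback converts (V1_0::ErrorStatus, X) into nn::GeneralResult<ResultType>.
        auto cb = hal::utils::CallbackValue(xCallback);
        const auto ret = device->getX(cb);  // HIDL invokes cb before returning
        HANDLE_TRANSPORT_FAILURE(ret);      // on transport failure, cb never ran
        return cb.take();                   // otherwise cb ran exactly once
    }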
+nn::GeneralResult getVersionStringFrom(V1_2::IDevice* device); + +// Retrieves the device type from the provided device object. On failure, this function returns with +// the appropriate nn::GeneralError. +nn::GeneralResult getDeviceTypeFrom(V1_2::IDevice* device); + +// Retrieves the extensions supported by the provided device object. On failure, this function +// returns with the appropriate nn::GeneralError. +nn::GeneralResult> getSupportedExtensionsFrom(V1_2::IDevice* device); + +// Retrieves the number of model cache files and data cache files needed by the provided device +// object. On failure, this function returns with the appropriate nn::GeneralError. +nn::GeneralResult> getNumberOfCacheFilesNeededFrom( V1_2::IDevice* device); // Class that adapts V1_2::IDevice to nn::IDevice. diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h index 8ed5ca7f97..6a56a82f99 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h @@ -41,10 +41,10 @@ class PreparedModel final : public nn::IPreparedModel { public: static nn::GeneralResult> create( - sp preparedModel); + sp preparedModel, bool executeSynchronously); - PreparedModel(PrivateConstructorTag tag, sp preparedModel, - hal::utils::DeathHandler deathHandler); + PreparedModel(PrivateConstructorTag tag, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler); nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, @@ -65,6 +65,7 @@ class PreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> executeAsynchronously( const V1_0::Request& request, MeasureTiming measure) const; + const bool kExecuteSynchronously; const sp kPreparedModel; const hal::utils::DeathHandler kDeathHandler; }; diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h index 70149a2d3a..c289fc89ab 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h @@ -30,6 +30,8 @@ namespace android::hardware::neuralnetworks::V1_2::utils { +using CacheToken = hidl_array(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; + constexpr auto kDefaultMesaureTiming = MeasureTiming::NO; constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits::max(), .timeInDriver = std::numeric_limits::max()}; diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp index ab3e0ca879..fefa122101 100644 --- a/neuralnetworks/1.2/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -42,104 +43,73 @@ namespace android::hardware::neuralnetworks::V1_2::utils { namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); -} - nn::GeneralResult, nn::Timing>> convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, const Timing& timing) { return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); } -nn::ExecutionResult, nn::Timing>> -convertExecutionGeneralResults(const hidl_vec& outputShapes, const 
Timing& timing) { +} // namespace + +nn::GeneralResult prepareModelCallback( + V1_0::ErrorStatus status, const sp& preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing) { + if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + auto canonicalOutputShapes = + nn::convert(outputShapes).value_or(std::vector{}); + return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) + << "execution failed with " << toString(status); + } + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); return hal::utils::makeExecutionFailure( convertExecutionGeneralResultsHelper(outputShapes, timing)); } -} // namespace - Return PreparedModelCallback::notify(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_0::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(V1_0::ErrorStatus status) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(V1_0::utils::executionCallback(status)); return Void(); } Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(executionCallback(status, outputShapes, timing)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - 
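// Note on the OUTPUT_INSUFFICIENT_SIZE branch of executionCallback above: unlike
// every other failure it still carries data, because callers use the returned
// shapes to enlarge their output buffers before retrying. A hypothetical caller,
// sketched under the assumption that nn::ExecutionError exposes `code` and
// `outputShapes` members:
//
//     auto result = executionCallback(status, outputShapes, timing);
//     if (!result.has_value() &&
//         result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
//         // result.error().outputShapes reports how large each output must be.
//     }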
notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index 3790d1f61e..062f6f712f 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -622,4 +623,21 @@ nn::GeneralResult> convert(const std::vector convert(const nn::DeviceStatus& deviceStatus) { + return V1_1::utils::convert(deviceStatus); +} + +nn::GeneralResult convert(const nn::Request& request) { + return V1_1::utils::convert(request); +} + +nn::GeneralResult convert(const nn::ErrorStatus& status) { + return V1_1::utils::convert(status); +} + +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference) { + return V1_1::utils::convert(executionPreference); +} + } // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp index 6cca841aba..9fe0de25b3 100644 --- a/neuralnetworks/1.2/utils/src/Device.cpp +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -47,109 +47,102 @@ namespace android::hardware::neuralnetworks::V1_2::utils { namespace { -nn::GeneralResult initCapabilities(V1_2::IDevice* device) { +nn::GeneralResult capabilitiesCallback(V1_0::ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); +} + +nn::GeneralResult versionStringCallback(V1_0::ErrorStatus status, + const hidl_string& versionString) { + HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status); + return versionString; +} + +nn::GeneralResult deviceTypeCallback(V1_0::ErrorStatus status, + DeviceType deviceType) { + HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status); + return nn::convert(deviceType); +} + +nn::GeneralResult> supportedExtensionsCallback( + V1_0::ErrorStatus status, const hidl_vec& extensions) { + HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status); + return nn::convert(extensions); +} + +nn::GeneralResult> numberOfCacheFilesNeededCallback( + V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { + HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status); + if (numModelCache > nn::kMaxNumberOfCacheFiles) { + return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater " + "than allowed max (" + << numModelCache << " vs " << nn::kMaxNumberOfCacheFiles << ")"; + } + if (numDataCache > nn::kMaxNumberOfCacheFiles) { + return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numDataCache files greater " + "than allowed max (" + << numDataCache << " vs " << nn::kMaxNumberOfCacheFiles << ")"; + } + return std::make_pair(numModelCache, numDataCache); +} + +nn::GeneralResult getCapabilitiesFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const 
Capabilities& capabilities) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_2(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace -nn::GeneralResult initVersionString(V1_2::IDevice* device) { +nn::GeneralResult getVersionStringFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status); - } else { - result = versionString; - } - }; + auto cb = hal::utils::CallbackValue(versionStringCallback); const auto ret = device->getVersionString(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } -nn::GeneralResult initDeviceType(V1_2::IDevice* device) { +nn::GeneralResult getDeviceTypeFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status); - } else { - result = nn::convert(deviceType); - } - }; + auto cb = hal::utils::CallbackValue(deviceTypeCallback); const auto ret = device->getType(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } -nn::GeneralResult> initExtensions(V1_2::IDevice* device) { +nn::GeneralResult> getSupportedExtensionsFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec& extensions) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status); - } else { - result = nn::convert(extensions); - } - }; + auto cb = hal::utils::CallbackValue(supportedExtensionsCallback); const auto ret = device->getSupportedExtensions(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } -nn::GeneralResult> initNumberOfCacheFilesNeeded( +nn::GeneralResult> getNumberOfCacheFilesNeededFrom( V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache, - uint32_t numDataCache) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getNumberOfCacheFilesNeeded failed with " << toString(status); - } else { - result = std::make_pair(numModelCache, numDataCache); - } - }; + auto cb = 
hal::utils::CallbackValue(numberOfCacheFilesNeededCallback); const auto ret = device->getNumberOfCacheFilesNeeded(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult> Device::create(std::string name, @@ -163,11 +156,11 @@ nn::GeneralResult> Device::create(std::string name << "V1_2::utils::Device::create must have non-null device"; } - auto versionString = NN_TRY(initVersionString(device.get())); - const auto deviceType = NN_TRY(initDeviceType(device.get())); - auto extensions = NN_TRY(initExtensions(device.get())); - auto capabilities = NN_TRY(initCapabilities(device.get())); - const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get())); + auto versionString = NN_TRY(getVersionStringFrom(device.get())); + const auto deviceType = NN_TRY(getDeviceTypeFrom(device.get())); + auto extensions = NN_TRY(getSupportedExtensionsFrom(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); + const auto numberOfCacheFilesNeeded = NN_TRY(getNumberOfCacheFilesNeededFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared( @@ -232,28 +225,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](V1_0::ErrorStatus status, - const hidl_vec& supportedOperations) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getSupportedOperations_1_2 failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "getSupportedOperations_1_2 returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -266,10 +243,10 @@ nn::GeneralResult Device::prepareModel( NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); const auto hidlModel = NN_TRY(convert(modelInShared)); - const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference)); + const auto hidlPreference = NN_TRY(convert(preference)); const auto hidlModelCache = NN_TRY(convert(modelCache)); const auto hidlDataCache = NN_TRY(convert(dataCache)); - const auto hidlToken = token; + const auto hidlToken = CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); @@ -277,10 +254,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } @@ -290,17 +264,14 @@ nn::GeneralResult Device::prepareModelFromCache( 
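HANDLE_HAL_STATUS, used throughout these hunks and defined later in this patch (in HandleError.h), expands to an if/else: it falls through when the HAL status converts to nn::ErrorStatus::NONE and otherwise returns early from the enclosing function, with the trailing stream operators attaching a message to the returned error. A small illustration of the behavior; describeStatus is a hypothetical function, not part of the patch:

    // Yields "ok" for ErrorStatus::NONE; any other status makes the macro
    // return NN_ERROR(canonical) << ... from describeStatus itself.
    nn::GeneralResult<std::string> describeStatus(V1_0::ErrorStatus status) {
        HANDLE_HAL_STATUS(status) << "call failed with " << toString(status);
        return std::string("ok");
    }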
const std::vector& dataCache, const nn::CacheToken& token) const { const auto hidlModelCache = NN_TRY(convert(modelCache)); const auto hidlDataCache = NN_TRY(convert(dataCache)); - const auto hidlToken = token; + const auto hidlToken = CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index b422cedefa..6d00082a5f 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -41,54 +41,33 @@ // lifetimes across processes and for protecting asynchronous calls across HIDL. namespace android::hardware::neuralnetworks::V1_2::utils { -namespace { - -nn::GeneralResult, nn::Timing>> -convertExecutionResultsHelper(const hidl_vec& outputShapes, const Timing& timing) { - return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); -} - -nn::ExecutionResult, nn::Timing>> convertExecutionResults( - const hidl_vec& outputShapes, const Timing& timing) { - return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing)); -} - -} // namespace nn::GeneralResult> PreparedModel::create( - sp preparedModel) { + sp preparedModel, bool executeSynchronously) { if (preparedModel == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_2::utils::PreparedModel::create must have non-null preparedModel"; + return NN_ERROR() << "V1_2::utils::PreparedModel::create must have non-null preparedModel"; } auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); - return std::make_shared(PrivateConstructorTag{}, std::move(preparedModel), - std::move(deathHandler)); + return std::make_shared(PrivateConstructorTag{}, executeSynchronously, + std::move(preparedModel), std::move(deathHandler)); } -PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp preparedModel, +PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler) - : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {} + : kExecuteSynchronously(executeSynchronously), + kPreparedModel(std::move(preparedModel)), + kDeathHandler(std::move(deathHandler)) {} nn::ExecutionResult, nn::Timing>> PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const { - nn::ExecutionResult, nn::Timing>> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec& outputShapes, - const Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status); - } else { - result = convertExecutionResults(outputShapes, timing); - } - }; + auto cb = hal::utils::CallbackValue(executionCallback); const 
auto ret = kPreparedModel->executeSynchronously(request, measure, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::ExecutionResult, nn::Timing>> @@ -98,9 +77,8 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming const auto ret = kPreparedModel->execute_1_2(request, measure, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "execute failed with " << toString(status); + if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); } return cb->get(); @@ -115,31 +93,17 @@ nn::ExecutionResult, nn::Timing>> Prepare const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared))); - const auto hidlRequest = - NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared))); + const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared))); const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure))); - nn::ExecutionResult, nn::Timing>> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const bool preferSynchronous = true; + auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure) + : executeAsynchronously(hidlRequest, hidlMeasure); + auto [outputShapes, timing] = NN_TRY(std::move(result)); - // Execute synchronously if allowed. - if (preferSynchronous) { - result = executeSynchronously(hidlRequest, hidlMeasure); - } + NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); - // Run asymchronous execution if execution has not already completed. - if (!result.has_value()) { - result = executeAsynchronously(hidlRequest, hidlMeasure); - } - - // Flush output buffers if suxcessful execution. - if (result.has_value()) { - NN_TRY(hal::utils::makeExecutionFailure( - hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); - } - - return result; + return std::make_pair(std::move(outputShapes), timing); } nn::GeneralResult> @@ -154,7 +118,7 @@ PreparedModel::executeFenced(const nn::Request& /*request*/, } std::any PreparedModel::getUnderlyingResource() const { - sp resource = kPreparedModel; + sp resource = kPreparedModel; return resource; } diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h index cb2a56a2e2..643172e192 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h @@ -39,6 +39,26 @@ namespace android::hardware::neuralnetworks::V1_3::utils { +// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success, +// this function returns with the supported operations as indicated by a driver. On failure, this +// function returns with the appropriate nn::GeneralError. +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations); + +// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this +// function returns with a non-null nn::SharedPreparedModel with a feature level of +// nn::Version::ANDROID_R. 
On failure, this function returns with the appropriate nn::GeneralError. +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel); + +// Converts the results of IDevice::execute* to the NN canonical format. On success, this function +// returns with the output shapes and the timing information. On failure, this function returns with +// the appropriate nn::ExecutionError. +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing); + +// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously. class PreparedModelCallback final : public IPreparedModelCallback, public hal::utils::IProtectedCallback { public: @@ -55,11 +75,10 @@ class PreparedModelCallback final : public IPreparedModelCallback, Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; +// A HIDL callback class to receive the results of IDevice::execute_1_3 asynchronously. class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -76,8 +95,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h index 477bb7b6e0..74a6534aff 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h @@ -103,6 +103,17 @@ nn::GeneralResult convert(const nn::SharedHandle& handle); nn::GeneralResult convert(const nn::Memory& memory); nn::GeneralResult> convert(const std::vector& bufferRoles); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference); +nn::GeneralResult> convert(const std::vector& extensions); +nn::GeneralResult> convert(const std::vector& handles); +nn::GeneralResult> convert( + const std::vector& outputShapes); +nn::GeneralResult convert(const nn::DeviceType& deviceType); +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming); +nn::GeneralResult convert(const nn::Timing& timing); + } // namespace android::hardware::neuralnetworks::V1_3::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h index c4ba483463..664d87a7c2 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -40,10 +40,10 @@ class PreparedModel final : public nn::IPreparedModel { public: static nn::GeneralResult> create( - sp preparedModel); + sp preparedModel, bool executeSynchronously); - PreparedModel(PrivateConstructorTag tag, sp preparedModel, - hal::utils::DeathHandler deathHandler); + PreparedModel(PrivateConstructorTag tag, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler); nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, @@ -66,6 +66,7 @@ class PreparedModel final : public nn::IPreparedModel { const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline, const OptionalTimeoutDuration& 
loopTimeoutDuration) const; + const bool kExecuteSynchronously; const sp kPreparedModel; const hal::utils::DeathHandler kDeathHandler; }; diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp index 4ef54a2c93..614033e268 100644 --- a/neuralnetworks/1.3/utils/src/Buffer.cpp +++ b/neuralnetworks/1.3/utils/src/Buffer.cpp @@ -41,12 +41,10 @@ namespace android::hardware::neuralnetworks::V1_3::utils { nn::GeneralResult> Buffer::create( sp buffer, nn::Request::MemoryDomainToken token) { if (buffer == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::Buffer::create must have non-null buffer"; + return NN_ERROR() << "V1_3::utils::Buffer::create must have non-null buffer"; } if (token == static_cast(0)) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::Buffer::create must have non-zero token"; + return NN_ERROR() << "V1_3::utils::Buffer::create must have non-zero token"; } return std::make_shared(PrivateConstructorTag{}, std::move(buffer), token); @@ -68,10 +66,7 @@ nn::GeneralResult Buffer::copyTo(const nn::Memory& dst) const { const auto ret = kBuffer->copyTo(hidlDst); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status); return {}; } @@ -83,10 +78,7 @@ nn::GeneralResult Buffer::copyFrom(const nn::Memory& src, const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status); return {}; } diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp index 17c20fba68..af76e6a87e 100644 --- a/neuralnetworks/1.3/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -45,136 +46,93 @@ namespace android::hardware::neuralnetworks::V1_3::utils { namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); -} - nn::GeneralResult, nn::Timing>> convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, const V1_2::Timing& timing) { return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); } -nn::ExecutionResult, nn::Timing>> -convertExecutionGeneralResults(const hidl_vec& outputShapes, - const V1_2::Timing& timing) { +} // namespace + +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations) { + HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + return supportedOperations; +} + +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& 
preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + auto canonicalOutputShapes = + nn::convert(outputShapes).value_or(std::vector{}); + return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) + << "execution failed with " << toString(status); + } + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); return hal::utils::makeExecutionFailure( convertExecutionGeneralResultsHelper(outputShapes, timing)); } -} // namespace - Return PreparedModelCallback::notify(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_0::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_2::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_3(ErrorStatus status, const sp& preparedModel) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(V1_0::ErrorStatus status) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(V1_0::utils::executionCallback(status)); return Void(); } Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, const hidl_vec& 
outputShapes, const V1_2::Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(V1_2::utils::executionCallback(status, outputShapes, timing)); return Void(); } Return ExecutionCallback::notify_1_3(ErrorStatus status, const hidl_vec& outputShapes, const V1_2::Timing& timing) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(executionCallback(status, outputShapes, timing)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index c89a69f28b..8b7db2b90e 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -685,4 +685,38 @@ nn::GeneralResult> convert(const std::vector convert(const nn::DeviceStatus& deviceStatus) { + return V1_2::utils::convert(deviceStatus); +} + +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference) { + return V1_2::utils::convert(executionPreference); +} + +nn::GeneralResult> convert(const std::vector& extensions) { + return V1_2::utils::convert(extensions); +} + +nn::GeneralResult> convert(const std::vector& handles) { + return V1_2::utils::convert(handles); +} + +nn::GeneralResult> convert( + const std::vector& outputShapes) { + return V1_2::utils::convert(outputShapes); +} + +nn::GeneralResult convert(const nn::DeviceType& deviceType) { + return V1_2::utils::convert(deviceType); +} + +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming) { + return V1_2::utils::convert(measureTiming); +} + +nn::GeneralResult convert(const nn::Timing& timing) { + return V1_2::utils::convert(timing); +} + } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp index 60564985de..d710b85070 100644 --- a/neuralnetworks/1.3/utils/src/Device.cpp +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -69,29 +70,27 @@ nn::GeneralResult>> convert( return hidlPreparedModels; } -nn::GeneralResult convert( - nn::GeneralResult> result) { - return NN_TRY(std::move(result)); +nn::GeneralResult capabilitiesCallback(ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); } -nn::GeneralResult initCapabilities(V1_3::IDevice* device) { +nn::GeneralResult getCapabilitiesFrom(V1_3::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = 
NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_3 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_3(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); +} + +nn::GeneralResult allocationCallback(ErrorStatus status, + const sp& buffer, uint32_t token) { + HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status); + return Buffer::create(buffer, static_cast(token)); } } // namespace @@ -107,12 +106,12 @@ nn::GeneralResult> Device::create(std::string name << "V1_3::utils::Device::create must have non-null device"; } - auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get())); - const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get())); - auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get())); - auto capabilities = NN_TRY(initCapabilities(device.get())); + auto versionString = NN_TRY(V1_2::utils::getVersionStringFrom(device.get())); + const auto deviceType = NN_TRY(V1_2::utils::getDeviceTypeFrom(device.get())); + auto extensions = NN_TRY(V1_2::utils::getSupportedExtensionsFrom(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); const auto numberOfCacheFilesNeeded = - NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get())); + NN_TRY(V1_2::utils::getNumberOfCacheFilesNeededFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared( @@ -177,27 +176,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](ErrorStatus status, const hidl_vec& supportedOperations) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "IDevice::getSupportedOperations_1_3 failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "IDevice::getSupportedOperations_1_3 returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -210,12 +194,12 @@ nn::GeneralResult Device::prepareModel( NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); const auto hidlModel = NN_TRY(convert(modelInShared)); - const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference)); + const auto hidlPreference = NN_TRY(convert(preference)); const auto hidlPriority = NN_TRY(convert(priority)); const auto hidlDeadline = NN_TRY(convert(deadline)); - const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache)); - const auto hidlDataCache = 
NN_TRY(V1_2::utils::convert(dataCache)); - const auto hidlToken = token; + const auto hidlModelCache = NN_TRY(convert(modelCache)); + const auto hidlDataCache = NN_TRY(convert(dataCache)); + const auto hidlToken = V1_2::utils::CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); @@ -224,10 +208,7 @@ nn::GeneralResult Device::prepareModel( kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } @@ -236,9 +217,9 @@ nn::GeneralResult Device::prepareModelFromCache( nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { const auto hidlDeadline = NN_TRY(convert(deadline)); - const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache)); - const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache)); - const auto hidlToken = token; + const auto hidlModelCache = NN_TRY(convert(modelCache)); + const auto hidlDataCache = NN_TRY(convert(dataCache)); + const auto hidlToken = V1_2::utils::CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); @@ -246,10 +227,7 @@ nn::GeneralResult Device::prepareModelFromCache( const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status); return cb->get(); } @@ -263,27 +241,13 @@ nn::GeneralResult Device::allocate( const auto hidlInputRoles = NN_TRY(convert(inputRoles)); const auto hidlOutputRoles = NN_TRY(convert(outputRoles)); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result](ErrorStatus status, const sp& buffer, uint32_t token) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status); - } else if (buffer == nullptr) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr"; - } else if (token == 0) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)"; - } else { - result = convert( - Buffer::create(buffer, static_cast(token))); - } - }; + auto cb = hal::utils::CallbackValue(allocationCallback); const auto ret = kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index 0bae95de87..7b4b7bac3b 100644 --- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ 
b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -45,25 +45,17 @@ namespace android::hardware::neuralnetworks::V1_3::utils {
 namespace {

-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
-                              const V1_2::Timing& timing) {
-    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
-        const hidl_vec<V1_2::OutputShape>& outputShapes, const V1_2::Timing& timing) {
-    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
 nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
-        const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+        ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+    HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
     return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
 }

-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-convertExecuteFencedResults(const hidl_handle& syncFence,
-                            const sp<IFencedExecutionCallback>& callback) {
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fencedExecutionCallback(
+        ErrorStatus status, const hidl_handle& syncFence,
+        const sp<IFencedExecutionCallback>& callback) {
+    HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status);
+
     auto resultSyncFence = nn::SyncFence::createAsSignaled();
     if (syncFence.getNativeHandle() != nullptr) {
         auto sharedHandle = NN_TRY(nn::convert(syncFence));
@@ -78,23 +70,12 @@ convertExecuteFencedResults(const hidl_handle& syncFence,
     // Create callback which can be used to retrieve the execution error status and timings.
     nn::ExecuteFencedInfoCallback resultCallback =
             [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
-        nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> result =
-                NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-        auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched,
-                            const V1_2::Timing& timingFenced) {
-            if (status != ErrorStatus::NONE) {
-                const auto canonical =
-                        nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-                result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status);
-            } else {
-                result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced);
-            }
-        };
+        auto cb = hal::utils::CallbackValue(convertFencedExecutionCallbackResults);

         const auto ret = callback->getExecutionInfo(cb);
         HANDLE_TRANSPORT_FAILURE(ret);

-        return result;
+        return cb.take();
     };

     return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
@@ -103,42 +84,34 @@ convertExecuteFencedResults(const hidl_handle& syncFence,
 }

 }  // namespace

 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        sp<V1_3::IPreparedModel> preparedModel) {
+        sp<V1_3::IPreparedModel> preparedModel, bool executeSynchronously) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
     }

     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
-                                                 std::move(deathHandler));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+                                                 std::move(preparedModel), std::move(deathHandler));
 }

-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_3::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+                             sp<V1_3::IPreparedModel> preparedModel,
                              hal::utils::DeathHandler deathHandler)
-    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+    : kExecuteSynchronously(executeSynchronously),
+      kPreparedModel(std::move(preparedModel)),
+      kDeathHandler(std::move(deathHandler)) {}

 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
                                     const OptionalTimePoint& deadline,
                                     const OptionalTimeoutDuration& loopTimeoutDuration) const {
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
-                              const V1_2::Timing& timing) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
-        } else {
-            result = convertExecutionResults(outputShapes, timing);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(executionCallback);

     const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
                                                               loopTimeoutDuration, cb);
     HANDLE_TRANSPORT_FAILURE(ret);

-    return result;
+    return cb.take();
 }

 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -151,9 +124,8 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming
     const auto ret =
             kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
+    if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     }

     return cb->get();
@@ -169,35 +141,22 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
             hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));

     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure =
-            NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure)));
+    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
     const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
     const auto hidlLoopTimeoutDuration =
             NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));

-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const bool preferSynchronous = true;
+    auto result = kExecuteSynchronously
+                          ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                                 hidlLoopTimeoutDuration)
+                          : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                                  hidlLoopTimeoutDuration);
+    auto [outputShapes, timing] = NN_TRY(std::move(result));

-    // Execute synchronously if allowed.
-    if (preferSynchronous) {
-        result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
-                                      hidlLoopTimeoutDuration);
-    }
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));

-    // Run asynchronous execution if execution has not already completed.
-    if (!result.has_value()) {
-        result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
-                                       hidlLoopTimeoutDuration);
-    }
-
-    // Flush output buffers if successful execution.
-    if (result.has_value()) {
-        NN_TRY(hal::utils::makeExecutionFailure(
-                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-    }
-
-    return result;
+    return std::make_pair(std::move(outputShapes), timing);
 }

 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -212,28 +171,18 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>&
-    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence,
-                        const sp<IFencedExecutionCallback>& callback) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status);
-        } else {
-            result = convertExecuteFencedResults(syncFence, callback);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(fencedExecutionCallback);

     const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
                                                    hidlDeadline, hidlLoopTimeoutDuration,
                                                    hidlTimeoutDurationAfterFence, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
-    auto [syncFence, callback] = NN_TRY(std::move(result));
+    auto [syncFence, callback] = NN_TRY(cb.take());

     // If executeFenced required the request memory to be moved into shared memory, block here until
     // the fenced execution has completed and flush the memory back.
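Note on the executeAsynchronously hunk above: OUTPUT_INSUFFICIENT_SIZE is deliberately exempt from the early return, because in that case the asynchronous callback still delivers the corrected output shapes. A minimal, self-contained sketch of that control flow, using toy stand-ins invented for illustration rather than the real HIDL types:

    #include <iostream>
    #include <string>

    // Toy stand-ins; the real code uses V1_3::ErrorStatus, HANDLE_HAL_STATUS,
    // and an ExecutionCallback.
    enum class ErrorStatus { NONE, OUTPUT_INSUFFICIENT_SIZE, DEVICE_UNAVAILABLE };

    std::string toString(ErrorStatus status) {
        switch (status) {
            case ErrorStatus::NONE: return "NONE";
            case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: return "OUTPUT_INSUFFICIENT_SIZE";
            default: return "DEVICE_UNAVAILABLE";
        }
    }

    // Mirrors the launch-status check: any status other than NONE or
    // OUTPUT_INSUFFICIENT_SIZE fails immediately; OUTPUT_INSUFFICIENT_SIZE is
    // deferred so the callback can still report the per-output shapes.
    std::string launch(ErrorStatus launchStatus) {
        if (launchStatus != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
            launchStatus != ErrorStatus::NONE) {
            return "execution failed with " + toString(launchStatus);
        }
        return "waiting on callback";  // stands in for cb->get()
    }

    int main() {
        std::cout << launch(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) << "\n";  // deferred
        std::cout << launch(ErrorStatus::DEVICE_UNAVAILABLE) << "\n";        // fails now
    }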
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 43bb0c677a..b3989e5878 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -44,9 +44,18 @@ nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWit
 bool hasNoPointerData(const nn::Model& model);
 bool hasNoPointerData(const nn::Request& request);

-// Relocate pointer-based data to shared memory.
+// Relocate pointer-based data to shared memory. If `model` has no Operand::LifeTime::POINTER data,
+// the function returns with a reference to `model`. If `model` has Operand::LifeTime::POINTER data,
+// the model is copied to `maybeModelInSharedOut` with the POINTER data relocated to a memory pool,
+// and the function returns with a reference to `*maybeModelInSharedOut`.
 nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
         const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);

+// Relocate pointer-based data to shared memory. If `request` has no
+// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
+// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
+// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
+// returns with a reference to `*maybeRequestInSharedOut`.
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
         const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);

diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
index 78b2a12918..95a20a8f80 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -79,4 +79,11 @@ nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::Erro
     return makeExecutionFailure(makeGeneralFailure(result, status));
 }

+#define HANDLE_HAL_STATUS(status)                                       \
+    if (const auto canonical = ::android::nn::convert(status).value_or( \
+                ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
+        canonical == ::android::nn::ErrorStatus::NONE) {                \
+    } else                                                              \
+        return NN_ERROR(canonical)
+
 }  // namespace android::hardware::neuralnetworks::utils
\ No newline at end of file
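The HANDLE_HAL_STATUS macro above uses a C++17 if-statement-with-initializer and an empty success branch so the caller can stream additional context onto the returned error. A self-contained toy sketch of the same shape (the Status enum and ErrorBuilder below are invented stand-ins; the real macro converts the HAL status to the canonical nn::ErrorStatus and returns NN_ERROR(canonical)):

    #include <iostream>
    #include <sstream>
    #include <string>

    enum class Status { NONE, GENERAL_FAILURE };

    // Toy stand-in for NN_ERROR(canonical): collects streamed message parts and
    // converts to the enclosing function's return type.
    struct ErrorBuilder {
        std::ostringstream stream;
        template <typename T>
        ErrorBuilder& operator<<(const T& value) {
            stream << value;
            return *this;
        }
        operator std::string() { return stream.str(); }
    };

    // Same shape as HANDLE_HAL_STATUS: empty if-branch on success, early return
    // on failure, and the trailing else lets callers append "<< context".
    #define HANDLE_STATUS(status)       \
        if ((status) == Status::NONE) { \
        } else                          \
            return ErrorBuilder{} << "error: "

    std::string run(Status status) {
        HANDLE_STATUS(status) << "failed with code " << static_cast<int>(status);
        return "ok";
    }

    int main() {
        std::cout << run(Status::NONE) << "\n";             // ok
        std::cout << run(Status::GENERAL_FAILURE) << "\n";  // error: failed with code 1
    }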
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
index 7103c6b375..6679afefec 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
@@ -17,19 +17,60 @@
 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H

+#include <android-base/logging.h>
 #include <android-base/thread_annotations.h>

 #include <condition_variable>
+#include <functional>
 #include <mutex>
 #include <optional>
+#include <type_traits>

 namespace android::hardware::neuralnetworks::utils {

-// This class is thread safe.
+// This class adapts a function pointer and offers two affordances:
+// 1) This class object can be used to generate a callback (via the implicit conversion operator)
+//    that can be used to send the result to `CallbackValue` when called.
+// 2) This class object can be used to retrieve the result of the callback with `take`.
+//
+// This class is thread compatible.
+template <typename ReturnType, typename... ArgTypes>
+class CallbackValue final {
+  public:
+    using FunctionType = std::add_pointer_t<ReturnType(ArgTypes...)>;
+    using CallbackType = std::function<void(ArgTypes...)>;
+
+    explicit CallbackValue(FunctionType fn);
+
+    // Creates a callback that forwards its arguments to `mFunction` and stores the result in
+    // `mReturnValue`.
+    /*implicit*/ operator CallbackType();  // NOLINT(google-explicit-constructor)
+
+    // Take the result of calling `mFunction`.
+    // Precondition: mReturnValue.has_value()
+    // Postcondition: !mReturnValue.has_value()
+    [[nodiscard]] ReturnType take();
+
+  private:
+    std::optional<ReturnType> mReturnValue;
+    FunctionType mFunction;
+};
+
+// Deduction guide for CallbackValue when constructed with a function pointer.
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue(ReturnType (*)(ArgTypes...)) -> CallbackValue<ReturnType, ArgTypes...>;
+
+// Thread-safe container to pass a value between threads.
 template <typename Type>
 class TransferValue final {
   public:
+    // Put the value in `TransferValue`. If `TransferValue` already has a value, this function is a
+    // no-op.
     void put(Type object) const;
+
+    // Take the value stored in `TransferValue`. If no value is available, this function will block
+    // until the value becomes available.
+    // Postcondition: !mObject.has_value()
     [[nodiscard]] Type take() const;

   private:
@@ -38,7 +79,23 @@ class TransferValue final {
     mutable std::optional<Type> mObject GUARDED_BY(mMutex);
 };

-// template implementation
+// template implementations
+
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue<ReturnType, ArgTypes...>::CallbackValue(FunctionType fn) : mFunction(fn) {}
+
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue<ReturnType, ArgTypes...>::operator CallbackType() {
+    return [this](ArgTypes... args) { mReturnValue = mFunction(args...); };
+}
+
+template <typename ReturnType, typename... ArgTypes>
+ReturnType CallbackValue<ReturnType, ArgTypes...>::take() {
+    CHECK(mReturnValue.has_value());
+    std::optional<ReturnType> object;
+    std::swap(object, mReturnValue);
+    return std::move(object).value();
+}

 template <typename Type>
 void TransferValue<Type>::put(Type object) const {
@@ -56,6 +113,7 @@ Type TransferValue<Type>::take() const {
     std::unique_lock lock(mMutex);
     base::ScopedLockAssertion lockAssertion(mMutex);
     mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); });
+    CHECK(mObject.has_value());
     std::optional<Type> object;
     std::swap(object, mObject);
     return std::move(object).value();
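A hypothetical usage sketch for CallbackValue as declared above. Everything except CallbackValue itself (the toy status enum, conversion function, fake driver call, and the assumed include path) is invented for illustration:

    #include <cassert>
    #include <functional>
    #include <string>
    #include <utility>

    #include <nnapi/hal/TransferValue.h>  // assumed include path for the header above

    using android::hardware::neuralnetworks::utils::CallbackValue;

    enum class ErrorStatus { NONE, GENERAL_FAILURE };  // toy status

    // Conversion function; its signature drives the deduction guide.
    std::pair<ErrorStatus, std::string> convertResult(ErrorStatus status,
                                                      const std::string& msg) {
        return {status, msg};
    }

    // Stand-in for a HIDL method that accepts a std::function<void(...)> callback.
    void fakeDriverCall(const std::function<void(ErrorStatus, const std::string&)>& cb) {
        cb(ErrorStatus::NONE, "done");
    }

    int main() {
        // Deduces CallbackValue<std::pair<ErrorStatus, std::string>,
        //                       ErrorStatus, const std::string&>.
        auto cb = CallbackValue(convertResult);
        fakeDriverCall(cb);                  // implicit conversion to CallbackType
        auto [status, message] = cb.take();  // retrieve the converted result
        assert(status == ErrorStatus::NONE && message == "done");
    }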
From bf59946c61f57fd3a17c7b0f5a7e77588c343394 Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Tue, 15 Dec 2020 15:20:26 -0800
Subject: [PATCH 4/4] Remove 'blocking' param from NN ResilientPreparedModel
 and *Buffer

This change removes the 'blocking' parameter from the
ResilientPreparedModel::Factory and ResilientBuffer::Factory. The
'blocking' parameter is only useful for ResilientDevice::Factory, which
behind the scenes chooses between the HIDL calls IDevice::getService and
IDevice::tryGetService. The equivalent calls for IPreparedModel and
IBuffer are not used, as both are created from the IDevice object.

This change also modifies the ResilientDevice's device recovery
behavior. Prior to this change, ResilientDevice's recovery mechanism had
the following behavior:
* attempt to call a function
* if the function did not return a DEAD_OBJECT error, return
* if the function returned a DEAD_OBJECT error, attempt to recover the
  device
* whether or not the recovery succeeded, call the function again

This CL changes the behavior so that if device recovery fails,
ResilientDevice will not call the function the second time.

Bug: N/A
Test: mma
Change-Id: Icf37d05c884c740178324fcd046ea56914ef7d44
Merged-In: Icf37d05c884c740178324fcd046ea56914ef7d44
(cherry picked from commit 11761e37a864761382303a75ab29bb0fc0f716c3)
---
 .../include/nnapi/hal/ResilientBuffer.h       |  2 +-
 .../include/nnapi/hal/ResilientDevice.h       | 15 ++---
 .../nnapi/hal/ResilientPreparedModel.h        |  2 +-
 .../utils/common/src/ResilientBuffer.cpp      |  2 +-
 .../utils/common/src/ResilientDevice.cpp      | 65 +++++++++----------
 .../common/src/ResilientPreparedModel.cpp     |  2 +-
 6 files changed, 42 insertions(+), 46 deletions(-)

diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
index 996ec1ee81..9d5e3e6a05 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
@@ -34,7 +34,7 @@ class ResilientBuffer final : public nn::IBuffer {
     struct PrivateConstructorTag {};

   public:
-    using Factory = std::function<nn::GeneralResult<nn::SharedBuffer>(bool blocking)>;
+    using Factory = std::function<nn::GeneralResult<nn::SharedBuffer>()>;

     static nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> create(Factory makeBuffer);

diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4bfed6cd51..84ae799aad 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -46,8 +46,8 @@ class ResilientDevice final : public nn::IDevice,
                     nn::Capabilities capabilities, nn::SharedDevice device);

     nn::SharedDevice getDevice() const EXCLUDES(mMutex);
-    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const
-            EXCLUDES(mMutex);
+    nn::GeneralResult<nn::SharedDevice> recover(const nn::IDevice* failingDevice,
+                                                bool blocking) const EXCLUDES(mMutex);

     const std::string& getName() const override;
     const std::string& getVersionString() const override;
@@ -81,17 +81,14 @@ class ResilientDevice final : public nn::IDevice,
   private:
     bool isValidInternal() const EXCLUDES(mMutex);
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
-            bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
-            nn::Priority priority, nn::OptionalTimePoint deadline,
-            const std::vector<nn::SharedHandle>& modelCache,
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
             const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
-            bool blocking, nn::OptionalTimePoint deadline,
-            const std::vector<nn::SharedHandle>& modelCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
             const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
     nn::GeneralResult<nn::SharedBuffer> allocateInternal(
-            bool blocking, const nn::BufferDesc& desc,
-            const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
             const std::vector<nn::BufferRole>& inputRoles,
             const std::vector<nn::BufferRole>& outputRoles) const;

diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index d86c88be32..faae673ba7 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -34,7 +34,7 @@ class ResilientPreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};

   public:
-    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>(bool blocking)>;
+    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>()>;

     static nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> create(
             Factory makePreparedModel);

diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
index 984295b729..cf5496ac39 100644
--- a/neuralnetworks/utils/common/src/ResilientBuffer.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
@@ -36,7 +36,7 @@ nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> ResilientBuffer::creat
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                << "utils::ResilientBuffer::create must have non-empty makeBuffer";
     }
-    auto buffer = NN_TRY(makeBuffer(/*blocking=*/true));
+    auto buffer = NN_TRY(makeBuffer());
     CHECK(buffer != nullptr);
     return std::make_shared<ResilientBuffer>(PrivateConstructorTag{}, std::move(makeBuffer),
                                              std::move(buffer));
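To make the Factory change concrete, here is a hedged sketch with toy types (GeneralResult, Buffer, and the helper below are invented stand-ins, not the real nn types) showing the nullary factory in use:

    #include <functional>
    #include <memory>

    // Toy stand-ins for nn::GeneralResult and nn::SharedBuffer.
    struct Buffer {};
    using SharedBuffer = std::shared_ptr<const Buffer>;
    template <typename T>
    struct GeneralResult {  // toy: always success
        T value;
    };

    // Before: using Factory = std::function<GeneralResult<SharedBuffer>(bool blocking)>;
    // After:
    using Factory = std::function<GeneralResult<SharedBuffer>()>;

    GeneralResult<SharedBuffer> allocateOnDevice() {
        return {std::make_shared<const Buffer>()};
    }

    int main() {
        // The lambda captures everything it needs; choosing between getService
        // and tryGetService stays inside ResilientDevice's own factory.
        Factory makeBuffer = [] { return allocateOnDevice(); };
        auto buffer = makeBuffer().value;
    }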
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 2f83c5c5bd..6ad3fadee6 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -49,7 +49,17 @@ auto protect(const ResilientDevice& resilientDevice, const FnType& fn, bool bloc
         return result;
     }

-    device = resilientDevice.recover(device.get(), blocking);
+    // Attempt recovery and return if it fails.
+    auto maybeDevice = resilientDevice.recover(device.get(), blocking);
+    if (!maybeDevice.has_value()) {
+        const auto& [resultErrorMessage, resultErrorCode] = result.error();
+        const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeDevice.error();
+        return nn::error(resultErrorCode)
+               << resultErrorMessage << ", and failed to recover dead device with error "
+               << recoveryErrorCode << ": " << recoveryErrorMessage;
+    }
+    device = std::move(maybeDevice).value();
+
     return fn(*device);
 }

@@ -94,7 +104,8 @@ nn::SharedDevice ResilientDevice::getDevice() const {
     return mDevice;
 }

-nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool blocking) const {
+nn::GeneralResult<nn::SharedDevice> ResilientDevice::recover(const nn::IDevice* failingDevice,
+                                                             bool blocking) const {
     std::lock_guard guard(mMutex);

     // Another caller updated the failing device.
@@ -102,13 +113,7 @@ nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool
         return mDevice;
     }

-    auto maybeDevice = kMakeDevice(blocking);
-    if (!maybeDevice.has_value()) {
-        const auto& [message, code] = maybeDevice.error();
-        LOG(ERROR) << "Failed to recover dead device with error " << code << ": " << message;
-        return mDevice;
-    }
-    auto device = std::move(maybeDevice).value();
+    auto device = NN_TRY(kMakeDevice(blocking));

     // If recovered device has different metadata than what is cached (i.e., because it was
     // updated), mark the device as invalid and preserve the cached data.
@@ -176,11 +181,11 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
-    ResilientPreparedModel::Factory makePreparedModel =
-            [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
-             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
-        return device->prepareModelInternal(blocking, model, preference, priority, deadline,
-                                            modelCache, dataCache, token);
+    ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model,
+                                                         preference, priority, deadline, modelCache,
+                                                         dataCache, token] {
+        return device->prepareModelInternal(model, preference, priority, deadline, modelCache,
+                                            dataCache, token);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
 }

@@ -189,11 +194,9 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCach
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
-    ResilientPreparedModel::Factory makePreparedModel =
-            [device = std::move(self), deadline, modelCache, dataCache,
-             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
-        return device->prepareModelFromCacheInternal(blocking, deadline, modelCache, dataCache,
-                                                     token);
+    ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), deadline,
+                                                         modelCache, dataCache, token] {
+        return device->prepareModelFromCacheInternal(deadline, modelCache, dataCache, token);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
 }

@@ -203,10 +206,9 @@ nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocate(
         const std::vector<nn::BufferRole>& inputRoles,
         const std::vector<nn::BufferRole>& outputRoles) const {
     auto self = shared_from_this();
-    ResilientBuffer::Factory makeBuffer =
-            [device = std::move(self), desc, preparedModels, inputRoles,
-             outputRoles](bool blocking) -> nn::GeneralResult<nn::SharedBuffer> {
-        return device->allocateInternal(blocking, desc, preparedModels, inputRoles, outputRoles);
+    ResilientBuffer::Factory makeBuffer = [device = std::move(self), desc, preparedModels,
+                                           inputRoles, outputRoles] {
+        return device->allocateInternal(desc, preparedModels, inputRoles, outputRoles);
     };
     return ResilientBuffer::create(std::move(makeBuffer));
 }

@@ -217,9 +219,8 @@ bool ResilientDevice::isValidInternal() const {
     return mIsValid;
 }

 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
-        bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
-        nn::Priority priority, nn::OptionalTimePoint deadline,
-        const std::vector<nn::SharedHandle>& modelCache,
+        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
@@ -229,12 +230,11 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
                                    token);
     };
-    return protect(*this, fn, blocking);
+    return protect(*this, fn, /*blocking=*/false);
 }

 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
-        bool blocking, nn::OptionalTimePoint deadline,
-        const std::vector<nn::SharedHandle>& modelCache,
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
@@ -242,12 +242,11 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCach
     const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
         return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
     };
-    return protect(*this, fn, blocking);
+    return protect(*this, fn, /*blocking=*/false);
 }

 nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocateInternal(
-        bool blocking, const nn::BufferDesc& desc,
-        const std::vector<nn::SharedPreparedModel>& preparedModels,
+        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
         const std::vector<nn::BufferRole>& inputRoles,
         const std::vector<nn::BufferRole>& outputRoles) const {
     if (!isValidInternal()) {
@@ -256,7 +255,7 @@ nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocateInternal(
     const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) {
         return device.allocate(desc, preparedModels, inputRoles, outputRoles);
     };
-    return protect(*this, fn, blocking);
+    return protect(*this, fn, /*blocking=*/false);
 }

 }  // namespace android::hardware::neuralnetworks::utils
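A toy model of the revised protect()/recover() flow described in the commit message: recovery is attempted once after a DEAD_OBJECT failure, and if recovery also fails, the two errors are merged and the call is not retried. The variant-based result type below is an invented stand-in for nn::GeneralResult:

    #include <iostream>
    #include <string>
    #include <variant>

    struct Device { std::string name; };                     // toy device
    using DeviceResult = std::variant<Device, std::string>;  // device or error message

    DeviceResult makeDevice(bool serviceIsUp) {
        if (serviceIsUp) return Device{"recovered-device"};
        return std::string{"getService returned nullptr"};
    }

    // Mirrors the new behavior: after a DEAD_OBJECT failure, recovery runs
    // once; if it also fails, both errors are merged and the call is NOT retried.
    std::string callWithRecovery(bool serviceIsUp) {
        const std::string callError = "IPC failed with DEAD_OBJECT";
        DeviceResult maybeDevice = makeDevice(serviceIsUp);
        if (std::holds_alternative<std::string>(maybeDevice)) {
            return callError + ", and failed to recover dead device with error: " +
                   std::get<std::string>(maybeDevice);
        }
        return "retried call on " + std::get<Device>(maybeDevice).name;
    }

    int main() {
        std::cout << callWithRecovery(true) << "\n";   // retried call on recovered-device
        std::cout << callWithRecovery(false) << "\n";  // merged error, no second call
    }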
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 012a1dedc3..b8acee16c9 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -36,7 +36,7 @@ nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> ResilientPrepar
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                << "utils::ResilientPreparedModel::create must have non-empty makePreparedModel";
     }
-    auto preparedModel = NN_TRY(makePreparedModel(/*blocking=*/true));
+    auto preparedModel = NN_TRY(makePreparedModel());
     CHECK(preparedModel != nullptr);
     return std::make_shared<ResilientPreparedModel>(
             PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel));
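Finally, a hypothetical end-to-end sketch of the TransferValue container introduced earlier in this series: one thread publishes a value with put() while another blocks in take(). The include path is inferred from the file location above and is an assumption:

    #include <iostream>
    #include <string>
    #include <thread>

    #include <nnapi/hal/TransferValue.h>  // assumed include path

    using android::hardware::neuralnetworks::utils::TransferValue;

    int main() {
        TransferValue<std::string> transfer;

        // Producer publishes exactly one value; a second put() would be a no-op.
        std::thread producer([&transfer] { transfer.put("asynchronous result"); });

        // take() blocks until the value arrives, then leaves the container empty.
        std::cout << transfer.take() << "\n";
        producer.join();
    }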