diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
index 65b75e5d82..3b32e1dbf9 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
@@ -27,8 +27,31 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
+// this function returns with the supported operations as indicated by a driver. On failure, this
+// function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations);
+
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_OC_MR1. On failure, this function returns with the appropriate
+// nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with an empty output shape vector and no timing information. On failure, this function
+// returns with the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -41,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback,
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -57,8 +79,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index ee103bacf5..db3b2ad44f 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -32,8 +32,12 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Class that adapts V1_0::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index 31f366dadc..2de182871d 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -29,8 +29,12 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
 class PreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};
 
@@ -44,13 +48,13 @@ class PreparedModel final : public nn::IPreparedModel {
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp
index b1259c3c56..ea3ea56de6 100644
--- a/neuralnetworks/1.0/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp
@@ -27,69 +27,62 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 
-namespace android::hardware::neuralnetworks::V1_0::utils {
-namespace {
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+    return supportedOperations;
 }
 
-}  // namespace
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status) {
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    return {};
+}
 
 Return<void> PreparedModelCallback::notify(ErrorStatus status,
                                            const sp<IPreparedModel>& preparedModel) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(ErrorStatus status) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(executionCallback(status));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 285c515c20..93bd81a19c 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -38,27 +39,27 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_0::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
@@ -74,7 +75,7 @@ nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name
                << "V1_0::utils::Device::create must have non-null device";
     }
 
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
 
     return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
@@ -131,27 +132,12 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -170,10 +156,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
     const auto ret = kDevice->prepareModel(hidlModel, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 46dd3f8254..c0c22fbd6a 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -34,13 +34,15 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
         sp<V1_0::IPreparedModel> preparedModel) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
@@ -55,7 +57,7 @@ PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_0::IPreparedMo
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming /*measure*/,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
@@ -68,10 +70,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
 
     const auto ret = kPreparedModel->execute(hidlRequest, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "execute failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
 
     auto result = NN_TRY(cb->get());
     NN_TRY(hal::utils::makeExecutionFailure(
@@ -81,11 +80,12 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(
-        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
-        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(const nn::Request& /*request*/,
+                             const std::vector<nn::SyncFence>& /*waitFor*/,
+                             nn::MeasureTiming /*measure*/,
+                             const nn::OptionalTimePoint& /*deadline*/,
+                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
+                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
 }
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
index f64646257f..5d0769f14c 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
@@ -51,6 +51,10 @@ nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
 nn::GeneralResult<Model> convert(const nn::Model& model);
 nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
+
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_CONVERSIONS_H
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index c1e95fe1a5..5e224b5018 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -32,8 +32,12 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_1::utils {
 
+// Class that adapts V1_1::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index 359f68ad4d..b47f25a68c 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -275,4 +275,16 @@ nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& ex
     return validatedConvert(executionPreference);
 }
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+    return V1_0::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
+    return V1_0::utils::convert(request);
+}
+
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
+    return V1_0::utils::convert(status);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index f73d3f8253..3197ef4ac3 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -39,27 +39,27 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_1::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_1::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_1::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_1(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
@@ -75,7 +75,7 @@ nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name
                << "V1_1::utils::Device::create must have non-null device";
     }
 
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
 
     return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
@@ -132,28 +132,12 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](V1_0::ErrorStatus status,
-                                const hidl_vec<bool>& supportedOperations) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations_1_1 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations_1_1 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -173,10 +157,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
     const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
index bc7d92ac83..ba3c1ba1db 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
@@ -31,8 +31,24 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_Q. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with the output shapes and the timing information. On failure, this function returns with
+// the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -48,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback,
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute_1_2 asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -66,8 +81,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index 5dcbc0bb79..6fd13379ef 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -97,6 +97,12 @@ nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>&
 nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
 nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference);
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_CONVERSIONS_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index a68830d86e..b4bef5ee0a 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -32,14 +32,29 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
-nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device);
-nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device);
-nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device);
-nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+// Retrieves the version string from the provided device object. On failure, this function returns
+// with the appropriate nn::GeneralError.
+nn::GeneralResult<std::string> getVersionStringFrom(V1_2::IDevice* device);
+
+// Retrieves the device type from the provided device object. On failure, this function returns with
+// the appropriate nn::GeneralError.
+nn::GeneralResult<nn::DeviceType> getDeviceTypeFrom(V1_2::IDevice* device);
+
+// Retrieves the extensions supported by the provided device object. On failure, this function
+// returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<nn::Extension>> getSupportedExtensionsFrom(V1_2::IDevice* device);
+
+// Retrieves the number of model cache files and data cache files needed by the provided device
+// object. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> getNumberOfCacheFilesNeededFrom(
         V1_2::IDevice* device);
 
+// Class that adapts V1_2::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 65e1e8aa3f..6a56a82f99 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -30,28 +30,32 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+// Class that adapts V1_2::IPreparedModel to nn::IPreparedModel.
 class PreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};
 
   public:
     static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
-            sp<V1_2::IPreparedModel> preparedModel);
+            sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously);
 
-    PreparedModel(PrivateConstructorTag tag, sp<V1_2::IPreparedModel> preparedModel,
-                  hal::utils::DeathHandler deathHandler);
+    PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
+                  sp<V1_2::IPreparedModel> preparedModel, hal::utils::DeathHandler deathHandler);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
@@ -61,6 +65,7 @@ class PreparedModel final : public nn::IPreparedModel {
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
             const V1_0::Request& request, MeasureTiming measure) const;
 
+    const bool kExecuteSynchronously;
     const sp<V1_2::IPreparedModel> kPreparedModel;
     const hal::utils::DeathHandler kDeathHandler;
 };
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 70149a2d3a..c289fc89ab 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -30,6 +30,8 @@
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+using CacheToken = hidl_array<uint8_t, static_cast<size_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
 constexpr auto kDefaultMesaureTiming = MeasureTiming::NO;
 constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                                   .timeInDriver = std::numeric_limits<uint64_t>::max()};
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
index 39f88c2c5e..fefa122101 100644
--- a/neuralnetworks/1.2/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -36,107 +37,79 @@
 
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<V1_0::IPreparedModel>& preparedModel) {
-    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
-}
-
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                      const Timing& timing) {
     return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionGeneralResults(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+}  // namespace
+
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+    if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        auto canonicalOutputShapes =
+                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
+        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+               << "execution failed with " << toString(status);
+    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     return hal::utils::makeExecutionFailure(
             convertExecutionGeneralResultsHelper(outputShapes, timing));
 }
 
-}  // namespace
-
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const sp<V1_0::IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                                const sp<IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(V1_0::utils::executionCallback(status));
     return Void();
 }
 
 Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                            const hidl_vec<OutputShape>& outputShapes,
                                            const Timing& timing) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
-    }
+    mData.put(executionCallback(status, outputShapes, timing));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index f11474fd60..062f6f712f 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -43,7 +44,9 @@ constexpr std::underlying_type_t<Type> underlyingType(Type value) {
     return static_cast<std::underlying_type_t<Type>>(value);
 }
 
+using HalDuration = std::chrono::duration<uint64_t, std::micro>;
 constexpr auto kVersion = android::nn::Version::ANDROID_Q;
+constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
 
 }  // namespace
 
@@ -270,7 +273,18 @@ GeneralResult<MeasureTiming> unvalidatedConvert(const hal::V1_2::MeasureTiming&
 }
 
 GeneralResult<Timing> unvalidatedConvert(const hal::V1_2::Timing& timing) {
-    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
+    constexpr uint64_t kMaxTiming = std::chrono::floor<HalDuration>(Duration::max()).count();
+    constexpr auto convertTiming = [](uint64_t halTiming) -> OptionalDuration {
+        if (halTiming == kNoTiming) {
+            return {};
+        }
+        if (halTiming > kMaxTiming) {
+            return Duration::max();
+        }
+        return HalDuration{halTiming};
+    };
+    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
+                  .timeInDriver = convertTiming(timing.timeInDriver)};
 }
 
 GeneralResult<Extension> unvalidatedConvert(const hal::V1_2::Extension& extension) {
@@ -547,7 +561,14 @@ nn::GeneralResult<MeasureTiming> unvalidatedConvert(const nn::MeasureTiming& mea
 }
 
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
-    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
+    constexpr auto convertTiming = [](nn::OptionalDuration canonicalTiming) -> uint64_t {
+        if (!canonicalTiming.has_value()) {
+            return kNoTiming;
+        }
+        return std::chrono::ceil<HalDuration>(*canonicalTiming).count();
+    };
+    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
+                  .timeInDriver = convertTiming(timing.timeInDriver)};
 }
 
 nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
@@ -602,4 +623,21 @@ nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputSha
     return validatedConvert(outputShapes);
 }
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+    return V1_1::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
+    return V1_1::utils::convert(request);
+}
+
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
+    return V1_1::utils::convert(status);
+}
+
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference) {
+    return V1_1::utils::convert(executionPreference);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index 0061065f0b..9fe0de25b3 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -41,112 +41,108 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<std::string> versionStringCallback(V1_0::ErrorStatus status,
+                                                     const hidl_string& versionString) {
+    HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status);
+    return versionString;
+}
+
+nn::GeneralResult<nn::DeviceType> deviceTypeCallback(V1_0::ErrorStatus status,
+                                                     DeviceType deviceType) {
+    HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status);
+    return nn::convert(deviceType);
+}
+
+nn::GeneralResult<std::vector<nn::Extension>> supportedExtensionsCallback(
+        V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
+    HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status);
+    return nn::convert(extensions);
+}
+
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> numberOfCacheFilesNeededCallback(
+        V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+    HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status);
+    if (numModelCache > nn::kMaxNumberOfCacheFiles) {
+        return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater "
+                             "than allowed max ("
+                          << numModelCache << " vs " << nn::kMaxNumberOfCacheFiles << ")";
+    }
+    if (numDataCache > nn::kMaxNumberOfCacheFiles) {
+        return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numDataCache files greater "
+                             "than allowed max ("
+                          << numDataCache << " vs " << nn::kMaxNumberOfCacheFiles << ")";
+    }
+    return std::make_pair(numModelCache, numDataCache);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_2(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
 
-nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device) {
+nn::GeneralResult<std::string> getVersionStringFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::string> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                            << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status);
-        } else {
-            result = versionString;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(versionStringCallback);
 
     const auto ret = device->getVersionString(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device) {
+nn::GeneralResult<nn::DeviceType> getDeviceTypeFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::DeviceType> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                               << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status);
-        } else {
-            result = nn::convert(deviceType);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(deviceTypeCallback);
 
     const auto ret = device->getType(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device) {
+nn::GeneralResult<std::vector<nn::Extension>> getSupportedExtensionsFrom(V1_2::IDevice* device) {
    CHECK(device != nullptr);
 
-    nn::GeneralResult<std::vector<nn::Extension>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status);
-        } else {
-            result = nn::convert(extensions);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedExtensionsCallback);
 
     const auto ret = device->getSupportedExtensions(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> getNumberOfCacheFilesNeededFrom(
         V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::pair<uint32_t, uint32_t>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache,
-                              uint32_t numDataCache) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getNumberOfCacheFilesNeeded failed with " << toString(status);
-        } else {
-            result = std::make_pair(numModelCache, numDataCache);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(numberOfCacheFilesNeededCallback);
 
     const auto ret = device->getNumberOfCacheFilesNeeded(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
@@ -160,11 +156,11 @@ nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name
                << "V1_2::utils::Device::create must have non-null device";
     }
 
-    auto versionString = NN_TRY(initVersionString(device.get()));
-    const auto deviceType = NN_TRY(initDeviceType(device.get()));
-    auto extensions = NN_TRY(initExtensions(device.get()));
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
-    const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get()));
+    auto versionString = NN_TRY(getVersionStringFrom(device.get()));
+    const auto deviceType = NN_TRY(getDeviceTypeFrom(device.get()));
+    auto extensions = NN_TRY(getSupportedExtensionsFrom(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
+    const auto numberOfCacheFilesNeeded = NN_TRY(getNumberOfCacheFilesNeededFrom(device.get()));
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
 
     return std::make_shared<const Device>(
@@ -229,28 +225,12 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](V1_0::ErrorStatus status,
-                                const hidl_vec<bool>& supportedOperations) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations_1_2 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations_1_2 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -263,10 +243,10 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
             NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
-    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlPreference = NN_TRY(convert(preference));
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlToken = CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -274,10 +254,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
     const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
                                                hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
@@ -287,17 +264,14 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlToken = CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
 
     const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index dad9a7e74b..6d00082a5f 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -37,55 +37,37 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
-namespace {
-
-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
-    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
-        const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
-    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
-}  // namespace
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        sp<V1_2::IPreparedModel> preparedModel) {
+        sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
-                                                 std::move(deathHandler));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+                                                 std::move(preparedModel), std::move(deathHandler));
 }
 
-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_2::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+                             sp<V1_2::IPreparedModel> preparedModel,
                              hal::utils::DeathHandler deathHandler)
-    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+    : kExecuteSynchronously(executeSynchronously),
+      kPreparedModel(std::move(preparedModel)),
+      kDeathHandler(std::move(deathHandler)) {}
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const {
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
-                              const Timing& timing) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
-        } else {
-            result = convertExecutionResults(outputShapes, timing);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(executionCallback);
 
     const auto ret = kPreparedModel->executeSynchronously(request, measure, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -95,9 +77,8 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming
 
     const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "execute failed with " << toString(status);
+    if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
@@ -106,51 +87,38 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
             hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
 
-    const auto hidlRequest =
-            NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared)));
+    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
 
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const bool preferSynchronous = true;
+    auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure)
+                                        : executeAsynchronously(hidlRequest, hidlMeasure);
+    auto [outputShapes, timing] = NN_TRY(std::move(result));
 
-    // Execute synchronously if allowed.
-    if (preferSynchronous) {
-        result = executeSynchronously(hidlRequest, hidlMeasure);
-    }
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
 
-    // Run asymchronous execution if execution has not already completed.
-    if (!result.has_value()) {
-        result = executeAsynchronously(hidlRequest, hidlMeasure);
-    }
-
-    // Flush output buffers if suxcessful execution.
-    if (result.has_value()) {
-        NN_TRY(hal::utils::makeExecutionFailure(
-                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-    }
-
-    return result;
+    return std::make_pair(std::move(outputShapes), timing);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(
-        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
-        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(const nn::Request& /*request*/,
+                             const std::vector<nn::SyncFence>& /*waitFor*/,
+                             nn::MeasureTiming /*measure*/,
+                             const nn::OptionalTimePoint& /*deadline*/,
+                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
+                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
-    sp<IPreparedModel> resource = kPreparedModel;
+    sp<V1_2::IPreparedModel> resource = kPreparedModel;
     return resource;
 }
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
index 637179de33..fda79c88c1 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
@@ -24,8 +24,12 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+// Class that adapts V1_3::IBuffer to nn::IBuffer.
 class Buffer final : public nn::IBuffer {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
index d46b111701..643172e192 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
@@ -34,8 +34,31 @@
 #include
 #include
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
+// this function returns with the supported operations as indicated by a driver. On failure, this
+// function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations);
+
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_R. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with the output shapes and the timing information. On failure, this function returns with
+// the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+        const V1_2::Timing& timing);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -52,11 +75,10 @@ class PreparedModelCallback final : public IPreparedModelCallback,
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute_1_3 asynchronously.
class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -73,8 +95,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h index 9653a05da7..74a6534aff 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h @@ -44,7 +44,7 @@ GeneralResult unvalidatedConvert( const hal::V1_3::Request::MemoryPool& memoryPool); GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimePoint& optionalTimePoint); -GeneralResult unvalidatedConvert( +GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration); GeneralResult unvalidatedConvert(const hal::V1_3::ErrorStatus& errorStatus); @@ -54,7 +54,7 @@ GeneralResult convert(const hal::V1_3::Model& model); GeneralResult convert(const hal::V1_3::BufferDesc& bufferDesc); GeneralResult convert(const hal::V1_3::Request& request); GeneralResult convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint); -GeneralResult convert( +GeneralResult convert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration); GeneralResult convert(const hal::V1_3::ErrorStatus& errorStatus); @@ -86,7 +86,7 @@ nn::GeneralResult unvalidatedConvert( nn::GeneralResult unvalidatedConvert( const nn::OptionalTimePoint& optionalTimePoint); nn::GeneralResult unvalidatedConvert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration); + const nn::OptionalDuration& optionalTimeoutDuration); nn::GeneralResult unvalidatedConvert(const nn::ErrorStatus& errorStatus); nn::GeneralResult convert(const nn::Priority& priority); @@ -96,13 +96,24 @@ nn::GeneralResult convert(const nn::BufferDesc& bufferDesc); nn::GeneralResult convert(const nn::Request& request); nn::GeneralResult convert(const nn::OptionalTimePoint& optionalTimePoint); nn::GeneralResult convert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration); + const nn::OptionalDuration& optionalTimeoutDuration); nn::GeneralResult convert(const nn::ErrorStatus& errorStatus); nn::GeneralResult convert(const nn::SharedHandle& handle); nn::GeneralResult convert(const nn::Memory& memory); nn::GeneralResult> convert(const std::vector& bufferRoles); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference); +nn::GeneralResult> convert(const std::vector& extensions); +nn::GeneralResult> convert(const std::vector& handles); +nn::GeneralResult> convert( + const std::vector& outputShapes); +nn::GeneralResult convert(const nn::DeviceType& deviceType); +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming); +nn::GeneralResult convert(const nn::Timing& timing); + } // namespace android::hardware::neuralnetworks::V1_3::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h index 0f5234bd26..84f606a357 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h @@ -32,8 +32,12 @@ #include #include +// See 
hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { +// Class that adapts V1_3::IDevice to nn::IDevice. class Device final : public nn::IDevice { struct PrivateConstructorTag {}; diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h index e0d69dd7c6..664d87a7c2 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -29,28 +29,32 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { +// Class that adapts V1_3::IPreparedModel to nn::IPreparedModel. class PreparedModel final : public nn::IPreparedModel { struct PrivateConstructorTag {}; public: static nn::GeneralResult> create( - sp preparedModel); + sp preparedModel, bool executeSynchronously); - PreparedModel(PrivateConstructorTag tag, sp preparedModel, - hal::utils::DeathHandler deathHandler); + PreparedModel(PrivateConstructorTag tag, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler); nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; @@ -62,6 +66,7 @@ class PreparedModel final : public nn::IPreparedModel { const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline, const OptionalTimeoutDuration& loopTimeoutDuration) const; + const bool kExecuteSynchronously; const sp kPreparedModel; const hal::utils::DeathHandler kDeathHandler; }; diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp index ffdeccdf62..614033e268 100644 --- a/neuralnetworks/1.3/utils/src/Buffer.cpp +++ b/neuralnetworks/1.3/utils/src/Buffer.cpp @@ -33,17 +33,18 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes. 
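A note on `HANDLE_HAL_STATUS`, which the rewritten methods below lean on: it is defined later in this patch (HandleError.h) and expands to an if/else so that an error message can be streamed onto the invocation. A use such as the one in `Buffer::copyTo` behaves roughly like this hand-expanded sketch:

```cpp
// Hand-expansion (sketch) of:
//     HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status);
if (const auto canonical = ::android::nn::convert(status).value_or(
            ::android::nn::ErrorStatus::GENERAL_FAILURE);
    canonical == ::android::nn::ErrorStatus::NONE) {
    // Success: fall through to the statements after the macro.
} else {
    return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status);
}
```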
+ namespace android::hardware::neuralnetworks::V1_3::utils { nn::GeneralResult> Buffer::create( sp buffer, nn::Request::MemoryDomainToken token) { if (buffer == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::Buffer::create must have non-null buffer"; + return NN_ERROR() << "V1_3::utils::Buffer::create must have non-null buffer"; } if (token == static_cast(0)) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::Buffer::create must have non-zero token"; + return NN_ERROR() << "V1_3::utils::Buffer::create must have non-zero token"; } return std::make_shared(PrivateConstructorTag{}, std::move(buffer), token); @@ -65,10 +66,7 @@ nn::GeneralResult Buffer::copyTo(const nn::Memory& dst) const { const auto ret = kBuffer->copyTo(hidlDst); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status); return {}; } @@ -80,10 +78,7 @@ nn::GeneralResult Buffer::copyFrom(const nn::Memory& src, const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status); return {}; } diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp index e3c6074549..af76e6a87e 100644 --- a/neuralnetworks/1.3/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -39,139 +40,99 @@ #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. 
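Before the Callbacks.cpp changes below: the patch replaces the per-version `convertPreparedModel` helpers with free `*Callback` functions, so a single conversion routine can back both the synchronous `hal::utils::CallbackValue` path and the asynchronous HIDL callback objects. A hedged sketch of a caller of the new two-argument `PreparedModel::create` (the `adapt` wrapper itself is hypothetical):

```cpp
// Hypothetical wrapper: adapt a driver-returned IPreparedModel into the canonical type.
// `executeSynchronously` later selects executeSynchronously_1_3 versus execute_1_3.
nn::GeneralResult<nn::SharedPreparedModel> adapt(sp<V1_3::IPreparedModel> halPreparedModel,
                                                 bool executeSynchronously) {
    return NN_TRY(PreparedModel::create(std::move(halPreparedModel), executeSynchronously));
}
```

`prepareModelCallback` below does exactly this, with `executeSynchronously` fixed to true.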
+ namespace android::hardware::neuralnetworks::V1_3::utils { namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); -} - nn::GeneralResult, nn::Timing>> convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, const V1_2::Timing& timing) { return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); } -nn::ExecutionResult, nn::Timing>> -convertExecutionGeneralResults(const hidl_vec& outputShapes, - const V1_2::Timing& timing) { +} // namespace + +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations) { + HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + return supportedOperations; +} + +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + auto canonicalOutputShapes = + nn::convert(outputShapes).value_or(std::vector{}); + return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) + << "execution failed with " << toString(status); + } + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); return hal::utils::makeExecutionFailure( convertExecutionGeneralResultsHelper(outputShapes, timing)); } -} // namespace - Return PreparedModelCallback::notify(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_0::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_2::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_3(ErrorStatus status, const sp& preparedModel) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if 
(preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(V1_0::ErrorStatus status) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(V1_0::utils::executionCallback(status)); return Void(); } Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, const hidl_vec& outputShapes, const V1_2::Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(V1_2::utils::executionCallback(status, outputShapes, timing)); return Void(); } Return ExecutionCallback::notify_1_3(ErrorStatus status, const hidl_vec& outputShapes, const V1_2::Timing& timing) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(executionCallback(status, outputShapes, timing)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index 949dd0d1ed..8b7db2b90e 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -272,47 +272,26 @@ GeneralResult unvalidatedConvert( GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimePoint& optionalTimePoint) { - constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count(); - const auto makeTimePoint = [](uint64_t count) -> GeneralResult { - if (count > kTimePointMaxCount) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimePoint because the count exceeds " - "the max"; - } - const auto nanoseconds = std::chrono::nanoseconds{count}; - return TimePoint{nanoseconds}; - }; - using Discriminator = hal::V1_3::OptionalTimePoint::hidl_discriminator; switch (optionalTimePoint.getDiscriminator()) { case Discriminator::none: - return std::nullopt; + 
return {}; case Discriminator::nanosecondsSinceEpoch: - return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch()); + return TimePoint{Duration{optionalTimePoint.nanosecondsSinceEpoch()}}; } return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid OptionalTimePoint discriminator " << underlyingType(optionalTimePoint.getDiscriminator()); } -GeneralResult unvalidatedConvert( +GeneralResult unvalidatedConvert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) { - constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count(); - const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult { - if (count > kTimeoutDurationMaxCount) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimeoutDuration because the count " - "exceeds the max"; - } - return TimeoutDuration{count}; - }; - using Discriminator = hal::V1_3::OptionalTimeoutDuration::hidl_discriminator; switch (optionalTimeoutDuration.getDiscriminator()) { case Discriminator::none: - return std::nullopt; + return {}; case Discriminator::nanoseconds: - return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds()); + return Duration(optionalTimeoutDuration.nanoseconds()); } return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid OptionalTimeoutDuration discriminator " @@ -360,7 +339,7 @@ GeneralResult convert(const hal::V1_3::OptionalTimePoint& opt return validatedConvert(optionalTimePoint); } -GeneralResult convert( +GeneralResult convert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) { return validatedConvert(optionalTimeoutDuration); } @@ -629,27 +608,16 @@ nn::GeneralResult unvalidatedConvert( OptionalTimePoint ret; if (optionalTimePoint.has_value()) { const auto count = optionalTimePoint.value().time_since_epoch().count(); - if (count < 0) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimePoint because time since epoch " - "count is " - "negative"; - } ret.nanosecondsSinceEpoch(count); } return ret; } nn::GeneralResult unvalidatedConvert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration) { + const nn::OptionalDuration& optionalTimeoutDuration) { OptionalTimeoutDuration ret; if (optionalTimeoutDuration.has_value()) { const auto count = optionalTimeoutDuration.value().count(); - if (count < 0) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Unable to unvalidatedConvert OptionalTimeoutDuration because count is " - "negative"; - } ret.nanoseconds(count); } return ret; @@ -697,7 +665,7 @@ nn::GeneralResult convert(const nn::OptionalTimePoint& option } nn::GeneralResult convert( - const nn::OptionalTimeoutDuration& optionalTimeoutDuration) { + const nn::OptionalDuration& optionalTimeoutDuration) { return validatedConvert(optionalTimeoutDuration); } @@ -717,4 +685,38 @@ nn::GeneralResult> convert(const std::vector convert(const nn::DeviceStatus& deviceStatus) { + return V1_2::utils::convert(deviceStatus); +} + +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference) { + return V1_2::utils::convert(executionPreference); +} + +nn::GeneralResult> convert(const std::vector& extensions) { + return V1_2::utils::convert(extensions); +} + +nn::GeneralResult> convert(const std::vector& handles) { + return V1_2::utils::convert(handles); +} + +nn::GeneralResult> convert( + const std::vector& outputShapes) { + return V1_2::utils::convert(outputShapes); +} + +nn::GeneralResult convert(const nn::DeviceType& deviceType) { + return 
V1_2::utils::convert(deviceType); +} + +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming) { + return V1_2::utils::convert(measureTiming); +} + +nn::GeneralResult convert(const nn::Timing& timing) { + return V1_2::utils::convert(timing); +} + } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp index 82837bac73..d710b85070 100644 --- a/neuralnetworks/1.3/utils/src/Device.cpp +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -47,6 +48,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { namespace { @@ -66,29 +70,27 @@ nn::GeneralResult>> convert( return hidlPreparedModels; } -nn::GeneralResult convert( - nn::GeneralResult> result) { - return NN_TRY(std::move(result)); +nn::GeneralResult capabilitiesCallback(ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); } -nn::GeneralResult initCapabilities(V1_3::IDevice* device) { +nn::GeneralResult getCapabilitiesFrom(V1_3::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_3 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_3(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); +} + +nn::GeneralResult allocationCallback(ErrorStatus status, + const sp& buffer, uint32_t token) { + HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status); + return Buffer::create(buffer, static_cast(token)); } } // namespace @@ -104,12 +106,12 @@ nn::GeneralResult> Device::create(std::string name << "V1_3::utils::Device::create must have non-null device"; } - auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get())); - const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get())); - auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get())); - auto capabilities = NN_TRY(initCapabilities(device.get())); + auto versionString = NN_TRY(V1_2::utils::getVersionStringFrom(device.get())); + const auto deviceType = NN_TRY(V1_2::utils::getDeviceTypeFrom(device.get())); + auto extensions = NN_TRY(V1_2::utils::getSupportedExtensionsFrom(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); const auto numberOfCacheFilesNeeded = - NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get())); + NN_TRY(V1_2::utils::getNumberOfCacheFilesNeededFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared( @@ -174,27 +176,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = 
NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](ErrorStatus status, const hidl_vec& supportedOperations) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "IDevice::getSupportedOperations_1_3 failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "IDevice::getSupportedOperations_1_3 returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -207,12 +194,12 @@ nn::GeneralResult Device::prepareModel( NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); const auto hidlModel = NN_TRY(convert(modelInShared)); - const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference)); + const auto hidlPreference = NN_TRY(convert(preference)); const auto hidlPriority = NN_TRY(convert(priority)); const auto hidlDeadline = NN_TRY(convert(deadline)); - const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache)); - const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache)); - const auto hidlToken = token; + const auto hidlModelCache = NN_TRY(convert(modelCache)); + const auto hidlDataCache = NN_TRY(convert(dataCache)); + const auto hidlToken = V1_2::utils::CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); @@ -221,10 +208,7 @@ nn::GeneralResult Device::prepareModel( kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } @@ -233,9 +217,9 @@ nn::GeneralResult Device::prepareModelFromCache( nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { const auto hidlDeadline = NN_TRY(convert(deadline)); - const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache)); - const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache)); - const auto hidlToken = token; + const auto hidlModelCache = NN_TRY(convert(modelCache)); + const auto hidlDataCache = NN_TRY(convert(dataCache)); + const auto hidlToken = V1_2::utils::CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); @@ -243,10 +227,7 @@ nn::GeneralResult Device::prepareModelFromCache( const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status); - } + 
HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status); return cb->get(); } @@ -260,27 +241,13 @@ nn::GeneralResult Device::allocate( const auto hidlInputRoles = NN_TRY(convert(inputRoles)); const auto hidlOutputRoles = NN_TRY(convert(outputRoles)); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result](ErrorStatus status, const sp& buffer, uint32_t token) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status); - } else if (buffer == nullptr) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr"; - } else if (token == 0) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)"; - } else { - result = convert( - Buffer::create(buffer, static_cast(token))); - } - }; + auto cb = hal::utils::CallbackValue(allocationCallback); const auto ret = kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index 49b9b0bcc3..7b4b7bac3b 100644 --- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -39,28 +39,23 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. + namespace android::hardware::neuralnetworks::V1_3::utils { namespace { -nn::GeneralResult, nn::Timing>> -convertExecutionResultsHelper(const hidl_vec& outputShapes, - const V1_2::Timing& timing) { - return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); -} - -nn::ExecutionResult, nn::Timing>> convertExecutionResults( - const hidl_vec& outputShapes, const V1_2::Timing& timing) { - return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing)); -} - nn::GeneralResult> convertFencedExecutionCallbackResults( - const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) { + ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) { + HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status); return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced))); } -nn::GeneralResult> -convertExecuteFencedResults(const hidl_handle& syncFence, - const sp& callback) { +nn::GeneralResult> fencedExecutionCallback( + ErrorStatus status, const hidl_handle& syncFence, + const sp& callback) { + HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status); + auto resultSyncFence = nn::SyncFence::createAsSignaled(); if (syncFence.getNativeHandle() != nullptr) { auto sharedHandle = NN_TRY(nn::convert(syncFence)); @@ -75,23 +70,12 @@ convertExecuteFencedResults(const hidl_handle& syncFence, // Create callback which can be used to retrieve the execution error status and timings. 
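// The callback interface object is captured by value (a strong sp reference), so the returned
// nn::ExecuteFencedInfoCallback can still query the driver after executeFenced has returned.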
nn::ExecuteFencedInfoCallback resultCallback = [callback]() -> nn::GeneralResult> { - nn::GeneralResult> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched, - const V1_2::Timing& timingFenced) { - if (status != ErrorStatus::NONE) { - const auto canonical = - nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status); - } else { - result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced); - } - }; + auto cb = hal::utils::CallbackValue(convertFencedExecutionCallbackResults); const auto ret = callback->getExecutionInfo(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); }; return std::make_pair(std::move(resultSyncFence), std::move(resultCallback)); @@ -100,42 +84,34 @@ convertExecuteFencedResults(const hidl_handle& syncFence, } // namespace nn::GeneralResult> PreparedModel::create( - sp preparedModel) { + sp preparedModel, bool executeSynchronously) { if (preparedModel == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::PreparedModel::create must have non-null preparedModel"; + return NN_ERROR() << "V1_3::utils::PreparedModel::create must have non-null preparedModel"; } auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); - return std::make_shared(PrivateConstructorTag{}, std::move(preparedModel), - std::move(deathHandler)); + return std::make_shared(PrivateConstructorTag{}, executeSynchronously, + std::move(preparedModel), std::move(deathHandler)); } -PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp preparedModel, +PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler) - : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {} + : kExecuteSynchronously(executeSynchronously), + kPreparedModel(std::move(preparedModel)), + kDeathHandler(std::move(deathHandler)) {} nn::ExecutionResult, nn::Timing>> PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline, const OptionalTimeoutDuration& loopTimeoutDuration) const { - nn::ExecutionResult, nn::Timing>> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](ErrorStatus status, const hidl_vec& outputShapes, - const V1_2::Timing& timing) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status); - } else { - result = convertExecutionResults(outputShapes, timing); - } - }; + auto cb = hal::utils::CallbackValue(executionCallback); const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline, loopTimeoutDuration, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::ExecutionResult, nn::Timing>> @@ -148,9 +124,8 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming const auto ret = kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "executeAsynchronously 
failed with " << toString(status); + if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); } return cb->get(); @@ -159,49 +134,36 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming nn::ExecutionResult, nn::Timing>> PreparedModel::execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration) const { // Ensure that request is ready for IPC. std::optional maybeRequestInShared; const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared))); const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared))); - const auto hidlMeasure = - NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure))); + const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure))); const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline))); const auto hidlLoopTimeoutDuration = NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration))); - nn::ExecutionResult, nn::Timing>> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const bool preferSynchronous = true; + auto result = kExecuteSynchronously + ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline, + hidlLoopTimeoutDuration) + : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline, + hidlLoopTimeoutDuration); + auto [outputShapes, timing] = NN_TRY(std::move(result)); - // Execute synchronously if allowed. - if (preferSynchronous) { - result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline, - hidlLoopTimeoutDuration); - } + NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); - // Run asymchronous execution if execution has not already completed. - if (!result.has_value()) { - result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline, - hidlLoopTimeoutDuration); - } - - // Flush output buffers if suxcessful execution. - if (result.has_value()) { - NN_TRY(hal::utils::makeExecutionFailure( - hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); - } - - return result; + return std::make_pair(std::move(outputShapes), timing); } nn::GeneralResult> PreparedModel::executeFenced(const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const { + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const { // Ensure that request is ready for IPC. 
std::optional maybeRequestInShared; const nn::Request& requestInShared = @@ -209,28 +171,18 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence, - const sp& callback) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status); - } else { - result = convertExecuteFencedResults(syncFence, callback); - } - }; + auto cb = hal::utils::CallbackValue(fencedExecutionCallback); const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration, hidlTimeoutDurationAfterFence, cb); HANDLE_TRANSPORT_FAILURE(ret); - auto [syncFence, callback] = NN_TRY(std::move(result)); + auto [syncFence, callback] = NN_TRY(cb.take()); // If executeFenced required the request memory to be moved into shared memory, block here until // the fenced execution has completed and flush the memory back. diff --git a/neuralnetworks/utils/README.md b/neuralnetworks/utils/README.md index 0dee103811..45ca0b442f 100644 --- a/neuralnetworks/utils/README.md +++ b/neuralnetworks/utils/README.md @@ -1,11 +1,11 @@ # NNAPI Conversions `convert` fails if either the source type or the destination type is invalid, and it yields a valid -object if the conversion succeeds. For example, let's say that an enumeration in the current -version has fewer possible values than the "same" canonical enumeration, such as `OperationType`. -The new value of `HARD_SWISH` (introduced in Android R / NN HAL 1.3) does not map to any valid -existing value in `OperationType`, but an older value of `ADD` (introduced in Android OC-MR1 / NN -HAL 1.0) is valid. This can be seen in the following model conversions: +object if the conversion succeeds. For example, let's say that an enumeration in the current version +has fewer possible values than the "same" canonical enumeration, such as `OperationType`. The new +value of `HARD_SWISH` (introduced in Android R / NN HAL 1.3) does not map to any valid existing +value in `OperationType`, but an older value of `ADD` (introduced in Android OC-MR1 / NN HAL 1.0) is +valid. This can be seen in the following model conversions: ```cpp // Unsuccessful conversion @@ -48,3 +48,50 @@ The `convert` functions operate only on types that used in a HIDL method call di `unvalidatedConvert` functions operate on types that are either used in a HIDL method call directly (i.e., not as a nested class) or used in a subsequent version of the NN HAL. Prefer using `convert` over `unvalidatedConvert`. + +# HIDL Interface Lifetimes across Processes + +Some notes about HIDL interface objects and lifetimes across processes: + +All HIDL interface objects inherit from `IBase`, which itself inherits from `::android::RefBase`. As +such, all HIDL interface objects are reference counted and must be owned through `::android::sp` (or +referenced through `::android::wp`). Allocating `RefBase` objects on the stack will log errors and +may result in crashes, and deleting a `RefBase` object through another means (e.g., "delete", +"free", or RAII-cleanup through `std::unique_ptr` or some equivalent) will result in double-free +and/or use-after-free undefined behavior. + +HIDL/Binder manages the reference count of HIDL interface objects automatically across processes. 
If +a process that references (but did not create) the HIDL interface object dies, HIDL/Binder ensures +any reference count it held is properly released. (Caveat: it might be possible that HIDL/Binder +behaves strangely with `::android::wp` references.) + +If the process which created the HIDL interface object dies, any call on this object from another +process will result in a HIDL transport error with the code `DEAD_OBJECT`. + +# Protecting Asynchronous Calls across HIDL + +Some notes about asynchronous calls across HIDL: + +For synchronous calls across HIDL, if an error occurs after the function was called but before it +returns, HIDL will return a transport error. For example, if the message cannot be delivered to the +server process or if the server process dies before returning a result, HIDL will return from the +function with the appropriate transport error in the `Return<>` object, which can be queried with +`Return<>::isOk()`, `Return<>::isDeadObject()`, `Return<>::description()`, etc. + +However, HIDL offers no such error management in the case of asynchronous calls. By default, if the +client launches an asynchronous task and the server fails to return a result through the callback, +the client will be left waiting indefinitely for a result it will never receive. + +In the NNAPI, `IDevice::prepareModel*` and `IPreparedModel::execute*` (but not +`IPreparedModel::executeSynchronously*`) are asynchronous calls across HIDL. Specifically, these +asynchronous functions are called with a HIDL interface callback object (`IPreparedModelCallback` for +`IDevice::prepareModel*` and `IExecutionCallback` for `IPreparedModel::execute*`) and are expected +to quickly return, and the results are returned at a later time through these callback objects. + +To protect against the case when the server dies after the asynchronous task was called successfully +but before the results could be returned, HIDL provides an object called a "`hidl_death_recipient`," +which can be used to detect when an interface object (and more generally, the server process) has +died. nnapi/hal/ProtectCallback.h's `DeathHandler` uses `hidl_death_recipient`s to detect when the +driver process has died, and `DeathHandler` will unblock any thread waiting on the results of an +`IProtectedCallback` callback object that may otherwise not be signaled. In order for this to work, +the `IProtectedCallback` object must have been registered via `DeathHandler::protectCallback()`. diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h index 43bb0c677a..b3989e5878 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h @@ -44,9 +44,18 @@ nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWit bool hasNoPointerData(const nn::Model& model); bool hasNoPointerData(const nn::Request& request); -// Relocate pointer-based data to shared memory. +// Relocate pointer-based data to shared memory. If `model` has no Operand::LifeTime::POINTER data, +// the function returns with a reference to `model`. If `model` has Operand::LifeTime::POINTER data, +// the model is copied to `maybeModelInSharedOut` with the POINTER data relocated to a memory pool, +// and the function returns with a reference to `*maybeModelInSharedOut`. 
nn::GeneralResult> flushDataFromPointerToShared( const nn::Model* model, std::optional* maybeModelInSharedOut); + +// Relocate pointer-based data to shared memory. If `request` has no +// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If +// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to +// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function +// returns with a reference to `*maybeRequestInSharedOut`. nn::GeneralResult> flushDataFromPointerToShared( const nn::Request* request, std::optional* maybeRequestInSharedOut); diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h index 78b2a12918..95a20a8f80 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h @@ -79,4 +79,11 @@ nn::ExecutionResult makeExecutionFailure(nn::Result result, nn::Erro return makeExecutionFailure(makeGeneralFailure(result, status)); } +#define HANDLE_HAL_STATUS(status) \ + if (const auto canonical = ::android::nn::convert(status).value_or( \ + ::android::nn::ErrorStatus::GENERAL_FAILURE); \ + canonical == ::android::nn::ErrorStatus::NONE) { \ + } else \ + return NN_ERROR(canonical) + } // namespace android::hardware::neuralnetworks::utils \ No newline at end of file diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h index 4b32b4e3af..985cddb2c2 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h @@ -32,13 +32,13 @@ class InvalidPreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; }; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h index 85bd6137ee..c9218857ac 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h @@ -28,6 +28,9 @@ #include #include +// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface +// lifetimes across processes and for protecting asynchronous calls across HIDL. 
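For reference, the protection pattern that `DeathHandler` and `IProtectedCallback` (declared just below) enable is the one already visible in Device.cpp above; condensed:

```cpp
// Condensed from V1_3::utils::Device::prepareModel in this patch. The scoped death
// recipient guarantees that cb->get() cannot block forever if the driver process dies.
const auto cb = sp<PreparedModelCallback>::make();
const auto scoped = kDeathHandler.protectCallback(cb.get());

const auto ret = kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
                                           hidlModelCache, hidlDataCache, hidlToken, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
return cb->get();  // unblocked by notify_1_3() or by notifyAsDeadObject()
```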
+ namespace android::hardware::neuralnetworks::utils { class IProtectedCallback { diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h index 996ec1ee81..9d5e3e6a05 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h @@ -34,7 +34,7 @@ class ResilientBuffer final : public nn::IBuffer { struct PrivateConstructorTag {}; public: - using Factory = std::function(bool blocking)>; + using Factory = std::function()>; static nn::GeneralResult> create(Factory makeBuffer); diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h index 4bfed6cd51..84ae799aad 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h @@ -46,8 +46,8 @@ class ResilientDevice final : public nn::IDevice, nn::Capabilities capabilities, nn::SharedDevice device); nn::SharedDevice getDevice() const EXCLUDES(mMutex); - nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const - EXCLUDES(mMutex); + nn::GeneralResult recover(const nn::IDevice* failingDevice, + bool blocking) const EXCLUDES(mMutex); const std::string& getName() const override; const std::string& getVersionString() const override; @@ -81,17 +81,14 @@ class ResilientDevice final : public nn::IDevice, private: bool isValidInternal() const EXCLUDES(mMutex); nn::GeneralResult prepareModelInternal( - bool blocking, const nn::Model& model, nn::ExecutionPreference preference, - nn::Priority priority, nn::OptionalTimePoint deadline, - const std::vector& modelCache, + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const; nn::GeneralResult prepareModelFromCacheInternal( - bool blocking, nn::OptionalTimePoint deadline, - const std::vector& modelCache, + nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const; nn::GeneralResult allocateInternal( - bool blocking, const nn::BufferDesc& desc, - const std::vector& preparedModels, + const nn::BufferDesc& desc, const std::vector& preparedModels, const std::vector& inputRoles, const std::vector& outputRoles) const; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h index c2940d16bc..faae673ba7 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h @@ -34,7 +34,7 @@ class ResilientPreparedModel final : public nn::IPreparedModel { struct PrivateConstructorTag {}; public: - using Factory = std::function(bool blocking)>; + using Factory = std::function()>; static nn::GeneralResult> create( Factory makePreparedModel); @@ -49,13 +49,13 @@ class ResilientPreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration) const override; nn::GeneralResult> executeFenced( const nn::Request& 
request, const std::vector& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const override; std::any getUnderlyingResource() const override; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h index 7103c6b375..6679afefec 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h @@ -17,19 +17,60 @@ #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H +#include #include #include +#include #include #include +#include namespace android::hardware::neuralnetworks::utils { -// This class is thread safe. +// This class adapts a function pointer and offers two affordances: +// 1) This class object can be used to generate a callback (via the implicit conversion operator) +// that can be used to send the result to `CallbackValue` when called. +// 2) This class object can be used to retrieve the result of the callback with `take`. +// +// This class is thread compatible. +template +class CallbackValue final { + public: + using FunctionType = std::add_pointer_t; + using CallbackType = std::function; + + explicit CallbackValue(FunctionType fn); + + // Creates a callback that forwards its arguments to `mFunction` and stores the result in + // `mReturnValue`. + /*implicit*/ operator CallbackType(); // NOLINT(google-explicit-constructor) + + // Take the result of calling `mFunction`. + // Precondition: mReturnValue.has_value() + // Postcondition: !mReturnValue.has_value() + [[nodiscard]] ReturnType take(); + + private: + std::optional mReturnValue; + FunctionType mFunction; +}; + +// Deduction guides for CallbackValue when constructed with a function pointer. +template +CallbackValue(ReturnType (*)(ArgTypes...))->CallbackValue; + +// Thread-safe container to pass a value between threads. template class TransferValue final { public: + // Put the value in `TransferValue`. If `TransferValue` already has a value, this function is a + // no-op. void put(Type object) const; + + // Take the value stored in `TransferValue`. If no value is available, this function will block + // until the value becomes available. + // Postcondition: !mObject.has_value() [[nodiscard]] Type take() const; private: @@ -38,7 +79,23 @@ class TransferValue final { mutable std::optional mObject GUARDED_BY(mMutex); }; -// template implementation +// template implementations + +template +CallbackValue::CallbackValue(FunctionType fn) : mFunction(fn) {} + +template +CallbackValue::operator CallbackType() { + return [this](ArgTypes... 
args) { mReturnValue = mFunction(args...); }; +} + +template +ReturnType CallbackValue::take() { + CHECK(mReturnValue.has_value()); + std::optional object; + std::swap(object, mReturnValue); + return std::move(object).value(); +} template void TransferValue::put(Type object) const { @@ -56,6 +113,7 @@ Type TransferValue::take() const { std::unique_lock lock(mMutex); base::ScopedLockAssertion lockAssertion(mMutex); mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); }); + CHECK(mObject.has_value()); std::optional object; std::swap(object, mObject); return std::move(object).value(); diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp index 9ae7a63949..a46f4ac574 100644 --- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp +++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp @@ -29,7 +29,7 @@ namespace android::hardware::neuralnetworks::utils { nn::ExecutionResult, nn::Timing>> InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/) const { return NN_ERROR() << "InvalidPreparedModel"; } @@ -37,8 +37,8 @@ nn::GeneralResult> InvalidPreparedModel::executeFenced( const nn::Request& /*request*/, const std::vector& /*waitFor*/, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/, - const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { return NN_ERROR() << "InvalidPreparedModel"; } diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp index 984295b729..cf5496ac39 100644 --- a/neuralnetworks/utils/common/src/ResilientBuffer.cpp +++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp @@ -36,7 +36,7 @@ nn::GeneralResult> ResilientBuffer::creat return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "utils::ResilientBuffer::create must have non-empty makeBuffer"; } - auto buffer = NN_TRY(makeBuffer(/*blocking=*/true)); + auto buffer = NN_TRY(makeBuffer()); CHECK(buffer != nullptr); return std::make_shared(PrivateConstructorTag{}, std::move(makeBuffer), std::move(buffer)); diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp index 2f83c5c5bd..6ad3fadee6 100644 --- a/neuralnetworks/utils/common/src/ResilientDevice.cpp +++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp @@ -49,7 +49,17 @@ auto protect(const ResilientDevice& resilientDevice, const FnType& fn, bool bloc return result; } - device = resilientDevice.recover(device.get(), blocking); + // Attempt recovery and return if it fails. 
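+ // Unlike the old behavior (log the failure and silently keep the stale device), a failed
+ // recovery now surfaces both the original error and the recovery error to the caller.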
+ auto maybeDevice = resilientDevice.recover(device.get(), blocking); + if (!maybeDevice.has_value()) { + const auto& [resultErrorMessage, resultErrorCode] = result.error(); + const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeDevice.error(); + return nn::error(resultErrorCode) + << resultErrorMessage << ", and failed to recover dead device with error " + << recoveryErrorCode << ": " << recoveryErrorMessage; + } + device = std::move(maybeDevice).value(); + return fn(*device); } @@ -94,7 +104,8 @@ nn::SharedDevice ResilientDevice::getDevice() const { return mDevice; } -nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool blocking) const { +nn::GeneralResult ResilientDevice::recover(const nn::IDevice* failingDevice, + bool blocking) const { std::lock_guard guard(mMutex); // Another caller updated the failing device. @@ -102,13 +113,7 @@ nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool return mDevice; } - auto maybeDevice = kMakeDevice(blocking); - if (!maybeDevice.has_value()) { - const auto& [message, code] = maybeDevice.error(); - LOG(ERROR) << "Failed to recover dead device with error " << code << ": " << message; - return mDevice; - } - auto device = std::move(maybeDevice).value(); + auto device = NN_TRY(kMakeDevice(blocking)); // If recovered device has different metadata than what is cached (i.e., because it was // updated), mark the device as invalid and preserve the cached data. @@ -176,11 +181,11 @@ nn::GeneralResult ResilientDevice::prepareModel( nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { auto self = shared_from_this(); - ResilientPreparedModel::Factory makePreparedModel = - [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache, - token](bool blocking) -> nn::GeneralResult { - return device->prepareModelInternal(blocking, model, preference, priority, deadline, - modelCache, dataCache, token); + ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model, + preference, priority, deadline, modelCache, + dataCache, token] { + return device->prepareModelInternal(model, preference, priority, deadline, modelCache, + dataCache, token); }; return ResilientPreparedModel::create(std::move(makePreparedModel)); } @@ -189,11 +194,9 @@ nn::GeneralResult ResilientDevice::prepareModelFromCach nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { auto self = shared_from_this(); - ResilientPreparedModel::Factory makePreparedModel = - [device = std::move(self), deadline, modelCache, dataCache, - token](bool blocking) -> nn::GeneralResult { - return device->prepareModelFromCacheInternal(blocking, deadline, modelCache, dataCache, - token); + ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), deadline, + modelCache, dataCache, token] { + return device->prepareModelFromCacheInternal(deadline, modelCache, dataCache, token); }; return ResilientPreparedModel::create(std::move(makePreparedModel)); } @@ -203,10 +206,9 @@ nn::GeneralResult ResilientDevice::allocate( const std::vector& inputRoles, const std::vector& outputRoles) const { auto self = shared_from_this(); - ResilientBuffer::Factory makeBuffer = - [device = std::move(self), desc, preparedModels, inputRoles, - outputRoles](bool blocking) -> nn::GeneralResult { - return device->allocateInternal(blocking, desc, preparedModels, inputRoles, 
outputRoles); + ResilientBuffer::Factory makeBuffer = [device = std::move(self), desc, preparedModels, + inputRoles, outputRoles] { + return device->allocateInternal(desc, preparedModels, inputRoles, outputRoles); }; return ResilientBuffer::create(std::move(makeBuffer)); } @@ -217,9 +219,8 @@ bool ResilientDevice::isValidInternal() const { } nn::GeneralResult ResilientDevice::prepareModelInternal( - bool blocking, const nn::Model& model, nn::ExecutionPreference preference, - nn::Priority priority, nn::OptionalTimePoint deadline, - const std::vector& modelCache, + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { if (!isValidInternal()) { return std::make_shared(); @@ -229,12 +230,11 @@ nn::GeneralResult ResilientDevice::prepareModelInternal return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache, token); }; - return protect(*this, fn, blocking); + return protect(*this, fn, /*blocking=*/false); } nn::GeneralResult ResilientDevice::prepareModelFromCacheInternal( - bool blocking, nn::OptionalTimePoint deadline, - const std::vector& modelCache, + nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { if (!isValidInternal()) { return std::make_shared(); @@ -242,12 +242,11 @@ nn::GeneralResult ResilientDevice::prepareModelFromCach const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) { return device.prepareModelFromCache(deadline, modelCache, dataCache, token); }; - return protect(*this, fn, blocking); + return protect(*this, fn, /*blocking=*/false); } nn::GeneralResult ResilientDevice::allocateInternal( - bool blocking, const nn::BufferDesc& desc, - const std::vector& preparedModels, + const nn::BufferDesc& desc, const std::vector& preparedModels, const std::vector& inputRoles, const std::vector& outputRoles) const { if (!isValidInternal()) { @@ -256,7 +255,7 @@ nn::GeneralResult ResilientDevice::allocateInternal( const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) { return device.allocate(desc, preparedModels, inputRoles, outputRoles); }; - return protect(*this, fn, blocking); + return protect(*this, fn, /*blocking=*/false); } } // namespace android::hardware::neuralnetworks::utils diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp index 1c9ecba4f6..b8acee16c9 100644 --- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp +++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp @@ -36,7 +36,7 @@ nn::GeneralResult> ResilientPrepar return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "utils::ResilientPreparedModel::create must have non-empty makePreparedModel"; } - auto preparedModel = NN_TRY(makePreparedModel(/*blocking=*/true)); + auto preparedModel = NN_TRY(makePreparedModel()); CHECK(preparedModel != nullptr); return std::make_shared( PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel)); @@ -64,16 +64,17 @@ nn::SharedPreparedModel ResilientPreparedModel::recover( nn::ExecutionResult, nn::Timing>> ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& 
loopTimeoutDuration) const { return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration); } nn::GeneralResult> -ResilientPreparedModel::executeFenced( - const nn::Request& request, const std::vector& waitFor, - nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalTimeoutDuration& loopTimeoutDuration, - const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const { +ResilientPreparedModel::executeFenced(const nn::Request& request, + const std::vector& waitFor, + nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence) const { return getPreparedModel()->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration, timeoutDurationAfterFence); }
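Finally, to make the new `CallbackValue`/`TransferValue` machinery concrete, a small usage sketch; the `doubleIfOk` function and `example` driver are hypothetical, and it assumes the canonical `NN_ERROR` helper from nnapi/Result.h:

```cpp
#include <functional>

#include <nnapi/Result.h>
#include <nnapi/hal/TransferValue.h>

namespace nn = ::android::nn;
using ::android::hardware::neuralnetworks::utils::CallbackValue;

// Hypothetical conversion function, in the style of prepareModelCallback/executionCallback.
nn::GeneralResult<int> doubleIfOk(bool ok, int value) {
    if (!ok) return NN_ERROR() << "callback reported failure";
    return 2 * value;
}

void example() {
    // The deduction guide picks CallbackValue<nn::GeneralResult<int>, bool, int>.
    auto cb = CallbackValue(doubleIfOk);

    // Implicit conversion to std::function<void(bool, int)>, as when passing `cb`
    // directly to a HIDL method that takes a result callback.
    std::function<void(bool, int)> asHidlCallback = cb;

    asHidlCallback(/*ok=*/true, /*value=*/21);  // the "server" delivers its result
    const auto result = cb.take();              // the caller retrieves it exactly once
    // result.value() == 42
}
```

`TransferValue` plays the same role when the result must cross threads: the HIDL callback object `put()`s the converted value and the waiting caller `take()`s it, with `DeathHandler` guaranteeing the waiter is eventually unblocked.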