diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h index 2e00fcecf3..3b32e1dbf9 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h @@ -32,6 +32,26 @@ namespace android::hardware::neuralnetworks::V1_0::utils { +// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success, +// this function returns with the supported operations as indicated by a driver. On failure, this +// function returns with the appropriate nn::GeneralError. +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations); + +// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this +// function returns with a non-null nn::SharedPreparedModel with a feature level of +// nn::Version::ANDROID_OC_MR1. On failure, this function returns with the appropriate +// nn::GeneralError. +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel); + +// Converts the results of IDevice::execute* to the NN canonical format. On success, this function +// returns with an empty output shape vector and no timing information. On failure, this function +// returns with the appropriate nn::ExecutionError. +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status); + +// A HIDL callback class to receive the results of IDevice::prepareModel asynchronously. class PreparedModelCallback final : public IPreparedModelCallback, public hal::utils::IProtectedCallback { public: @@ -44,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback, Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; +// A HIDL callback class to receive the results of IDevice::execute asynchronously. class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -60,8 +79,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp index a0bdb3cd99..ea3ea56de6 100644 --- a/neuralnetworks/1.0/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -36,63 +37,52 @@ // lifetimes across processes and for protecting asynchronous calls across HIDL. 
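[Reviewer note] The `notifyInternal` indirection on both callback classes disappears because each `notify*` override can now `put` the adapted result straight into `mData`. For readers unfamiliar with `hal::utils::TransferValue`, here is a minimal sketch of the put/take contract this relies on. This is an assumption for illustration; the real class in the HAL utility headers may differ in detail (for instance, it likely aborts on a double `put`).

```cpp
#include <condition_variable>
#include <mutex>
#include <optional>

// Minimal sketch of the TransferValue contract assumed above.
template <typename Type>
class TransferValue final {
  public:
    // Called at most once, from the HIDL callback thread.
    void put(Type object) {
        {
            std::lock_guard guard(mMutex);
            mObject.emplace(std::move(object));
        }
        mCondition.notify_all();
    }

    // Blocks until put() has been called, then consumes the value.
    [[nodiscard]] Type take() {
        std::unique_lock lock(mMutex);
        mCondition.wait(lock, [this] { return mObject.has_value(); });
        return std::move(mObject).value();
    }

  private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::optional<Type> mObject;
};
```

Under that contract, `get()` simply forwards to `take()`, blocking the runtime thread until the driver's callback thread has delivered a value.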
namespace android::hardware::neuralnetworks::V1_0::utils { -namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations) { + HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + return supportedOperations; } -} // namespace +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status) { + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + return {}; +} Return PreparedModelCallback::notify(ErrorStatus status, const sp& preparedModel) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(ErrorStatus status) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(executionCallback(status)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp index 83e0015689..93bd81a19c 100644 --- a/neuralnetworks/1.0/utils/src/Device.cpp +++ b/neuralnetworks/1.0/utils/src/Device.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -44,24 +45,21 @@ namespace android::hardware::neuralnetworks::V1_0::utils { namespace { -nn::GeneralResult initCapabilities(V1_0::IDevice* device) { +nn::GeneralResult capabilitiesCallback(ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); +} + +nn::GeneralResult getCapabilitiesFrom(V1_0::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - 
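[Reviewer note] Each of the new `*Callback` adapters opens with `HANDLE_HAL_STATUS(status) << "..."`, which replaces the repeated convert-and-return blocks deleted here. A plausible expansion, assuming the macro follows the same convert/`value_or` pattern it replaces (the real definition lives in the shared HAL error-handling header and may differ):

```cpp
// Plausible expansion of HANDLE_HAL_STATUS, for illustration only. On
// ErrorStatus::NONE the empty if-branch is taken and execution falls through;
// otherwise the else-branch returns an NN_ERROR built from the canonical
// status, and the caller's trailing `<< "message"` streams onto that error.
#define HANDLE_HAL_STATUS(status)                                         \
    if (const auto canonical = ::android::nn::convert(status).value_or(   \
                ::android::nn::ErrorStatus::GENERAL_FAILURE);             \
        canonical == ::android::nn::ErrorStatus::NONE) {                  \
    } else                                                                \
        return NN_ERROR(canonical)
```

This is why the call sites read like a statement with a dangling stream: the `<<` chain belongs to the `return` inside the macro's `else`.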
const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace @@ -77,7 +75,7 @@ nn::GeneralResult> Device::create(std::string name << "V1_0::utils::Device::create must have non-null device"; } - auto capabilities = NN_TRY(initCapabilities(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared(PrivateConstructorTag{}, std::move(name), @@ -134,27 +132,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](ErrorStatus status, const hidl_vec& supportedOperations) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getSupportedOperations failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "getSupportedOperations returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -173,10 +156,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel(hidlModel, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp index c1dd1d9e70..c0c22fbd6a 100644 --- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -42,8 +42,7 @@ namespace android::hardware::neuralnetworks::V1_0::utils { nn::GeneralResult> PreparedModel::create( sp preparedModel) { if (preparedModel == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_0::utils::PreparedModel::create must have non-null preparedModel"; + return NN_ERROR() << "V1_0::utils::PreparedModel::create must have non-null preparedModel"; } auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); @@ -71,10 +70,7 @@ nn::ExecutionResult, nn::Timing>> Prepare const auto ret = kPreparedModel->execute(hidlRequest, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - 
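[Reviewer note] `hal::utils::CallbackValue(capabilitiesCallback)` is what lets the out-parameter lambda pattern be deleted: it adapts a plain conversion function into a functor that HIDL can invoke as the synchronous-call callback, storing the function's result for later retrieval. A minimal sketch under that assumption (the real template may accept any invocable, not just function pointers):

```cpp
#include <optional>
#include <utility>

// Sketch of the CallbackValue pattern; the real hal::utils::CallbackValue
// may differ in detail.
template <typename ReturnType, typename... ArgTypes>
class CallbackValue final {
  public:
    using FunctionType = ReturnType (*)(ArgTypes...);

    explicit CallbackValue(FunctionType fn) : mFunction(fn) {}

    // Invoked by HIDL with the driver's results; runs the adapter function
    // and stores its result.
    void operator()(ArgTypes... args) { mValue.emplace(mFunction(args...)); }

    // Retrieves the stored result; only valid after the callback has run.
    [[nodiscard]] ReturnType take() { return std::move(mValue).value(); }

  private:
    FunctionType mFunction;
    std::optional<ReturnType> mValue;
};

// Deduction guide so `CallbackValue(capabilitiesCallback)` needs no explicit
// template arguments.
template <typename ReturnType, typename... ArgTypes>
CallbackValue(ReturnType (*)(ArgTypes...)) -> CallbackValue<ReturnType, ArgTypes...>;
```

Because the synchronous HIDL call blocks until the callback has run, `take()` can be called immediately after `HANDLE_TRANSPORT_FAILURE(ret)` with no extra synchronization.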
return NN_ERROR(canonical) << "execute failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); auto result = NN_TRY(cb->get()); NN_TRY(hal::utils::makeExecutionFailure( diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h index f64646257f..5d0769f14c 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h @@ -51,6 +51,10 @@ nn::GeneralResult convert(const nn::Capabilities& capabilities); nn::GeneralResult convert(const nn::Model& model); nn::GeneralResult convert(const nn::ExecutionPreference& executionPreference); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert(const nn::Request& request); +nn::GeneralResult convert(const nn::ErrorStatus& status); + } // namespace android::hardware::neuralnetworks::V1_1::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_CONVERSIONS_H diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp index 359f68ad4d..b47f25a68c 100644 --- a/neuralnetworks/1.1/utils/src/Conversions.cpp +++ b/neuralnetworks/1.1/utils/src/Conversions.cpp @@ -275,4 +275,16 @@ nn::GeneralResult convert(const nn::ExecutionPreference& ex return validatedConvert(executionPreference); } +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus) { + return V1_0::utils::convert(deviceStatus); +} + +nn::GeneralResult convert(const nn::Request& request) { + return V1_0::utils::convert(request); +} + +nn::GeneralResult convert(const nn::ErrorStatus& status) { + return V1_0::utils::convert(status); +} + } // namespace android::hardware::neuralnetworks::V1_1::utils diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp index b57c7f4c54..3197ef4ac3 100644 --- a/neuralnetworks/1.1/utils/src/Device.cpp +++ b/neuralnetworks/1.1/utils/src/Device.cpp @@ -45,24 +45,21 @@ namespace android::hardware::neuralnetworks::V1_1::utils { namespace { -nn::GeneralResult initCapabilities(V1_1::IDevice* device) { +nn::GeneralResult capabilitiesCallback(V1_0::ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); +} + +nn::GeneralResult getCapabilitiesFrom(V1_1::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_1(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace @@ -78,7 +75,7 @@ nn::GeneralResult> Device::create(std::string name << "V1_1::utils::Device::create must have non-null device"; } - auto capabilities = NN_TRY(initCapabilities(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return 
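[Reviewer note] One subtlety in the `PreparedModel::create` hunk above: dropping the explicit `nn::ErrorStatus::INVALID_ARGUMENT` argument is not purely cosmetic. Assuming the canonical error type defaults its code, as sketched below for illustration, a null `preparedModel` now surfaces as `GENERAL_FAILURE` rather than `INVALID_ARGUMENT`:

```cpp
#include <string>

// Assumed shape of the canonical error type, for illustration only: the code
// member defaults to GENERAL_FAILURE, which is what an argument-free
// NN_ERROR() reports.
enum class ErrorStatus { NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE, INVALID_ARGUMENT };

struct GeneralError {
    std::string message;
    ErrorStatus code = ErrorStatus::GENERAL_FAILURE;  // default used by NN_ERROR()
};
```

The 1.1 conversion forwarders in the same region serve a related tidiness goal: code in the `V1_1::utils` namespace can now call an unqualified `convert()` even for types the 1.1 HAL left unchanged, with the overload forwarding down to `V1_0::utils::convert`.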
std::make_shared(PrivateConstructorTag{}, std::move(name), @@ -135,28 +132,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](V1_0::ErrorStatus status, - const hidl_vec& supportedOperations) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getSupportedOperations_1_1 failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "getSupportedOperations_1_1 returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -176,10 +157,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h index 1162bc33b3..ba3c1ba1db 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h @@ -36,6 +36,19 @@ namespace android::hardware::neuralnetworks::V1_2::utils { +// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this +// function returns with a non-null nn::SharedPreparedModel with a feature level of +// nn::Version::ANDROID_Q. On failure, this function returns with the appropriate nn::GeneralError. +nn::GeneralResult prepareModelCallback( + V1_0::ErrorStatus status, const sp& preparedModel); + +// Converts the results of IDevice::execute* to the NN canonical format. On success, this function +// returns with the output shapes and the timing information. On failure, this function returns with +// the appropriate nn::ExecutionError. +nn::ExecutionResult, nn::Timing>> executionCallback( + V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing); + +// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously. class PreparedModelCallback final : public IPreparedModelCallback, public hal::utils::IProtectedCallback { public: @@ -51,11 +64,10 @@ class PreparedModelCallback final : public IPreparedModelCallback, Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; +// A HIDL callback class to receive the results of IDevice::execute_1_2 asynchronously. 
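[Reviewer note] Worth flagging on the `getSupportedOperations_1_1` hunk above: the deleted lambda also cross-checked the returned vector's size against `model.main.operations.size()`, and the shared `V1_0::utils::supportedOperationsCallback` never sees the model, so that check no longer happens in the callback. If a caller wanted to keep it, the validation would have to move after `take()`. A hypothetical sketch, not part of this change:

```cpp
// Hypothetical: re-adding the size cross-check at the call site, after the
// CallbackValue has produced its result.
auto supportedOperations = NN_TRY(cb.take());
if (supportedOperations.size() != model.main.operations.size()) {
    return NN_ERROR() << "getSupportedOperations_1_1 returned a vector of size "
                      << supportedOperations.size() << " but expected "
                      << model.main.operations.size();
}
return supportedOperations;
```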
class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -69,8 +81,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h index 5dcbc0bb79..6fd13379ef 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h @@ -97,6 +97,12 @@ nn::GeneralResult> convert(const std::vector& nn::GeneralResult> convert(const std::vector& handles); nn::GeneralResult> convert(const std::vector& outputShapes); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert(const nn::Request& request); +nn::GeneralResult convert(const nn::ErrorStatus& status); +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference); + } // namespace android::hardware::neuralnetworks::V1_2::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_CONVERSIONS_H diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h index 79c3b041ad..b4bef5ee0a 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h @@ -37,10 +37,21 @@ namespace android::hardware::neuralnetworks::V1_2::utils { -nn::GeneralResult initVersionString(V1_2::IDevice* device); -nn::GeneralResult initDeviceType(V1_2::IDevice* device); -nn::GeneralResult> initExtensions(V1_2::IDevice* device); -nn::GeneralResult> initNumberOfCacheFilesNeeded( +// Retrieves the version string from the provided device object. On failure, this function returns +// with the appropriate nn::GeneralError. +nn::GeneralResult getVersionStringFrom(V1_2::IDevice* device); + +// Retrieves the device type from the provided device object. On failure, this function returns with +// the appropriate nn::GeneralError. +nn::GeneralResult getDeviceTypeFrom(V1_2::IDevice* device); + +// Retrieves the extensions supported by the provided device object. On failure, this function +// returns with the appropriate nn::GeneralError. +nn::GeneralResult> getSupportedExtensionsFrom(V1_2::IDevice* device); + +// Retrieves the number of model cache files and data cache files needed by the provided device +// object. On failure, this function returns with the appropriate nn::GeneralError. +nn::GeneralResult> getNumberOfCacheFilesNeededFrom( V1_2::IDevice* device); // Class that adapts V1_2::IDevice to nn::IDevice. 
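[Reviewer note] All of the renamed `get*From` helpers above compose through `NN_TRY`, which is what keeps `Device::create` linear. A plausible shape for it, assuming it follows the usual statement-expression early-return idiom (the real macro is defined alongside the canonical `Result` types and may differ):

```cpp
// Plausible shape of NN_TRY, for illustration only. Unwraps a Result-like
// value or early-returns its error from the enclosing function, similar to
// Rust's `?` operator. Relies on the GNU statement-expression extension.
#define NN_TRY(expr)                                          \
    ({                                                        \
        auto nnTryTemp = (expr);                              \
        if (!nnTryTemp.has_value()) {                         \
            return NN_ERROR(nnTryTemp.error().code)           \
                    << std::move(nnTryTemp.error().message);  \
        }                                                     \
        std::move(nnTryTemp).value();                         \
    })
```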
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h index 8ed5ca7f97..6a56a82f99 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h @@ -41,10 +41,10 @@ class PreparedModel final : public nn::IPreparedModel { public: static nn::GeneralResult> create( - sp preparedModel); + sp preparedModel, bool executeSynchronously); - PreparedModel(PrivateConstructorTag tag, sp preparedModel, - hal::utils::DeathHandler deathHandler); + PreparedModel(PrivateConstructorTag tag, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler); nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, @@ -65,6 +65,7 @@ class PreparedModel final : public nn::IPreparedModel { nn::ExecutionResult, nn::Timing>> executeAsynchronously( const V1_0::Request& request, MeasureTiming measure) const; + const bool kExecuteSynchronously; const sp kPreparedModel; const hal::utils::DeathHandler kDeathHandler; }; diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h index 70149a2d3a..c289fc89ab 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h @@ -30,6 +30,8 @@ namespace android::hardware::neuralnetworks::V1_2::utils { +using CacheToken = hidl_array(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; + constexpr auto kDefaultMesaureTiming = MeasureTiming::NO; constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits::max(), .timeInDriver = std::numeric_limits::max()}; diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp index ab3e0ca879..fefa122101 100644 --- a/neuralnetworks/1.2/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -42,104 +43,73 @@ namespace android::hardware::neuralnetworks::V1_2::utils { namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); -} - nn::GeneralResult, nn::Timing>> convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, const Timing& timing) { return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); } -nn::ExecutionResult, nn::Timing>> -convertExecutionGeneralResults(const hidl_vec& outputShapes, const Timing& timing) { +} // namespace + +nn::GeneralResult prepareModelCallback( + V1_0::ErrorStatus status, const sp& preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing) { + if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + auto canonicalOutputShapes = + nn::convert(outputShapes).value_or(std::vector{}); + return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) + << "execution failed with " << toString(status); + } + HANDLE_HAL_STATUS(status) << "execution failed with " << 
toString(status); return hal::utils::makeExecutionFailure( convertExecutionGeneralResultsHelper(outputShapes, timing)); } -} // namespace - Return PreparedModelCallback::notify(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_0::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(V1_0::ErrorStatus status) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(V1_0::utils::executionCallback(status)); return Void(); } Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(executionCallback(status, outputShapes, timing)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index 3790d1f61e..062f6f712f 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -622,4 +623,21 @@ nn::GeneralResult> convert(const std::vector convert(const nn::DeviceStatus& deviceStatus) 
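[Reviewer note] The 1.2 `executionCallback` adapter above gives `OUTPUT_INSUFFICIENT_SIZE` special treatment: the error still carries the driver-reported output shapes so a caller can grow its buffers. A hypothetical caller-side retry, assuming the canonical `ExecutionError` exposes `code` and `outputShapes` members as in the canonical types (`resizeOutputBuffers` is made up for illustration):

```cpp
auto result = preparedModel->execute(request, nn::MeasureTiming::NO, {}, {});
if (!result.has_value() &&
    result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
    // The shapes carried by the error tell the runtime how large the output
    // buffers actually need to be.
    resizeOutputBuffers(request, result.error().outputShapes);  // hypothetical
    result = preparedModel->execute(request, nn::MeasureTiming::NO, {}, {});
}
```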
{ + return V1_1::utils::convert(deviceStatus); +} + +nn::GeneralResult convert(const nn::Request& request) { + return V1_1::utils::convert(request); +} + +nn::GeneralResult convert(const nn::ErrorStatus& status) { + return V1_1::utils::convert(status); +} + +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference) { + return V1_1::utils::convert(executionPreference); +} + } // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp index 6cca841aba..9fe0de25b3 100644 --- a/neuralnetworks/1.2/utils/src/Device.cpp +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -47,109 +47,102 @@ namespace android::hardware::neuralnetworks::V1_2::utils { namespace { -nn::GeneralResult initCapabilities(V1_2::IDevice* device) { +nn::GeneralResult capabilitiesCallback(V1_0::ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); +} + +nn::GeneralResult versionStringCallback(V1_0::ErrorStatus status, + const hidl_string& versionString) { + HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status); + return versionString; +} + +nn::GeneralResult deviceTypeCallback(V1_0::ErrorStatus status, + DeviceType deviceType) { + HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status); + return nn::convert(deviceType); +} + +nn::GeneralResult> supportedExtensionsCallback( + V1_0::ErrorStatus status, const hidl_vec& extensions) { + HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status); + return nn::convert(extensions); +} + +nn::GeneralResult> numberOfCacheFilesNeededCallback( + V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { + HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status); + if (numModelCache > nn::kMaxNumberOfCacheFiles) { + return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater " + "than allowed max (" + << numModelCache << " vs " << nn::kMaxNumberOfCacheFiles << ")"; + } + if (numDataCache > nn::kMaxNumberOfCacheFiles) { + return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numDataCache files greater " + "than allowed max (" + << numDataCache << " vs " << nn::kMaxNumberOfCacheFiles << ")"; + } + return std::make_pair(numModelCache, numDataCache); +} + +nn::GeneralResult getCapabilitiesFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_2(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } } // namespace -nn::GeneralResult initVersionString(V1_2::IDevice* device) { +nn::GeneralResult getVersionStringFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& 
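[Reviewer note] A nice side effect of naming these adapters is that the `kMaxNumberOfCacheFiles` bounds check in `numberOfCacheFilesNeededCallback` now lives in one small pure function rather than inside an inline lambda. If it were exposed outside the anonymous namespace, it could even be exercised without a live driver. A hypothetical sketch:

```cpp
// Hypothetical unit-test style usage (the function currently sits in an
// anonymous namespace, so this is illustration only).
const auto ok = numberOfCacheFilesNeededCallback(V1_0::ErrorStatus::NONE, 2, 2);
CHECK(ok.has_value());

const auto tooMany = numberOfCacheFilesNeededCallback(
        V1_0::ErrorStatus::NONE, nn::kMaxNumberOfCacheFiles + 1, 0);
CHECK(!tooMany.has_value());  // exceeds the allowed maximum
```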
versionString) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status); - } else { - result = versionString; - } - }; + auto cb = hal::utils::CallbackValue(versionStringCallback); const auto ret = device->getVersionString(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } -nn::GeneralResult initDeviceType(V1_2::IDevice* device) { +nn::GeneralResult getDeviceTypeFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status); - } else { - result = nn::convert(deviceType); - } - }; + auto cb = hal::utils::CallbackValue(deviceTypeCallback); const auto ret = device->getType(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } -nn::GeneralResult> initExtensions(V1_2::IDevice* device) { +nn::GeneralResult> getSupportedExtensionsFrom(V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec& extensions) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status); - } else { - result = nn::convert(extensions); - } - }; + auto cb = hal::utils::CallbackValue(supportedExtensionsCallback); const auto ret = device->getSupportedExtensions(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } -nn::GeneralResult> initNumberOfCacheFilesNeeded( +nn::GeneralResult> getNumberOfCacheFilesNeededFrom( V1_2::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache, - uint32_t numDataCache) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getNumberOfCacheFilesNeeded failed with " << toString(status); - } else { - result = std::make_pair(numModelCache, numDataCache); - } - }; + auto cb = hal::utils::CallbackValue(numberOfCacheFilesNeededCallback); const auto ret = device->getNumberOfCacheFilesNeeded(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult> Device::create(std::string name, @@ -163,11 +156,11 @@ nn::GeneralResult> Device::create(std::string name << "V1_2::utils::Device::create must have non-null device"; } - auto versionString = NN_TRY(initVersionString(device.get())); - const auto deviceType = NN_TRY(initDeviceType(device.get())); - auto extensions = NN_TRY(initExtensions(device.get())); - auto capabilities = NN_TRY(initCapabilities(device.get())); - const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get())); + auto versionString = NN_TRY(getVersionStringFrom(device.get())); + const auto deviceType = NN_TRY(getDeviceTypeFrom(device.get())); + auto 
extensions = NN_TRY(getSupportedExtensionsFrom(device.get())); + auto capabilities = NN_TRY(getCapabilitiesFrom(device.get())); + const auto numberOfCacheFilesNeeded = NN_TRY(getNumberOfCacheFilesNeededFrom(device.get())); auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); return std::make_shared( @@ -232,28 +225,12 @@ nn::GeneralResult> Device::getSupportedOperations(const nn::Mo const auto hidlModel = NN_TRY(convert(modelInShared)); - nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - auto cb = [&result, &model](V1_0::ErrorStatus status, - const hidl_vec& supportedOperations) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) - << "getSupportedOperations_1_2 failed with " << toString(status); - } else if (supportedOperations.size() != model.main.operations.size()) { - result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "getSupportedOperations_1_2 returned vector of size " - << supportedOperations.size() << " but expected " - << model.main.operations.size(); - } else { - result = supportedOperations; - } - }; + auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback); const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::GeneralResult Device::prepareModel( @@ -266,10 +243,10 @@ nn::GeneralResult Device::prepareModel( NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); const auto hidlModel = NN_TRY(convert(modelInShared)); - const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference)); + const auto hidlPreference = NN_TRY(convert(preference)); const auto hidlModelCache = NN_TRY(convert(modelCache)); const auto hidlDataCache = NN_TRY(convert(dataCache)); - const auto hidlToken = token; + const auto hidlToken = CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); @@ -277,10 +254,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); return cb->get(); } @@ -290,17 +264,14 @@ nn::GeneralResult Device::prepareModelFromCache( const std::vector& dataCache, const nn::CacheToken& token) const { const auto hidlModelCache = NN_TRY(convert(modelCache)); const auto hidlDataCache = NN_TRY(convert(dataCache)); - const auto hidlToken = token; + const auto hidlToken = CacheToken{token}; const auto cb = sp::make(); const auto scoped = kDeathHandler.protectCallback(cb.get()); const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status); return cb->get(); } diff --git 
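[Reviewer note] Both `prepareModel_1_2` and `prepareModelFromCache` now spell the token conversion as `CacheToken{token}`, using the alias added to Utils.h earlier in this change (the alias text above lost its template arguments to formatting; presumably it reads `hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>`). Assuming the canonical `nn::CacheToken` is a `std::array<uint8_t, 32>` and that `hidl_array` is constructible from a `std::array`, the conversion is a plain byte copy made explicit at the declaration:

```cpp
// Illustrative only: the old `const auto hidlToken = token;` left hidlToken
// as the canonical std::array type and relied on an implicit conversion when
// the HIDL method was invoked; the new spelling converts once, explicitly.
const nn::CacheToken token = {};           // 32 opaque bytes from the runtime
const auto hidlToken = CacheToken{token};  // HIDL wire type, same bytes
static_assert(sizeof(hidlToken) == sizeof(token), "token widths must match");
```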
a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index b422cedefa..6d00082a5f 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -41,54 +41,33 @@ // lifetimes across processes and for protecting asynchronous calls across HIDL. namespace android::hardware::neuralnetworks::V1_2::utils { -namespace { - -nn::GeneralResult, nn::Timing>> -convertExecutionResultsHelper(const hidl_vec& outputShapes, const Timing& timing) { - return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); -} - -nn::ExecutionResult, nn::Timing>> convertExecutionResults( - const hidl_vec& outputShapes, const Timing& timing) { - return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing)); -} - -} // namespace nn::GeneralResult> PreparedModel::create( - sp preparedModel) { + sp preparedModel, bool executeSynchronously) { if (preparedModel == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_2::utils::PreparedModel::create must have non-null preparedModel"; + return NN_ERROR() << "V1_2::utils::PreparedModel::create must have non-null preparedModel"; } auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); - return std::make_shared(PrivateConstructorTag{}, std::move(preparedModel), - std::move(deathHandler)); + return std::make_shared(PrivateConstructorTag{}, executeSynchronously, + std::move(preparedModel), std::move(deathHandler)); } -PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp preparedModel, +PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler) - : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {} + : kExecuteSynchronously(executeSynchronously), + kPreparedModel(std::move(preparedModel)), + kDeathHandler(std::move(deathHandler)) {} nn::ExecutionResult, nn::Timing>> PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const { - nn::ExecutionResult, nn::Timing>> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec& outputShapes, - const Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status); - } else { - result = convertExecutionResults(outputShapes, timing); - } - }; + auto cb = hal::utils::CallbackValue(executionCallback); const auto ret = kPreparedModel->executeSynchronously(request, measure, cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); } nn::ExecutionResult, nn::Timing>> @@ -98,9 +77,8 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming const auto ret = kPreparedModel->execute_1_2(request, measure, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "execute failed with " << toString(status); + if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); } return cb->get(); @@ -115,31 +93,17 @@ nn::ExecutionResult, nn::Timing>> Prepare const nn::Request& 
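[Reviewer note] The new `executeSynchronously` constructor flag replaces the hard-coded `preferSynchronous = true` local deleted further down: one wrapper class now serves both execution styles, chosen once at creation time rather than per call. For illustration, with `halPreparedModel` standing in for a driver-provided `sp<V1_2::IPreparedModel>`:

```cpp
// Illustrative: wrapping the same HIDL prepared model both ways. The sync
// flavor uses IPreparedModel::executeSynchronously (a single blocking HIDL
// call); the async flavor uses execute_1_2 plus an ExecutionCallback.
auto syncModel = PreparedModel::create(halPreparedModel,
                                       /*executeSynchronously=*/true);
auto asyncModel = PreparedModel::create(halPreparedModel,
                                        /*executeSynchronously=*/false);
```

Note that `prepareModelCallback` above hard-wires `/*executeSynchronously=*/true`, so models prepared through the callback path keep today's synchronous behavior.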
requestInShared = NN_TRY(hal::utils::makeExecutionFailure( hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared))); - const auto hidlRequest = - NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared))); + const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared))); const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure))); - nn::ExecutionResult, nn::Timing>> result = - NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; - const bool preferSynchronous = true; + auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure) + : executeAsynchronously(hidlRequest, hidlMeasure); + auto [outputShapes, timing] = NN_TRY(std::move(result)); - // Execute synchronously if allowed. - if (preferSynchronous) { - result = executeSynchronously(hidlRequest, hidlMeasure); - } + NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); - // Run asymchronous execution if execution has not already completed. - if (!result.has_value()) { - result = executeAsynchronously(hidlRequest, hidlMeasure); - } - - // Flush output buffers if suxcessful execution. - if (result.has_value()) { - NN_TRY(hal::utils::makeExecutionFailure( - hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); - } - - return result; + return std::make_pair(std::move(outputShapes), timing); } nn::GeneralResult> @@ -154,7 +118,7 @@ PreparedModel::executeFenced(const nn::Request& /*request*/, } std::any PreparedModel::getUnderlyingResource() const { - sp resource = kPreparedModel; + sp resource = kPreparedModel; return resource; } diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h index cb2a56a2e2..643172e192 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h @@ -39,6 +39,26 @@ namespace android::hardware::neuralnetworks::V1_3::utils { +// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success, +// this function returns with the supported operations as indicated by a driver. On failure, this +// function returns with the appropriate nn::GeneralError. +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations); + +// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this +// function returns with a non-null nn::SharedPreparedModel with a feature level of +// nn::Version::ANDROID_R. On failure, this function returns with the appropriate nn::GeneralError. +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel); + +// Converts the results of IDevice::execute* to the NN canonical format. On success, this function +// returns with the output shapes and the timing information. On failure, this function returns with +// the appropriate nn::ExecutionError. +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing); + +// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously. 
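[Reviewer note] `hal::utils::makeExecutionFailure`, used throughout the rewritten `execute()` above, bridges the two result types in play: general-purpose helpers return `nn::GeneralResult`, while `execute()` must return `nn::ExecutionResult`, whose error additionally carries output shapes. A plausible sketch of the conversion (illustrative; the real helper lives in the common HAL utils and may differ):

```cpp
// Plausible sketch: widen a GeneralResult<Type> into an ExecutionResult<Type>
// by re-raising the error, implicitly with an empty output-shape vector.
template <typename Type>
nn::ExecutionResult<Type> makeExecutionFailure(nn::GeneralResult<Type> result) {
    if (result.has_value()) {
        return std::move(result).value();
    }
    const auto& [message, code] = result.error();
    return NN_ERROR(code) << message;
}
```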
class PreparedModelCallback final : public IPreparedModelCallback, public hal::utils::IProtectedCallback { public: @@ -55,11 +75,10 @@ class PreparedModelCallback final : public IPreparedModelCallback, Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; +// A HIDL callback class to receive the results of IDevice::execute_1_3 asynchronously. class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { public: using Data = nn::ExecutionResult, nn::Timing>>; @@ -76,8 +95,6 @@ class ExecutionCallback final : public IExecutionCallback, public hal::utils::IP Data get(); private: - void notifyInternal(Data result); - hal::utils::TransferValue mData; }; diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h index 477bb7b6e0..74a6534aff 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h @@ -103,6 +103,17 @@ nn::GeneralResult convert(const nn::SharedHandle& handle); nn::GeneralResult convert(const nn::Memory& memory); nn::GeneralResult> convert(const std::vector& bufferRoles); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference); +nn::GeneralResult> convert(const std::vector& extensions); +nn::GeneralResult> convert(const std::vector& handles); +nn::GeneralResult> convert( + const std::vector& outputShapes); +nn::GeneralResult convert(const nn::DeviceType& deviceType); +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming); +nn::GeneralResult convert(const nn::Timing& timing); + } // namespace android::hardware::neuralnetworks::V1_3::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h index c4ba483463..664d87a7c2 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -40,10 +40,10 @@ class PreparedModel final : public nn::IPreparedModel { public: static nn::GeneralResult> create( - sp preparedModel); + sp preparedModel, bool executeSynchronously); - PreparedModel(PrivateConstructorTag tag, sp preparedModel, - hal::utils::DeathHandler deathHandler); + PreparedModel(PrivateConstructorTag tag, bool executeSynchronously, + sp preparedModel, hal::utils::DeathHandler deathHandler); nn::ExecutionResult, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, @@ -66,6 +66,7 @@ class PreparedModel final : public nn::IPreparedModel { const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline, const OptionalTimeoutDuration& loopTimeoutDuration) const; + const bool kExecuteSynchronously; const sp kPreparedModel; const hal::utils::DeathHandler kDeathHandler; }; diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp index 4ef54a2c93..614033e268 100644 --- a/neuralnetworks/1.3/utils/src/Buffer.cpp +++ b/neuralnetworks/1.3/utils/src/Buffer.cpp @@ -41,12 +41,10 @@ namespace android::hardware::neuralnetworks::V1_3::utils { nn::GeneralResult> Buffer::create( sp buffer, nn::Request::MemoryDomainToken token) { if (buffer == nullptr) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::Buffer::create must 
have non-null buffer"; + return NN_ERROR() << "V1_3::utils::Buffer::create must have non-null buffer"; } if (token == static_cast(0)) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) - << "V1_3::utils::Buffer::create must have non-zero token"; + return NN_ERROR() << "V1_3::utils::Buffer::create must have non-zero token"; } return std::make_shared(PrivateConstructorTag{}, std::move(buffer), token); @@ -68,10 +66,7 @@ nn::GeneralResult Buffer::copyTo(const nn::Memory& dst) const { const auto ret = kBuffer->copyTo(hidlDst); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status); return {}; } @@ -83,10 +78,7 @@ nn::GeneralResult Buffer::copyFrom(const nn::Memory& src, const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status); - } + HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status); return {}; } diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp index 17c20fba68..af76e6a87e 100644 --- a/neuralnetworks/1.3/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -45,136 +46,93 @@ namespace android::hardware::neuralnetworks::V1_3::utils { namespace { -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel)); -} - -nn::GeneralResult convertPreparedModel( - const sp& preparedModel) { - return NN_TRY(utils::PreparedModel::create(preparedModel)); -} - nn::GeneralResult, nn::Timing>> convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, const V1_2::Timing& timing) { return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing))); } -nn::ExecutionResult, nn::Timing>> -convertExecutionGeneralResults(const hidl_vec& outputShapes, - const V1_2::Timing& timing) { +} // namespace + +nn::GeneralResult> supportedOperationsCallback( + ErrorStatus status, const hidl_vec& supportedOperations) { + HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + return supportedOperations; +} + +nn::GeneralResult prepareModelCallback( + ErrorStatus status, const sp& preparedModel) { + HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); +} + +nn::ExecutionResult, nn::Timing>> executionCallback( + ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + auto canonicalOutputShapes = + nn::convert(outputShapes).value_or(std::vector{}); + return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) + << "execution failed with " << toString(status); + } + HANDLE_HAL_STATUS(status) 
<< "execution failed with " << toString(status); return hal::utils::makeExecutionFailure( convertExecutionGeneralResultsHelper(outputShapes, timing)); } -} // namespace - Return PreparedModelCallback::notify(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_0::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, const sp& preparedModel) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(V1_2::utils::prepareModelCallback(status, preparedModel)); return Void(); } Return PreparedModelCallback::notify_1_3(ErrorStatus status, const sp& preparedModel) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); - } else if (preparedModel == nullptr) { - notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Returned preparedModel is nullptr"); - } else { - notifyInternal(convertPreparedModel(preparedModel)); - } + mData.put(prepareModelCallback(status, preparedModel)); return Void(); } void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } PreparedModelCallback::Data PreparedModelCallback::get() { return mData.take(); } -void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { - mData.put(std::move(result)); -} - // ExecutionCallback methods begin here Return ExecutionCallback::notify(V1_0::ErrorStatus status) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal({}); - } + mData.put(V1_0::utils::executionCallback(status)); return Void(); } Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, const hidl_vec& outputShapes, const V1_2::Timing& timing) { - if (status != V1_0::ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(V1_2::utils::executionCallback(status, outputShapes, timing)); return Void(); } Return ExecutionCallback::notify_1_3(ErrorStatus status, const hidl_vec& outputShapes, const V1_2::Timing& timing) { - if (status != ErrorStatus::NONE) { - const auto canonical = 
nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); - } else { - notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); - } + mData.put(executionCallback(status, outputShapes, timing)); return Void(); } void ExecutionCallback::notifyAsDeadObject() { - notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); + mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); } ExecutionCallback::Data ExecutionCallback::get() { return mData.take(); } -void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { - mData.put(std::move(result)); -} - } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index c89a69f28b..8b7db2b90e 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -685,4 +685,38 @@ nn::GeneralResult> convert(const std::vector convert(const nn::DeviceStatus& deviceStatus) { + return V1_2::utils::convert(deviceStatus); +} + +nn::GeneralResult convert( + const nn::ExecutionPreference& executionPreference) { + return V1_2::utils::convert(executionPreference); +} + +nn::GeneralResult> convert(const std::vector& extensions) { + return V1_2::utils::convert(extensions); +} + +nn::GeneralResult> convert(const std::vector& handles) { + return V1_2::utils::convert(handles); +} + +nn::GeneralResult> convert( + const std::vector& outputShapes) { + return V1_2::utils::convert(outputShapes); +} + +nn::GeneralResult convert(const nn::DeviceType& deviceType) { + return V1_2::utils::convert(deviceType); +} + +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming) { + return V1_2::utils::convert(measureTiming); +} + +nn::GeneralResult convert(const nn::Timing& timing) { + return V1_2::utils::convert(timing); +} + } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp index 60564985de..d710b85070 100644 --- a/neuralnetworks/1.3/utils/src/Device.cpp +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -69,29 +70,27 @@ nn::GeneralResult>> convert( return hidlPreparedModels; } -nn::GeneralResult convert( - nn::GeneralResult> result) { - return NN_TRY(std::move(result)); +nn::GeneralResult capabilitiesCallback(ErrorStatus status, + const Capabilities& capabilities) { + HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + return nn::convert(capabilities); } -nn::GeneralResult initCapabilities(V1_3::IDevice* device) { +nn::GeneralResult getCapabilitiesFrom(V1_3::IDevice* device) { CHECK(device != nullptr); - nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "uninitialized"; - const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) { - if (status != ErrorStatus::NONE) { - const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); - result = NN_ERROR(canonical) << "getCapabilities_1_3 failed with " << toString(status); - } else { - result = nn::convert(capabilities); - } - }; + auto cb = hal::utils::CallbackValue(capabilitiesCallback); const auto ret = device->getCapabilities_1_3(cb); HANDLE_TRANSPORT_FAILURE(ret); - return result; + return cb.take(); +} + +nn::GeneralResult 
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index 60564985de..d710b85070 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -69,29 +70,27 @@ nn::GeneralResult<hidl_vec<sp<IPreparedModel>>> convert(
     return hidlPreparedModels;
 }
 
-nn::GeneralResult<nn::SharedBuffer> convert(
-        nn::GeneralResult<std::shared_ptr<const Buffer>> result) {
-    return NN_TRY(std::move(result));
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
 }
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_3::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_3::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_3 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_3(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
+}
+
+nn::GeneralResult<nn::SharedBuffer> allocationCallback(ErrorStatus status,
+                                                       const sp<IBuffer>& buffer, uint32_t token) {
+    HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status);
+    return Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token));
 }
 
 }  // namespace
@@ -107,12 +106,12 @@ nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
                << "V1_3::utils::Device::create must have non-null device";
     }
 
-    auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get()));
-    const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get()));
-    auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get()));
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto versionString = NN_TRY(V1_2::utils::getVersionStringFrom(device.get()));
+    const auto deviceType = NN_TRY(V1_2::utils::getDeviceTypeFrom(device.get()));
+    auto extensions = NN_TRY(V1_2::utils::getSupportedExtensionsFrom(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
     const auto numberOfCacheFilesNeeded =
-            NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get()));
+            NN_TRY(V1_2::utils::getNumberOfCacheFilesNeededFrom(device.get()));
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
     return std::make_shared<const Device>(
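The CallbackValue pattern used above (the class itself is added in the TransferValue.h hunk at the end of this change) relies on HIDL invoking the result callback before getCapabilities_1_3 returns, so a plain unsynchronized slot is sufficient; the class is documented as "thread compatible" rather than thread safe for exactly this reason. A self-contained sketch of that wrapper shape, with toy types in place of the HIDL ones:

```cpp
#include <functional>
#include <optional>
#include <vector>

enum class ErrorStatus { NONE, GENERAL_FAILURE };

// Toy stand-in for a HIDL method such as IDevice::getSupportedOperations_1_3: it reports
// its result by invoking the callback before the call returns.
void getSupportedOperations(
        const std::function<void(ErrorStatus, const std::vector<bool>&)>& cb) {
    cb(ErrorStatus::NONE, {true, false, true});
}

// The wrapper shape used in this change: hand the API a callback that fills a local slot,
// then take the slot after the call returns. No locking is needed because the callback
// runs synchronously within the call.
std::optional<std::vector<bool>> getSupportedOperationsFrom() {
    std::optional<std::vector<bool>> result;
    getSupportedOperations([&result](ErrorStatus status, const std::vector<bool>& ops) {
        if (status == ErrorStatus::NONE) {
            result = ops;
        }
    });
    return result;
}
```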
@@ -177,27 +176,12 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "IDevice::getSupportedOperations_1_3 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "IDevice::getSupportedOperations_1_3 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -210,12 +194,12 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
             NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
-    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlPreference = NN_TRY(convert(preference));
     const auto hidlPriority = NN_TRY(convert(priority));
     const auto hidlDeadline = NN_TRY(convert(deadline));
-    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
-    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlModelCache = NN_TRY(convert(modelCache));
+    const auto hidlDataCache = NN_TRY(convert(dataCache));
+    const auto hidlToken = V1_2::utils::CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -224,10 +208,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
             kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
                                       hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
 
@@ -236,9 +217,9 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlDeadline = NN_TRY(convert(deadline));
-    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
-    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlModelCache = NN_TRY(convert(modelCache));
+    const auto hidlDataCache = NN_TRY(convert(dataCache));
+    const auto hidlToken = V1_2::utils::CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -246,10 +227,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
     const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache,
                                                         hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
 
@@ -263,27 +241,13 @@ nn::GeneralResult<nn::SharedBuffer> Device::allocate(
     const auto hidlInputRoles = NN_TRY(convert(inputRoles));
     const auto hidlOutputRoles = NN_TRY(convert(outputRoles));
 
-    nn::GeneralResult<nn::SharedBuffer> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    auto cb = [&result](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status);
-        } else if (buffer == nullptr) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr";
-        } else if (token == 0) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)";
-        } else {
-            result = convert(
-                    Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token)));
-        }
-    };
+    auto cb = hal::utils::CallbackValue(allocationCallback);
 
     const auto ret = kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles,
                                        hidlOutputRoles, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
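prepareModel_1_3 above checks three distinct failure layers in order: the HIDL transport itself (HANDLE_TRANSPORT_FAILURE on the Return object), the launch status the driver reports synchronously (HANDLE_HAL_STATUS), and finally the asynchronous result fetched by cb->get(). A toy sketch of that layering, under the assumption that the launch status arrives with the call and the real result arrives later through a callback object (elided here):

```cpp
#include <optional>
#include <string>

enum class TransportStatus { OK, DEAD_OBJECT };
enum class LaunchStatus { NONE, GENERAL_FAILURE };

struct Outcome {
    std::optional<std::string> error;  // set on failure
    int value = 0;                     // valid when !error
};

struct Call {
    TransportStatus transport;   // did the message reach the service?
    LaunchStatus launch;         // did the service accept the request?
    Outcome deferredResult;      // what the callback eventually delivers
};

Outcome prepare(const Call& call) {
    // Layer 1: transport failure (dead process, binder error).
    if (call.transport != TransportStatus::OK) return {"transport failed", 0};
    // Layer 2: the service rejected the request up front.
    if (call.launch != LaunchStatus::NONE) return {"launch failed", 0};
    // Layer 3: block on the callback for the real result (cb->get() in the change above).
    return call.deferredResult;
}
```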
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 0bae95de87..7b4b7bac3b 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -45,25 +45,17 @@ namespace android::hardware::neuralnetworks::V1_3::utils {
 namespace {
 
-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
-                              const V1_2::Timing& timing) {
-    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
-        const hidl_vec<V1_2::OutputShape>& outputShapes, const V1_2::Timing& timing) {
-    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
 nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
-        const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+        ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+    HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
     return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
 }
 
-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-convertExecuteFencedResults(const hidl_handle& syncFence,
-                            const sp<IFencedExecutionCallback>& callback) {
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fencedExecutionCallback(
+        ErrorStatus status, const hidl_handle& syncFence,
+        const sp<IFencedExecutionCallback>& callback) {
+    HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status);
+
     auto resultSyncFence = nn::SyncFence::createAsSignaled();
     if (syncFence.getNativeHandle() != nullptr) {
         auto sharedHandle = NN_TRY(nn::convert(syncFence));
@@ -78,23 +70,12 @@ convertExecuteFencedResults(const hidl_handle& syncFence,
     // Create callback which can be used to retrieve the execution error status and timings.
     nn::ExecuteFencedInfoCallback resultCallback =
             [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
-        nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> result =
-                NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-        auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched,
-                            const V1_2::Timing& timingFenced) {
-            if (status != ErrorStatus::NONE) {
-                const auto canonical =
-                        nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-                result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status);
-            } else {
-                result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced);
-            }
-        };
+        auto cb = hal::utils::CallbackValue(convertFencedExecutionCallbackResults);
 
         const auto ret = callback->getExecutionInfo(cb);
         HANDLE_TRANSPORT_FAILURE(ret);
 
-        return result;
+        return cb.take();
     };
 
     return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
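fencedExecutionCallback above returns its pair immediately but packages the timing query as a closure over the IFencedExecutionCallback, so the getExecutionInfo round trip happens only if and when the caller asks, typically after waiting on the fence. A standalone sketch of deferring a query behind a closure, with toy types:

```cpp
#include <functional>
#include <memory>
#include <utility>

struct Timing {
    unsigned launched = 0;
    unsigned fenced = 0;
};

// Toy stand-in for IFencedExecutionCallback: may be queried once execution has finished.
struct FencedCallback {
    Timing getExecutionInfo() const { return {10, 12}; }
};

using Fence = int;  // toy stand-in for nn::SyncFence

// Shape of the change above: hand back the fence now, plus a closure that keeps the
// callback object alive and performs the query only when invoked.
std::pair<Fence, std::function<Timing()>> executeFenced(
        const std::shared_ptr<FencedCallback>& callback) {
    Fence fence = 1;
    auto deferredQuery = [callback]() -> Timing { return callback->getExecutionInfo(); };
    return {fence, std::move(deferredQuery)};
}
```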
@@ -103,42 +84,34 @@ convertExecuteFencedResults(const hidl_handle& syncFence,
 }  // namespace
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        sp<V1_3::IPreparedModel> preparedModel) {
+        sp<V1_3::IPreparedModel> preparedModel, bool executeSynchronously) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
-                                                 std::move(deathHandler));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+                                                 std::move(preparedModel), std::move(deathHandler));
 }
 
-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_3::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+                             sp<V1_3::IPreparedModel> preparedModel,
                              hal::utils::DeathHandler deathHandler)
-    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+    : kExecuteSynchronously(executeSynchronously),
+      kPreparedModel(std::move(preparedModel)),
+      kDeathHandler(std::move(deathHandler)) {}
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
                                     const OptionalTimePoint& deadline,
                                     const OptionalTimeoutDuration& loopTimeoutDuration) const {
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
-                              const V1_2::Timing& timing) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
-        } else {
-            result = convertExecutionResults(outputShapes, timing);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(executionCallback);
 
     const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
                                                               loopTimeoutDuration, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -151,9 +124,8 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming measure,
     const auto ret =
             kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
+    if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
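The OUTPUT_INSUFFICIENT_SIZE carve-out above matters because, for that error alone, the driver's callback still carries per-output shapes the caller needs in order to resize its buffers; returning early here would discard them, so that error is surfaced through cb->get() with the shapes attached. A toy illustration of the same decision:

```cpp
#include <string>
#include <vector>

enum class Status { NONE, OUTPUT_INSUFFICIENT_SIZE, GENERAL_FAILURE };

struct ExecutionResult {
    Status status = Status::NONE;
    std::vector<int> outputShapes;  // still meaningful when status is OUTPUT_INSUFFICIENT_SIZE
};

std::string runAndReport(Status launchStatus, const ExecutionResult& callbackResult) {
    // Early-return on every launch failure except OUTPUT_INSUFFICIENT_SIZE: only that
    // error still delivers shapes through the callback, and the caller wants them.
    if (launchStatus != Status::NONE && launchStatus != Status::OUTPUT_INSUFFICIENT_SIZE) {
        return "failed before execution";
    }
    if (callbackResult.status == Status::OUTPUT_INSUFFICIENT_SIZE) {
        return "resize outputs to " + std::to_string(callbackResult.outputShapes.size()) +
               " reported shapes and retry";
    }
    return "ok";
}
```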
@@ -169,35 +141,22 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
             hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure =
-            NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure)));
+    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
     const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
     const auto hidlLoopTimeoutDuration =
             NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
 
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const bool preferSynchronous = true;
+    auto result = kExecuteSynchronously
+                          ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                                 hidlLoopTimeoutDuration)
+                          : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                                  hidlLoopTimeoutDuration);
+    auto [outputShapes, timing] = NN_TRY(std::move(result));
 
-    // Execute synchronously if allowed.
-    if (preferSynchronous) {
-        result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
-                                      hidlLoopTimeoutDuration);
-    }
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
 
-    // Run asymchronous execution if execution has not already completed.
-    if (!result.has_value()) {
-        result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
-                                       hidlLoopTimeoutDuration);
-    }
-
-    // Flush output buffers if suxcessful execution.
-    if (result.has_value()) {
-        NN_TRY(hal::utils::makeExecutionFailure(
-                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-    }
-
-    return result;
+    return std::make_pair(std::move(outputShapes), timing);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -212,28 +171,18 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
     const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
 
-    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence,
-                        const sp<IFencedExecutionCallback>& callback) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status);
-        } else {
-            result = convertExecuteFencedResults(syncFence, callback);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(fencedExecutionCallback);
 
     const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
                                                    hidlDeadline, hidlLoopTimeoutDuration,
                                                    hidlTimeoutDurationAfterFence, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
-    auto [syncFence, callback] = NN_TRY(std::move(result));
+    auto [syncFence, callback] = NN_TRY(cb.take());
 
     // If executeFenced required the request memory to be moved into shared memory, block here until
     // the fenced execution has completed and flush the memory back.
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 43bb0c677a..b3989e5878 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -44,9 +44,18 @@ nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP0(
 bool hasNoPointerData(const nn::Model& model);
 bool hasNoPointerData(const nn::Request& request);
 
-// Relocate pointer-based data to shared memory.
+// Relocate pointer-based data to shared memory. If `model` has no Operand::LifeTime::POINTER data,
+// the function returns with a reference to `model`. If `model` has Operand::LifeTime::POINTER data,
+// the model is copied to `maybeModelInSharedOut` with the POINTER data relocated to a memory pool,
+// and the function returns with a reference to `*maybeModelInSharedOut`.
 nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
         const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
+
+// Relocate pointer-based data to shared memory. If `request` has no
+// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
+// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
+// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
+// returns with a reference to `*maybeRequestInSharedOut`.
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
         const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
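The new comments pin down the aliasing contract: the returned reference may point either at the caller's original object or at the relocated copy in the out-parameter, so the out-parameter must outlive every use of the returned reference. A self-contained sketch of the same shape (a toy flag stands in for the POINTER-lifetime check):

```cpp
#include <functional>
#include <optional>
#include <string>

// Toy request: "pointer data" is simulated by a flag.
struct Request {
    bool hasPointerData = false;
    std::string payload;
};

// Same contract as flushDataFromPointerToShared: return a reference to `request` when no
// relocation is needed; otherwise fill `*maybeShared` and return a reference to that copy.
std::reference_wrapper<const Request> flushToShared(
        const Request* request, std::optional<Request>* maybeShared) {
    if (!request->hasPointerData) {
        return std::cref(*request);
    }
    *maybeShared = Request{false, request->payload};  // toy "relocated" copy
    return std::cref(maybeShared->value());
}
```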
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
index 78b2a12918..95a20a8f80 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -79,4 +79,11 @@ nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::ErrorStatus status) {
     return makeExecutionFailure(makeGeneralFailure(result, status));
 }
 
+#define HANDLE_HAL_STATUS(status)                                       \
+    if (const auto canonical = ::android::nn::convert(status).value_or( \
+                ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
+        canonical == ::android::nn::ErrorStatus::NONE) {                \
+    } else                                                              \
+        return NN_ERROR(canonical)
+
 }  // namespace android::hardware::neuralnetworks::utils
\ No newline at end of file
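The macro leans on two tricks: the C++17 if-initializer scopes `canonical` to the statement, and the deliberately empty success branch leaves a dangling `else return NN_ERROR(canonical)` for the caller's streamed message (and trailing semicolon) to complete. A self-contained toy showing the same expansion with stand-in types:

```cpp
#include <optional>
#include <utility>

enum class HalStatus { NONE, GENERAL_FAILURE };
enum class Canonical { NONE, GENERAL_FAILURE };

inline std::optional<Canonical> convert(HalStatus s) {
    return s == HalStatus::NONE ? Canonical::NONE : Canonical::GENERAL_FAILURE;
}

// Toy streamable error, standing in for what NN_ERROR(canonical) produces.
struct Error {
    Canonical status;
    Error&& operator<<(const char* /*context*/) && { return std::move(*this); }
};

// Same structure as HANDLE_HAL_STATUS: empty success branch, dangling else that the
// caller completes with `<< "message";`.
#define HANDLE_STATUS(status)                                                         \
    if (const auto canonical = convert(status).value_or(Canonical::GENERAL_FAILURE); \
        canonical == Canonical::NONE) {                                               \
    } else                                                                            \
        return Error{canonical}

std::optional<Error> doWork(HalStatus status) {
    HANDLE_STATUS(status) << "doWork failed";  // returns only on failure
    return std::nullopt;                       // success path falls through
}
```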
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
index 7103c6b375..6679afefec 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
@@ -17,19 +17,60 @@
 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
 
+#include <android-base/logging.h>
 #include <android-base/thread_annotations.h>
 #include <condition_variable>
+#include <functional>
 #include <mutex>
 #include <optional>
+#include <type_traits>
 
 namespace android::hardware::neuralnetworks::utils {
 
-// This class is thread safe.
+// This class adapts a function pointer and offers two affordances:
+// 1) This class object can be used to generate a callback (via the implicit conversion operator)
+//    that can be used to send the result to `CallbackValue` when called.
+// 2) This class object can be used to retrieve the result of the callback with `take`.
+//
+// This class is thread compatible.
+template <typename ReturnType, typename... ArgTypes>
+class CallbackValue final {
+  public:
+    using FunctionType = std::add_pointer_t<ReturnType(ArgTypes...)>;
+    using CallbackType = std::function<void(ArgTypes...)>;
+
+    explicit CallbackValue(FunctionType fn);
+
+    // Creates a callback that forwards its arguments to `mFunction` and stores the result in
+    // `mReturnValue`.
+    /*implicit*/ operator CallbackType();  // NOLINT(google-explicit-constructor)
+
+    // Take the result of calling `mFunction`.
+    // Precondition: mReturnValue.has_value()
+    // Postcondition: !mReturnValue.has_value()
+    [[nodiscard]] ReturnType take();
+
+  private:
+    std::optional<ReturnType> mReturnValue;
+    FunctionType mFunction;
+};
+
+// Deduction guide for CallbackValue when constructed with a function pointer.
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue(ReturnType (*)(ArgTypes...))->CallbackValue<ReturnType, ArgTypes...>;
+
+// Thread-safe container to pass a value between threads.
 template <typename Type>
 class TransferValue final {
   public:
+    // Put the value in `TransferValue`. If `TransferValue` already has a value, this function is a
+    // no-op.
     void put(Type object) const;
+
+    // Take the value stored in `TransferValue`. If no value is available, this function will block
+    // until the value becomes available.
+    // Postcondition: !mObject.has_value()
     [[nodiscard]] Type take() const;
 
   private:
@@ -38,7 +79,23 @@ class TransferValue final {
     mutable std::optional<Type> mObject GUARDED_BY(mMutex);
 };
 
-// template implementation
+// template implementations
+
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue<ReturnType, ArgTypes...>::CallbackValue(FunctionType fn) : mFunction(fn) {}
+
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue<ReturnType, ArgTypes...>::operator CallbackType() {
+    return [this](ArgTypes... args) { mReturnValue = mFunction(args...); };
+}
+
+template <typename ReturnType, typename... ArgTypes>
+ReturnType CallbackValue<ReturnType, ArgTypes...>::take() {
+    CHECK(mReturnValue.has_value());
+    std::optional<ReturnType> object;
+    std::swap(object, mReturnValue);
+    return std::move(object).value();
+}
 
 template <typename Type>
 void TransferValue<Type>::put(Type object) const {
@@ -56,6 +113,7 @@ Type TransferValue<Type>::take() const {
     std::unique_lock lock(mMutex);
     base::ScopedLockAssertion lockAssertion(mMutex);
     mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); });
+    CHECK(mObject.has_value());
     std::optional<Type> object;
     std::swap(object, mObject);
    return std::move(object).value();
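TransferValue pairs a mutex/condition-variable wait with a swap-out, so take() leaves the container empty as the new comment promises; the added CHECK is purely defensive, since the predicate form of wait already guarantees a value is present. A self-contained toy of the same handoff, assuming a single producer and a single consumer as the comments do:

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>
#include <utility>

template <typename T>
class Transfer {
  public:
    void put(T value) {
        {
            std::lock_guard lock(mMutex);
            if (!mValue.has_value()) mValue = std::move(value);  // later puts are no-ops
        }
        mCond.notify_all();
    }
    T take() {
        std::unique_lock lock(mMutex);
        mCond.wait(lock, [this] { return mValue.has_value(); });
        std::optional<T> out;
        std::swap(out, mValue);  // leave the slot empty, matching the documented postcondition
        return std::move(out).value();
    }

  private:
    std::mutex mMutex;
    std::condition_variable mCond;
    std::optional<T> mValue;
};

int main() {
    Transfer<int> t;
    std::thread producer([&t] { t.put(42); });
    std::cout << t.take() << '\n';  // blocks until the producer's put
    producer.join();
}
```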