diff --git a/neuralnetworks/1.0/utils/Android.bp b/neuralnetworks/1.0/utils/Android.bp index 57a052f1f5..4d61fc0a94 100644 --- a/neuralnetworks/1.0/utils/Android.bp +++ b/neuralnetworks/1.0/utils/Android.bp @@ -20,6 +20,7 @@ cc_library_static { srcs: ["src/*"], local_include_dirs: ["include/nnapi/hal/1.0/"], export_include_dirs: ["include"], + cflags: ["-Wthread-safety"], static_libs: [ "neuralnetworks_types", "neuralnetworks_utils_hal_common", diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h new file mode 100644 index 0000000000..65b75e5d82 --- /dev/null +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { + +class PreparedModelCallback final : public IPreparedModelCallback, + public hal::utils::IProtectedCallback { + public: + using Data = nn::GeneralResult; + + Return notify(ErrorStatus status, const sp& preparedModel) override; + + void notifyAsDeadObject() override; + + Data get(); + + private: + void notifyInternal(Data result); + + hal::utils::TransferValue mData; +}; + +class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { + public: + using Data = nn::ExecutionResult, nn::Timing>>; + + Return notify(ErrorStatus status) override; + + void notifyAsDeadObject() override; + + Data get(); + + private: + void notifyInternal(Data result); + + hal::utils::TransferValue mData; +}; + +} // namespace android::hardware::neuralnetworks::V1_0::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h index 8ad98cbafc..fb77cb2475 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h @@ -24,42 +24,44 @@ namespace android::nn { -Result convert(const hal::V1_0::OperandType& operandType); -Result convert(const hal::V1_0::OperationType& operationType); -Result convert(const hal::V1_0::OperandLifeTime& lifetime); -Result convert(const hal::V1_0::DeviceStatus& deviceStatus); -Result convert(const hal::V1_0::PerformanceInfo& performanceInfo); -Result convert(const hal::V1_0::Capabilities& capabilities); -Result convert(const hal::V1_0::DataLocation& location); -Result convert(const 
hal::V1_0::Operand& operand); -Result convert(const hal::V1_0::Operation& operation); -Result convert(const hardware::hidl_vec& operandValues); -Result convert(const hardware::hidl_memory& memory); -Result convert(const hal::V1_0::Model& model); -Result convert(const hal::V1_0::RequestArgument& requestArgument); -Result convert(const hal::V1_0::Request& request); -Result convert(const hal::V1_0::ErrorStatus& status); +GeneralResult convert(const hal::V1_0::OperandType& operandType); +GeneralResult convert(const hal::V1_0::OperationType& operationType); +GeneralResult convert(const hal::V1_0::OperandLifeTime& lifetime); +GeneralResult convert(const hal::V1_0::DeviceStatus& deviceStatus); +GeneralResult convert( + const hal::V1_0::PerformanceInfo& performanceInfo); +GeneralResult convert(const hal::V1_0::Capabilities& capabilities); +GeneralResult convert(const hal::V1_0::DataLocation& location); +GeneralResult convert(const hal::V1_0::Operand& operand); +GeneralResult convert(const hal::V1_0::Operation& operation); +GeneralResult convert(const hardware::hidl_vec& operandValues); +GeneralResult convert(const hardware::hidl_memory& memory); +GeneralResult convert(const hal::V1_0::Model& model); +GeneralResult convert(const hal::V1_0::RequestArgument& requestArgument); +GeneralResult convert(const hal::V1_0::Request& request); +GeneralResult convert(const hal::V1_0::ErrorStatus& status); } // namespace android::nn namespace android::hardware::neuralnetworks::V1_0::utils { -nn::Result convert(const nn::OperandType& operandType); -nn::Result convert(const nn::OperationType& operationType); -nn::Result convert(const nn::Operand::LifeTime& lifetime); -nn::Result convert(const nn::DeviceStatus& deviceStatus); -nn::Result convert(const nn::Capabilities::PerformanceInfo& performanceInfo); -nn::Result convert(const nn::Capabilities& capabilities); -nn::Result convert(const nn::DataLocation& location); -nn::Result convert(const nn::Operand& operand); -nn::Result convert(const 
nn::Operation& operation); -nn::Result> convert(const nn::Model::OperandValues& operandValues); -nn::Result convert(const nn::Memory& memory); -nn::Result convert(const nn::Model& model); -nn::Result convert(const nn::Request::Argument& requestArgument); -nn::Result convert(const nn::Request::MemoryPool& memoryPool); -nn::Result convert(const nn::Request& request); -nn::Result convert(const nn::ErrorStatus& status); +nn::GeneralResult convert(const nn::OperandType& operandType); +nn::GeneralResult convert(const nn::OperationType& operationType); +nn::GeneralResult convert(const nn::Operand::LifeTime& lifetime); +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus); +nn::GeneralResult convert( + const nn::Capabilities::PerformanceInfo& performanceInfo); +nn::GeneralResult convert(const nn::Capabilities& capabilities); +nn::GeneralResult convert(const nn::DataLocation& location); +nn::GeneralResult convert(const nn::Operand& operand); +nn::GeneralResult convert(const nn::Operation& operation); +nn::GeneralResult> convert(const nn::Model::OperandValues& operandValues); +nn::GeneralResult convert(const nn::Memory& memory); +nn::GeneralResult convert(const nn::Model& model); +nn::GeneralResult convert(const nn::Request::Argument& requestArgument); +nn::GeneralResult convert(const nn::Request::MemoryPool& memoryPool); +nn::GeneralResult convert(const nn::Request& request); +nn::GeneralResult convert(const nn::ErrorStatus& status); } // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h new file mode 100644 index 0000000000..4403a579cc --- /dev/null +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { + +class Device final : public nn::IDevice { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create(std::string name, + sp device); + + Device(PrivateConstructorTag tag, std::string name, nn::Capabilities capabilities, + sp device, hal::utils::DeathHandler deathHandler); + + const std::string& getName() const override; + const std::string& getVersionString() const override; + nn::Version getFeatureLevel() const override; + nn::DeviceType getType() const override; + const std::vector& getSupportedExtensions() const override; + const nn::Capabilities& getCapabilities() const override; + std::pair getNumberOfCacheFilesNeeded() const override; + + nn::GeneralResult wait() const override; + + nn::GeneralResult> getSupportedOperations( + const nn::Model& model) const override; + + nn::GeneralResult prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const 
nn::CacheToken& token) const override; + + nn::GeneralResult allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const override; + + private: + const std::string kName; + const std::string kVersionString = "UNKNOWN"; + const std::vector kExtensions; + const nn::Capabilities kCapabilities; + const sp kDevice; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_0::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h new file mode 100644 index 0000000000..31f366dadc --- /dev/null +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { + +class PreparedModel final : public nn::IPreparedModel { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create( + sp preparedModel); + + PreparedModel(PrivateConstructorTag tag, sp preparedModel, + hal::utils::DeathHandler deathHandler); + + nn::ExecutionResult, nn::Timing>> execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + + nn::GeneralResult> executeFenced( + const nn::Request& request, const std::vector& waitFor, + nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration, + const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + + std::any getUnderlyingResource() const override; + + private: + const sp kPreparedModel; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_0::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h new file mode 100644 index 0000000000..11fbb9edcf --- /dev/null +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { + +nn::GeneralResult getDevice(const std::string& name); + +} // namespace android::hardware::neuralnetworks::V1_0::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h index ec8da06ca6..baa2b9523e 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -31,10 +32,14 @@ constexpr auto kVersion = nn::Version::ANDROID_OC_MR1; template nn::Result validate(const Type& halObject) { - const auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeCanonical = nn::convert(halObject); + if (!maybeCanonical.has_value()) { + return nn::error() << maybeCanonical.error().message; + } + const auto version = NN_TRY(nn::validate(maybeCanonical.value())); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; } return {}; } @@ -51,9 +56,14 @@ bool valid(const Type& halObject) { template decltype(nn::convert(std::declval())) 
validatedConvertToCanonical(const Type& halObject) { auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeVersion = nn::validate(canonical); + if (!maybeVersion.has_value()) { + return nn::error() << maybeVersion.error(); + } + const auto version = maybeVersion.value(); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; } return canonical; } diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp new file mode 100644 index 0000000000..f286bcc50e --- /dev/null +++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Callbacks.h" + +#include "Conversions.h" +#include "PreparedModel.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { +namespace { + +nn::GeneralResult convertPreparedModel( + const sp& preparedModel) { + return NN_TRY(utils::PreparedModel::create(preparedModel)); +} + +} // namespace + +Return PreparedModelCallback::notify(ErrorStatus status, + const sp& preparedModel) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); + } else if (preparedModel == nullptr) { + notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Returned preparedModel is nullptr"); + } else { + notifyInternal(convertPreparedModel(preparedModel)); + } + return Void(); +} + +void PreparedModelCallback::notifyAsDeadObject() { + notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); +} + +PreparedModelCallback::Data PreparedModelCallback::get() { + return mData.take(); +} + +void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { + mData.put(std::move(result)); +} + +// ExecutionCallback methods begin here + +Return ExecutionCallback::notify(ErrorStatus status) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); + } else { + notifyInternal({}); + } + return Void(); +} + +void ExecutionCallback::notifyAsDeadObject() { + notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); +} + +ExecutionCallback::Data ExecutionCallback::get() { + return mData.take(); +} + +void ExecutionCallback::notifyInternal(ExecutionCallback::Data 
result) { + mData.put(std::move(result)); +} + +} // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp index 4a58f3b93c..f301065cf9 100644 --- a/neuralnetworks/1.0/utils/src/Conversions.cpp +++ b/neuralnetworks/1.0/utils/src/Conversions.cpp @@ -52,7 +52,7 @@ template using ConvertOutput = std::decay_t()).value())>; template -Result>> convert(const hidl_vec& arguments) { +GeneralResult>> convert(const hidl_vec& arguments) { std::vector> canonical; canonical.reserve(arguments.size()); for (const auto& argument : arguments) { @@ -63,30 +63,31 @@ Result>> convert(const hidl_vec& arguments } // anonymous namespace -Result convert(const hal::V1_0::OperandType& operandType) { +GeneralResult convert(const hal::V1_0::OperandType& operandType) { return static_cast(operandType); } -Result convert(const hal::V1_0::OperationType& operationType) { +GeneralResult convert(const hal::V1_0::OperationType& operationType) { return static_cast(operationType); } -Result convert(const hal::V1_0::OperandLifeTime& lifetime) { +GeneralResult convert(const hal::V1_0::OperandLifeTime& lifetime) { return static_cast(lifetime); } -Result convert(const hal::V1_0::DeviceStatus& deviceStatus) { +GeneralResult convert(const hal::V1_0::DeviceStatus& deviceStatus) { return static_cast(deviceStatus); } -Result convert(const hal::V1_0::PerformanceInfo& performanceInfo) { +GeneralResult convert( + const hal::V1_0::PerformanceInfo& performanceInfo) { return Capabilities::PerformanceInfo{ .execTime = performanceInfo.execTime, .powerUsage = performanceInfo.powerUsage, }; } -Result convert(const hal::V1_0::Capabilities& capabilities) { +GeneralResult convert(const hal::V1_0::Capabilities& capabilities) { const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance)); const auto float32Performance = NN_TRY(convert(capabilities.float32Performance)); @@ -100,7 +101,7 @@ Result 
convert(const hal::V1_0::Capabilities& capabilities) { }; } -Result convert(const hal::V1_0::DataLocation& location) { +GeneralResult convert(const hal::V1_0::DataLocation& location) { return DataLocation{ .poolIndex = location.poolIndex, .offset = location.offset, @@ -108,7 +109,7 @@ Result convert(const hal::V1_0::DataLocation& location) { }; } -Result convert(const hal::V1_0::Operand& operand) { +GeneralResult convert(const hal::V1_0::Operand& operand) { return Operand{ .type = NN_TRY(convert(operand.type)), .dimensions = operand.dimensions, @@ -119,7 +120,7 @@ Result convert(const hal::V1_0::Operand& operand) { }; } -Result convert(const hal::V1_0::Operation& operation) { +GeneralResult convert(const hal::V1_0::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -127,15 +128,15 @@ Result convert(const hal::V1_0::Operation& operation) { }; } -Result convert(const hidl_vec& operandValues) { +GeneralResult convert(const hidl_vec& operandValues) { return Model::OperandValues(operandValues.data(), operandValues.size()); } -Result convert(const hidl_memory& memory) { +GeneralResult convert(const hidl_memory& memory) { return createSharedMemoryFromHidlMemory(memory); } -Result convert(const hal::V1_0::Model& model) { +GeneralResult convert(const hal::V1_0::Model& model) { auto operations = NN_TRY(convert(model.operations)); // Verify number of consumers. 
@@ -144,9 +145,9 @@ Result convert(const hal::V1_0::Model& model) { CHECK(model.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < model.operands.size(); ++i) { if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) { - return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected " - << numberOfConsumers[i] << " but found " - << model.operands[i].numberOfConsumers; + return NN_ERROR(ErrorStatus::GENERAL_FAILURE) + << "Invalid numberOfConsumers for operand " << i << ", expected " + << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers; } } @@ -164,7 +165,7 @@ Result convert(const hal::V1_0::Model& model) { }; } -Result convert(const hal::V1_0::RequestArgument& argument) { +GeneralResult convert(const hal::V1_0::RequestArgument& argument) { const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE : Request::Argument::LifeTime::POOL; return Request::Argument{ @@ -174,7 +175,7 @@ Result convert(const hal::V1_0::RequestArgument& argument) { }; } -Result convert(const hal::V1_0::Request& request) { +GeneralResult convert(const hal::V1_0::Request& request) { auto memories = NN_TRY(convert(request.pools)); std::vector pools; pools.reserve(memories.size()); @@ -187,7 +188,7 @@ Result convert(const hal::V1_0::Request& request) { }; } -Result convert(const hal::V1_0::ErrorStatus& status) { +GeneralResult convert(const hal::V1_0::ErrorStatus& status) { switch (status) { case hal::V1_0::ErrorStatus::NONE: case hal::V1_0::ErrorStatus::DEVICE_UNAVAILABLE: @@ -196,7 +197,8 @@ Result convert(const hal::V1_0::ErrorStatus& status) { case hal::V1_0::ErrorStatus::INVALID_ARGUMENT: return static_cast(status); } - return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status); + return NN_ERROR(ErrorStatus::GENERAL_FAILURE) + << "Invalid ErrorStatus " << underlyingType(status); } } // namespace android::nn @@ -208,7 +210,7 @@ template using ConvertOutput = std::decay_t()).value())>; 
template -nn::Result>> convert(const std::vector& arguments) { +nn::GeneralResult>> convert(const std::vector& arguments) { hidl_vec> halObject(arguments.size()); for (size_t i = 0; i < arguments.size(); ++i) { halObject[i] = NN_TRY(utils::convert(arguments[i])); @@ -218,33 +220,35 @@ nn::Result>> convert(const std::vector& argum } // anonymous namespace -nn::Result convert(const nn::OperandType& operandType) { +nn::GeneralResult convert(const nn::OperandType& operandType) { return static_cast(operandType); } -nn::Result convert(const nn::OperationType& operationType) { +nn::GeneralResult convert(const nn::OperationType& operationType) { return static_cast(operationType); } -nn::Result convert(const nn::Operand::LifeTime& lifetime) { +nn::GeneralResult convert(const nn::Operand::LifeTime& lifetime) { if (lifetime == nn::Operand::LifeTime::POINTER) { - return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Model cannot be converted because it contains pointer-based memory"; } return static_cast(lifetime); } -nn::Result convert(const nn::DeviceStatus& deviceStatus) { +nn::GeneralResult convert(const nn::DeviceStatus& deviceStatus) { return static_cast(deviceStatus); } -nn::Result convert(const nn::Capabilities::PerformanceInfo& performanceInfo) { +nn::GeneralResult convert( + const nn::Capabilities::PerformanceInfo& performanceInfo) { return PerformanceInfo{ .execTime = performanceInfo.execTime, .powerUsage = performanceInfo.powerUsage, }; } -nn::Result convert(const nn::Capabilities& capabilities) { +nn::GeneralResult convert(const nn::Capabilities& capabilities) { return Capabilities{ .float32Performance = NN_TRY(convert( capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))), @@ -253,7 +257,7 @@ nn::Result convert(const nn::Capabilities& capabilities) { }; } -nn::Result convert(const nn::DataLocation& location) { +nn::GeneralResult convert(const 
nn::DataLocation& location) { return DataLocation{ .poolIndex = location.poolIndex, .offset = location.offset, @@ -261,7 +265,7 @@ nn::Result convert(const nn::DataLocation& location) { }; } -nn::Result convert(const nn::Operand& operand) { +nn::GeneralResult convert(const nn::Operand& operand) { return Operand{ .type = NN_TRY(convert(operand.type)), .dimensions = operand.dimensions, @@ -273,7 +277,7 @@ nn::Result convert(const nn::Operand& operand) { }; } -nn::Result convert(const nn::Operation& operation) { +nn::GeneralResult convert(const nn::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -281,20 +285,21 @@ nn::Result convert(const nn::Operation& operation) { }; } -nn::Result> convert(const nn::Model::OperandValues& operandValues) { +nn::GeneralResult> convert(const nn::Model::OperandValues& operandValues) { return hidl_vec(operandValues.data(), operandValues.data() + operandValues.size()); } -nn::Result convert(const nn::Memory& memory) { +nn::GeneralResult convert(const nn::Memory& memory) { const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size); // Copy memory to force the native_handle_t to be copied. 
auto copiedMemory = hidlMemory; return copiedMemory; } -nn::Result convert(const nn::Model& model) { +nn::GeneralResult convert(const nn::Model& model) { if (!hal::utils::hasNoPointerData(model)) { - return NN_ERROR() << "Mdoel cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Model cannot be converted because it contains pointer-based memory"; } auto operands = NN_TRY(convert(model.main.operands)); @@ -317,9 +322,10 @@ nn::Result convert(const nn::Model& model) { }; } -nn::Result convert(const nn::Request::Argument& requestArgument) { +nn::GeneralResult convert(const nn::Request::Argument& requestArgument) { if (requestArgument.lifetime == nn::Request::Argument::LifeTime::POINTER) { - return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Request cannot be converted because it contains pointer-based memory"; } const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE; return RequestArgument{ @@ -329,13 +335,14 @@ nn::Result convert(const nn::Request::Argument& requestArgument }; } -nn::Result convert(const nn::Request::MemoryPool& memoryPool) { +nn::GeneralResult convert(const nn::Request::MemoryPool& memoryPool) { return convert(std::get(memoryPool)); } -nn::Result convert(const nn::Request& request) { +nn::GeneralResult convert(const nn::Request& request) { if (!hal::utils::hasNoPointerData(request)) { - return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Request cannot be converted because it contains pointer-based memory"; } return Request{ @@ -345,7 +352,7 @@ nn::Result convert(const nn::Request& request) { }; } -nn::Result convert(const nn::ErrorStatus& status) { +nn::GeneralResult convert(const nn::ErrorStatus& status) { switch (status) { case 
nn::ErrorStatus::NONE: case nn::ErrorStatus::DEVICE_UNAVAILABLE: diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp new file mode 100644 index 0000000000..8292f170c2 --- /dev/null +++ b/neuralnetworks/1.0/utils/src/Device.cpp @@ -0,0 +1,199 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Device.h" + +#include "Callbacks.h" +#include "Conversions.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { +namespace { + +nn::GeneralResult initCapabilities(V1_0::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status); + } else { + result = validatedConvertToCanonical(capabilities); + } + }; + + const auto ret = device->getCapabilities(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +} // namespace + +nn::GeneralResult> 
Device::create(std::string name, + sp device) { + if (name.empty()) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_0::utils::Device::create must have non-empty name"; + } + if (device == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_0::utils::Device::create must have non-null device"; + } + + auto capabilities = NN_TRY(initCapabilities(device.get())); + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); + return std::make_shared(PrivateConstructorTag{}, std::move(name), + std::move(capabilities), std::move(device), + std::move(deathHandler)); +} + +Device::Device(PrivateConstructorTag /*tag*/, std::string name, nn::Capabilities capabilities, + sp device, hal::utils::DeathHandler deathHandler) + : kName(std::move(name)), + kCapabilities(std::move(capabilities)), + kDevice(std::move(device)), + kDeathHandler(std::move(deathHandler)) {} + +const std::string& Device::getName() const { + return kName; +} + +const std::string& Device::getVersionString() const { + return kVersionString; +} + +nn::Version Device::getFeatureLevel() const { + return nn::Version::ANDROID_OC_MR1; +} + +nn::DeviceType Device::getType() const { + return nn::DeviceType::OTHER; +} + +const std::vector& Device::getSupportedExtensions() const { + return kExtensions; +} + +const nn::Capabilities& Device::getCapabilities() const { + return kCapabilities; +} + +std::pair Device::getNumberOfCacheFilesNeeded() const { + return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0); +} + +nn::GeneralResult Device::wait() const { + const auto ret = kDevice->ping(); + return hal::utils::handleTransportError(ret); +} + +nn::GeneralResult> Device::getSupportedOperations(const nn::Model& model) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + + nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + auto cb = [&result, &model](ErrorStatus status, const hidl_vec& supportedOperations) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) + << "getSupportedOperations failed with " << toString(status); + } else if (supportedOperations.size() != model.main.operations.size()) { + result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "getSupportedOperations returned vector of size " + << supportedOperations.size() << " but expected " + << model.main.operations.size(); + } else { + result = supportedOperations; + } + }; + + const auto ret = kDevice->getSupportedOperations(hidlModel, cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult Device::prepareModel( + const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/, + nn::OptionalTimePoint /*deadline*/, const std::vector& /*modelCache*/, + const std::vector& /*dataCache*/, const nn::CacheToken& /*token*/) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kDevice->prepareModel(hidlModel, cb); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "prepareModel failed with " << toString(status); + } + + return cb->get(); +} + +nn::GeneralResult Device::prepareModelFromCache( + nn::OptionalTimePoint /*deadline*/, const std::vector& /*modelCache*/, + const std::vector& /*dataCache*/, const nn::CacheToken& /*token*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IDevice::prepareModelFromCache not supported on 1.0 HAL service"; +} + +nn::GeneralResult Device::allocate( + const nn::BufferDesc& /*desc*/, + const std::vector& /*preparedModels*/, + const std::vector& /*inputRoles*/, + const std::vector& /*outputRoles*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IDevice::allocate not supported on 1.0 HAL service"; +} + +} // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp new file mode 100644 index 0000000000..11ccbe3221 --- /dev/null +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "PreparedModel.h" + +#include "Callbacks.h" +#include "Conversions.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_0::utils { + +nn::GeneralResult> PreparedModel::create( + sp preparedModel) { + if (preparedModel == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_0::utils::PreparedModel::create must have non-null preparedModel"; + } + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); + return std::make_shared(PrivateConstructorTag{}, std::move(preparedModel), + std::move(deathHandler)); +} + +PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp preparedModel, + hal::utils::DeathHandler deathHandler) + : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {} + +nn::ExecutionResult, nn::Timing>> PreparedModel::execute( + const nn::Request& request, nn::MeasureTiming /*measure*/, + const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const { + // Ensure that request is ready for IPC. 
+ std::optional maybeRequestInShared; + const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared))); + + const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared))); + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kPreparedModel->execute(hidlRequest, cb); + const auto status = + NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret))); + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "execute failed with " << toString(status); + } + + auto result = NN_TRY(cb->get()); + NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); + + return result; +} + +nn::GeneralResult> +PreparedModel::executeFenced( + const nn::Request& /*request*/, const std::vector& /*waitFor*/, + nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/, + const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IPreparedModel::executeFenced is not supported on 1.0 HAL service"; +} + +std::any PreparedModel::getUnderlyingResource() const { + sp resource = kPreparedModel; + return resource; +} + +} // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.0/utils/src/Service.cpp b/neuralnetworks/1.0/utils/src/Service.cpp new file mode 100644 index 0000000000..ec28b1d206 --- /dev/null +++ b/neuralnetworks/1.0/utils/src/Service.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Service.h" + +#include +#include +#include +#include +#include +#include "Device.h" + +namespace android::hardware::neuralnetworks::V1_0::utils { + +nn::GeneralResult getDevice(const std::string& name) { + hal::utils::ResilientDevice::Factory makeDevice = + [name](bool blocking) -> nn::GeneralResult { + auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name); + if (service == nullptr) { + return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr"; + } + return Device::create(name, std::move(service)); + }; + + return hal::utils::ResilientDevice::create(std::move(makeDevice)); +} + +} // namespace android::hardware::neuralnetworks::V1_0::utils diff --git a/neuralnetworks/1.1/utils/Android.bp b/neuralnetworks/1.1/utils/Android.bp index 85a32c5834..909575b634 100644 --- a/neuralnetworks/1.1/utils/Android.bp +++ b/neuralnetworks/1.1/utils/Android.bp @@ -20,6 +20,7 @@ cc_library_static { srcs: ["src/*"], local_include_dirs: ["include/nnapi/hal/1.1/"], export_include_dirs: ["include"], + cflags: ["-Wthread-safety"], static_libs: [ "neuralnetworks_types", "neuralnetworks_utils_hal_common", diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h index d0c5397faf..16ddd53496 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h @@ -24,21 +24,22 @@ namespace android::nn { -Result 
convert(const hal::V1_1::OperationType& operationType); -Result convert(const hal::V1_1::Capabilities& capabilities); -Result convert(const hal::V1_1::Operation& operation); -Result convert(const hal::V1_1::Model& model); -Result convert(const hal::V1_1::ExecutionPreference& executionPreference); +GeneralResult convert(const hal::V1_1::OperationType& operationType); +GeneralResult convert(const hal::V1_1::Capabilities& capabilities); +GeneralResult convert(const hal::V1_1::Operation& operation); +GeneralResult convert(const hal::V1_1::Model& model); +GeneralResult convert( + const hal::V1_1::ExecutionPreference& executionPreference); } // namespace android::nn namespace android::hardware::neuralnetworks::V1_1::utils { -nn::Result convert(const nn::OperationType& operationType); -nn::Result convert(const nn::Capabilities& capabilities); -nn::Result convert(const nn::Operation& operation); -nn::Result convert(const nn::Model& model); -nn::Result convert(const nn::ExecutionPreference& executionPreference); +nn::GeneralResult convert(const nn::OperationType& operationType); +nn::GeneralResult convert(const nn::Capabilities& capabilities); +nn::GeneralResult convert(const nn::Operation& operation); +nn::GeneralResult convert(const nn::Model& model); +nn::GeneralResult convert(const nn::ExecutionPreference& executionPreference); } // namespace android::hardware::neuralnetworks::V1_1::utils diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h new file mode 100644 index 0000000000..f55ac6cb6d --- /dev/null +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_1::utils { + +class Device final : public nn::IDevice { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create(std::string name, + sp device); + + Device(PrivateConstructorTag tag, std::string name, nn::Capabilities capabilities, + sp device, hal::utils::DeathHandler deathHandler); + + const std::string& getName() const override; + const std::string& getVersionString() const override; + nn::Version getFeatureLevel() const override; + nn::DeviceType getType() const override; + const std::vector& getSupportedExtensions() const override; + const nn::Capabilities& getCapabilities() const override; + std::pair getNumberOfCacheFilesNeeded() const override; + + nn::GeneralResult wait() const override; + + nn::GeneralResult> getSupportedOperations( + const nn::Model& model) const override; + + nn::GeneralResult prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const 
nn::CacheToken& token) const override; + + nn::GeneralResult allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const override; + + private: + const std::string kName; + const std::string kVersionString = "UNKNOWN"; + const std::vector kExtensions; + const nn::Capabilities kCapabilities; + const sp kDevice; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_1::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h new file mode 100644 index 0000000000..a3ad3cfb2d --- /dev/null +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_1::utils { + +nn::GeneralResult getDevice(const std::string& name); + +} // namespace android::hardware::neuralnetworks::V1_1::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h index 6f9aa602d8..0fee628eb6 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -33,10 +34,14 @@ constexpr auto kVersion = nn::Version::ANDROID_P; template nn::Result validate(const Type& halObject) { - const auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeCanonical = nn::convert(halObject); + if (!maybeCanonical.has_value()) { + return nn::error() << maybeCanonical.error().message; + } + const auto version = NN_TRY(nn::validate(maybeCanonical.value())); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; } return {}; } @@ -53,9 +58,14 @@ bool valid(const Type& halObject) { template decltype(nn::convert(std::declval())) validatedConvertToCanonical(const Type& halObject) { auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeVersion = nn::validate(canonical); + if (!maybeVersion.has_value()) { + return nn::error() << maybeVersion.error(); + } + const auto version = maybeVersion.value(); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: 
" << version << " vs required " + << utils::kVersion; } return canonical; } diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp index 7fee16b5f2..ffe0752c11 100644 --- a/neuralnetworks/1.1/utils/src/Conversions.cpp +++ b/neuralnetworks/1.1/utils/src/Conversions.cpp @@ -42,7 +42,7 @@ template using convertOutput = std::decay_t()).value())>; template -Result>> convert(const hidl_vec& arguments) { +GeneralResult>> convert(const hidl_vec& arguments) { std::vector> canonical; canonical.reserve(arguments.size()); for (const auto& argument : arguments) { @@ -53,11 +53,11 @@ Result>> convert(const hidl_vec& arguments } // anonymous namespace -Result convert(const hal::V1_1::OperationType& operationType) { +GeneralResult convert(const hal::V1_1::OperationType& operationType) { return static_cast(operationType); } -Result convert(const hal::V1_1::Capabilities& capabilities) { +GeneralResult convert(const hal::V1_1::Capabilities& capabilities) { const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance)); const auto float32Performance = NN_TRY(convert(capabilities.float32Performance)); const auto relaxedFloat32toFloat16Performance = @@ -73,7 +73,7 @@ Result convert(const hal::V1_1::Capabilities& capabilities) { }; } -Result convert(const hal::V1_1::Operation& operation) { +GeneralResult convert(const hal::V1_1::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -81,7 +81,7 @@ Result convert(const hal::V1_1::Operation& operation) { }; } -Result convert(const hal::V1_1::Model& model) { +GeneralResult convert(const hal::V1_1::Model& model) { auto operations = NN_TRY(convert(model.operations)); // Verify number of consumers. 
@@ -90,9 +90,9 @@ Result convert(const hal::V1_1::Model& model) { CHECK(model.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < model.operands.size(); ++i) { if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) { - return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected " - << numberOfConsumers[i] << " but found " - << model.operands[i].numberOfConsumers; + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid numberOfConsumers for operand " << i << ", expected " + << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers; } } @@ -111,7 +111,8 @@ Result convert(const hal::V1_1::Model& model) { }; } -Result convert(const hal::V1_1::ExecutionPreference& executionPreference) { +GeneralResult convert( + const hal::V1_1::ExecutionPreference& executionPreference) { return static_cast(executionPreference); } @@ -122,20 +123,20 @@ namespace { using utils::convert; -nn::Result convert( +nn::GeneralResult convert( const nn::Capabilities::PerformanceInfo& performanceInfo) { return V1_0::utils::convert(performanceInfo); } -nn::Result convert(const nn::Operand& operand) { +nn::GeneralResult convert(const nn::Operand& operand) { return V1_0::utils::convert(operand); } -nn::Result> convert(const nn::Model::OperandValues& operandValues) { +nn::GeneralResult> convert(const nn::Model::OperandValues& operandValues) { return V1_0::utils::convert(operandValues); } -nn::Result convert(const nn::Memory& memory) { +nn::GeneralResult convert(const nn::Memory& memory) { return V1_0::utils::convert(memory); } @@ -143,7 +144,7 @@ template using convertOutput = std::decay_t()).value())>; template -nn::Result>> convert(const std::vector& arguments) { +nn::GeneralResult>> convert(const std::vector& arguments) { hidl_vec> halObject(arguments.size()); for (size_t i = 0; i < arguments.size(); ++i) { halObject[i] = NN_TRY(convert(arguments[i])); @@ -153,11 +154,11 @@ nn::Result>> convert(const std::vector& argum 
} // anonymous namespace -nn::Result convert(const nn::OperationType& operationType) { +nn::GeneralResult convert(const nn::OperationType& operationType) { return static_cast(operationType); } -nn::Result convert(const nn::Capabilities& capabilities) { +nn::GeneralResult convert(const nn::Capabilities& capabilities) { return Capabilities{ .float32Performance = NN_TRY(convert( capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))), @@ -168,7 +169,7 @@ nn::Result convert(const nn::Capabilities& capabilities) { }; } -nn::Result convert(const nn::Operation& operation) { +nn::GeneralResult convert(const nn::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -176,9 +177,10 @@ nn::Result convert(const nn::Operation& operation) { }; } -nn::Result convert(const nn::Model& model) { +nn::GeneralResult convert(const nn::Model& model) { if (!hal::utils::hasNoPointerData(model)) { - return NN_ERROR() << "Mdoel cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Model cannot be converted because it contains pointer-based memory"; } auto operands = NN_TRY(convert(model.main.operands)); @@ -202,7 +204,7 @@ nn::Result convert(const nn::Model& model) { }; } -nn::Result convert(const nn::ExecutionPreference& executionPreference) { +nn::GeneralResult convert(const nn::ExecutionPreference& executionPreference) { return static_cast(executionPreference); } diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp new file mode 100644 index 0000000000..03b0d6eb8e --- /dev/null +++ b/neuralnetworks/1.1/utils/src/Device.cpp @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Device.h" + +#include "Conversions.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_1::utils { +namespace { + +nn::GeneralResult initCapabilities(V1_1::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status); + } else { + result = validatedConvertToCanonical(capabilities); + } + }; + + const auto ret = device->getCapabilities_1_1(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +} // namespace + +nn::GeneralResult> Device::create(std::string name, + sp device) { + if (name.empty()) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_1::utils::Device::create must have non-empty name"; + } + if (device == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_1::utils::Device::create must have non-null device"; + } + + auto capabilities = NN_TRY(initCapabilities(device.get())); + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); + return 
std::make_shared(PrivateConstructorTag{}, std::move(name), + std::move(capabilities), std::move(device), + std::move(deathHandler)); +} + +Device::Device(PrivateConstructorTag /*tag*/, std::string name, nn::Capabilities capabilities, + sp device, hal::utils::DeathHandler deathHandler) + : kName(std::move(name)), + kCapabilities(std::move(capabilities)), + kDevice(std::move(device)), + kDeathHandler(std::move(deathHandler)) {} + +const std::string& Device::getName() const { + return kName; +} + +const std::string& Device::getVersionString() const { + return kVersionString; +} + +nn::Version Device::getFeatureLevel() const { + return nn::Version::ANDROID_P; +} + +nn::DeviceType Device::getType() const { + return nn::DeviceType::UNKNOWN; +} + +const std::vector& Device::getSupportedExtensions() const { + return kExtensions; +} + +const nn::Capabilities& Device::getCapabilities() const { + return kCapabilities; +} + +std::pair Device::getNumberOfCacheFilesNeeded() const { + return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0); +} + +nn::GeneralResult Device::wait() const { + const auto ret = kDevice->ping(); + return hal::utils::handleTransportError(ret); +} + +nn::GeneralResult> Device::getSupportedOperations(const nn::Model& model) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + + nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + auto cb = [&result, &model](V1_0::ErrorStatus status, + const hidl_vec& supportedOperations) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) + << "getSupportedOperations_1_1 failed with " << toString(status); + } else if (supportedOperations.size() != model.main.operations.size()) { + result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "getSupportedOperations_1_1 returned vector of size " + << supportedOperations.size() << " but expected " + << model.main.operations.size(); + } else { + result = supportedOperations; + } + }; + + const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult Device::prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/, + nn::OptionalTimePoint /*deadline*/, const std::vector& /*modelCache*/, + const std::vector& /*dataCache*/, const nn::CacheToken& /*token*/) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + const auto hidlPreference = NN_TRY(convert(preference)); + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "prepareModel failed with " << toString(status); + } + + return cb->get(); +} + +nn::GeneralResult Device::prepareModelFromCache( + nn::OptionalTimePoint /*deadline*/, const std::vector& /*modelCache*/, + const std::vector& /*dataCache*/, const nn::CacheToken& /*token*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IDevice::prepareModelFromCache not supported on 1.1 HAL service"; +} + +nn::GeneralResult Device::allocate( + const nn::BufferDesc& /*desc*/, + const std::vector& /*preparedModels*/, + const std::vector& /*inputRoles*/, + const std::vector& /*outputRoles*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IDevice::allocate not supported on 1.1 HAL service"; +} + +} // namespace android::hardware::neuralnetworks::V1_1::utils diff --git a/neuralnetworks/1.1/utils/src/Service.cpp b/neuralnetworks/1.1/utils/src/Service.cpp new file mode 100644 index 0000000000..e2d3240647 --- /dev/null +++ b/neuralnetworks/1.1/utils/src/Service.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Service.h" + +#include +#include +#include +#include +#include +#include "Device.h" + +namespace android::hardware::neuralnetworks::V1_1::utils { + +nn::GeneralResult getDevice(const std::string& name) { + hal::utils::ResilientDevice::Factory makeDevice = + [name](bool blocking) -> nn::GeneralResult { + auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name); + if (service == nullptr) { + return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr"; + } + return Device::create(name, std::move(service)); + }; + + return hal::utils::ResilientDevice::create(std::move(makeDevice)); +} + +} // namespace android::hardware::neuralnetworks::V1_1::utils diff --git a/neuralnetworks/1.2/utils/Android.bp b/neuralnetworks/1.2/utils/Android.bp index a1dd3d0b0d..22e8659557 100644 --- a/neuralnetworks/1.2/utils/Android.bp +++ b/neuralnetworks/1.2/utils/Android.bp @@ -20,6 +20,7 @@ cc_library_static { srcs: ["src/*"], local_include_dirs: ["include/nnapi/hal/1.2/"], export_include_dirs: ["include"], + cflags: ["-Wthread-safety"], static_libs: [ "neuralnetworks_types", "neuralnetworks_utils_hal_common", diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h new file mode 100644 index 0000000000..bc7d92ac83 --- /dev/null +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { + +class PreparedModelCallback final : public IPreparedModelCallback, + public hal::utils::IProtectedCallback { + public: + using Data = nn::GeneralResult; + + Return notify(V1_0::ErrorStatus status, + const sp& preparedModel) override; + Return notify_1_2(V1_0::ErrorStatus status, + const sp& preparedModel) override; + + void notifyAsDeadObject() override; + + Data get(); + + private: + void notifyInternal(Data result); + + hal::utils::TransferValue mData; +}; + +class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { + public: + using Data = nn::ExecutionResult, nn::Timing>>; + + Return notify(V1_0::ErrorStatus status) override; + Return notify_1_2(V1_0::ErrorStatus status, const hidl_vec& outputShapes, + const Timing& timing) override; + + void notifyAsDeadObject() override; + + Data get(); + + private: + void notifyInternal(Data result); + + hal::utils::TransferValue mData; +}; + +} // namespace android::hardware::neuralnetworks::V1_2::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H diff --git 
a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h index 81bf7928f6..e6de011f9c 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h @@ -24,62 +24,64 @@ namespace android::nn { -Result convert(const hal::V1_2::OperandType& operandType); -Result convert(const hal::V1_2::OperationType& operationType); -Result convert(const hal::V1_2::DeviceType& deviceType); -Result convert(const hal::V1_2::Capabilities& capabilities); -Result convert( +GeneralResult convert(const hal::V1_2::OperandType& operandType); +GeneralResult convert(const hal::V1_2::OperationType& operationType); +GeneralResult convert(const hal::V1_2::DeviceType& deviceType); +GeneralResult convert(const hal::V1_2::Capabilities& capabilities); +GeneralResult convert( const hal::V1_2::Capabilities::OperandPerformance& operandPerformance); -Result convert(const hal::V1_2::Operation& operation); -Result convert( +GeneralResult convert(const hal::V1_2::Operation& operation); +GeneralResult convert( const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams); -Result convert(const hal::V1_2::Operand& operand); -Result convert(const hal::V1_2::Operand::ExtraParams& extraParams); -Result convert(const hal::V1_2::Model& model); -Result convert( +GeneralResult convert(const hal::V1_2::Operand& operand); +GeneralResult convert(const hal::V1_2::Operand::ExtraParams& extraParams); +GeneralResult convert(const hal::V1_2::Model& model); +GeneralResult convert( const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix); -Result convert(const hal::V1_2::OutputShape& outputShape); -Result convert(const hal::V1_2::MeasureTiming& measureTiming); -Result convert(const hal::V1_2::Timing& timing); -Result convert(const hal::V1_2::Extension& extension); -Result convert( +GeneralResult convert(const hal::V1_2::OutputShape& outputShape); 
+GeneralResult convert(const hal::V1_2::MeasureTiming& measureTiming); +GeneralResult convert(const hal::V1_2::Timing& timing); +GeneralResult convert(const hal::V1_2::Extension& extension); +GeneralResult convert( const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation); -Result convert(const hardware::hidl_handle& handle); +GeneralResult convert(const hardware::hidl_handle& handle); -Result> convert(const hardware::hidl_vec& extensions); -Result> convert(const hardware::hidl_vec& handles); -Result> convert( +GeneralResult> convert( + const hardware::hidl_vec& extensions); +GeneralResult> convert( + const hardware::hidl_vec& handles); +GeneralResult> convert( const hardware::hidl_vec& outputShapes); } // namespace android::nn namespace android::hardware::neuralnetworks::V1_2::utils { -nn::Result convert(const nn::OperandType& operandType); -nn::Result convert(const nn::OperationType& operationType); -nn::Result convert(const nn::DeviceType& deviceType); -nn::Result convert(const nn::Capabilities& capabilities); -nn::Result convert( +nn::GeneralResult convert(const nn::OperandType& operandType); +nn::GeneralResult convert(const nn::OperationType& operationType); +nn::GeneralResult convert(const nn::DeviceType& deviceType); +nn::GeneralResult convert(const nn::Capabilities& capabilities); +nn::GeneralResult convert( const nn::Capabilities::OperandPerformance& operandPerformance); -nn::Result convert(const nn::Operation& operation); -nn::Result convert( +nn::GeneralResult convert(const nn::Operation& operation); +nn::GeneralResult convert( const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams); -nn::Result convert(const nn::Operand& operand); -nn::Result convert(const nn::Operand::ExtraParams& extraParams); -nn::Result convert(const nn::Model& model); -nn::Result convert( +nn::GeneralResult convert(const nn::Operand& operand); +nn::GeneralResult convert(const nn::Operand::ExtraParams& extraParams); +nn::GeneralResult convert(const 
nn::Model& model); +nn::GeneralResult convert( const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix); -nn::Result convert(const nn::OutputShape& outputShape); -nn::Result convert(const nn::MeasureTiming& measureTiming); -nn::Result convert(const nn::Timing& timing); -nn::Result convert(const nn::Extension& extension); -nn::Result convert( +nn::GeneralResult convert(const nn::OutputShape& outputShape); +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming); +nn::GeneralResult convert(const nn::Timing& timing); +nn::GeneralResult convert(const nn::Extension& extension); +nn::GeneralResult convert( const nn::Extension::OperandTypeInformation& operandTypeInformation); -nn::Result convert(const nn::NativeHandle& handle); +nn::GeneralResult convert(const nn::NativeHandle& handle); -nn::Result> convert(const std::vector& extensions); -nn::Result> convert(const std::vector& handles); -nn::Result> convert(const std::vector& outputShapes); +nn::GeneralResult> convert(const std::vector& extensions); +nn::GeneralResult> convert(const std::vector& handles); +nn::GeneralResult> convert(const std::vector& outputShapes); } // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h new file mode 100644 index 0000000000..eb317b12cf --- /dev/null +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { + +nn::GeneralResult initVersionString(V1_2::IDevice* device); +nn::GeneralResult initDeviceType(V1_2::IDevice* device); +nn::GeneralResult> initExtensions(V1_2::IDevice* device); +nn::GeneralResult initCapabilities(V1_2::IDevice* device); +nn::GeneralResult> initNumberOfCacheFilesNeeded( + V1_2::IDevice* device); + +class Device final : public nn::IDevice { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create(std::string name, + sp device); + + Device(PrivateConstructorTag tag, std::string name, std::string versionString, + nn::DeviceType deviceType, std::vector extensions, + nn::Capabilities capabilities, std::pair numberOfCacheFilesNeeded, + sp device, hal::utils::DeathHandler deathHandler); + + const std::string& getName() const override; + const std::string& getVersionString() const override; + nn::Version getFeatureLevel() const override; + nn::DeviceType getType() const override; + const std::vector& getSupportedExtensions() const override; + const nn::Capabilities& getCapabilities() const override; + std::pair getNumberOfCacheFilesNeeded() const override; + + nn::GeneralResult wait() const override; + + nn::GeneralResult> getSupportedOperations( + const nn::Model& model) const override; + + nn::GeneralResult prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult 
prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const override; + + private: + const std::string kName; + const std::string kVersionString; + const nn::DeviceType kDeviceType; + const std::vector kExtensions; + const nn::Capabilities kCapabilities; + const std::pair kNumberOfCacheFilesNeeded; + const sp kDevice; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_2::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h new file mode 100644 index 0000000000..65e1e8aa3f --- /dev/null +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { + +class PreparedModel final : public nn::IPreparedModel { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create( + sp preparedModel); + + PreparedModel(PrivateConstructorTag tag, sp preparedModel, + hal::utils::DeathHandler deathHandler); + + nn::ExecutionResult, nn::Timing>> execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + + nn::GeneralResult> executeFenced( + const nn::Request& request, const std::vector& waitFor, + nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration, + const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + + std::any getUnderlyingResource() const override; + + private: + nn::ExecutionResult, nn::Timing>> executeSynchronously( + const V1_0::Request& request, MeasureTiming measure) const; + nn::ExecutionResult, nn::Timing>> executeAsynchronously( + const V1_0::Request& request, MeasureTiming measure) const; + + const sp kPreparedModel; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_2::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h new file mode 100644 index 0000000000..44f004f034 --- /dev/null +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { + +nn::GeneralResult getDevice(const std::string& name); + +} // namespace android::hardware::neuralnetworks::V1_2::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h index b1c2f1a81a..a9a6baeccc 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -38,10 +39,14 @@ constexpr auto kVersion = nn::Version::ANDROID_Q; template nn::Result validate(const Type& halObject) { - const auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeCanonical = nn::convert(halObject); + if (!maybeCanonical.has_value()) { + return nn::error() << maybeCanonical.error().message; + } + const auto version = NN_TRY(nn::validate(maybeCanonical.value())); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; 
} return {}; } @@ -58,9 +63,14 @@ bool valid(const Type& halObject) { template decltype(nn::convert(std::declval())) validatedConvertToCanonical(const Type& halObject) { auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeVersion = nn::validate(canonical); + if (!maybeVersion.has_value()) { + return nn::error() << maybeVersion.error(); + } + const auto version = maybeVersion.value(); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; } return canonical; } diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp new file mode 100644 index 0000000000..cb739f0eeb --- /dev/null +++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Callbacks.h" + +#include "Conversions.h" +#include "PreparedModel.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { +namespace { + +nn::GeneralResult convertPreparedModel( + const sp& preparedModel) { + return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); +} + +nn::GeneralResult convertPreparedModel( + const sp& preparedModel) { + return NN_TRY(utils::PreparedModel::create(preparedModel)); +} + +nn::GeneralResult, nn::Timing>> +convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, + const Timing& timing) { + return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)), + NN_TRY(validatedConvertToCanonical(timing))); +} + +nn::ExecutionResult, nn::Timing>> +convertExecutionGeneralResults(const hidl_vec& outputShapes, const Timing& timing) { + return hal::utils::makeExecutionFailure( + convertExecutionGeneralResultsHelper(outputShapes, timing)); +} + +} // namespace + +Return PreparedModelCallback::notify(V1_0::ErrorStatus status, + const sp& preparedModel) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); + } else if (preparedModel == nullptr) { + notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Returned preparedModel is nullptr"); + } else { + notifyInternal(convertPreparedModel(preparedModel)); + } + return Void(); +} + +Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, + const sp& preparedModel) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " 
<< toString(status)); + } else if (preparedModel == nullptr) { + notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Returned preparedModel is nullptr"); + } else { + notifyInternal(convertPreparedModel(preparedModel)); + } + return Void(); +} + +void PreparedModelCallback::notifyAsDeadObject() { + notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); +} + +PreparedModelCallback::Data PreparedModelCallback::get() { + return mData.take(); +} + +void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { + mData.put(std::move(result)); +} + +// ExecutionCallback methods begin here + +Return ExecutionCallback::notify(V1_0::ErrorStatus status) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); + } else { + notifyInternal({}); + } + return Void(); +} + +Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, + const hidl_vec& outputShapes, + const Timing& timing) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); + } else { + notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); + } + return Void(); +} + +void ExecutionCallback::notifyAsDeadObject() { + notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); +} + +ExecutionCallback::Data ExecutionCallback::get() { + return mData.take(); +} + +void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { + mData.put(std::move(result)); +} + +} // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index fed314b7c1..378719afc9 100644 --- 
a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -78,7 +79,7 @@ template using ConvertOutput = std::decay_t()).value())>; template -Result>> convertVec(const hidl_vec& arguments) { +GeneralResult>> convertVec(const hidl_vec& arguments) { std::vector> canonical; canonical.reserve(arguments.size()); for (const auto& argument : arguments) { @@ -88,25 +89,25 @@ Result>> convertVec(const hidl_vec& argume } template -Result>> convert(const hidl_vec& arguments) { +GeneralResult>> convert(const hidl_vec& arguments) { return convertVec(arguments); } } // anonymous namespace -Result convert(const hal::V1_2::OperandType& operandType) { +GeneralResult convert(const hal::V1_2::OperandType& operandType) { return static_cast(operandType); } -Result convert(const hal::V1_2::OperationType& operationType) { +GeneralResult convert(const hal::V1_2::OperationType& operationType) { return static_cast(operationType); } -Result convert(const hal::V1_2::DeviceType& deviceType) { +GeneralResult convert(const hal::V1_2::DeviceType& deviceType) { return static_cast(deviceType); } -Result convert(const hal::V1_2::Capabilities& capabilities) { +GeneralResult convert(const hal::V1_2::Capabilities& capabilities) { const bool validOperandTypes = std::all_of( capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), [](const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) { @@ -114,7 +115,7 @@ Result convert(const hal::V1_2::Capabilities& capabilities) { return !maybeType.has_value() ? 
false : validOperandType(maybeType.value()); }); if (!validOperandTypes) { - return NN_ERROR() + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid OperandType when converting OperandPerformance in Capabilities"; } @@ -124,8 +125,9 @@ Result convert(const hal::V1_2::Capabilities& capabilities) { NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance)); - auto table = - NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance))); + auto table = NN_TRY(hal::utils::makeGeneralFailure( + Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)), + nn::ErrorStatus::GENERAL_FAILURE)); return Capabilities{ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar, @@ -134,7 +136,7 @@ Result convert(const hal::V1_2::Capabilities& capabilities) { }; } -Result convert( +GeneralResult convert( const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) { return Capabilities::OperandPerformance{ .type = NN_TRY(convert(operandPerformance.type)), @@ -142,7 +144,7 @@ Result convert( }; } -Result convert(const hal::V1_2::Operation& operation) { +GeneralResult convert(const hal::V1_2::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -150,7 +152,7 @@ Result convert(const hal::V1_2::Operation& operation) { }; } -Result convert( +GeneralResult convert( const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams) { return Operand::SymmPerChannelQuantParams{ .scales = symmPerChannelQuantParams.scales, @@ -158,7 +160,7 @@ Result convert( }; } -Result convert(const hal::V1_2::Operand& operand) { +GeneralResult convert(const hal::V1_2::Operand& operand) { return Operand{ .type = NN_TRY(convert(operand.type)), .dimensions = operand.dimensions, @@ -170,7 +172,7 @@ Result convert(const hal::V1_2::Operand& operand) { }; } -Result 
convert(const hal::V1_2::Operand::ExtraParams& extraParams) { +GeneralResult convert(const hal::V1_2::Operand::ExtraParams& extraParams) { using Discriminator = hal::V1_2::Operand::ExtraParams::hidl_discriminator; switch (extraParams.getDiscriminator()) { case Discriminator::none: @@ -180,11 +182,12 @@ Result convert(const hal::V1_2::Operand::ExtraParams& extr case Discriminator::extension: return extraParams.extension(); } - return NN_ERROR() << "Unrecognized Operand::ExtraParams discriminator: " - << underlyingType(extraParams.getDiscriminator()); + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Unrecognized Operand::ExtraParams discriminator: " + << underlyingType(extraParams.getDiscriminator()); } -Result convert(const hal::V1_2::Model& model) { +GeneralResult convert(const hal::V1_2::Model& model) { auto operations = NN_TRY(convert(model.operations)); // Verify number of consumers. @@ -193,9 +196,9 @@ Result convert(const hal::V1_2::Model& model) { CHECK(model.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < model.operands.size(); ++i) { if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) { - return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected " - << numberOfConsumers[i] << " but found " - << model.operands[i].numberOfConsumers; + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid numberOfConsumers for operand " << i << ", expected " + << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers; } } @@ -215,7 +218,7 @@ Result convert(const hal::V1_2::Model& model) { }; } -Result convert( +GeneralResult convert( const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) { return Model::ExtensionNameAndPrefix{ .name = extensionNameAndPrefix.name, @@ -223,29 +226,29 @@ Result convert( }; } -Result convert(const hal::V1_2::OutputShape& outputShape) { +GeneralResult convert(const hal::V1_2::OutputShape& outputShape) { return OutputShape{ .dimensions 
= outputShape.dimensions, .isSufficient = outputShape.isSufficient, }; } -Result convert(const hal::V1_2::MeasureTiming& measureTiming) { +GeneralResult convert(const hal::V1_2::MeasureTiming& measureTiming) { return static_cast(measureTiming); } -Result convert(const hal::V1_2::Timing& timing) { +GeneralResult convert(const hal::V1_2::Timing& timing) { return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver}; } -Result convert(const hal::V1_2::Extension& extension) { +GeneralResult convert(const hal::V1_2::Extension& extension) { return Extension{ .name = extension.name, .operandTypes = NN_TRY(convert(extension.operandTypes)), }; } -Result convert( +GeneralResult convert( const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation) { return Extension::OperandTypeInformation{ .type = operandTypeInformation.type, @@ -254,20 +257,21 @@ Result convert( }; } -Result convert(const hidl_handle& handle) { +GeneralResult convert(const hidl_handle& handle) { auto* cloned = native_handle_clone(handle.getNativeHandle()); return ::android::NativeHandle::create(cloned, /*ownsHandle=*/true); } -Result> convert(const hidl_vec& extensions) { +GeneralResult> convert(const hidl_vec& extensions) { return convertVec(extensions); } -Result> convert(const hidl_vec& handles) { +GeneralResult> convert(const hidl_vec& handles) { return convertVec(handles); } -Result> convert(const hidl_vec& outputShapes) { +GeneralResult> convert( + const hidl_vec& outputShapes) { return convertVec(outputShapes); } @@ -278,24 +282,24 @@ namespace { using utils::convert; -nn::Result convert(const nn::Operand::LifeTime& lifetime) { +nn::GeneralResult convert(const nn::Operand::LifeTime& lifetime) { return V1_0::utils::convert(lifetime); } -nn::Result convert( +nn::GeneralResult convert( const nn::Capabilities::PerformanceInfo& performanceInfo) { return V1_0::utils::convert(performanceInfo); } -nn::Result convert(const nn::DataLocation& location) { 
+nn::GeneralResult convert(const nn::DataLocation& location) { return V1_0::utils::convert(location); } -nn::Result> convert(const nn::Model::OperandValues& operandValues) { +nn::GeneralResult> convert(const nn::Model::OperandValues& operandValues) { return V1_0::utils::convert(operandValues); } -nn::Result convert(const nn::Memory& memory) { +nn::GeneralResult convert(const nn::Memory& memory) { return V1_0::utils::convert(memory); } @@ -303,7 +307,7 @@ template using ConvertOutput = std::decay_t()).value())>; template -nn::Result>> convertVec(const std::vector& arguments) { +nn::GeneralResult>> convertVec(const std::vector& arguments) { hidl_vec> halObject(arguments.size()); for (size_t i = 0; i < arguments.size(); ++i) { halObject[i] = NN_TRY(convert(arguments[i])); @@ -312,22 +316,23 @@ nn::Result>> convertVec(const std::vector& ar } template -nn::Result>> convert(const std::vector& arguments) { +nn::GeneralResult>> convert(const std::vector& arguments) { return convertVec(arguments); } -nn::Result makeExtraParams(nn::Operand::NoParams /*noParams*/) { +nn::GeneralResult makeExtraParams(nn::Operand::NoParams /*noParams*/) { return Operand::ExtraParams{}; } -nn::Result makeExtraParams( +nn::GeneralResult makeExtraParams( const nn::Operand::SymmPerChannelQuantParams& channelQuant) { Operand::ExtraParams ret; ret.channelQuant(NN_TRY(convert(channelQuant))); return ret; } -nn::Result makeExtraParams(const nn::Operand::ExtensionParams& extension) { +nn::GeneralResult makeExtraParams( + const nn::Operand::ExtensionParams& extension) { Operand::ExtraParams ret; ret.extension(extension); return ret; @@ -335,28 +340,29 @@ nn::Result makeExtraParams(const nn::Operand::ExtensionPar } // anonymous namespace -nn::Result convert(const nn::OperandType& operandType) { +nn::GeneralResult convert(const nn::OperandType& operandType) { return static_cast(operandType); } -nn::Result convert(const nn::OperationType& operationType) { +nn::GeneralResult convert(const nn::OperationType& 
operationType) { return static_cast(operationType); } -nn::Result convert(const nn::DeviceType& deviceType) { +nn::GeneralResult convert(const nn::DeviceType& deviceType) { switch (deviceType) { case nn::DeviceType::UNKNOWN: - return NN_ERROR() << "Invalid DeviceType UNKNOWN"; + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid DeviceType UNKNOWN"; case nn::DeviceType::OTHER: case nn::DeviceType::CPU: case nn::DeviceType::GPU: case nn::DeviceType::ACCELERATOR: return static_cast(deviceType); } - return NN_ERROR() << "Invalid DeviceType " << underlyingType(deviceType); + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid DeviceType " << underlyingType(deviceType); } -nn::Result convert(const nn::Capabilities& capabilities) { +nn::GeneralResult convert(const nn::Capabilities& capabilities) { std::vector operandPerformance; operandPerformance.reserve(capabilities.operandPerformance.asVector().size()); std::copy_if(capabilities.operandPerformance.asVector().begin(), @@ -375,7 +381,7 @@ nn::Result convert(const nn::Capabilities& capabilities) { }; } -nn::Result convert( +nn::GeneralResult convert( const nn::Capabilities::OperandPerformance& operandPerformance) { return Capabilities::OperandPerformance{ .type = NN_TRY(convert(operandPerformance.type)), @@ -383,7 +389,7 @@ nn::Result convert( }; } -nn::Result convert(const nn::Operation& operation) { +nn::GeneralResult convert(const nn::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -391,7 +397,7 @@ nn::Result convert(const nn::Operation& operation) { }; } -nn::Result convert( +nn::GeneralResult convert( const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams) { return SymmPerChannelQuantParams{ .scales = symmPerChannelQuantParams.scales, @@ -399,7 +405,7 @@ nn::Result convert( }; } -nn::Result convert(const nn::Operand& operand) { +nn::GeneralResult convert(const nn::Operand& operand) { return Operand{ .type = 
NN_TRY(convert(operand.type)), .dimensions = operand.dimensions, @@ -412,13 +418,14 @@ nn::Result convert(const nn::Operand& operand) { }; } -nn::Result convert(const nn::Operand::ExtraParams& extraParams) { +nn::GeneralResult convert(const nn::Operand::ExtraParams& extraParams) { return std::visit([](const auto& x) { return makeExtraParams(x); }, extraParams); } -nn::Result convert(const nn::Model& model) { +nn::GeneralResult convert(const nn::Model& model) { if (!hal::utils::hasNoPointerData(model)) { - return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Model cannot be converted because it contains pointer-based memory"; } auto operands = NN_TRY(convert(model.main.operands)); @@ -443,7 +450,7 @@ nn::Result convert(const nn::Model& model) { }; } -nn::Result convert( +nn::GeneralResult convert( const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) { return Model::ExtensionNameAndPrefix{ .name = extensionNameAndPrefix.name, @@ -451,27 +458,27 @@ nn::Result convert( }; } -nn::Result convert(const nn::OutputShape& outputShape) { +nn::GeneralResult convert(const nn::OutputShape& outputShape) { return OutputShape{.dimensions = outputShape.dimensions, .isSufficient = outputShape.isSufficient}; } -nn::Result convert(const nn::MeasureTiming& measureTiming) { +nn::GeneralResult convert(const nn::MeasureTiming& measureTiming) { return static_cast(measureTiming); } -nn::Result convert(const nn::Timing& timing) { +nn::GeneralResult convert(const nn::Timing& timing) { return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver}; } -nn::Result convert(const nn::Extension& extension) { +nn::GeneralResult convert(const nn::Extension& extension) { return Extension{ .name = extension.name, .operandTypes = NN_TRY(convert(extension.operandTypes)), }; } -nn::Result convert( +nn::GeneralResult convert( const nn::Extension::OperandTypeInformation& 
operandTypeInformation) { return Extension::OperandTypeInformation{ .type = operandTypeInformation.type, @@ -480,22 +487,22 @@ nn::Result convert( }; } -nn::Result convert(const nn::NativeHandle& handle) { +nn::GeneralResult convert(const nn::NativeHandle& handle) { const auto hidlHandle = hidl_handle(handle->handle()); // Copy memory to force the native_handle_t to be copied. auto copiedHandle = hidlHandle; return copiedHandle; } -nn::Result> convert(const std::vector& extensions) { +nn::GeneralResult> convert(const std::vector& extensions) { return convertVec(extensions); } -nn::Result> convert(const std::vector& handles) { +nn::GeneralResult> convert(const std::vector& handles) { return convertVec(handles); } -nn::Result> convert(const std::vector& outputShapes) { +nn::GeneralResult> convert(const std::vector& outputShapes) { return convertVec(outputShapes); } diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp new file mode 100644 index 0000000000..ca236f17c6 --- /dev/null +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -0,0 +1,318 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Device.h" + +#include "Callbacks.h" +#include "Conversions.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { + +nn::GeneralResult initVersionString(V1_2::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status); + } else { + result = versionString; + } + }; + + const auto ret = device->getVersionString(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult initDeviceType(V1_2::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status); + } else { + result = nn::convert(deviceType); + } + }; + + const auto ret = device->getType(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult> initExtensions(V1_2::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec& extensions) { + if (status != 
V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status); + } else { + result = nn::convert(extensions); + } + }; + + const auto ret = device->getSupportedExtensions(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult initCapabilities(V1_2::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status); + } else { + result = validatedConvertToCanonical(capabilities); + } + }; + + const auto ret = device->getCapabilities_1_2(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult> initNumberOfCacheFilesNeeded( + V1_2::IDevice* device) { + CHECK(device != nullptr); + + nn::GeneralResult> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache, + uint32_t numDataCache) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) + << "getNumberOfCacheFilesNeeded failed with " << toString(status); + } else { + result = std::make_pair(numModelCache, numDataCache); + } + }; + + const auto ret = device->getNumberOfCacheFilesNeeded(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult> Device::create(std::string name, + sp device) { + if (name.empty()) { + return 
NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_2::utils::Device::create must have non-empty name"; + } + if (device == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_2::utils::Device::create must have non-null device"; + } + + auto versionString = NN_TRY(initVersionString(device.get())); + const auto deviceType = NN_TRY(initDeviceType(device.get())); + auto extensions = NN_TRY(initExtensions(device.get())); + auto capabilities = NN_TRY(initCapabilities(device.get())); + const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get())); + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); + return std::make_shared( + PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType, + std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded, + std::move(device), std::move(deathHandler)); +} + +Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString, + nn::DeviceType deviceType, std::vector extensions, + nn::Capabilities capabilities, + std::pair numberOfCacheFilesNeeded, sp device, + hal::utils::DeathHandler deathHandler) + : kName(std::move(name)), + kVersionString(std::move(versionString)), + kDeviceType(deviceType), + kExtensions(std::move(extensions)), + kCapabilities(std::move(capabilities)), + kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded), + kDevice(std::move(device)), + kDeathHandler(std::move(deathHandler)) {} + +const std::string& Device::getName() const { + return kName; +} + +const std::string& Device::getVersionString() const { + return kVersionString; +} + +nn::Version Device::getFeatureLevel() const { + return nn::Version::ANDROID_Q; +} + +nn::DeviceType Device::getType() const { + return kDeviceType; +} + +const std::vector& Device::getSupportedExtensions() const { + return kExtensions; +} + +const nn::Capabilities& Device::getCapabilities() const { + return kCapabilities; +} + +std::pair 
Device::getNumberOfCacheFilesNeeded() const { + return kNumberOfCacheFilesNeeded; +} + +nn::GeneralResult Device::wait() const { + const auto ret = kDevice->ping(); + return hal::utils::handleTransportError(ret); +} + +nn::GeneralResult> Device::getSupportedOperations(const nn::Model& model) const { + // Ensure that model is ready for IPC. + std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + + nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + auto cb = [&result, &model](V1_0::ErrorStatus status, + const hidl_vec& supportedOperations) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) + << "getSupportedOperations_1_2 failed with " << toString(status); + } else if (supportedOperations.size() != model.main.operations.size()) { + result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "getSupportedOperations_1_2 returned vector of size " + << supportedOperations.size() << " but expected " + << model.main.operations.size(); + } else { + result = supportedOperations; + } + }; + + const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult Device::prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/, + nn::OptionalTimePoint /*deadline*/, const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference)); + const auto hidlModelCache = NN_TRY(convert(modelCache)); + const auto hidlDataCache = NN_TRY(convert(dataCache)); + const auto hidlToken = token; + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache, + hidlDataCache, hidlToken, cb); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status); + } + + return cb->get(); +} + +nn::GeneralResult Device::prepareModelFromCache( + nn::OptionalTimePoint /*deadline*/, const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + const auto hidlModelCache = NN_TRY(convert(modelCache)); + const auto hidlDataCache = NN_TRY(convert(dataCache)); + const auto hidlToken = token; + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status); + } + + return cb->get(); +} + +nn::GeneralResult Device::allocate( + const nn::BufferDesc& /*desc*/, + const std::vector& /*preparedModels*/, + const std::vector& 
/*inputRoles*/, + const std::vector& /*outputRoles*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IDevice::allocate not supported on 1.2 HAL service"; +} + +} // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp new file mode 100644 index 0000000000..ff9db215a2 --- /dev/null +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "PreparedModel.h" + +#include "Callbacks.h" +#include "Conversions.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_2::utils { +namespace { + +nn::GeneralResult, nn::Timing>> +convertExecutionResultsHelper(const hidl_vec& outputShapes, const Timing& timing) { + return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)), + NN_TRY(validatedConvertToCanonical(timing))); +} + +nn::ExecutionResult, nn::Timing>> convertExecutionResults( + const hidl_vec& outputShapes, const Timing& timing) { + return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing)); +} + +} // namespace + +nn::GeneralResult> PreparedModel::create( + sp preparedModel) { + if (preparedModel == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_2::utils::PreparedModel::create must have non-null preparedModel"; + } + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); + return std::make_shared(PrivateConstructorTag{}, std::move(preparedModel), + std::move(deathHandler)); +} + +PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp preparedModel, + hal::utils::DeathHandler deathHandler) + : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {} + +nn::ExecutionResult, nn::Timing>> +PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const { + nn::ExecutionResult, nn::Timing>> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec& outputShapes, + const Timing& timing) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << 
"executeSynchronously failed with " << toString(status); + } else { + result = convertExecutionResults(outputShapes, timing); + } + }; + + const auto ret = kPreparedModel->executeSynchronously(request, measure, cb); + NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret))); + + return result; +} + +nn::ExecutionResult, nn::Timing>> +PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming measure) const { + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kPreparedModel->execute_1_2(request, measure, cb); + const auto status = + NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret))); + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "execute failed with " << toString(status); + } + + return cb->get(); +} + +nn::ExecutionResult, nn::Timing>> PreparedModel::execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const { + // Ensure that request is ready for IPC. + std::optional maybeRequestInShared; + const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared))); + + const auto hidlRequest = + NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared))); + const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure))); + + nn::ExecutionResult, nn::Timing>> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + const bool preferSynchronous = true; + + // Execute synchronously if allowed. 
+ if (preferSynchronous) { + result = executeSynchronously(hidlRequest, hidlMeasure); + } + + // Run asynchronous execution if execution has not already completed. + if (!result.has_value()) { + result = executeAsynchronously(hidlRequest, hidlMeasure); + } + + // Flush output buffers if successful execution. + if (result.has_value()) { + NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); + } + + return result; +} + +nn::GeneralResult> +PreparedModel::executeFenced( + const nn::Request& /*request*/, const std::vector& /*waitFor*/, + nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/, + const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IPreparedModel::executeFenced is not supported on 1.2 HAL service"; +} + +std::any PreparedModel::getUnderlyingResource() const { + sp resource = kPreparedModel; + return resource; +} + +} // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.2/utils/src/Service.cpp b/neuralnetworks/1.2/utils/src/Service.cpp new file mode 100644 index 0000000000..110188f4f1 --- /dev/null +++ b/neuralnetworks/1.2/utils/src/Service.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Service.h" + +#include +#include +#include +#include +#include +#include "Device.h" + +namespace android::hardware::neuralnetworks::V1_2::utils { + +nn::GeneralResult getDevice(const std::string& name) { + hal::utils::ResilientDevice::Factory makeDevice = + [name](bool blocking) -> nn::GeneralResult { + auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name); + if (service == nullptr) { + return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr"; + } + return Device::create(name, std::move(service)); + }; + + return hal::utils::ResilientDevice::create(std::move(makeDevice)); +} + +} // namespace android::hardware::neuralnetworks::V1_2::utils diff --git a/neuralnetworks/1.3/utils/Android.bp b/neuralnetworks/1.3/utils/Android.bp index 279b250532..d5d897d470 100644 --- a/neuralnetworks/1.3/utils/Android.bp +++ b/neuralnetworks/1.3/utils/Android.bp @@ -20,6 +20,7 @@ cc_library_static { srcs: ["src/*"], local_include_dirs: ["include/nnapi/hal/1.3/"], export_include_dirs: ["include"], + cflags: ["-Wthread-safety"], static_libs: [ "neuralnetworks_types", "neuralnetworks_utils_hal_common", diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h new file mode 100644 index 0000000000..637179de33 --- /dev/null +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H + +#include +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { + +class Buffer final : public nn::IBuffer { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create( + sp buffer, nn::Request::MemoryDomainToken token); + + Buffer(PrivateConstructorTag tag, sp buffer, + nn::Request::MemoryDomainToken token); + + nn::Request::MemoryDomainToken getToken() const override; + + nn::GeneralResult copyTo(const nn::Memory& dst) const override; + nn::GeneralResult copyFrom(const nn::Memory& src, + const nn::Dimensions& dimensions) const override; + + private: + const sp kBuffer; + const nn::Request::MemoryDomainToken kToken; +}; + +} // namespace android::hardware::neuralnetworks::V1_3::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h new file mode 100644 index 0000000000..d46b111701 --- /dev/null +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { + +class PreparedModelCallback final : public IPreparedModelCallback, + public hal::utils::IProtectedCallback { + public: + using Data = nn::GeneralResult; + + Return notify(V1_0::ErrorStatus status, + const sp& preparedModel) override; + Return notify_1_2(V1_0::ErrorStatus status, + const sp& preparedModel) override; + Return notify_1_3(ErrorStatus status, const sp& preparedModel) override; + + void notifyAsDeadObject() override; + + Data get(); + + private: + void notifyInternal(Data result); + + hal::utils::TransferValue mData; +}; + +class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback { + public: + using Data = nn::ExecutionResult, nn::Timing>>; + + Return notify(V1_0::ErrorStatus status) override; + Return notify_1_2(V1_0::ErrorStatus status, + const hidl_vec& outputShapes, + const V1_2::Timing& timing) override; + Return notify_1_3(ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing) override; + + void notifyAsDeadObject() override; + + Data get(); + + private: + void notifyInternal(Data result); + + hal::utils::TransferValue mData; +}; + +} // namespace android::hardware::neuralnetworks::V1_3::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h index 43987a9727..64aa96e61a 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h 
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h @@ -25,54 +25,54 @@ namespace android::nn { -Result convert(const hal::V1_3::OperandType& operandType); -Result convert(const hal::V1_3::OperationType& operationType); -Result convert(const hal::V1_3::Priority& priority); -Result convert(const hal::V1_3::Capabilities& capabilities); -Result convert( +GeneralResult convert(const hal::V1_3::OperandType& operandType); +GeneralResult convert(const hal::V1_3::OperationType& operationType); +GeneralResult convert(const hal::V1_3::Priority& priority); +GeneralResult convert(const hal::V1_3::Capabilities& capabilities); +GeneralResult convert( const hal::V1_3::Capabilities::OperandPerformance& operandPerformance); -Result convert(const hal::V1_3::Operation& operation); -Result convert(const hal::V1_3::OperandLifeTime& operandLifeTime); -Result convert(const hal::V1_3::Operand& operand); -Result convert(const hal::V1_3::Model& model); -Result convert(const hal::V1_3::Subgraph& subgraph); -Result convert(const hal::V1_3::BufferDesc& bufferDesc); -Result convert(const hal::V1_3::BufferRole& bufferRole); -Result convert(const hal::V1_3::Request& request); -Result convert(const hal::V1_3::Request::MemoryPool& memoryPool); -Result convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint); -Result convert( +GeneralResult convert(const hal::V1_3::Operation& operation); +GeneralResult convert(const hal::V1_3::OperandLifeTime& operandLifeTime); +GeneralResult convert(const hal::V1_3::Operand& operand); +GeneralResult convert(const hal::V1_3::Model& model); +GeneralResult convert(const hal::V1_3::Subgraph& subgraph); +GeneralResult convert(const hal::V1_3::BufferDesc& bufferDesc); +GeneralResult convert(const hal::V1_3::BufferRole& bufferRole); +GeneralResult convert(const hal::V1_3::Request& request); +GeneralResult convert(const hal::V1_3::Request::MemoryPool& memoryPool); +GeneralResult convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint); 
+GeneralResult convert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration); -Result convert(const hal::V1_3::ErrorStatus& errorStatus); +GeneralResult convert(const hal::V1_3::ErrorStatus& errorStatus); -Result> convert( +GeneralResult> convert( const hardware::hidl_vec& bufferRoles); } // namespace android::nn namespace android::hardware::neuralnetworks::V1_3::utils { -nn::Result convert(const nn::OperandType& operandType); -nn::Result convert(const nn::OperationType& operationType); -nn::Result convert(const nn::Priority& priority); -nn::Result convert(const nn::Capabilities& capabilities); -nn::Result convert( +nn::GeneralResult convert(const nn::OperandType& operandType); +nn::GeneralResult convert(const nn::OperationType& operationType); +nn::GeneralResult convert(const nn::Priority& priority); +nn::GeneralResult convert(const nn::Capabilities& capabilities); +nn::GeneralResult convert( const nn::Capabilities::OperandPerformance& operandPerformance); -nn::Result convert(const nn::Operation& operation); -nn::Result convert(const nn::Operand::LifeTime& operandLifeTime); -nn::Result convert(const nn::Operand& operand); -nn::Result convert(const nn::Model& model); -nn::Result convert(const nn::Model::Subgraph& subgraph); -nn::Result convert(const nn::BufferDesc& bufferDesc); -nn::Result convert(const nn::BufferRole& bufferRole); -nn::Result convert(const nn::Request& request); -nn::Result convert(const nn::Request::MemoryPool& memoryPool); -nn::Result convert(const nn::OptionalTimePoint& optionalTimePoint); -nn::Result convert( +nn::GeneralResult convert(const nn::Operation& operation); +nn::GeneralResult convert(const nn::Operand::LifeTime& operandLifeTime); +nn::GeneralResult convert(const nn::Operand& operand); +nn::GeneralResult convert(const nn::Model& model); +nn::GeneralResult convert(const nn::Model::Subgraph& subgraph); +nn::GeneralResult convert(const nn::BufferDesc& bufferDesc); +nn::GeneralResult convert(const nn::BufferRole& 
bufferRole); +nn::GeneralResult convert(const nn::Request& request); +nn::GeneralResult convert(const nn::Request::MemoryPool& memoryPool); +nn::GeneralResult convert(const nn::OptionalTimePoint& optionalTimePoint); +nn::GeneralResult convert( const nn::OptionalTimeoutDuration& optionalTimeoutDuration); -nn::Result convert(const nn::ErrorStatus& errorStatus); +nn::GeneralResult convert(const nn::ErrorStatus& errorStatus); -nn::Result> convert(const std::vector& bufferRoles); +nn::GeneralResult> convert(const std::vector& bufferRoles); } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h new file mode 100644 index 0000000000..2f6c46a858 --- /dev/null +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { + +class Device final : public nn::IDevice { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create(std::string name, + sp device); + + Device(PrivateConstructorTag tag, std::string name, std::string versionString, + nn::DeviceType deviceType, std::vector extensions, + nn::Capabilities capabilities, std::pair numberOfCacheFilesNeeded, + sp device, hal::utils::DeathHandler deathHandler); + + const std::string& getName() const override; + const std::string& getVersionString() const override; + nn::Version getFeatureLevel() const override; + nn::DeviceType getType() const override; + const std::vector& getSupportedExtensions() const override; + const nn::Capabilities& getCapabilities() const override; + std::pair getNumberOfCacheFilesNeeded() const override; + + nn::GeneralResult wait() const override; + + nn::GeneralResult> getSupportedOperations( + const nn::Model& model) const override; + + nn::GeneralResult prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const override; + + private: + const std::string kName; + const std::string kVersionString; + const 
nn::DeviceType kDeviceType; + const std::vector kExtensions; + const nn::Capabilities kCapabilities; + const std::pair kNumberOfCacheFilesNeeded; + const sp kDevice; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_3::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h new file mode 100644 index 0000000000..e0d69dd7c6 --- /dev/null +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { + +class PreparedModel final : public nn::IPreparedModel { + struct PrivateConstructorTag {}; + + public: + static nn::GeneralResult> create( + sp preparedModel); + + PreparedModel(PrivateConstructorTag tag, sp preparedModel, + hal::utils::DeathHandler deathHandler); + + nn::ExecutionResult, nn::Timing>> execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + + nn::GeneralResult> executeFenced( + const nn::Request& request, const std::vector& waitFor, + nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration, + const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + + std::any getUnderlyingResource() const override; + + private: + nn::ExecutionResult, nn::Timing>> executeSynchronously( + const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const; + nn::ExecutionResult, nn::Timing>> executeAsynchronously( + const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const; + + const sp kPreparedModel; + const hal::utils::DeathHandler kDeathHandler; +}; + +} // namespace android::hardware::neuralnetworks::V1_3::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h new file mode 100644 index 
0000000000..2bc32574ff --- /dev/null +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { + +nn::GeneralResult getDevice(const std::string& name); + +} // namespace android::hardware::neuralnetworks::V1_3::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h index f8c975d5d7..e61859d5f9 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -35,10 +36,14 @@ constexpr auto kVersion = nn::Version::ANDROID_R; template nn::Result validate(const Type& halObject) { - const auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeCanonical = nn::convert(halObject); + if (!maybeCanonical.has_value()) { + return nn::error() << maybeCanonical.error().message; + } + const auto version = 
NN_TRY(nn::validate(maybeCanonical.value())); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; } return {}; } @@ -55,9 +60,14 @@ bool valid(const Type& halObject) { template decltype(nn::convert(std::declval())) validatedConvertToCanonical(const Type& halObject) { auto canonical = NN_TRY(nn::convert(halObject)); - const auto version = NN_TRY(nn::validate(canonical)); + const auto maybeVersion = nn::validate(canonical); + if (!maybeVersion.has_value()) { + return nn::error() << maybeVersion.error(); + } + const auto version = maybeVersion.value(); if (version > utils::kVersion) { - return NN_ERROR() << ""; + return NN_ERROR() << "Insufficient version: " << version << " vs required " + << utils::kVersion; } return canonical; } diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp new file mode 100644 index 0000000000..f3fe9b5112 --- /dev/null +++ b/neuralnetworks/1.3/utils/src/Buffer.cpp @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Buffer.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "Conversions.h" +#include "Utils.h" + +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { + +nn::GeneralResult> Buffer::create( + sp buffer, nn::Request::MemoryDomainToken token) { + if (buffer == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_3::utils::Buffer::create must have non-null buffer"; + } + if (token == static_cast(0)) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_3::utils::Buffer::create must have non-zero token"; + } + + return std::make_shared(PrivateConstructorTag{}, std::move(buffer), token); +} + +Buffer::Buffer(PrivateConstructorTag /*tag*/, sp buffer, + nn::Request::MemoryDomainToken token) + : kBuffer(std::move(buffer)), kToken(token) { + CHECK(kBuffer != nullptr); + CHECK(kToken != static_cast(0)); +} + +nn::Request::MemoryDomainToken Buffer::getToken() const { + return kToken; +} + +nn::GeneralResult Buffer::copyTo(const nn::Memory& dst) const { + const auto hidlDst = NN_TRY(V1_0::utils::convert(dst)); + + const auto ret = kBuffer->copyTo(hidlDst); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status); + } + + return {}; +} + +nn::GeneralResult Buffer::copyFrom(const nn::Memory& src, + const nn::Dimensions& dimensions) const { + const auto hidlSrc = NN_TRY(V1_0::utils::convert(src)); + const auto hidlDimensions = hidl_vec(dimensions); + + const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != ErrorStatus::NONE) { + const auto canonical = + 
validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status); + } + + return {}; +} + +} // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp new file mode 100644 index 0000000000..ff81275335 --- /dev/null +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Callbacks.h" + +#include "Conversions.h" +#include "PreparedModel.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { +namespace { + +nn::GeneralResult convertPreparedModel( + const sp& preparedModel) { + return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel)); +} + +nn::GeneralResult convertPreparedModel( + const sp& preparedModel) { + return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel)); +} + +nn::GeneralResult convertPreparedModel( + const sp& preparedModel) { + return NN_TRY(utils::PreparedModel::create(preparedModel)); +} + +nn::GeneralResult, nn::Timing>> +convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)), + NN_TRY(validatedConvertToCanonical(timing))); +} + +nn::ExecutionResult, nn::Timing>> +convertExecutionGeneralResults(const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + return hal::utils::makeExecutionFailure( + convertExecutionGeneralResultsHelper(outputShapes, timing)); +} + +} // namespace + +Return PreparedModelCallback::notify(V1_0::ErrorStatus status, + const sp& preparedModel) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); + } else if (preparedModel == nullptr) { + notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Returned preparedModel is nullptr"); + } else { + notifyInternal(convertPreparedModel(preparedModel)); + } + return Void(); +} + +Return PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status, + const sp& preparedModel) { + if (status != 
V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); + } else if (preparedModel == nullptr) { + notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Returned preparedModel is nullptr"); + } else { + notifyInternal(convertPreparedModel(preparedModel)); + } + return Void(); +} + +Return PreparedModelCallback::notify_1_3(ErrorStatus status, + const sp& preparedModel) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status)); + } else if (preparedModel == nullptr) { + notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Returned preparedModel is nullptr"); + } else { + notifyInternal(convertPreparedModel(preparedModel)); + } + return Void(); +} + +void PreparedModelCallback::notifyAsDeadObject() { + notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); +} + +PreparedModelCallback::Data PreparedModelCallback::get() { + return mData.take(); +} + +void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) { + mData.put(std::move(result)); +} + +// ExecutionCallback methods begin here + +Return ExecutionCallback::notify(V1_0::ErrorStatus status) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); + } else { + notifyInternal({}); + } + return Void(); +} + +Return ExecutionCallback::notify_1_2(V1_0::ErrorStatus status, + const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + if (status != V1_0::ErrorStatus::NONE) { + const auto canonical = + 
validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); + } else { + notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); + } + return Void(); +} + +Return ExecutionCallback::notify_1_3(ErrorStatus status, + const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status)); + } else { + notifyInternal(convertExecutionGeneralResults(outputShapes, timing)); + } + return Void(); +} + +void ExecutionCallback::notifyAsDeadObject() { + notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object"); +} + +ExecutionCallback::Data ExecutionCallback::get() { + return mData.take(); +} + +void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) { + mData.put(std::move(result)); +} + +} // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index 4c54e3b12e..0dc078534c 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -79,7 +80,7 @@ template using ConvertOutput = std::decay_t()).value())>; template -Result>> convertVec(const hidl_vec& arguments) { +GeneralResult>> convertVec(const hidl_vec& arguments) { std::vector> canonical; canonical.reserve(arguments.size()); for (const auto& argument : arguments) { @@ -89,25 +90,25 @@ Result>> convertVec(const hidl_vec& argume } template -Result>> convert(const hidl_vec& arguments) { +GeneralResult>> convert(const hidl_vec& arguments) { return convertVec(arguments); } } // anonymous namespace -Result convert(const 
hal::V1_3::OperandType& operandType) { +GeneralResult convert(const hal::V1_3::OperandType& operandType) { return static_cast(operandType); } -Result convert(const hal::V1_3::OperationType& operationType) { +GeneralResult convert(const hal::V1_3::OperationType& operationType) { return static_cast(operationType); } -Result convert(const hal::V1_3::Priority& priority) { +GeneralResult convert(const hal::V1_3::Priority& priority) { return static_cast(priority); } -Result convert(const hal::V1_3::Capabilities& capabilities) { +GeneralResult convert(const hal::V1_3::Capabilities& capabilities) { const bool validOperandTypes = std::all_of( capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), [](const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) { @@ -115,13 +116,14 @@ Result convert(const hal::V1_3::Capabilities& capabilities) { return !maybeType.has_value() ? false : validOperandType(maybeType.value()); }); if (!validOperandTypes) { - return NN_ERROR() + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid OperandType when converting OperandPerformance in Capabilities"; } auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance)); - auto table = - NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance))); + auto table = NN_TRY(hal::utils::makeGeneralFailure( + Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)), + nn::ErrorStatus::GENERAL_FAILURE)); return Capabilities{ .relaxedFloat32toFloat16PerformanceScalar = @@ -134,7 +136,7 @@ Result convert(const hal::V1_3::Capabilities& capabilities) { }; } -Result convert( +GeneralResult convert( const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) { return Capabilities::OperandPerformance{ .type = NN_TRY(convert(operandPerformance.type)), @@ -142,7 +144,7 @@ Result convert( }; } -Result convert(const hal::V1_3::Operation& operation) { +GeneralResult convert(const hal::V1_3::Operation& 
operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -150,11 +152,11 @@ Result convert(const hal::V1_3::Operation& operation) { }; } -Result convert(const hal::V1_3::OperandLifeTime& operandLifeTime) { +GeneralResult convert(const hal::V1_3::OperandLifeTime& operandLifeTime) { return static_cast(operandLifeTime); } -Result convert(const hal::V1_3::Operand& operand) { +GeneralResult convert(const hal::V1_3::Operand& operand) { return Operand{ .type = NN_TRY(convert(operand.type)), .dimensions = operand.dimensions, @@ -166,7 +168,7 @@ Result convert(const hal::V1_3::Operand& operand) { }; } -Result convert(const hal::V1_3::Model& model) { +GeneralResult convert(const hal::V1_3::Model& model) { return Model{ .main = NN_TRY(convert(model.main)), .referenced = NN_TRY(convert(model.referenced)), @@ -177,7 +179,7 @@ Result convert(const hal::V1_3::Model& model) { }; } -Result convert(const hal::V1_3::Subgraph& subgraph) { +GeneralResult convert(const hal::V1_3::Subgraph& subgraph) { auto operations = NN_TRY(convert(subgraph.operations)); // Verify number of consumers. 
@@ -186,9 +188,10 @@ Result convert(const hal::V1_3::Subgraph& subgraph) { CHECK(subgraph.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < subgraph.operands.size(); ++i) { if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) { - return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected " - << numberOfConsumers[i] << " but found " - << subgraph.operands[i].numberOfConsumers; + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid numberOfConsumers for operand " << i << ", expected " + << numberOfConsumers[i] << " but found " + << subgraph.operands[i].numberOfConsumers; } } @@ -200,11 +203,11 @@ Result convert(const hal::V1_3::Subgraph& subgraph) { }; } -Result convert(const hal::V1_3::BufferDesc& bufferDesc) { +GeneralResult convert(const hal::V1_3::BufferDesc& bufferDesc) { return BufferDesc{.dimensions = bufferDesc.dimensions}; } -Result convert(const hal::V1_3::BufferRole& bufferRole) { +GeneralResult convert(const hal::V1_3::BufferRole& bufferRole) { return BufferRole{ .modelIndex = bufferRole.modelIndex, .ioIndex = bufferRole.ioIndex, @@ -212,7 +215,7 @@ Result convert(const hal::V1_3::BufferRole& bufferRole) { }; } -Result convert(const hal::V1_3::Request& request) { +GeneralResult convert(const hal::V1_3::Request& request) { return Request{ .inputs = NN_TRY(convert(request.inputs)), .outputs = NN_TRY(convert(request.outputs)), @@ -220,7 +223,7 @@ Result convert(const hal::V1_3::Request& request) { }; } -Result convert(const hal::V1_3::Request::MemoryPool& memoryPool) { +GeneralResult convert(const hal::V1_3::Request::MemoryPool& memoryPool) { using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator; switch (memoryPool.getDiscriminator()) { case Discriminator::hidlMemory: @@ -228,15 +231,16 @@ Result convert(const hal::V1_3::Request::MemoryPool& memory case Discriminator::token: return static_cast(memoryPool.token()); } - return NN_ERROR() << "Invalid Request::MemoryPool 
discriminator " - << underlyingType(memoryPool.getDiscriminator()); + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid Request::MemoryPool discriminator " + << underlyingType(memoryPool.getDiscriminator()); } -Result convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) { +GeneralResult convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) { constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count(); - const auto makeTimePoint = [](uint64_t count) -> Result { + const auto makeTimePoint = [](uint64_t count) -> GeneralResult { if (count > kTimePointMaxCount) { - return NN_ERROR() + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to convert OptionalTimePoint because the count exceeds the max"; } const auto nanoseconds = std::chrono::nanoseconds{count}; @@ -250,16 +254,17 @@ Result convert(const hal::V1_3::OptionalTimePoint& optionalTi case Discriminator::nanosecondsSinceEpoch: return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch()); } - return NN_ERROR() << "Invalid OptionalTimePoint discriminator " - << underlyingType(optionalTimePoint.getDiscriminator()); + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid OptionalTimePoint discriminator " + << underlyingType(optionalTimePoint.getDiscriminator()); } -Result convert( +GeneralResult convert( const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) { constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count(); - const auto makeTimeoutDuration = [](uint64_t count) -> Result { + const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult { if (count > kTimeoutDurationMaxCount) { - return NN_ERROR() + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to convert OptionalTimeoutDuration because the count exceeds the max"; } return TimeoutDuration{count}; @@ -272,11 +277,12 @@ Result convert( case Discriminator::nanoseconds: return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds()); } - 
return NN_ERROR() << "Invalid OptionalTimeoutDuration discriminator " - << underlyingType(optionalTimeoutDuration.getDiscriminator()); + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid OptionalTimeoutDuration discriminator " + << underlyingType(optionalTimeoutDuration.getDiscriminator()); } -Result convert(const hal::V1_3::ErrorStatus& status) { +GeneralResult convert(const hal::V1_3::ErrorStatus& status) { switch (status) { case hal::V1_3::ErrorStatus::NONE: case hal::V1_3::ErrorStatus::DEVICE_UNAVAILABLE: @@ -289,10 +295,11 @@ Result convert(const hal::V1_3::ErrorStatus& status) { case hal::V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: return static_cast(status); } - return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status); + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Invalid ErrorStatus " << underlyingType(status); } -Result> convert( +GeneralResult> convert( const hardware::hidl_vec& bufferRoles) { return convertVec(bufferRoles); } @@ -304,32 +311,32 @@ namespace { using utils::convert; -nn::Result convert( +nn::GeneralResult convert( const nn::Capabilities::PerformanceInfo& performanceInfo) { return V1_0::utils::convert(performanceInfo); } -nn::Result convert(const nn::DataLocation& dataLocation) { +nn::GeneralResult convert(const nn::DataLocation& dataLocation) { return V1_0::utils::convert(dataLocation); } -nn::Result> convert(const nn::Model::OperandValues& operandValues) { +nn::GeneralResult> convert(const nn::Model::OperandValues& operandValues) { return V1_0::utils::convert(operandValues); } -nn::Result convert(const nn::Memory& memory) { +nn::GeneralResult convert(const nn::Memory& memory) { return V1_0::utils::convert(memory); } -nn::Result convert(const nn::Request::Argument& argument) { +nn::GeneralResult convert(const nn::Request::Argument& argument) { return V1_0::utils::convert(argument); } -nn::Result convert(const nn::Operand::ExtraParams& extraParams) { +nn::GeneralResult convert(const 
nn::Operand::ExtraParams& extraParams) { return V1_2::utils::convert(extraParams); } -nn::Result convert( +nn::GeneralResult convert( const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) { return V1_2::utils::convert(extensionNameAndPrefix); } @@ -338,7 +345,7 @@ template using ConvertOutput = std::decay_t()).value())>; template -nn::Result>> convertVec(const std::vector& arguments) { +nn::GeneralResult>> convertVec(const std::vector& arguments) { hidl_vec> halObject(arguments.size()); for (size_t i = 0; i < arguments.size(); ++i) { halObject[i] = NN_TRY(convert(arguments[i])); @@ -347,42 +354,41 @@ nn::Result>> convertVec(const std::vector& ar } template -nn::Result>> convert(const std::vector& arguments) { +nn::GeneralResult>> convert(const std::vector& arguments) { return convertVec(arguments); } -nn::Result makeMemoryPool(const nn::Memory& memory) { +nn::GeneralResult makeMemoryPool(const nn::Memory& memory) { Request::MemoryPool ret; ret.hidlMemory(NN_TRY(convert(memory))); return ret; } -nn::Result makeMemoryPool(const nn::Request::MemoryDomainToken& token) { +nn::GeneralResult makeMemoryPool(const nn::Request::MemoryDomainToken& token) { Request::MemoryPool ret; ret.token(underlyingType(token)); return ret; } -nn::Result makeMemoryPool( - const std::shared_ptr& /*buffer*/) { - return NN_ERROR() << "Unable to make memory pool from IBuffer"; +nn::GeneralResult makeMemoryPool(const nn::SharedBuffer& /*buffer*/) { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to make memory pool from IBuffer"; } } // anonymous namespace -nn::Result convert(const nn::OperandType& operandType) { +nn::GeneralResult convert(const nn::OperandType& operandType) { return static_cast(operandType); } -nn::Result convert(const nn::OperationType& operationType) { +nn::GeneralResult convert(const nn::OperationType& operationType) { return static_cast(operationType); } -nn::Result convert(const nn::Priority& priority) { +nn::GeneralResult convert(const 
nn::Priority& priority) { return static_cast(priority); } -nn::Result convert(const nn::Capabilities& capabilities) { +nn::GeneralResult convert(const nn::Capabilities& capabilities) { std::vector operandPerformance; operandPerformance.reserve(capabilities.operandPerformance.asVector().size()); std::copy_if(capabilities.operandPerformance.asVector().begin(), @@ -403,7 +409,7 @@ nn::Result convert(const nn::Capabilities& capabilities) { }; } -nn::Result convert( +nn::GeneralResult convert( const nn::Capabilities::OperandPerformance& operandPerformance) { return Capabilities::OperandPerformance{ .type = NN_TRY(convert(operandPerformance.type)), @@ -411,7 +417,7 @@ nn::Result convert( }; } -nn::Result convert(const nn::Operation& operation) { +nn::GeneralResult convert(const nn::Operation& operation) { return Operation{ .type = NN_TRY(convert(operation.type)), .inputs = operation.inputs, @@ -419,14 +425,15 @@ nn::Result convert(const nn::Operation& operation) { }; } -nn::Result convert(const nn::Operand::LifeTime& operandLifeTime) { +nn::GeneralResult convert(const nn::Operand::LifeTime& operandLifeTime) { if (operandLifeTime == nn::Operand::LifeTime::POINTER) { - return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Model cannot be converted because it contains pointer-based memory"; } return static_cast(operandLifeTime); } -nn::Result convert(const nn::Operand& operand) { +nn::GeneralResult convert(const nn::Operand& operand) { return Operand{ .type = NN_TRY(convert(operand.type)), .dimensions = operand.dimensions, @@ -439,9 +446,10 @@ nn::Result convert(const nn::Operand& operand) { }; } -nn::Result convert(const nn::Model& model) { +nn::GeneralResult convert(const nn::Model& model) { if (!hal::utils::hasNoPointerData(model)) { - return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) 
+ << "Model cannot be converted because it contains pointer-based memory"; } return Model{ @@ -454,7 +462,7 @@ nn::Result convert(const nn::Model& model) { }; } -nn::Result convert(const nn::Model::Subgraph& subgraph) { +nn::GeneralResult convert(const nn::Model::Subgraph& subgraph) { auto operands = NN_TRY(convert(subgraph.operands)); // Update number of consumers. @@ -473,11 +481,11 @@ nn::Result convert(const nn::Model::Subgraph& subgraph) { }; } -nn::Result convert(const nn::BufferDesc& bufferDesc) { +nn::GeneralResult convert(const nn::BufferDesc& bufferDesc) { return BufferDesc{.dimensions = bufferDesc.dimensions}; } -nn::Result convert(const nn::BufferRole& bufferRole) { +nn::GeneralResult convert(const nn::BufferRole& bufferRole) { return BufferRole{ .modelIndex = bufferRole.modelIndex, .ioIndex = bufferRole.ioIndex, @@ -485,9 +493,10 @@ nn::Result convert(const nn::BufferRole& bufferRole) { }; } -nn::Result convert(const nn::Request& request) { +nn::GeneralResult convert(const nn::Request& request) { if (!hal::utils::hasNoPointerData(request)) { - return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory"; + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Request cannot be converted because it contains pointer-based memory"; } return Request{ @@ -497,30 +506,31 @@ nn::Result convert(const nn::Request& request) { }; } -nn::Result convert(const nn::Request::MemoryPool& memoryPool) { +nn::GeneralResult convert(const nn::Request::MemoryPool& memoryPool) { return std::visit([](const auto& o) { return makeMemoryPool(o); }, memoryPool); } -nn::Result convert(const nn::OptionalTimePoint& optionalTimePoint) { +nn::GeneralResult convert(const nn::OptionalTimePoint& optionalTimePoint) { OptionalTimePoint ret; if (optionalTimePoint.has_value()) { const auto count = optionalTimePoint.value().time_since_epoch().count(); if (count < 0) { - return NN_ERROR() << "Unable to convert OptionalTimePoint because time since epoch " - 
"count is negative"; + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Unable to convert OptionalTimePoint because time since epoch count is " + "negative"; } ret.nanosecondsSinceEpoch(count); } return ret; } -nn::Result convert( +nn::GeneralResult convert( const nn::OptionalTimeoutDuration& optionalTimeoutDuration) { OptionalTimeoutDuration ret; if (optionalTimeoutDuration.has_value()) { const auto count = optionalTimeoutDuration.value().count(); if (count < 0) { - return NN_ERROR() + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to convert OptionalTimeoutDuration because count is negative"; } ret.nanoseconds(count); @@ -528,7 +538,7 @@ nn::Result convert( return ret; } -nn::Result convert(const nn::ErrorStatus& errorStatus) { +nn::GeneralResult convert(const nn::ErrorStatus& errorStatus) { switch (errorStatus) { case nn::ErrorStatus::NONE: case nn::ErrorStatus::DEVICE_UNAVAILABLE: @@ -545,7 +555,7 @@ nn::Result convert(const nn::ErrorStatus& errorStatus) { } } -nn::Result> convert(const std::vector& bufferRoles) { +nn::GeneralResult> convert(const std::vector& bufferRoles) { return convertVec(bufferRoles); } diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp new file mode 100644 index 0000000000..c215f39ecf --- /dev/null +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Device.h" + +#include "Buffer.h" +#include "Callbacks.h" +#include "Conversions.h" +#include "PreparedModel.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { +namespace { + +nn::GeneralResult>> convert( + const std::vector& preparedModels) { + hidl_vec> hidlPreparedModels(preparedModels.size()); + for (size_t i = 0; i < preparedModels.size(); ++i) { + std::any underlyingResource = preparedModels[i]->getUnderlyingResource(); + if (const auto* hidlPreparedModel = + std::any_cast>(&underlyingResource)) { + hidlPreparedModels[i] = *hidlPreparedModel; + } else { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "Unable to convert from nn::IPreparedModel to V1_3::IPreparedModel"; + } + } + return hidlPreparedModels; +} + +nn::GeneralResult convert( + nn::GeneralResult> result) { + return NN_TRY(std::move(result)); +} + +} // namespace + +nn::GeneralResult> Device::create(std::string name, + sp device) { + if (name.empty()) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_3::utils::Device::create must have non-empty name"; + } + if (device == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_3::utils::Device::create must have non-null device"; + } + + auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get())); + const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get())); + auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get())); + auto capabilities = NN_TRY(V1_2::utils::initCapabilities(device.get())); + const auto numberOfCacheFilesNeeded = + NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get())); + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device)); + return 
std::make_shared( + PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType, + std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded, + std::move(device), std::move(deathHandler)); +} + +Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString, + nn::DeviceType deviceType, std::vector extensions, + nn::Capabilities capabilities, + std::pair numberOfCacheFilesNeeded, sp device, + hal::utils::DeathHandler deathHandler) + : kName(std::move(name)), + kVersionString(std::move(versionString)), + kDeviceType(deviceType), + kExtensions(std::move(extensions)), + kCapabilities(std::move(capabilities)), + kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded), + kDevice(std::move(device)), + kDeathHandler(std::move(deathHandler)) {} + +const std::string& Device::getName() const { + return kName; +} + +const std::string& Device::getVersionString() const { + return kVersionString; +} + +nn::Version Device::getFeatureLevel() const { + return nn::Version::ANDROID_R; +} + +nn::DeviceType Device::getType() const { + return kDeviceType; +} + +const std::vector& Device::getSupportedExtensions() const { + return kExtensions; +} + +const nn::Capabilities& Device::getCapabilities() const { + return kCapabilities; +} + +std::pair Device::getNumberOfCacheFilesNeeded() const { + return kNumberOfCacheFilesNeeded; +} + +nn::GeneralResult Device::wait() const { + const auto ret = kDevice->ping(); + return hal::utils::handleTransportError(ret); +} + +nn::GeneralResult> Device::getSupportedOperations(const nn::Model& model) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + + nn::GeneralResult> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + auto cb = [&result, &model](ErrorStatus status, const hidl_vec& supportedOperations) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) + << "IDevice::getSupportedOperations_1_3 failed with " << toString(status); + } else if (supportedOperations.size() != model.main.operations.size()) { + result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "IDevice::getSupportedOperations_1_3 returned vector of size " + << supportedOperations.size() << " but expected " + << model.main.operations.size(); + } else { + result = supportedOperations; + } + }; + + const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +nn::GeneralResult Device::prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + // Ensure that model is ready for IPC. 
+ std::optional maybeModelInShared; + const nn::Model& modelInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared)); + + const auto hidlModel = NN_TRY(convert(modelInShared)); + const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference)); + const auto hidlPriority = NN_TRY(convert(priority)); + const auto hidlDeadline = NN_TRY(convert(deadline)); + const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache)); + const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache)); + const auto hidlToken = token; + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = + kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline, + hidlModelCache, hidlDataCache, hidlToken, cb); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status); + } + + return cb->get(); +} + +nn::GeneralResult Device::prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + const auto hidlDeadline = NN_TRY(convert(deadline)); + const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache)); + const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache)); + const auto hidlToken = token; + + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache, + hidlToken, cb); + const auto status = NN_TRY(hal::utils::handleTransportError(ret)); + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return 
NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status); + } + + return cb->get(); +} + +nn::GeneralResult Device::allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const { + const auto hidlDesc = NN_TRY(convert(desc)); + const auto hidlPreparedModels = NN_TRY(convert(preparedModels)); + const auto hidlInputRoles = NN_TRY(convert(inputRoles)); + const auto hidlOutputRoles = NN_TRY(convert(outputRoles)); + + nn::GeneralResult result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "uninitialized"; + auto cb = [&result](ErrorStatus status, const sp& buffer, uint32_t token) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status); + } else if (buffer == nullptr) { + result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr"; + } else if (token == 0) { + result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)"; + } else { + result = convert( + Buffer::create(buffer, static_cast(token))); + } + }; + + const auto ret = + kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; +} + +} // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp new file mode 100644 index 0000000000..df9b280119 --- /dev/null +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "PreparedModel.h" + +#include "Callbacks.h" +#include "Conversions.h" +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::V1_3::utils { +namespace { + +nn::GeneralResult, nn::Timing>> +convertExecutionResultsHelper(const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)), + NN_TRY(validatedConvertToCanonical(timing))); +} + +nn::ExecutionResult, nn::Timing>> convertExecutionResults( + const hidl_vec& outputShapes, const V1_2::Timing& timing) { + return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing)); +} + +nn::GeneralResult> convertSyncFences( + const std::vector& syncFences) { + hidl_vec handles(syncFences.size()); + for (size_t i = 0; i < syncFences.size(); ++i) { + handles[i] = NN_TRY(V1_2::utils::convert(syncFences[i].getHandle())); + } + return handles; +} + +nn::GeneralResult> convertFencedExecutionCallbackResults( + const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) { + return std::make_pair(NN_TRY(validatedConvertToCanonical(timingLaunched)), + NN_TRY(validatedConvertToCanonical(timingFenced))); +} + +nn::GeneralResult> +convertExecuteFencedResults(const hidl_handle& syncFence, + const sp& callback) { + auto resultSyncFence = nn::SyncFence::createAsSignaled(); + if (syncFence.getNativeHandle() != nullptr) { + auto 
nativeHandle = NN_TRY(validatedConvertToCanonical(syncFence)); + resultSyncFence = NN_TRY(hal::utils::makeGeneralFailure( + nn::SyncFence::create(std::move(nativeHandle)), nn::ErrorStatus::GENERAL_FAILURE)); + } + + if (callback == nullptr) { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null"; + } + + // Create callback which can be used to retrieve the execution error status and timings. + nn::ExecuteFencedInfoCallback resultCallback = + [callback]() -> nn::GeneralResult> { + nn::GeneralResult> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched, + const V1_2::Timing& timingFenced) { + if (status != ErrorStatus::NONE) { + const auto canonical = validatedConvertToCanonical(status).value_or( + nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status); + } else { + result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced); + } + }; + + const auto ret = callback->getExecutionInfo(cb); + NN_TRY(hal::utils::handleTransportError(ret)); + + return result; + }; + + return std::make_pair(std::move(resultSyncFence), std::move(resultCallback)); +} + +} // namespace + +nn::GeneralResult> PreparedModel::create( + sp preparedModel) { + if (preparedModel == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "V1_3::utils::PreparedModel::create must have non-null preparedModel"; + } + + auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel)); + return std::make_shared(PrivateConstructorTag{}, std::move(preparedModel), + std::move(deathHandler)); +} + +PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp preparedModel, + hal::utils::DeathHandler deathHandler) + : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {} + +nn::ExecutionResult, nn::Timing>> +PreparedModel::executeSynchronously(const Request& 
request, V1_2::MeasureTiming measure, + const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const { + nn::ExecutionResult, nn::Timing>> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + const auto cb = [&result](ErrorStatus status, const hidl_vec& outputShapes, + const V1_2::Timing& timing) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status); + } else { + result = convertExecutionResults(outputShapes, timing); + } + }; + + const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline, + loopTimeoutDuration, cb); + NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret))); + + return result; +} + +nn::ExecutionResult, nn::Timing>> +PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming measure, + const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const { + const auto cb = sp::make(); + const auto scoped = kDeathHandler.protectCallback(cb.get()); + + const auto ret = + kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb); + const auto status = + NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret))); + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status); + } + + return cb->get(); +} + +nn::ExecutionResult, nn::Timing>> PreparedModel::execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration) const { + // Ensure that request is ready for IPC. 
+ std::optional maybeRequestInShared; + const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared))); + + const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared))); + const auto hidlMeasure = + NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure))); + const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline))); + const auto hidlLoopTimeoutDuration = + NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration))); + + nn::ExecutionResult, nn::Timing>> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + const bool preferSynchronous = true; + + // Execute synchronously if allowed. + if (preferSynchronous) { + result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline, + hidlLoopTimeoutDuration); + } + + // Run asynchronous execution if execution has not already completed. + if (!result.has_value()) { + result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline, + hidlLoopTimeoutDuration); + } + + // Flush output buffers if the execution was successful. + if (result.has_value()) { + NN_TRY(hal::utils::makeExecutionFailure( + hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared))); + } + + return result; +} + +nn::GeneralResult> +PreparedModel::executeFenced(const nn::Request& request, const std::vector& waitFor, + nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration, + const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const { + // Ensure that request is ready for IPC.
+ std::optional maybeRequestInShared; + const nn::Request& requestInShared = + NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)); + + const auto hidlRequest = NN_TRY(convert(requestInShared)); + const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor)); + const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure)); + const auto hidlDeadline = NN_TRY(convert(deadline)); + const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration)); + const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence)); + + nn::GeneralResult> result = + NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized"; + auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence, + const sp& callback) { + if (status != ErrorStatus::NONE) { + const auto canonical = + validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE); + result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status); + } else { + result = convertExecuteFencedResults(syncFence, callback); + } + }; + + const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure, + hidlDeadline, hidlLoopTimeoutDuration, + hidlTimeoutDurationAfterFence, cb); + NN_TRY(hal::utils::handleTransportError(ret)); + auto [syncFence, callback] = NN_TRY(std::move(result)); + + // If executeFenced required the request memory to be moved into shared memory, block here until + // the fenced execution has completed and flush the memory back. 
+ if (maybeRequestInShared.has_value()) { + const auto state = syncFence.syncWait({}); + if (state != nn::SyncFence::FenceState::SIGNALED) { + return NN_ERROR() << "syncWait failed with " << state; + } + NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)); + } + + return std::make_pair(std::move(syncFence), std::move(callback)); +} + +std::any PreparedModel::getUnderlyingResource() const { + sp resource = kPreparedModel; + return resource; +} + +} // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Service.cpp b/neuralnetworks/1.3/utils/src/Service.cpp new file mode 100644 index 0000000000..62887fb41a --- /dev/null +++ b/neuralnetworks/1.3/utils/src/Service.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Service.h" + +#include +#include +#include +#include +#include +#include "Device.h" + +namespace android::hardware::neuralnetworks::V1_3::utils { + +nn::GeneralResult getDevice(const std::string& name) { + hal::utils::ResilientDevice::Factory makeDevice = + [name](bool blocking) -> nn::GeneralResult { + auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name); + if (service == nullptr) { + return NN_ERROR() << (blocking ? 
"getService" : "tryGetService") << " returned nullptr"; + } + return Device::create(name, std::move(service)); + }; + + return hal::utils::ResilientDevice::create(std::move(makeDevice)); +} + +} // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp index b61dc970ed..21562cffaf 100644 --- a/neuralnetworks/utils/common/Android.bp +++ b/neuralnetworks/utils/common/Android.bp @@ -20,6 +20,7 @@ cc_library_static { srcs: ["src/*"], local_include_dirs: ["include/nnapi/hal"], export_include_dirs: ["include"], + cflags: ["-Wthread-safety"], static_libs: [ "neuralnetworks_types", ], diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h index 8c013682ce..254a3d4acf 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h @@ -19,6 +19,7 @@ #include #include +#include #include // Shorthand @@ -42,14 +43,16 @@ bool hasNoPointerData(const nn::Model& model); bool hasNoPointerData(const nn::Request& request); // Relocate pointer-based data to shared memory. -nn::Result flushDataFromPointerToShared(const nn::Model& model); -nn::Result flushDataFromPointerToShared(const nn::Request& request); +nn::GeneralResult> flushDataFromPointerToShared( + const nn::Model* model, std::optional* maybeModelInSharedOut); +nn::GeneralResult> flushDataFromPointerToShared( + const nn::Request* request, std::optional* maybeRequestInSharedOut); // Undoes `flushDataFromPointerToShared` on a Request object. More specifically, // `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed // Request object back to the output pointer-based memory in the original Request object. 
-nn::Result unflushDataFromSharedToPointer(const nn::Request& request, - const nn::Request& requestInShared); +nn::GeneralResult unflushDataFromSharedToPointer( + const nn::Request& request, const std::optional& maybeRequestInShared); std::vector countNumberOfConsumers(size_t numberOfOperands, const std::vector& operations); diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h new file mode 100644 index 0000000000..e4046b5407 --- /dev/null +++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +template +nn::GeneralResult handleTransportError(const hardware::Return& ret) { + if (ret.isDeadObject()) { + return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) + << "Return<>::isDeadObject returned true: " << ret.description(); + } + if (!ret.isOk()) { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Return<>::isOk returned false: " << ret.description(); + } + return ret; +} + +template <> +inline nn::GeneralResult handleTransportError(const hardware::Return& ret) { + if (ret.isDeadObject()) { + return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) + << "Return<>::isDeadObject returned true: " << ret.description(); + } + if (!ret.isOk()) { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Return<>::isOk returned false: " << ret.description(); + } + return {}; +} + +template +nn::GeneralResult makeGeneralFailure(nn::Result result, nn::ErrorStatus status) { + if (!result.has_value()) { + return nn::error(status) << std::move(result).error(); + } + return std::move(result).value(); +} + +template <> +inline nn::GeneralResult makeGeneralFailure(nn::Result result, nn::ErrorStatus status) { + if (!result.has_value()) { + return nn::error(status) << std::move(result).error(); + } + return {}; +} + +template +nn::ExecutionResult makeExecutionFailure(nn::Result result, nn::ErrorStatus status) { + if (!result.has_value()) { + return nn::error(status) << std::move(result).error(); + } + return std::move(result).value(); +} + +template <> +inline nn::ExecutionResult makeExecutionFailure(nn::Result result, + nn::ErrorStatus status) { + if (!result.has_value()) { + return nn::error(status) << std::move(result).error(); + } + return {}; +} + +template +nn::ExecutionResult makeExecutionFailure(nn::GeneralResult result) { + if (!result.has_value()) { + const auto [message, status] = std::move(result).error(); + return nn::error(status) << message; + } + return 
std::move(result).value(); +} + +template <> +inline nn::ExecutionResult makeExecutionFailure(nn::GeneralResult result) { + if (!result.has_value()) { + const auto [message, status] = std::move(result).error(); + return nn::error(status) << message; + } + return {}; +} + +} // namespace android::hardware::neuralnetworks::utils \ No newline at end of file diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h new file mode 100644 index 0000000000..85bd6137ee --- /dev/null +++ b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +class IProtectedCallback { + public: + /** + * Marks this object as a dead object. + */ + virtual void notifyAsDeadObject() = 0; + + // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. + // E.g., std::unique_ptr. 
+ virtual ~IProtectedCallback() = default; + + protected: + // Protect the non-destructor special member functions to prevent object slicing. + IProtectedCallback() = default; + IProtectedCallback(const IProtectedCallback&) = default; + IProtectedCallback(IProtectedCallback&&) noexcept = default; + IProtectedCallback& operator=(const IProtectedCallback&) = default; + IProtectedCallback& operator=(IProtectedCallback&&) noexcept = default; +}; + +// Thread safe class +class DeathRecipient final : public hidl_death_recipient { + public: + void serviceDied(uint64_t /*cookie*/, const wp& /*who*/) override; + // Precondition: `killable` must be non-null. + void add(IProtectedCallback* killable) const; + // Precondition: `killable` must be non-null. + void remove(IProtectedCallback* killable) const; + + private: + mutable std::mutex mMutex; + mutable std::vector mObjects GUARDED_BY(mMutex); +}; + +class DeathHandler final { + public: + static nn::GeneralResult create(sp object); + + DeathHandler(const DeathHandler&) = delete; + DeathHandler(DeathHandler&&) noexcept = default; + DeathHandler& operator=(const DeathHandler&) = delete; + DeathHandler& operator=(DeathHandler&&) noexcept = delete; + ~DeathHandler(); + + using Cleanup = std::function; + // Precondition: `killable` must be non-null. 
+ [[nodiscard]] base::ScopeGuard protectCallback(IProtectedCallback* killable) const; + + private: + DeathHandler(sp object, sp deathRecipient); + + sp kObject; + sp kDeathRecipient; +}; + +} // namespace android::hardware::neuralnetworks::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h new file mode 100644 index 0000000000..996ec1ee81 --- /dev/null +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +class ResilientBuffer final : public nn::IBuffer { + struct PrivateConstructorTag {}; + + public: + using Factory = std::function(bool blocking)>; + + static nn::GeneralResult> create(Factory makeBuffer); + + explicit ResilientBuffer(PrivateConstructorTag tag, Factory makeBuffer, + nn::SharedBuffer buffer); + + nn::SharedBuffer getBuffer() const; + nn::SharedBuffer recover(const nn::IBuffer* failingBuffer, bool blocking) const; + + nn::Request::MemoryDomainToken getToken() const override; + + nn::GeneralResult copyTo(const nn::Memory& dst) const override; + + nn::GeneralResult copyFrom(const nn::Memory& src, + const nn::Dimensions& dimensions) const override; + + private: + const Factory kMakeBuffer; + mutable std::mutex mMutex; + mutable nn::SharedBuffer mBuffer GUARDED_BY(mMutex); +}; + +} // namespace android::hardware::neuralnetworks::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h new file mode 100644 index 0000000000..4f1afb983a --- /dev/null +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +class ResilientDevice final : public nn::IDevice, + public std::enable_shared_from_this { + struct PrivateConstructorTag {}; + + public: + using Factory = std::function(bool blocking)>; + + static nn::GeneralResult> create(Factory makeDevice); + + explicit ResilientDevice(PrivateConstructorTag tag, Factory makeDevice, std::string name, + std::string versionString, std::vector extensions, + nn::Capabilities capabilities, nn::SharedDevice device); + + nn::SharedDevice getDevice() const; + nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const; + + const std::string& getName() const override; + const std::string& getVersionString() const override; + nn::Version getFeatureLevel() const override; + nn::DeviceType getType() const override; + const std::vector& getSupportedExtensions() const override; + const nn::Capabilities& getCapabilities() const override; + std::pair getNumberOfCacheFilesNeeded() const override; + + nn::GeneralResult wait() const override; + + nn::GeneralResult> getSupportedOperations( + const nn::Model& model) const override; + + nn::GeneralResult prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, + 
nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, + const nn::CacheToken& token) const override; + + nn::GeneralResult allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const override; + + private: + nn::GeneralResult prepareModelInternal( + bool blocking, const nn::Model& model, nn::ExecutionPreference preference, + nn::Priority priority, nn::OptionalTimePoint deadline, + const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const; + nn::GeneralResult prepareModelFromCacheInternal( + bool blocking, nn::OptionalTimePoint deadline, + const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const; + nn::GeneralResult allocateInternal( + bool blocking, const nn::BufferDesc& desc, + const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const; + + const Factory kMakeDevice; + const std::string kName; + const std::string kVersionString; + const std::vector kExtensions; + const nn::Capabilities kCapabilities; + mutable std::mutex mMutex; + mutable nn::SharedDevice mDevice GUARDED_BY(mMutex); +}; + +} // namespace android::hardware::neuralnetworks::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h new file mode 100644 index 0000000000..c2940d16bc --- /dev/null +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +class ResilientPreparedModel final : public nn::IPreparedModel { + struct PrivateConstructorTag {}; + + public: + using Factory = std::function(bool blocking)>; + + static nn::GeneralResult> create( + Factory makePreparedModel); + + explicit ResilientPreparedModel(PrivateConstructorTag tag, Factory makePreparedModel, + nn::SharedPreparedModel preparedModel); + + nn::SharedPreparedModel getPreparedModel() const; + nn::SharedPreparedModel recover(const nn::IPreparedModel* failingPreparedModel, + bool blocking) const; + + nn::ExecutionResult, nn::Timing>> execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override; + + nn::GeneralResult> executeFenced( + const nn::Request& request, const std::vector& waitFor, + nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, + const nn::OptionalTimeoutDuration& loopTimeoutDuration, + const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + + std::any getUnderlyingResource() const override; + + private: + const 
Factory kMakePreparedModel; + mutable std::mutex mMutex; + mutable nn::SharedPreparedModel mPreparedModel GUARDED_BY(mMutex); +}; + +} // namespace android::hardware::neuralnetworks::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H diff --git a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h new file mode 100644 index 0000000000..7103c6b375 --- /dev/null +++ b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H + +#include + +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +// This class is thread safe. +template +class TransferValue final { + public: + void put(Type object) const; + [[nodiscard]] Type take() const; + + private: + mutable std::mutex mMutex; + mutable std::condition_variable mCondition; + mutable std::optional mObject GUARDED_BY(mMutex); +}; + +// template implementation + +template +void TransferValue::put(Type object) const { + { + std::lock_guard guard(mMutex); + // Immediately return if value already exists. 
+ if (mObject.has_value()) return; + mObject.emplace(std::move(object)); + } + mCondition.notify_all(); +} + +template +Type TransferValue::take() const { + std::unique_lock lock(mMutex); + base::ScopedLockAssertion lockAssertion(mMutex); + mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); }); + std::optional object; + std::swap(object, mObject); + return std::move(object).value(); +} + +} // namespace android::hardware::neuralnetworks::utils + +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp index 667189b2a0..25659728c3 100644 --- a/neuralnetworks/utils/common/src/CommonUtils.cpp +++ b/neuralnetworks/utils/common/src/CommonUtils.cpp @@ -16,6 +16,8 @@ #include "CommonUtils.h" +#include "HandleError.h" + #include #include #include @@ -25,6 +27,7 @@ #include #include +#include #include #include #include @@ -111,8 +114,18 @@ bool hasNoPointerData(const nn::Request& request) { return hasNoPointerData(request.inputs) && hasNoPointerData(request.outputs); } -nn::Result flushDataFromPointerToShared(const nn::Model& model) { - auto modelInShared = model; +nn::GeneralResult> flushDataFromPointerToShared( + const nn::Model* model, std::optional* maybeModelInSharedOut) { + CHECK(model != nullptr); + CHECK(maybeModelInSharedOut != nullptr); + + if (hasNoPointerData(*model)) { + return *model; + } + + // Make a copy of the model in order to make modifications. The modified model is returned to + // the caller through `maybeModelInSharedOut` if the function succeeds. 
+ nn::Model modelInShared = *model; nn::ConstantMemoryBuilder memoryBuilder(modelInShared.pools.size()); copyPointersToSharedMemory(&modelInShared.main, &memoryBuilder); @@ -126,11 +139,22 @@ nn::Result flushDataFromPointerToShared(const nn::Model& model) { modelInShared.pools.push_back(std::move(memory)); } - return modelInShared; + *maybeModelInSharedOut = modelInShared; + return **maybeModelInSharedOut; } -nn::Result flushDataFromPointerToShared(const nn::Request& request) { - auto requestInShared = request; +nn::GeneralResult> flushDataFromPointerToShared( + const nn::Request* request, std::optional* maybeRequestInSharedOut) { + CHECK(request != nullptr); + CHECK(maybeRequestInSharedOut != nullptr); + + if (hasNoPointerData(*request)) { + return *request; + } + + // Make a copy of the request in order to make modifications. The modified request is returned + // to the caller through `maybeRequestInSharedOut` if the function succeeds. + nn::Request requestInShared = *request; // Change input pointers to shared memory. nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size()); @@ -171,15 +195,17 @@ nn::Result flushDataFromPointerToShared(const nn::Request& request) requestInShared.pools.push_back(std::move(memory)); } - return requestInShared; + *maybeRequestInSharedOut = requestInShared; + return **maybeRequestInSharedOut; } -nn::Result unflushDataFromSharedToPointer(const nn::Request& request, - const nn::Request& requestInShared) { - if (requestInShared.pools.empty() || - !std::holds_alternative(requestInShared.pools.back())) { +nn::GeneralResult unflushDataFromSharedToPointer( + const nn::Request& request, const std::optional& maybeRequestInShared) { + if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() || + !std::holds_alternative(maybeRequestInShared->pools.back())) { return {}; } + const auto& requestInShared = *maybeRequestInShared; // Map the memory. 
const auto& outputMemory = std::get(requestInShared.pools.back()); diff --git a/neuralnetworks/utils/common/src/ProtectCallback.cpp b/neuralnetworks/utils/common/src/ProtectCallback.cpp new file mode 100644 index 0000000000..1d9a3074db --- /dev/null +++ b/neuralnetworks/utils/common/src/ProtectCallback.cpp @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ProtectCallback.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +void DeathRecipient::serviceDied(uint64_t /*cookie*/, const wp& /*who*/) { + std::lock_guard guard(mMutex); + std::for_each(mObjects.begin(), mObjects.end(), + [](IProtectedCallback* killable) { killable->notifyAsDeadObject(); }); +} + +void DeathRecipient::add(IProtectedCallback* killable) const { + CHECK(killable != nullptr); + std::lock_guard guard(mMutex); + mObjects.push_back(killable); +} + +void DeathRecipient::remove(IProtectedCallback* killable) const { + CHECK(killable != nullptr); + std::lock_guard guard(mMutex); + const auto removedIter = std::remove(mObjects.begin(), mObjects.end(), killable); + mObjects.erase(removedIter); +} + +nn::GeneralResult DeathHandler::create(sp object) { + if (object == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "utils::DeathHandler::create must have non-null 
object"; + } + auto deathRecipient = sp::make(); + + const auto ret = object->linkToDeath(deathRecipient, /*cookie=*/0); + const bool success = NN_TRY(handleTransportError(ret)); + if (!success) { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IBase::linkToDeath returned false"; + } + + return DeathHandler(std::move(object), std::move(deathRecipient)); +} + +DeathHandler::DeathHandler(sp object, sp deathRecipient) + : kObject(std::move(object)), kDeathRecipient(std::move(deathRecipient)) { + CHECK(kObject != nullptr); + CHECK(kDeathRecipient != nullptr); +} + +DeathHandler::~DeathHandler() { + if (kObject != nullptr && kDeathRecipient != nullptr) { + const auto ret = kObject->unlinkToDeath(kDeathRecipient); + const auto maybeSuccess = handleTransportError(ret); + if (!maybeSuccess.has_value()) { + LOG(ERROR) << maybeSuccess.error().message; + } else if (!maybeSuccess.value()) { + LOG(ERROR) << "IBase::linkToDeath returned false"; + } + } +} + +[[nodiscard]] base::ScopeGuard DeathHandler::protectCallback( + IProtectedCallback* killable) const { + CHECK(killable != nullptr); + kDeathRecipient->add(killable); + return base::make_scope_guard( + [deathRecipient = kDeathRecipient, killable] { deathRecipient->remove(killable); }); +} + +} // namespace android::hardware::neuralnetworks::utils diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp new file mode 100644 index 0000000000..984295b729 --- /dev/null +++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ResilientBuffer.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { + +nn::GeneralResult> ResilientBuffer::create( + Factory makeBuffer) { + if (makeBuffer == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "utils::ResilientBuffer::create must have non-empty makeBuffer"; + } + auto buffer = NN_TRY(makeBuffer(/*blocking=*/true)); + CHECK(buffer != nullptr); + return std::make_shared(PrivateConstructorTag{}, std::move(makeBuffer), + std::move(buffer)); +} + +ResilientBuffer::ResilientBuffer(PrivateConstructorTag /*tag*/, Factory makeBuffer, + nn::SharedBuffer buffer) + : kMakeBuffer(std::move(makeBuffer)), mBuffer(std::move(buffer)) { + CHECK(kMakeBuffer != nullptr); + CHECK(mBuffer != nullptr); +} + +nn::SharedBuffer ResilientBuffer::getBuffer() const { + std::lock_guard guard(mMutex); + return mBuffer; +} +nn::SharedBuffer ResilientBuffer::recover(const nn::IBuffer* /*failingBuffer*/, + bool /*blocking*/) const { + std::lock_guard guard(mMutex); + return mBuffer; +} + +nn::Request::MemoryDomainToken ResilientBuffer::getToken() const { + return getBuffer()->getToken(); +} + +nn::GeneralResult ResilientBuffer::copyTo(const nn::Memory& dst) const { + return getBuffer()->copyTo(dst); +} + +nn::GeneralResult ResilientBuffer::copyFrom(const nn::Memory& src, + const nn::Dimensions& dimensions) const { + return getBuffer()->copyFrom(src, dimensions); +} + +} // namespace 
android::hardware::neuralnetworks::utils diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp new file mode 100644 index 0000000000..95662d96bd --- /dev/null +++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp @@ -0,0 +1,236 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ResilientDevice.h" + +#include "ResilientBuffer.h" +#include "ResilientPreparedModel.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace android::hardware::neuralnetworks::utils { +namespace { + +template +auto protect(const ResilientDevice& resilientDevice, const FnType& fn, bool blocking) + -> decltype(fn(*resilientDevice.getDevice())) { + auto device = resilientDevice.getDevice(); + auto result = fn(*device); + + // Immediately return if device is not dead. 
+ if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) { + return result; + } + + device = resilientDevice.recover(device.get(), blocking); + return fn(*device); +} + +} // namespace + +nn::GeneralResult> ResilientDevice::create( + Factory makeDevice) { + if (makeDevice == nullptr) { + return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) + << "utils::ResilientDevice::create must have non-empty makeDevice"; + } + auto device = NN_TRY(makeDevice(/*blocking=*/true)); + CHECK(device != nullptr); + + auto name = device->getName(); + auto versionString = device->getVersionString(); + auto extensions = device->getSupportedExtensions(); + auto capabilities = device->getCapabilities(); + + return std::make_shared(PrivateConstructorTag{}, std::move(makeDevice), + std::move(name), std::move(versionString), + std::move(extensions), std::move(capabilities), + std::move(device)); +} + +ResilientDevice::ResilientDevice(PrivateConstructorTag /*tag*/, Factory makeDevice, + std::string name, std::string versionString, + std::vector extensions, + nn::Capabilities capabilities, nn::SharedDevice device) + : kMakeDevice(std::move(makeDevice)), + kName(std::move(name)), + kVersionString(std::move(versionString)), + kExtensions(std::move(extensions)), + kCapabilities(std::move(capabilities)), + mDevice(std::move(device)) { + CHECK(kMakeDevice != nullptr); + CHECK(mDevice != nullptr); +} + +nn::SharedDevice ResilientDevice::getDevice() const { + std::lock_guard guard(mMutex); + return mDevice; +} + +nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool blocking) const { + std::lock_guard guard(mMutex); + + // Another caller updated the failing device. 
+ if (mDevice.get() != failingDevice) { + return mDevice; + } + + auto maybeDevice = kMakeDevice(blocking); + if (!maybeDevice.has_value()) { + const auto& [message, code] = maybeDevice.error(); + LOG(ERROR) << "Failed to recover dead device with error " << code << ": " << message; + return mDevice; + } + auto device = std::move(maybeDevice).value(); + + // TODO(b/173081926): Instead of CHECKing to ensure the cache has not been changed, return an + // invalid/"null" IDevice object that always fails. + CHECK_EQ(kName, device->getName()); + CHECK_EQ(kVersionString, device->getVersionString()); + CHECK(kExtensions == device->getSupportedExtensions()); + CHECK_EQ(kCapabilities, device->getCapabilities()); + + mDevice = std::move(device); + return mDevice; +} + +const std::string& ResilientDevice::getName() const { + return kName; +} + +const std::string& ResilientDevice::getVersionString() const { + return kVersionString; +} + +nn::Version ResilientDevice::getFeatureLevel() const { + return getDevice()->getFeatureLevel(); +} + +nn::DeviceType ResilientDevice::getType() const { + return getDevice()->getType(); +} + +const std::vector& ResilientDevice::getSupportedExtensions() const { + return kExtensions; +} + +const nn::Capabilities& ResilientDevice::getCapabilities() const { + return kCapabilities; +} + +std::pair ResilientDevice::getNumberOfCacheFilesNeeded() const { + return getDevice()->getNumberOfCacheFilesNeeded(); +} + +nn::GeneralResult ResilientDevice::wait() const { + const auto fn = [](const nn::IDevice& device) { return device.wait(); }; + return protect(*this, fn, /*blocking=*/true); +} + +nn::GeneralResult> ResilientDevice::getSupportedOperations( + const nn::Model& model) const { + const auto fn = [&model](const nn::IDevice& device) { + return device.getSupportedOperations(model); + }; + return protect(*this, fn, /*blocking=*/false); +} + +nn::GeneralResult ResilientDevice::prepareModel( + const nn::Model& model, nn::ExecutionPreference preference, 
nn::Priority priority, + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + auto self = shared_from_this(); + ResilientPreparedModel::Factory makePreparedModel = + [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache, + token](bool blocking) -> nn::GeneralResult { + return device->prepareModelInternal(blocking, model, preference, priority, deadline, + modelCache, dataCache, token); + }; + return ResilientPreparedModel::create(std::move(makePreparedModel)); +} + +nn::GeneralResult ResilientDevice::prepareModelFromCache( + nn::OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + auto self = shared_from_this(); + ResilientPreparedModel::Factory makePreparedModel = + [device = std::move(self), deadline, modelCache, dataCache, + token](bool blocking) -> nn::GeneralResult { + return device->prepareModelFromCacheInternal(blocking, deadline, modelCache, dataCache, + token); + }; + return ResilientPreparedModel::create(std::move(makePreparedModel)); +} + +nn::GeneralResult ResilientDevice::allocate( + const nn::BufferDesc& desc, const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const { + auto self = shared_from_this(); + ResilientBuffer::Factory makeBuffer = + [device = std::move(self), desc, preparedModels, inputRoles, + outputRoles](bool blocking) -> nn::GeneralResult { + return device->allocateInternal(blocking, desc, preparedModels, inputRoles, outputRoles); + }; + return ResilientBuffer::create(std::move(makeBuffer)); +} + +nn::GeneralResult ResilientDevice::prepareModelInternal( + bool blocking, const nn::Model& model, nn::ExecutionPreference preference, + nn::Priority priority, nn::OptionalTimePoint deadline, + const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + const auto fn 
= [&model, preference, priority, deadline, &modelCache, &dataCache, + token](const nn::IDevice& device) { + return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache, + token); + }; + return protect(*this, fn, blocking); +} + +nn::GeneralResult ResilientDevice::prepareModelFromCacheInternal( + bool blocking, nn::OptionalTimePoint deadline, + const std::vector& modelCache, + const std::vector& dataCache, const nn::CacheToken& token) const { + const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) { + return device.prepareModelFromCache(deadline, modelCache, dataCache, token); + }; + return protect(*this, fn, blocking); +} + +nn::GeneralResult ResilientDevice::allocateInternal( + bool blocking, const nn::BufferDesc& desc, + const std::vector& preparedModels, + const std::vector& inputRoles, + const std::vector& outputRoles) const { + const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) { + return device.allocate(desc, preparedModels, inputRoles, outputRoles); + }; + return protect(*this, fn, blocking); +} + +} // namespace android::hardware::neuralnetworks::utils diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp new file mode 100644 index 0000000000..1c9ecba4f6 --- /dev/null +++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientPreparedModel.h"
+
+#include <android-base/logging.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> ResilientPreparedModel::create(
+        Factory makePreparedModel) {
+    if (makePreparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::ResilientPreparedModel::create must have non-empty makePreparedModel";
+    }
+    auto preparedModel = NN_TRY(makePreparedModel(/*blocking=*/true));
+    CHECK(preparedModel != nullptr);
+    return std::make_shared<ResilientPreparedModel>(
+            PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel));
+}
+
+ResilientPreparedModel::ResilientPreparedModel(PrivateConstructorTag /*tag*/,
+                                               Factory makePreparedModel,
+                                               nn::SharedPreparedModel preparedModel)
+    : kMakePreparedModel(std::move(makePreparedModel)), mPreparedModel(std::move(preparedModel)) {
+    CHECK(kMakePreparedModel != nullptr);
+    CHECK(mPreparedModel != nullptr);
+}
+
+nn::SharedPreparedModel ResilientPreparedModel::getPreparedModel() const {
+    std::lock_guard guard(mMutex);
+    return mPreparedModel;
+}
+
+nn::SharedPreparedModel ResilientPreparedModel::recover(
+        const nn::IPreparedModel* /*failingPreparedModel*/, bool /*blocking*/) const {
+    std::lock_guard guard(mMutex);
+    return mPreparedModel;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
+                                const nn::OptionalTimePoint& deadline,
+                                const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+    return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ResilientPreparedModel::executeFenced(
+        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+        const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+    return getPreparedModel()->executeFenced(request, waitFor, measure, deadline,
+                                             loopTimeoutDuration, timeoutDurationAfterFence);
+}
+
+std::any ResilientPreparedModel::getUnderlyingResource() const {
+    return getPreparedModel()->getUnderlyingResource();
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
new file mode 100644
index 0000000000..87d27c7ac3
--- /dev/null
+++ b/neuralnetworks/utils/service/Android.bp
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Aggregates the per-version NNAPI HAL utility libraries (1.0-1.3) into one static lib.
+cc_library_static {
+    name: "neuralnetworks_utils_hal_service",
+    defaults: ["neuralnetworks_utils_defaults"],
+    srcs: ["src/*"],
+    local_include_dirs: ["include/nnapi/hal"],
+    export_include_dirs: ["include"],
+    static_libs: [
+        "neuralnetworks_types",
+        "neuralnetworks_utils_hal_1_0",
+        "neuralnetworks_utils_hal_1_1",
+        "neuralnetworks_utils_hal_1_2",
+        "neuralnetworks_utils_hal_1_3",
+    ],
+    shared_libs: [
+        "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.2",
+        "android.hardware.neuralnetworks@1.3",
+    ],
+}
diff --git a/neuralnetworks/utils/service/include/nnapi/hal/Service.h b/neuralnetworks/utils/service/include/nnapi/hal/Service.h
new file mode 100644
index 0000000000..e339627fd0
--- /dev/null
+++ b/neuralnetworks/utils/service/include/nnapi/hal/Service.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+#include <memory>
+#include <vector>
+
+namespace android::nn::hal {
+
+std::vector<nn::SharedDevice> getDevices();
+
+}  // namespace android::nn::hal
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
diff --git a/neuralnetworks/utils/service/src/Service.cpp b/neuralnetworks/utils/service/src/Service.cpp
new file mode 100644
index 0000000000..a59549dbf9
--- /dev/null
+++ b/neuralnetworks/utils/service/src/Service.cpp
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Service.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hidl/manager/1.2/IServiceManager.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/ServiceManagement.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Service.h>
+#include <nnapi/hal/1.1/Service.h>
+#include <nnapi/hal/1.2/Service.h>
+#include <nnapi/hal/1.3/Service.h>
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <unordered_set>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::service {
+namespace {
+
+// Pointer-to-function type shared by the per-version V1_x::utils::getDevice entry points.
+using getDeviceFn = std::add_pointer_t<nn::GeneralResult<nn::SharedDevice>(const std::string&)>;
+
+// Collects every registered HAL instance for one interface version, skipping names already
+// registered by a newer version (the versions are scanned newest-first in getDevices below).
+void getDevicesForVersion(const std::string& descriptor, getDeviceFn getDevice,
+                          std::vector<nn::SharedDevice>* devices,
+                          std::unordered_set<std::string>* registeredDevices) {
+    CHECK(devices != nullptr);
+    CHECK(registeredDevices != nullptr);
+
+    const auto names = getAllHalInstanceNames(descriptor);
+    for (const auto& name : names) {
+        if (const auto [it, unregistered] = registeredDevices->insert(name); unregistered) {
+            auto maybeDevice = getDevice(name);
+            if (maybeDevice.has_value()) {
+                auto device = std::move(maybeDevice).value();
+                CHECK(device != nullptr);
+                devices->push_back(std::move(device));
+            } else {
+                LOG(ERROR) << "getDevice(" << name << ") failed with " << maybeDevice.error().code
+                           << ": " << maybeDevice.error().message;
+            }
+        }
+    }
+}
+
+std::vector<nn::SharedDevice> getDevices() {
+    std::vector<nn::SharedDevice> devices;
+    std::unordered_set<std::string> registeredDevices;
+
+    getDevicesForVersion(V1_3::IDevice::descriptor, &V1_3::utils::getDevice, &devices,
+                         &registeredDevices);
+    getDevicesForVersion(V1_2::IDevice::descriptor, &V1_2::utils::getDevice, &devices,
+                         &registeredDevices);
+    getDevicesForVersion(V1_1::IDevice::descriptor, &V1_1::utils::getDevice, &devices,
+                         &registeredDevices);
+    getDevicesForVersion(V1_0::IDevice::descriptor, &V1_0::utils::getDevice, &devices,
+                         &registeredDevices);
+
+    return devices;
+}
+
+}  // namespace
+}  // namespace android::hardware::neuralnetworks::service
+
+namespace android::nn::hal {
+
+std::vector<nn::SharedDevice> getDevices() {
+    return hardware::neuralnetworks::service::getDevices();
+}
+
+}  // namespace android::nn::hal