From 301ef060e92d87772f30f309d236bf02bfd75d13 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Thu, 14 Oct 2021 22:04:59 -0700 Subject: [PATCH 1/4] Remove hal::utils::countNumberOfConsumers This CL removes hal::utils::countNumberOfConsumers and uses the existing nn::countNumberOfConsumers. This change is part of a larger chain of changes to remove HIDL and AIDL libraries from neuralnetworks_utils_hal_common. Bug: N/A Test: mma Change-Id: I7d06ea355eae7aa80b94b09a23d606bbb2322120 --- neuralnetworks/1.0/utils/src/Conversions.cpp | 4 ++-- neuralnetworks/1.1/utils/src/Conversions.cpp | 4 ++-- neuralnetworks/1.2/utils/src/Conversions.cpp | 4 ++-- neuralnetworks/1.3/utils/src/Conversions.cpp | 4 ++-- neuralnetworks/aidl/vts/functional/ValidateModel.cpp | 4 ++-- neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h | 3 --- neuralnetworks/utils/common/src/CommonUtils.cpp | 5 ----- 7 files changed, 10 insertions(+), 18 deletions(-) diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp index c0498eb876..927087043b 100644 --- a/neuralnetworks/1.0/utils/src/Conversions.cpp +++ b/neuralnetworks/1.0/utils/src/Conversions.cpp @@ -155,7 +155,7 @@ GeneralResult unvalidatedConvert(const hal::V1_0::Model& model) { // Verify number of consumers. const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations)); + NN_TRY(countNumberOfConsumers(model.operands.size(), operations)); CHECK(model.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < model.operands.size(); ++i) { if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) { @@ -346,7 +346,7 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { // Update number of consumers. const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations)); + NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations)); CHECK(operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < operands.size(); ++i) { operands[i].numberOfConsumers = numberOfConsumers[i]; diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp index 467ceb389b..5bdbe314b6 100644 --- a/neuralnetworks/1.1/utils/src/Conversions.cpp +++ b/neuralnetworks/1.1/utils/src/Conversions.cpp @@ -100,7 +100,7 @@ GeneralResult unvalidatedConvert(const hal::V1_1::Model& model) { // Verify number of consumers. const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations)); + NN_TRY(countNumberOfConsumers(model.operands.size(), operations)); CHECK(model.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < model.operands.size(); ++i) { if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) { @@ -223,7 +223,7 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { // Update number of consumers. 
const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations)); + NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations)); CHECK(operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < operands.size(); ++i) { operands[i].numberOfConsumers = numberOfConsumers[i]; diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index 6a80b42db9..c8e83a2fc4 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -187,7 +187,7 @@ GeneralResult unvalidatedConvert(const hal::V1_2::Model& model) { // Verify number of consumers. const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations)); + NN_TRY(countNumberOfConsumers(model.operands.size(), operations)); CHECK(model.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < model.operands.size(); ++i) { if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) { @@ -481,7 +481,7 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { // Update number of consumers. const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations)); + NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations)); CHECK(operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < operands.size(); ++i) { operands[i].numberOfConsumers = numberOfConsumers[i]; diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index b35b2cdf89..74d8938fe8 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -194,7 +194,7 @@ GeneralResult unvalidatedConvert(const hal::V1_3::Subgraph& sub // Verify number of consumers. const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(subgraph.operands.size(), operations)); + NN_TRY(countNumberOfConsumers(subgraph.operands.size(), operations)); CHECK(subgraph.operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < subgraph.operands.size(); ++i) { if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) { @@ -543,7 +543,7 @@ nn::GeneralResult unvalidatedConvert(const nn::Model::Subgraph& subgra // Update number of consumers. 
const auto numberOfConsumers = - NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), subgraph.operations)); + NN_TRY(countNumberOfConsumers(operands.size(), subgraph.operations)); CHECK(operands.size() == numberOfConsumers.size()); for (size_t i = 0; i < operands.size(); ++i) { operands[i].numberOfConsumers = numberOfConsumers[i]; diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp index 698c054941..3f3e2250c6 100644 --- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp @@ -1315,8 +1315,8 @@ static void mutateExecutionPriorityTest(const std::shared_ptr& device, void validateModel(const std::shared_ptr& device, const Model& model) { const auto numberOfConsumers = - nn::countNumberOfConsumers(model.main.operands.size(), - nn::unvalidatedConvert(model.main.operations).value()) + countNumberOfConsumers(model.main.operands.size(), + nn::unvalidatedConvert(model.main.operations).value()) .value(); mutateExecutionOrderTest(device, model, numberOfConsumers); mutateOperandTypeTest(device, model); diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h index 702ee92da8..2c5b648cd3 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h @@ -125,9 +125,6 @@ nn::GeneralResult> convertRequestFromP const nn::Request* request, uint32_t alignment, uint32_t padding, std::optional* maybeRequestInSharedOut, RequestRelocation* relocationOut); -nn::GeneralResult> countNumberOfConsumers( - size_t numberOfOperands, const std::vector& operations); - nn::GeneralResult createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory); nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_memory& memory); diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp index 235ba29d08..b4fbfa7b3b 100644 --- a/neuralnetworks/utils/common/src/CommonUtils.cpp +++ b/neuralnetworks/utils/common/src/CommonUtils.cpp @@ -331,11 +331,6 @@ nn::GeneralResult> convertRequestFromP return **maybeRequestInSharedOut; } -nn::GeneralResult> countNumberOfConsumers( - size_t numberOfOperands, const std::vector& operations) { - return nn::countNumberOfConsumers(numberOfOperands, operations); -} - nn::GeneralResult createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) { if (memory == nullptr) { return NN_ERROR() << "Memory must be non-empty"; From 15965821e8e6d01232e4b663c3fd5f3d659b8d40 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Thu, 14 Oct 2021 23:45:11 -0700 Subject: [PATCH 2/4] Move NN memory utils from utils/common to 1.0/utils This change is part of a larger chain of changes to remove HIDL and AIDL libraries from neuralnetworks_utils_hal_common. 
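For context before the second patch's metadata and diff: the helper removed in PATCH 1/4 was a thin wrapper that only forwarded to the canonical android::nn function, so every call site now invokes nn::countNumberOfConsumers directly. A minimal sketch of one converted call site, taken from the hunks above; it assumes the surrounding Conversions.cpp context inside namespace android::nn and is illustrative only:

    // Before: forwarding wrapper (deleted from CommonUtils.cpp in this patch).
    //   const auto numberOfConsumers = NN_TRY(
    //           hal::utils::countNumberOfConsumers(model.operands.size(), operations));
    // After: the canonical helper is called directly and resolves inside namespace android::nn.
    const auto numberOfConsumers =
            NN_TRY(countNumberOfConsumers(model.operands.size(), operations));
    CHECK(model.operands.size() == numberOfConsumers.size());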
Bug: N/A Test: mma Change-Id: I9d4f0c30932fad7ccb6231aa03e57f7227af4880 --- neuralnetworks/1.0/utils/Android.bp | 6 + .../utils/include/nnapi/hal/1.0/Conversions.h | 2 + neuralnetworks/1.0/utils/src/Conversions.cpp | 224 ++++++++++++++++- .../utils/include/nnapi/hal/1.2/Conversions.h | 2 - neuralnetworks/1.2/utils/src/Conversions.cpp | 19 +- .../utils/include/nnapi/hal/1.3/Conversions.h | 3 + neuralnetworks/1.3/utils/src/Conversions.cpp | 11 +- neuralnetworks/1.3/utils/src/Execution.cpp | 2 +- .../1.3/utils/src/PreparedModel.cpp | 2 +- neuralnetworks/utils/common/Android.bp | 8 - .../common/include/nnapi/hal/CommonUtils.h | 11 - .../utils/common/src/CommonUtils.cpp | 236 ------------------ 12 files changed, 249 insertions(+), 277 deletions(-) diff --git a/neuralnetworks/1.0/utils/Android.bp b/neuralnetworks/1.0/utils/Android.bp index 8c51c67421..31cdded56e 100644 --- a/neuralnetworks/1.0/utils/Android.bp +++ b/neuralnetworks/1.0/utils/Android.bp @@ -31,6 +31,7 @@ cc_library_static { export_include_dirs: ["include"], cflags: ["-Wthread-safety"], static_libs: [ + "libarect", "neuralnetworks_types", "neuralnetworks_utils_hal_common", ], @@ -40,6 +41,11 @@ cc_library_static { export_static_lib_headers: [ "neuralnetworks_utils_hal_common", ], + target: { + android: { + shared_libs: ["libnativewindow"], + }, + }, } cc_test { diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h index 5d4bdbce82..a770d0664f 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h @@ -36,6 +36,7 @@ GeneralResult unvalidatedConvert(const hal::V1_0::Operand& operand); GeneralResult unvalidatedConvert(const hal::V1_0::Operation& operation); GeneralResult unvalidatedConvert( const hardware::hidl_vec& operandValues); +GeneralResult unvalidatedConvert(const hardware::hidl_handle& handle); GeneralResult unvalidatedConvert(const hardware::hidl_memory& memory); GeneralResult unvalidatedConvert(const hal::V1_0::Model& model); GeneralResult unvalidatedConvert( @@ -65,6 +66,7 @@ nn::GeneralResult unvalidatedConvert(const nn::Operand& operand); nn::GeneralResult unvalidatedConvert(const nn::Operation& operation); nn::GeneralResult> unvalidatedConvert( const nn::Model::OperandValues& operandValues); +nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& handle); nn::GeneralResult unvalidatedConvert(const nn::SharedMemory& memory); nn::GeneralResult unvalidatedConvert(const nn::Model& model); nn::GeneralResult unvalidatedConvert(const nn::Request::Argument& requestArgument); diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp index 927087043b..daa10fdb69 100644 --- a/neuralnetworks/1.0/utils/src/Conversions.cpp +++ b/neuralnetworks/1.0/utils/src/Conversions.cpp @@ -37,6 +37,11 @@ #include "Utils.h" +#ifdef __ANDROID__ +#include +#include +#endif // __ANDROID__ + namespace { template @@ -49,6 +54,7 @@ constexpr std::underlying_type_t underlyingType(Type value) { namespace android::nn { namespace { +using hardware::hidl_handle; using hardware::hidl_memory; using hardware::hidl_vec; @@ -74,6 +80,121 @@ GeneralResult> validatedConvert(const Type& halOb return canonical; } +nn::GeneralResult unknownHandleFromNativeHandle( + const native_handle_t* handle) { + if (handle == nullptr) { + return NN_ERROR() << "unknownHandleFromNativeHandle failed because handle is nullptr"; + } + + std::vector fds = + 
NN_TRY(nn::dupFds(handle->data + 0, handle->data + handle->numFds)); + + std::vector ints(handle->data + handle->numFds, + handle->data + handle->numFds + handle->numInts); + + return nn::Memory::Unknown::Handle{.fds = std::move(fds), .ints = std::move(ints)}; +} + +nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_memory& memory) { + CHECK_LE(memory.size(), std::numeric_limits::max()); + if (!memory.valid()) { + return NN_ERROR() << "Unable to convert invalid hidl_memory"; + } + + if (memory.name() == "ashmem") { + if (memory.handle()->numFds != 1) { + return NN_ERROR() << "Unable to convert invalid ashmem memory object with " + << memory.handle()->numFds << " numFds, but expected 1"; + } + if (memory.handle()->numInts != 0) { + return NN_ERROR() << "Unable to convert invalid ashmem memory object with " + << memory.handle()->numInts << " numInts, but expected 0"; + } + auto handle = nn::Memory::Ashmem{ + .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])), + .size = static_cast(memory.size()), + }; + return std::make_shared(nn::Memory{.handle = std::move(handle)}); + } + + if (memory.name() == "mmap_fd") { + if (memory.handle()->numFds != 1) { + return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with " + << memory.handle()->numFds << " numFds, but expected 1"; + } + if (memory.handle()->numInts != 3) { + return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with " + << memory.handle()->numInts << " numInts, but expected 3"; + } + + const int fd = memory.handle()->data[0]; + const int prot = memory.handle()->data[1]; + const int lower = memory.handle()->data[2]; + const int higher = memory.handle()->data[3]; + const size_t offset = nn::getOffsetFromInts(lower, higher); + + return nn::createSharedMemoryFromFd(static_cast(memory.size()), prot, fd, offset); + } + + if (memory.name() != "hardware_buffer_blob") { + auto handle = nn::Memory::Unknown{ + .handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())), + .size = static_cast(memory.size()), + .name = memory.name(), + }; + return std::make_shared(nn::Memory{.handle = std::move(handle)}); + } + +#ifdef __ANDROID__ + constexpr auto roundUpToMultiple = [](uint32_t value, uint32_t multiple) -> uint32_t { + return (value + multiple - 1) / multiple * multiple; + }; + + const auto size = memory.size(); + const auto format = AHARDWAREBUFFER_FORMAT_BLOB; + const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN; + const uint32_t width = size; + const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer. + const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer. + + // AHardwareBuffer_createFromHandle() might fail because an allocator + // expects a specific stride value. In that case, we try to guess it by + // aligning the width to small powers of 2. + // TODO(b/174120849): Avoid stride assumptions. 
+ AHardwareBuffer* hardwareBuffer = nullptr; + status_t status = UNKNOWN_ERROR; + for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) { + const uint32_t stride = roundUpToMultiple(width, alignment); + AHardwareBuffer_Desc desc{ + .width = width, + .height = height, + .layers = layers, + .format = format, + .usage = usage, + .stride = stride, + }; + status = AHardwareBuffer_createFromHandle(&desc, memory.handle(), + AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, + &hardwareBuffer); + if (status == NO_ERROR) { + break; + } + } + if (status != NO_ERROR) { + return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) + << "Can't create AHardwareBuffer from handle. Error: " << status; + } + + return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true); +#else // __ANDROID__ + LOG(FATAL) << "nn::GeneralResult createSharedMemoryFromHidlMemory(const " + "hidl_memory& memory): Not Available on Host Build"; + return (NN_ERROR() << "createSharedMemoryFromHidlMemory failed") + . + operator nn::GeneralResult(); +#endif // __ANDROID__ +} + } // anonymous namespace GeneralResult unvalidatedConvert(const hal::V1_0::OperandType& operandType) { @@ -146,8 +267,20 @@ GeneralResult unvalidatedConvert(const hidl_vec& return Model::OperandValues(operandValues.data(), operandValues.size()); } +GeneralResult unvalidatedConvert(const hidl_handle& handle) { + if (handle.getNativeHandle() == nullptr) { + return nullptr; + } + if (handle->numFds != 1 || handle->numInts != 0) { + return NN_ERROR() + << "unvalidatedConvert failed because handle does not only hold a single fd"; + } + auto duplicatedFd = NN_TRY(nn::dupFd(handle->data[0])); + return std::make_shared(std::move(duplicatedFd)); +} + GeneralResult unvalidatedConvert(const hidl_memory& memory) { - return hal::utils::createSharedMemoryFromHidlMemory(memory); + return createSharedMemoryFromHidlMemory(memory); } GeneralResult unvalidatedConvert(const hal::V1_0::Model& model) { @@ -260,6 +393,82 @@ nn::GeneralResult> validatedConvert(const Type& c return utils::unvalidatedConvert(canonical); } +nn::GeneralResult createNativeHandleFrom(std::vector fds, + const std::vector& ints) { + constexpr size_t kIntMax = std::numeric_limits::max(); + CHECK_LE(fds.size(), kIntMax); + CHECK_LE(ints.size(), kIntMax); + native_handle_t* nativeHandle = + native_handle_create(static_cast(fds.size()), static_cast(ints.size())); + if (nativeHandle == nullptr) { + return NN_ERROR() << "Failed to create native_handle"; + } + + for (size_t i = 0; i < fds.size(); ++i) { + nativeHandle->data[i] = fds[i].release(); + } + std::copy(ints.begin(), ints.end(), nativeHandle->data + nativeHandle->numFds); + + hidl_handle handle; + handle.setTo(nativeHandle, /*shouldOwn=*/true); + return handle; +} + +nn::GeneralResult createNativeHandleFrom(base::unique_fd fd, + const std::vector& ints) { + std::vector fds; + fds.push_back(std::move(fd)); + return createNativeHandleFrom(std::move(fds), ints); +} + +nn::GeneralResult createNativeHandleFrom(const nn::Memory::Unknown::Handle& handle) { + std::vector fds = NN_TRY(nn::dupFds(handle.fds.begin(), handle.fds.end())); + return createNativeHandleFrom(std::move(fds), handle.ints); +} + +nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Ashmem& memory) { + auto fd = NN_TRY(nn::dupFd(memory.fd)); + auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {})); + return hidl_memory("ashmem", std::move(handle), memory.size); +} + +nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Fd& memory) { + auto fd = 
NN_TRY(nn::dupFd(memory.fd)); + + const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset); + const std::vector ints = {memory.prot, lowOffsetBits, highOffsetBits}; + + auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints)); + return hidl_memory("mmap_fd", std::move(handle), memory.size); +} + +nn::GeneralResult createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) { +#ifdef __ANDROID__ + const auto* ahwb = memory.handle.get(); + AHardwareBuffer_Desc bufferDesc; + AHardwareBuffer_describe(ahwb, &bufferDesc); + + const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB; + const size_t size = isBlob ? bufferDesc.width : 0; + const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer"; + + const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb); + const hidl_handle hidlHandle(nativeHandle); + hidl_handle copiedHandle(hidlHandle); + + return hidl_memory(name, std::move(copiedHandle), size); +#else // __ANDROID__ + LOG(FATAL) << "nn::GeneralResult createHidlMemoryFrom(const " + "nn::Memory::HardwareBuffer& memory): Not Available on Host Build"; + (void)memory; + return (NN_ERROR() << "createHidlMemoryFrom failed").operator nn::GeneralResult(); +#endif // __ANDROID__ +} + +nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Unknown& memory) { + return hidl_memory(memory.name, NN_TRY(createNativeHandleFrom(memory.handle)), memory.size); +} + } // anonymous namespace nn::GeneralResult unvalidatedConvert(const nn::OperandType& operandType) { @@ -332,8 +541,19 @@ nn::GeneralResult> unvalidatedConvert( return hidl_vec(operandValues.data(), operandValues.data() + operandValues.size()); } +nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& handle) { + if (handle == nullptr) { + return {}; + } + base::unique_fd fd = NN_TRY(nn::dupFd(handle->get())); + return createNativeHandleFrom(std::move(fd), {}); +} + nn::GeneralResult unvalidatedConvert(const nn::SharedMemory& memory) { - return hal::utils::createHidlMemoryFromSharedMemory(memory); + if (memory == nullptr) { + return NN_ERROR() << "Memory must be non-empty"; + } + return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle); } nn::GeneralResult unvalidatedConvert(const nn::Model& model) { diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h index 272cee7e88..c3348aa8f2 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h @@ -45,7 +45,6 @@ GeneralResult unvalidatedConvert(const hal::V1_2::Timing& timing); GeneralResult unvalidatedConvert(const hal::V1_2::Extension& extension); GeneralResult unvalidatedConvert( const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation); -GeneralResult unvalidatedConvert(const hardware::hidl_handle& handle); GeneralResult convert(const hal::V1_2::DeviceType& deviceType); GeneralResult convert(const hal::V1_2::Capabilities& capabilities); @@ -86,7 +85,6 @@ nn::GeneralResult unvalidatedConvert(const nn::Timing& timing); nn::GeneralResult unvalidatedConvert(const nn::Extension& extension); nn::GeneralResult unvalidatedConvert( const nn::Extension::OperandTypeInformation& operandTypeInformation); -nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& handle); nn::GeneralResult convert(const nn::DeviceType& deviceType); nn::GeneralResult convert(const nn::Capabilities& capabilities); diff --git 
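The conversion added above dispatches on the alternatives held by nn::Memory (Ashmem, Fd, HardwareBuffer, Unknown), selecting the matching createHidlMemoryFrom overload via std::visit. A hedged sketch of the intended round trip through the relocated 1.0 converters; the include set is assumed from the 1.0 utils target and omitted, and namespace placement follows the header excerpt above (HAL-to-canonical overloads in namespace android::nn, canonical-to-HAL overloads in V1_0::utils):

    // Sketch only: convert a hidl_memory to the canonical type and back.
    nn::SharedMemory canonical = NN_TRY(unvalidatedConvert(hidlMemory));    // hidl_memory -> SharedMemory
    hidl_memory roundTrip = NN_TRY(utils::unvalidatedConvert(canonical));   // SharedMemory -> hidl_memory
    // The second direction dispatches over the variant stored in canonical->handle:
    //   std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, canonical->handle);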
a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index c8e83a2fc4..ef5056b5fe 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -264,14 +264,6 @@ GeneralResult unvalidatedConvert( }; } -GeneralResult unvalidatedConvert(const hidl_handle& hidlHandle) { - if (hidlHandle.getNativeHandle() == nullptr) { - return nullptr; - } - auto handle = NN_TRY(hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle())); - return std::make_shared(std::move(handle)); -} - GeneralResult convert(const hal::V1_2::DeviceType& deviceType) { return validatedConvert(deviceType); } @@ -334,6 +326,10 @@ nn::GeneralResult> unvalidatedConvert( return V1_0::utils::unvalidatedConvert(operandValues); } +nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& handle) { + return V1_0::utils::unvalidatedConvert(handle); +} + nn::GeneralResult unvalidatedConvert(const nn::SharedMemory& memory) { return V1_0::utils::unvalidatedConvert(memory); } @@ -544,13 +540,6 @@ nn::GeneralResult unvalidatedConvert( }; } -nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& handle) { - if (handle == nullptr) { - return {}; - } - return hal::utils::hidlHandleFromSharedHandle(*handle); -} - nn::GeneralResult convert(const nn::DeviceType& deviceType) { return validatedConvert(deviceType); } diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h index b677c62505..ec1e530364 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h @@ -113,6 +113,9 @@ nn::GeneralResult convert(const nn::DeviceType& deviceType); nn::GeneralResult convert(const nn::MeasureTiming& measureTiming); nn::GeneralResult convert(const nn::Timing& timing); +nn::GeneralResult> convertSyncFences( + const std::vector& fences); + } // namespace android::hardware::neuralnetworks::V1_3::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index 74d8938fe8..c885d60ee9 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -380,7 +380,7 @@ nn::GeneralResult> unvalidatedConvert( } nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& handle) { - return V1_2::utils::unvalidatedConvert(handle); + return V1_0::utils::unvalidatedConvert(handle); } nn::GeneralResult unvalidatedConvert(const nn::SharedMemory& memory) { @@ -727,4 +727,13 @@ nn::GeneralResult convert(const nn::Timing& timing) { return V1_2::utils::convert(timing); } +nn::GeneralResult> convertSyncFences( + const std::vector& syncFences) { + std::vector handles; + handles.reserve(syncFences.size()); + std::transform(syncFences.begin(), syncFences.end(), std::back_inserter(handles), + [](const nn::SyncFence& syncFence) { return syncFence.getSharedHandle(); }); + return convert(handles); +} + } // namespace android::hardware::neuralnetworks::V1_3::utils diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp index 4dc0ddfc0d..467d87d3aa 100644 --- a/neuralnetworks/1.3/utils/src/Execution.cpp +++ b/neuralnetworks/1.3/utils/src/Execution.cpp @@ -73,7 +73,7 @@ nn::ExecutionResult, nn::Timing>> Executi nn::GeneralResult> Execution::computeFenced( const std::vector& waitFor, const nn::OptionalTimePoint& 
deadline, const nn::OptionalDuration& timeoutDurationAfterFence) const { - const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor)); + const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor)); const auto hidlDeadline = NN_TRY(convert(deadline)); const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence)); return kPreparedModel->executeFencedInternal(kRequest, hidlWaitFor, kMeasure, hidlDeadline, diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index d5dee9d34b..c9771e3643 100644 --- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -186,7 +186,7 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector -#include #include #include #include @@ -125,15 +123,6 @@ nn::GeneralResult> convertRequestFromP const nn::Request* request, uint32_t alignment, uint32_t padding, std::optional* maybeRequestInSharedOut, RequestRelocation* relocationOut); -nn::GeneralResult createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory); -nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_memory& memory); - -nn::GeneralResult hidlHandleFromSharedHandle(const nn::Handle& handle); -nn::GeneralResult sharedHandleFromNativeHandle(const native_handle_t* handle); - -nn::GeneralResult> convertSyncFences( - const std::vector& fences); - } // namespace android::hardware::neuralnetworks::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp index b4fbfa7b3b..b66fbd17ef 100644 --- a/neuralnetworks/utils/common/src/CommonUtils.cpp +++ b/neuralnetworks/utils/common/src/CommonUtils.cpp @@ -19,8 +19,6 @@ #include "HandleError.h" #include -#include -#include #include #include #include @@ -34,11 +32,6 @@ #include #include -#ifdef __ANDROID__ -#include -#include -#endif // __ANDROID__ - namespace android::hardware::neuralnetworks::utils { namespace { @@ -92,97 +85,6 @@ void copyPointersToSharedMemory(nn::Model::Subgraph* subgraph, }); } -nn::GeneralResult createNativeHandleFrom(std::vector fds, - const std::vector& ints) { - constexpr size_t kIntMax = std::numeric_limits::max(); - CHECK_LE(fds.size(), kIntMax); - CHECK_LE(ints.size(), kIntMax); - native_handle_t* nativeHandle = - native_handle_create(static_cast(fds.size()), static_cast(ints.size())); - if (nativeHandle == nullptr) { - return NN_ERROR() << "Failed to create native_handle"; - } - - for (size_t i = 0; i < fds.size(); ++i) { - nativeHandle->data[i] = fds[i].release(); - } - std::copy(ints.begin(), ints.end(), nativeHandle->data + nativeHandle->numFds); - - hidl_handle handle; - handle.setTo(nativeHandle, /*shouldOwn=*/true); - return handle; -} - -nn::GeneralResult createNativeHandleFrom(base::unique_fd fd, - const std::vector& ints) { - std::vector fds; - fds.push_back(std::move(fd)); - return createNativeHandleFrom(std::move(fds), ints); -} - -nn::GeneralResult createNativeHandleFrom(const nn::Memory::Unknown::Handle& handle) { - std::vector fds = NN_TRY(nn::dupFds(handle.fds.begin(), handle.fds.end())); - return createNativeHandleFrom(std::move(fds), handle.ints); -} - -nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Ashmem& memory) { - auto fd = NN_TRY(nn::dupFd(memory.fd)); - auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {})); - return hidl_memory("ashmem", std::move(handle), memory.size); -} - 
-nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Fd& memory) { - auto fd = NN_TRY(nn::dupFd(memory.fd)); - - const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset); - const std::vector ints = {memory.prot, lowOffsetBits, highOffsetBits}; - - auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints)); - return hidl_memory("mmap_fd", std::move(handle), memory.size); -} - -nn::GeneralResult createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) { -#ifdef __ANDROID__ - const auto* ahwb = memory.handle.get(); - AHardwareBuffer_Desc bufferDesc; - AHardwareBuffer_describe(ahwb, &bufferDesc); - - const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB; - const size_t size = isBlob ? bufferDesc.width : 0; - const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer"; - - const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb); - const hidl_handle hidlHandle(nativeHandle); - hidl_handle copiedHandle(hidlHandle); - - return hidl_memory(name, std::move(copiedHandle), size); -#else // __ANDROID__ - LOG(FATAL) << "nn::GeneralResult createHidlMemoryFrom(const " - "nn::Memory::HardwareBuffer& memory): Not Available on Host Build"; - (void)memory; - return (NN_ERROR() << "createHidlMemoryFrom failed").operator nn::GeneralResult(); -#endif // __ANDROID__ -} - -nn::GeneralResult createHidlMemoryFrom(const nn::Memory::Unknown& memory) { - return hidl_memory(memory.name, NN_TRY(createNativeHandleFrom(memory.handle)), memory.size); -} - -nn::GeneralResult unknownHandleFromNativeHandle( - const native_handle_t* handle) { - if (handle == nullptr) { - return NN_ERROR() << "unknownHandleFromNativeHandle failed because handle is nullptr"; - } - - std::vector fds = - NN_TRY(nn::dupFds(handle->data + 0, handle->data + handle->numFds)); - - std::vector ints(handle->data + handle->numFds, - handle->data + handle->numFds + handle->numInts); - - return nn::Memory::Unknown::Handle{.fds = std::move(fds), .ints = std::move(ints)}; -} - } // anonymous namespace nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP( @@ -331,142 +233,4 @@ nn::GeneralResult> convertRequestFromP return **maybeRequestInSharedOut; } -nn::GeneralResult createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) { - if (memory == nullptr) { - return NN_ERROR() << "Memory must be non-empty"; - } - return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle); -} - -#ifdef __ANDROID__ -static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) { - return (value + multiple - 1) / multiple * multiple; -} -#endif // __ANDROID__ - -nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_memory& memory) { - CHECK_LE(memory.size(), std::numeric_limits::max()); - if (!memory.valid()) { - return NN_ERROR() << "Unable to convert invalid hidl_memory"; - } - - if (memory.name() == "ashmem") { - if (memory.handle()->numFds != 1) { - return NN_ERROR() << "Unable to convert invalid ashmem memory object with " - << memory.handle()->numFds << " numFds, but expected 1"; - } - if (memory.handle()->numInts != 0) { - return NN_ERROR() << "Unable to convert invalid ashmem memory object with " - << memory.handle()->numInts << " numInts, but expected 0"; - } - auto handle = nn::Memory::Ashmem{ - .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])), - .size = static_cast(memory.size()), - }; - return std::make_shared(nn::Memory{.handle = std::move(handle)}); - } - - if (memory.name() == "mmap_fd") { - if 
(memory.handle()->numFds != 1) { - return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with " - << memory.handle()->numFds << " numFds, but expected 1"; - } - if (memory.handle()->numInts != 3) { - return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with " - << memory.handle()->numInts << " numInts, but expected 3"; - } - - const int fd = memory.handle()->data[0]; - const int prot = memory.handle()->data[1]; - const int lower = memory.handle()->data[2]; - const int higher = memory.handle()->data[3]; - const size_t offset = nn::getOffsetFromInts(lower, higher); - - return nn::createSharedMemoryFromFd(static_cast(memory.size()), prot, fd, offset); - } - - if (memory.name() != "hardware_buffer_blob") { - auto handle = nn::Memory::Unknown{ - .handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())), - .size = static_cast(memory.size()), - .name = memory.name(), - }; - return std::make_shared(nn::Memory{.handle = std::move(handle)}); - } - -#ifdef __ANDROID__ - const auto size = memory.size(); - const auto format = AHARDWAREBUFFER_FORMAT_BLOB; - const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN; - const uint32_t width = size; - const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer. - const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer. - - // AHardwareBuffer_createFromHandle() might fail because an allocator - // expects a specific stride value. In that case, we try to guess it by - // aligning the width to small powers of 2. - // TODO(b/174120849): Avoid stride assumptions. - AHardwareBuffer* hardwareBuffer = nullptr; - status_t status = UNKNOWN_ERROR; - for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) { - const uint32_t stride = roundUpToMultiple(width, alignment); - AHardwareBuffer_Desc desc{ - .width = width, - .height = height, - .layers = layers, - .format = format, - .usage = usage, - .stride = stride, - }; - status = AHardwareBuffer_createFromHandle(&desc, memory.handle(), - AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, - &hardwareBuffer); - if (status == NO_ERROR) { - break; - } - } - if (status != NO_ERROR) { - return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) - << "Can't create AHardwareBuffer from handle. Error: " << status; - } - - return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true); -#else // __ANDROID__ - LOG(FATAL) << "nn::GeneralResult createSharedMemoryFromHidlMemory(const " - "hidl_memory& memory): Not Available on Host Build"; - return (NN_ERROR() << "createSharedMemoryFromHidlMemory failed") - . 
- operator nn::GeneralResult(); -#endif // __ANDROID__ -} - -nn::GeneralResult hidlHandleFromSharedHandle(const nn::Handle& handle) { - base::unique_fd fd = NN_TRY(nn::dupFd(handle.get())); - return createNativeHandleFrom(std::move(fd), {}); -} - -nn::GeneralResult sharedHandleFromNativeHandle(const native_handle_t* handle) { - if (handle == nullptr) { - return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle is nullptr"; - } - if (handle->numFds != 1 || handle->numInts != 0) { - return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle does not only " - "hold a single fd"; - } - return nn::dupFd(handle->data[0]); -} - -nn::GeneralResult> convertSyncFences( - const std::vector& syncFences) { - hidl_vec handles(syncFences.size()); - for (size_t i = 0; i < syncFences.size(); ++i) { - const auto& handle = syncFences[i].getSharedHandle(); - if (handle == nullptr) { - return NN_ERROR() << "convertSyncFences failed because sync fence is empty"; - } - handles[i] = NN_TRY(hidlHandleFromSharedHandle(*handle)); - } - return handles; -} - } // namespace android::hardware::neuralnetworks::utils From e8645c3b8d12ba8a5f7b5b2d672bf2d812d6b196 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Fri, 15 Oct 2021 18:42:32 -0700 Subject: [PATCH 3/4] Relocate NN ProtectCallback to 1.0/utils This change is part of a larger chain of changes to remove HIDL and AIDL libraries from neuralnetworks_utils_hal_common. Bug: N/A Test: mma Change-Id: Ib43f1cb683a09ae5c9116a6dea4d269c9c2c78b4 --- .../utils/include/nnapi/hal/1.0/Callbacks.h | 3 +- .../1.0/utils/include/nnapi/hal/1.0/Device.h | 3 +- .../utils/include/nnapi/hal/1.0/Execution.h | 2 +- .../include/nnapi/hal/1.0/PreparedModel.h | 3 +- .../include/nnapi/hal/1.0}/ProtectCallback.h | 6 ++-- neuralnetworks/1.0/utils/src/Callbacks.cpp | 2 +- neuralnetworks/1.0/utils/src/Device.cpp | 2 +- neuralnetworks/1.0/utils/src/Execution.cpp | 2 +- .../1.0/utils/src/PreparedModel.cpp | 2 +- .../utils}/src/ProtectCallback.cpp | 0 .../1.1/utils/include/nnapi/hal/1.1/Device.h | 2 +- neuralnetworks/1.1/utils/src/Device.cpp | 2 +- .../utils/include/nnapi/hal/1.2/Callbacks.h | 2 +- .../1.2/utils/include/nnapi/hal/1.2/Device.h | 2 +- .../utils/include/nnapi/hal/1.2/Execution.h | 2 +- .../nnapi/hal/1.2/ExecutionBurstController.h | 2 +- .../nnapi/hal/1.2/ExecutionBurstServer.h | 2 +- .../nnapi/hal/1.2/ExecutionBurstUtils.h | 2 +- .../include/nnapi/hal/1.2/PreparedModel.h | 2 +- neuralnetworks/1.2/utils/src/Callbacks.cpp | 2 +- neuralnetworks/1.2/utils/src/Device.cpp | 2 +- .../utils/src/ExecutionBurstController.cpp | 2 +- .../1.2/utils/src/ExecutionBurstServer.cpp | 2 +- .../1.2/utils/src/ExecutionBurstUtils.cpp | 2 +- .../1.2/utils/src/PreparedModel.cpp | 2 +- .../utils/include/nnapi/hal/1.3/Callbacks.h | 2 +- .../1.3/utils/include/nnapi/hal/1.3/Device.h | 2 +- .../include/nnapi/hal/1.3/PreparedModel.h | 2 +- neuralnetworks/1.3/utils/src/Callbacks.cpp | 2 +- neuralnetworks/1.3/utils/src/Device.cpp | 2 +- .../1.3/utils/src/PreparedModel.cpp | 2 +- .../utils/include/nnapi/hal/aidl/Callbacks.h | 3 +- .../include/nnapi/hal/aidl/ProtectCallback.h | 29 +++++++++++++++---- .../aidl/utils/src/ProtectCallback.cpp | 9 +++--- 34 files changed, 64 insertions(+), 44 deletions(-) rename neuralnetworks/{utils/common/include/nnapi/hal => 1.0/utils/include/nnapi/hal/1.0}/ProtectCallback.h (93%) rename neuralnetworks/{utils/common => 1.0/utils}/src/ProtectCallback.cpp (100%) diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h 
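The ProtectCallback helpers relocated by this patch implement the callback-protection pattern referenced by the README comment in Callbacks.h: a DeathHandler owns a DeathMonitor registered as a death recipient on the remote service, and protectCallback() keeps an IProtectedCallback listed with that monitor for the duration of an asynchronous call. A hedged usage sketch, assuming the HIDL classes mirror the AIDL ones shown later in this series:

    // Guard an asynchronous prepareModel call; names follow the 1.0 utils sources.
    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());  // ScopeGuard removes cb on scope exit
    const auto ret = kDevice->prepareModel(hidlModel, cb);        // asynchronous HIDL call
    // If the service dies before the callback fires, DeathMonitor::serviceDied() invokes
    // cb->notifyAsDeadObject(), so cb->get() returns DEAD_OBJECT instead of hanging.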
b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h index 3b32e1dbf9..1ab9dcb90a 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h @@ -24,9 +24,10 @@ #include #include #include -#include #include +#include "nnapi/hal/1.0/ProtectCallback.h" + // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface // lifetimes across processes and for protecting asynchronous calls across HIDL. diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h index db3b2ad44f..0a6ca3edce 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h @@ -24,7 +24,8 @@ #include #include #include -#include + +#include "nnapi/hal/1.0/ProtectCallback.h" #include #include diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h index e201e25a13..66497c200b 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h @@ -22,9 +22,9 @@ #include #include #include -#include #include "PreparedModel.h" +#include "nnapi/hal/1.0/ProtectCallback.h" #include #include diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h index 48be595d41..bdb5b54281 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h @@ -22,7 +22,8 @@ #include #include #include -#include + +#include "nnapi/hal/1.0/ProtectCallback.h" #include #include diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ProtectCallback.h similarity index 93% rename from neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h rename to neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ProtectCallback.h index 05110bc364..7418cfad1c 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ProtectCallback.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H -#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PROTECT_CALLBACK_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PROTECT_CALLBACK_H #include #include @@ -98,4 +98,4 @@ class DeathHandler final { } // namespace android::hardware::neuralnetworks::utils -#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PROTECT_CALLBACK_H diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp index ea3ea56de6..5fb9e928a1 100644 --- a/neuralnetworks/1.0/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp @@ -18,6 +18,7 @@ #include "Conversions.h" #include "PreparedModel.h" +#include "ProtectCallback.h" #include "Utils.h" #include @@ -28,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp index 93bd81a19c..404a92fcbb 100644 --- a/neuralnetworks/1.0/utils/src/Device.cpp +++ b/neuralnetworks/1.0/utils/src/Device.cpp @@ -18,6 +18,7 @@ #include "Callbacks.h" #include "Conversions.h" +#include "ProtectCallback.h" #include "Utils.h" #include @@ -30,7 +31,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.0/utils/src/Execution.cpp b/neuralnetworks/1.0/utils/src/Execution.cpp index 7a3216b5db..056b0414ca 100644 --- a/neuralnetworks/1.0/utils/src/Execution.cpp +++ b/neuralnetworks/1.0/utils/src/Execution.cpp @@ -18,6 +18,7 @@ #include "Callbacks.h" #include "Conversions.h" +#include "ProtectCallback.h" #include "Utils.h" #include @@ -28,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp index 3060c652da..b332182700 100644 --- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -20,6 +20,7 @@ #include "Callbacks.h" #include "Conversions.h" #include "Execution.h" +#include "ProtectCallback.h" #include "Utils.h" #include @@ -29,7 +30,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/utils/common/src/ProtectCallback.cpp b/neuralnetworks/1.0/utils/src/ProtectCallback.cpp similarity index 100% rename from neuralnetworks/utils/common/src/ProtectCallback.cpp rename to neuralnetworks/1.0/utils/src/ProtectCallback.cpp diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h index 5e224b5018..d6bd36a7fe 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h @@ -23,8 +23,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp index 3197ef4ac3..0e239cbe82 100644 --- a/neuralnetworks/1.1/utils/src/Device.cpp +++ b/neuralnetworks/1.1/utils/src/Device.cpp @@ -29,9 +29,9 @@ #include #include #include +#include #include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h index ba3c1ba1db..6dd8138f64 100644 --- 
a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h @@ -27,8 +27,8 @@ #include #include #include +#include #include -#include #include // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h index b4bef5ee0a..e7ac172211 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h @@ -23,8 +23,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h index 9c66446a2a..867f181bda 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h @@ -21,8 +21,8 @@ #include #include #include +#include #include -#include #include "PreparedModel.h" diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h index dae1ff36c5..8078693aa5 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h @@ -32,8 +32,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h index f7926f5835..500aa0cfd0 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h index c662bc3eed..c081305a86 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h index 35abd7947b..1150e5e79b 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h @@ -22,8 +22,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp index 01b5e12387..c855b172e9 100644 --- a/neuralnetworks/1.2/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp @@ -30,9 +30,9 @@ #include #include #include +#include #include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp index 9fe0de25b3..42c2a61fab 100644 --- a/neuralnetworks/1.2/utils/src/Device.cpp +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -30,10 +30,10 @@ #include #include #include +#include #include #include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp 
b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp index 2746965da8..98a75fa785 100644 --- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp +++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp @@ -28,9 +28,9 @@ #include #include #include +#include #include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp index 65ec7f5532..52cabd8b96 100644 --- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp +++ b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp @@ -27,8 +27,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp index 1bdde1e71a..e0d029aa59 100644 --- a/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp +++ b/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index c26118410d..d11cdc2d2a 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -31,9 +31,9 @@ #include #include #include +#include #include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h index 643172e192..4b8ddc1885 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h @@ -30,8 +30,8 @@ #include #include #include +#include #include -#include #include // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h index 84f606a357..c3c6fc4eb8 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h @@ -23,8 +23,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h index 5acba71826..480438d9f2 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h @@ -21,8 +21,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp index 156216f594..b4fe487422 100644 --- a/neuralnetworks/1.3/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -31,12 +31,12 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp index d710b85070..462d557e53 100644 --- a/neuralnetworks/1.3/utils/src/Device.cpp +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -33,13 +33,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index c9771e3643..792665ed1e 100644 --- 
a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -30,12 +30,12 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h index 8651912cb8..168264babf 100644 --- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h +++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h @@ -32,8 +32,7 @@ namespace aidl::android::hardware::neuralnetworks::utils { // An AIDL callback class to receive the results of IDevice::prepareModel* asynchronously. -class PreparedModelCallback final : public BnPreparedModelCallback, - public hal::utils::IProtectedCallback { +class PreparedModelCallback final : public BnPreparedModelCallback, public IProtectedCallback { public: using Data = nn::GeneralResult; diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h index ab1108c182..92ed1cda5d 100644 --- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h +++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -34,19 +33,39 @@ namespace aidl::android::hardware::neuralnetworks::utils { +class IProtectedCallback { + public: + /** + * Marks this object as a dead object. + */ + virtual void notifyAsDeadObject() = 0; + + // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. + // E.g., std::unique_ptr. + virtual ~IProtectedCallback() = default; + + protected: + // Protect the non-destructor special member functions to prevent object slicing. + IProtectedCallback() = default; + IProtectedCallback(const IProtectedCallback&) = default; + IProtectedCallback(IProtectedCallback&&) noexcept = default; + IProtectedCallback& operator=(const IProtectedCallback&) = default; + IProtectedCallback& operator=(IProtectedCallback&&) noexcept = default; +}; + // Thread safe class class DeathMonitor final { public: static void serviceDied(void* cookie); void serviceDied(); // Precondition: `killable` must be non-null. - void add(hal::utils::IProtectedCallback* killable) const; + void add(IProtectedCallback* killable) const; // Precondition: `killable` must be non-null. - void remove(hal::utils::IProtectedCallback* killable) const; + void remove(IProtectedCallback* killable) const; private: mutable std::mutex mMutex; - mutable std::vector mObjects GUARDED_BY(mMutex); + mutable std::vector mObjects GUARDED_BY(mMutex); }; class DeathHandler final { @@ -62,7 +81,7 @@ class DeathHandler final { using Cleanup = std::function; // Precondition: `killable` must be non-null. 
[[nodiscard]] ::android::base::ScopeGuard protectCallback( - hal::utils::IProtectedCallback* killable) const; + IProtectedCallback* killable) const; std::shared_ptr getDeathMonitor() const { return kDeathMonitor; } diff --git a/neuralnetworks/aidl/utils/src/ProtectCallback.cpp b/neuralnetworks/aidl/utils/src/ProtectCallback.cpp index 124641cbb8..54a673caf5 100644 --- a/neuralnetworks/aidl/utils/src/ProtectCallback.cpp +++ b/neuralnetworks/aidl/utils/src/ProtectCallback.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -37,7 +36,7 @@ namespace aidl::android::hardware::neuralnetworks::utils { void DeathMonitor::serviceDied() { std::lock_guard guard(mMutex); std::for_each(mObjects.begin(), mObjects.end(), - [](hal::utils::IProtectedCallback* killable) { killable->notifyAsDeadObject(); }); + [](IProtectedCallback* killable) { killable->notifyAsDeadObject(); }); } void DeathMonitor::serviceDied(void* cookie) { @@ -45,13 +44,13 @@ void DeathMonitor::serviceDied(void* cookie) { deathMonitor->serviceDied(); } -void DeathMonitor::add(hal::utils::IProtectedCallback* killable) const { +void DeathMonitor::add(IProtectedCallback* killable) const { CHECK(killable != nullptr); std::lock_guard guard(mMutex); mObjects.push_back(killable); } -void DeathMonitor::remove(hal::utils::IProtectedCallback* killable) const { +void DeathMonitor::remove(IProtectedCallback* killable) const { CHECK(killable != nullptr); std::lock_guard guard(mMutex); const auto removedIter = std::remove(mObjects.begin(), mObjects.end(), killable); @@ -102,7 +101,7 @@ DeathHandler::~DeathHandler() { } [[nodiscard]] ::android::base::ScopeGuard DeathHandler::protectCallback( - hal::utils::IProtectedCallback* killable) const { + IProtectedCallback* killable) const { CHECK(killable != nullptr); kDeathMonitor->add(killable); return ::android::base::make_scope_guard( From 49d95e0457f9f3d947a2f07d88e7093399937c88 Mon Sep 17 00:00:00 2001 From: Michael Butler Date: Fri, 15 Oct 2021 18:52:52 -0700 Subject: [PATCH 4/4] Move NN HandleError from utils/common to 1.0/utils This change is part of a larger chain of changes to remove HIDL and AIDL libraries from neuralnetworks_utils_hal_common. 
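Concretely, HandleError.h carries the Return-unwrapping helpers used throughout the HIDL utils: handleTransportError() maps transport failures, including dead objects, to nn::GeneralError; HANDLE_TRANSPORT_FAILURE wraps that check into an NN_TRY-style expression; and the status macro is renamed from HANDLE_HAL_STATUS to HANDLE_STATUS_HIDL as part of the move. A typical call site, matching the hunks in the diff below (sketch, not a complete function):

    const auto ret = kDevice->prepareModel(hidlModel, cb);  // Return<V1_0::ErrorStatus>
    const auto status = HANDLE_TRANSPORT_FAILURE(ret);      // transport error or dead object -> GeneralError
    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
    return cb->get();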
Bug: N/A Test: mma Change-Id: Iae9f692ffc72700294aae694c256e75c7e353fef --- .../utils/include/nnapi/hal/1.0}/HandleError.h | 18 +++++++++--------- .../1.0/utils/include/nnapi/hal/1.0/Utils.h | 1 - neuralnetworks/1.0/utils/src/Callbacks.cpp | 8 ++++---- neuralnetworks/1.0/utils/src/Device.cpp | 6 +++--- neuralnetworks/1.0/utils/src/Execution.cpp | 2 +- neuralnetworks/1.0/utils/src/PreparedModel.cpp | 4 ++-- .../1.0/utils/src/ProtectCallback.cpp | 3 ++- .../1.1/utils/include/nnapi/hal/1.1/Utils.h | 1 - neuralnetworks/1.1/utils/src/Device.cpp | 6 +++--- .../1.2/utils/include/nnapi/hal/1.2/Utils.h | 1 - neuralnetworks/1.2/utils/src/Callbacks.cpp | 6 +++--- neuralnetworks/1.2/utils/src/Conversions.cpp | 1 - neuralnetworks/1.2/utils/src/Device.cpp | 16 ++++++++-------- neuralnetworks/1.2/utils/src/Execution.cpp | 1 - .../1.2/utils/src/ExecutionBurstController.cpp | 6 +++--- .../1.2/utils/src/ExecutionBurstServer.cpp | 4 ++-- neuralnetworks/1.2/utils/src/PreparedModel.cpp | 4 ++-- .../1.3/utils/include/nnapi/hal/1.3/Utils.h | 1 - neuralnetworks/1.3/utils/src/Buffer.cpp | 6 +++--- neuralnetworks/1.3/utils/src/Callbacks.cpp | 8 ++++---- neuralnetworks/1.3/utils/src/Conversions.cpp | 1 - neuralnetworks/1.3/utils/src/Device.cpp | 10 +++++----- neuralnetworks/1.3/utils/src/Execution.cpp | 1 - neuralnetworks/1.3/utils/src/PreparedModel.cpp | 8 ++++---- neuralnetworks/aidl/utils/Android.bp | 1 - .../aidl/utils/include/nnapi/hal/aidl/Utils.h | 8 +++++++- neuralnetworks/aidl/utils/src/Burst.cpp | 1 - neuralnetworks/aidl/utils/src/Callbacks.cpp | 2 +- neuralnetworks/aidl/utils/src/Conversions.cpp | 1 - neuralnetworks/aidl/utils/src/Execution.cpp | 1 - .../aidl/utils/src/PreparedModel.cpp | 3 +-- .../utils/adapter/src/PreparedModel.cpp | 1 - neuralnetworks/utils/common/Android.bp | 9 ++------- .../utils/common/src/CommonUtils.cpp | 2 -- 34 files changed, 69 insertions(+), 83 deletions(-) rename neuralnetworks/{utils/common/include/nnapi/hal => 1.0/utils/include/nnapi/hal/1.0}/HandleError.h (79%) diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/HandleError.h similarity index 79% rename from neuralnetworks/utils/common/include/nnapi/hal/HandleError.h rename to neuralnetworks/1.0/utils/include/nnapi/hal/1.0/HandleError.h index e51f916792..8e02633b41 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/HandleError.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H -#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H +#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_HANDLE_ERROR_H +#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_HANDLE_ERROR_H #include #include @@ -27,7 +27,7 @@ namespace android::hardware::neuralnetworks::utils { template -nn::GeneralResult handleTransportError(const hardware::Return& ret) { +nn::GeneralResult handleTransportError(const Return& ret) { if (ret.isDeadObject()) { return nn::error(nn::ErrorStatus::DEAD_OBJECT) << "Return<>::isDeadObject returned true: " << ret.description(); @@ -52,13 +52,13 @@ nn::GeneralResult handleTransportError(const hardware::Return& ret) std::move(result).value(); \ }) -#define HANDLE_HAL_STATUS(status) \ - if (const auto canonical = ::android::nn::convert(status).value_or( \ - ::android::nn::ErrorStatus::GENERAL_FAILURE); \ - canonical == ::android::nn::ErrorStatus::NONE) { \ - } else \ +#define HANDLE_STATUS_HIDL(status) \ + if (const ::android::nn::ErrorStatus canonical = ::android::nn::convert(status).value_or( \ + ::android::nn::ErrorStatus::GENERAL_FAILURE); \ + canonical == ::android::nn::ErrorStatus::NONE) { \ + } else \ return NN_ERROR(canonical) } // namespace android::hardware::neuralnetworks::utils -#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H +#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_HANDLE_ERROR_H diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h index 360b338c0e..5c1480e83b 100644 --- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h +++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h @@ -25,7 +25,6 @@ #include #include #include -#include namespace android::hardware::neuralnetworks::V1_0::utils { diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp index 5fb9e928a1..7b478ae3c6 100644 --- a/neuralnetworks/1.0/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp @@ -17,6 +17,7 @@ #include "Callbacks.h" #include "Conversions.h" +#include "HandleError.h" #include "PreparedModel.h" #include "ProtectCallback.h" #include "Utils.h" @@ -28,7 +29,6 @@ #include #include #include -#include #include #include @@ -40,19 +40,19 @@ namespace android::hardware::neuralnetworks::V1_0::utils { nn::GeneralResult> supportedOperationsCallback( ErrorStatus status, const hidl_vec& supportedOperations) { - HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "get supported operations failed with " << toString(status); return supportedOperations; } nn::GeneralResult prepareModelCallback( ErrorStatus status, const sp& preparedModel) { - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status); return NN_TRY(PreparedModel::create(preparedModel)); } nn::ExecutionResult, nn::Timing>> executionCallback( ErrorStatus status) { - HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status); return {}; } diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp index 404a92fcbb..49913a2584 100644 --- a/neuralnetworks/1.0/utils/src/Device.cpp +++ 
b/neuralnetworks/1.0/utils/src/Device.cpp @@ -18,6 +18,7 @@ #include "Callbacks.h" #include "Conversions.h" +#include "HandleError.h" #include "ProtectCallback.h" #include "Utils.h" @@ -30,7 +31,6 @@ #include #include #include -#include #include #include @@ -47,7 +47,7 @@ namespace { nn::GeneralResult capabilitiesCallback(ErrorStatus status, const Capabilities& capabilities) { - HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status); return nn::convert(capabilities); } @@ -156,7 +156,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel(hidlModel, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.0/utils/src/Execution.cpp b/neuralnetworks/1.0/utils/src/Execution.cpp index 056b0414ca..6e105a6791 100644 --- a/neuralnetworks/1.0/utils/src/Execution.cpp +++ b/neuralnetworks/1.0/utils/src/Execution.cpp @@ -18,6 +18,7 @@ #include "Callbacks.h" #include "Conversions.h" +#include "HandleError.h" #include "ProtectCallback.h" #include "Utils.h" @@ -28,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp index b332182700..00e7d22916 100644 --- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp @@ -20,6 +20,7 @@ #include "Callbacks.h" #include "Conversions.h" #include "Execution.h" +#include "HandleError.h" #include "ProtectCallback.h" #include "Utils.h" @@ -29,7 +30,6 @@ #include #include #include -#include #include #include @@ -84,7 +84,7 @@ PreparedModel::executeInternal(const V1_0::Request& request, const auto ret = kPreparedModel->execute(request, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status); auto result = NN_TRY(cb->get()); if (relocation.output) { diff --git a/neuralnetworks/1.0/utils/src/ProtectCallback.cpp b/neuralnetworks/1.0/utils/src/ProtectCallback.cpp index 18e1f3bf0b..89539b5f34 100644 --- a/neuralnetworks/1.0/utils/src/ProtectCallback.cpp +++ b/neuralnetworks/1.0/utils/src/ProtectCallback.cpp @@ -22,7 +22,8 @@ #include #include #include -#include + +#include "HandleError.h" #include #include diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h index 09d9fe89a7..4660ff732f 100644 --- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h +++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h @@ -26,7 +26,6 @@ #include #include #include -#include namespace android::hardware::neuralnetworks::V1_1::utils { diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp index 0e239cbe82..7d54cabeb9 100644 --- a/neuralnetworks/1.1/utils/src/Device.cpp +++ b/neuralnetworks/1.1/utils/src/Device.cpp @@ -29,9 +29,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -47,7 +47,7 @@ namespace { nn::GeneralResult capabilitiesCallback(V1_0::ErrorStatus status, const Capabilities& capabilities) { - HANDLE_HAL_STATUS(status) << "getting capabilities failed 
with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status); return nn::convert(capabilities); } @@ -157,7 +157,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h index 5c3b8a743c..23e336a69a 100644 --- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h +++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h @@ -28,7 +28,6 @@ #include #include #include -#include #include diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp index c855b172e9..cb61f21775 100644 --- a/neuralnetworks/1.2/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp @@ -29,10 +29,10 @@ #include #include #include +#include #include #include #include -#include #include #include @@ -62,7 +62,7 @@ convertExecutionGeneralResultsHelper(const hidl_vec& outputShapes, nn::GeneralResult prepareModelCallback( V1_0::ErrorStatus status, const sp& preparedModel) { - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status); return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); } @@ -74,7 +74,7 @@ nn::ExecutionResult, nn::Timing>> executi return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) << "execution failed with " << toString(status); } - HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status); return convertExecutionGeneralResultsHelper(outputShapes, timing); } diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index ef5056b5fe..838d9c4717 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -28,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp index 42c2a61fab..f12669a6dc 100644 --- a/neuralnetworks/1.2/utils/src/Device.cpp +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -30,10 +30,10 @@ #include #include #include +#include #include #include #include -#include #include #include @@ -49,31 +49,31 @@ namespace { nn::GeneralResult capabilitiesCallback(V1_0::ErrorStatus status, const Capabilities& capabilities) { - HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status); return nn::convert(capabilities); } nn::GeneralResult versionStringCallback(V1_0::ErrorStatus status, const hidl_string& versionString) { - HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getVersionString failed with " << toString(status); return versionString; } nn::GeneralResult deviceTypeCallback(V1_0::ErrorStatus status, DeviceType deviceType) { - HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status); + 
HANDLE_STATUS_HIDL(status) << "getDeviceType failed with " << toString(status); return nn::convert(deviceType); } nn::GeneralResult> supportedExtensionsCallback( V1_0::ErrorStatus status, const hidl_vec& extensions) { - HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getExtensions failed with " << toString(status); return nn::convert(extensions); } nn::GeneralResult> numberOfCacheFilesNeededCallback( V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { - HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status); if (numModelCache > nn::kMaxNumberOfCacheFiles) { return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater " "than allowed max (" @@ -254,7 +254,7 @@ nn::GeneralResult Device::prepareModel( const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status); return cb->get(); } @@ -271,7 +271,7 @@ nn::GeneralResult Device::prepareModelFromCache( const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation from cache failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.2/utils/src/Execution.cpp b/neuralnetworks/1.2/utils/src/Execution.cpp index 18d1c90edd..320b0e1e2f 100644 --- a/neuralnetworks/1.2/utils/src/Execution.cpp +++ b/neuralnetworks/1.2/utils/src/Execution.cpp @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp index 98a75fa785..a8ded9e95c 100644 --- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp +++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp @@ -28,9 +28,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -82,8 +82,8 @@ class BurstExecution final : public nn::IExecution, nn::GeneralResult> executionBurstResultCallback( V1_0::ErrorStatus status, const sp& burstContext) { - HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status " - << toString(status); + HANDLE_STATUS_HIDL(status) << "IPreparedModel::configureExecutionBurst failed with status " + << toString(status); if (burstContext == nullptr) { return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IPreparedModel::configureExecutionBurst returned nullptr for burst"; diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp index 52cabd8b96..f30b662182 100644 --- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp +++ b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp @@ -27,8 +27,8 @@ #include #include #include +#include #include -#include #include #include @@ -50,7 +50,7 @@ constexpr V1_2::Timing kNoTiming = {std::numeric_limits::max(), nn::GeneralResult> getMemoriesCallback( V1_0::ErrorStatus status, const hidl_vec& memories) { - 
HANDLE_HAL_STATUS(status) << "getting burst memories failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getting burst memories failed with " << toString(status); std::vector canonicalMemories; canonicalMemories.reserve(memories.size()); for (const auto& memory : memories) { diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index d11cdc2d2a..b8a5ae087c 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -31,9 +31,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -82,7 +82,7 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming const auto ret = kPreparedModel->execute_1_2(request, measure, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status); } return cb->get(); diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h index 28525bd450..2812db2546 100644 --- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h +++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h @@ -30,7 +30,6 @@ #include #include #include -#include namespace android::hardware::neuralnetworks::V1_3::utils { diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp index ada526573b..34925eacfc 100644 --- a/neuralnetworks/1.3/utils/src/Buffer.cpp +++ b/neuralnetworks/1.3/utils/src/Buffer.cpp @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include "Conversions.h" #include "Utils.h" @@ -66,7 +66,7 @@ nn::GeneralResult Buffer::copyTo(const nn::SharedMemory& dst) const { const auto ret = kBuffer->copyTo(hidlDst); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "IBuffer::copyTo failed with " << toString(status); return {}; } @@ -78,7 +78,7 @@ nn::GeneralResult Buffer::copyFrom(const nn::SharedMemory& src, const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "IBuffer::copyFrom failed with " << toString(status); return {}; } diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp index b4fe487422..f0638626f6 100644 --- a/neuralnetworks/1.3/utils/src/Callbacks.cpp +++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp @@ -30,13 +30,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include @@ -71,13 +71,13 @@ convertExecutionGeneralResultsHelper(const hidl_vec& outputSh nn::GeneralResult> supportedOperationsCallback( ErrorStatus status, const hidl_vec& supportedOperations) { - HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "get supported operations failed with " << toString(status); return supportedOperations; } nn::GeneralResult prepareModelCallback( ErrorStatus status, const sp& preparedModel) { - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << 
"model preparation failed with " << toString(status); return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true)); } @@ -90,7 +90,7 @@ nn::ExecutionResult, nn::Timing>> executi return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes)) << "execution failed with " << toString(status); } - HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status); return convertExecutionGeneralResultsHelper(outputShapes, timing); } diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index c885d60ee9..a1d414c700 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -28,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp index 462d557e53..a73ce82ed2 100644 --- a/neuralnetworks/1.3/utils/src/Device.cpp +++ b/neuralnetworks/1.3/utils/src/Device.cpp @@ -33,13 +33,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include @@ -72,7 +72,7 @@ nn::GeneralResult>> convert( nn::GeneralResult capabilitiesCallback(ErrorStatus status, const Capabilities& capabilities) { - HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status); return nn::convert(capabilities); } @@ -89,7 +89,7 @@ nn::GeneralResult getCapabilitiesFrom(V1_3::IDevice* device) { nn::GeneralResult allocationCallback(ErrorStatus status, const sp& buffer, uint32_t token) { - HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "IDevice::allocate failed with " << toString(status); return Buffer::create(buffer, static_cast(token)); } @@ -208,7 +208,7 @@ nn::GeneralResult Device::prepareModel( kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status); return cb->get(); } @@ -227,7 +227,7 @@ nn::GeneralResult Device::prepareModelFromCache( const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache, hidlToken, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); - HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "model preparation from cache failed with " << toString(status); return cb->get(); } diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp index 467d87d3aa..0ec7f56d37 100644 --- a/neuralnetworks/1.3/utils/src/Execution.cpp +++ b/neuralnetworks/1.3/utils/src/Execution.cpp @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp index 792665ed1e..2c81cb2ad5 100644 --- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp @@ -30,12 +30,12 @@ #include #include #include +#include #include #include #include #include #include -#include #include 
#include @@ -50,14 +50,14 @@ namespace { nn::GeneralResult> convertFencedExecutionCallbackResults( ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) { - HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "fenced execution callback info failed with " << toString(status); return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced))); } nn::GeneralResult> fencedExecutionCallback( ErrorStatus status, const hidl_handle& syncFence, const sp& callback) { - HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "fenced execution failed with " << toString(status); auto resultSyncFence = nn::SyncFence::createAsSignaled(); if (syncFence.getNativeHandle() != nullptr) { @@ -127,7 +127,7 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb); const auto status = HANDLE_TRANSPORT_FAILURE(ret); if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status); + HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status); } return cb->get(); diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp index cb67b84bf2..2ff7534184 100644 --- a/neuralnetworks/aidl/utils/Android.bp +++ b/neuralnetworks/aidl/utils/Android.bp @@ -40,7 +40,6 @@ cc_library_static { shared_libs: [ "android.hardware.neuralnetworks-V2-ndk", "libbinder_ndk", - "libhidlbase", ], target: { android: { diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h index 1b149e452b..3a45f8d3ea 100644 --- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h +++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h @@ -24,7 +24,6 @@ #include #include #include -#include namespace aidl::android::hardware::neuralnetworks::utils { @@ -75,6 +74,13 @@ nn::GeneralResult handleTransportError(const ndk::ScopedAStatus& ret); for (const auto status = handleTransportError(ret); !status.ok();) \ return NN_ERROR(status.error().code) << status.error().message << ": " +#define HANDLE_STATUS_AIDL(status) \ + if (const ::android::nn::ErrorStatus canonical = ::android::nn::convert(status).value_or( \ + ::android::nn::ErrorStatus::GENERAL_FAILURE); \ + canonical == ::android::nn::ErrorStatus::NONE) { \ + } else \ + return NN_ERROR(canonical) + } // namespace aidl::android::hardware::neuralnetworks::utils #endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_H diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp index c59c10bedd..fb00b264e3 100644 --- a/neuralnetworks/aidl/utils/src/Burst.cpp +++ b/neuralnetworks/aidl/utils/src/Burst.cpp @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/aidl/utils/src/Callbacks.cpp b/neuralnetworks/aidl/utils/src/Callbacks.cpp index 8055665228..a32147734c 100644 --- a/neuralnetworks/aidl/utils/src/Callbacks.cpp +++ b/neuralnetworks/aidl/utils/src/Callbacks.cpp @@ -38,7 +38,7 @@ namespace { // nn::Version::ANDROID_S. On failure, this function returns with the appropriate nn::GeneralError. 
nn::GeneralResult prepareModelCallback( ErrorStatus status, const std::shared_ptr& preparedModel) { - HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status); + HANDLE_STATUS_AIDL(status) << "model preparation failed with " << toString(status); return NN_TRY(PreparedModel::create(preparedModel)); } diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp index ddff3f2cb7..45628c8e73 100644 --- a/neuralnetworks/aidl/utils/src/Conversions.cpp +++ b/neuralnetworks/aidl/utils/src/Conversions.cpp @@ -34,7 +34,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp index 13d4f32048..94edd90c89 100644 --- a/neuralnetworks/aidl/utils/src/Execution.cpp +++ b/neuralnetworks/aidl/utils/src/Execution.cpp @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp index 0769016b4c..f25c2c8825 100644 --- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp +++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp @@ -30,7 +30,6 @@ #include #include #include -#include #include #include @@ -51,7 +50,7 @@ nn::GeneralResult, nn::Timing>> convertEx nn::GeneralResult> convertFencedExecutionResults( ErrorStatus status, const aidl_hal::Timing& timingLaunched, const aidl_hal::Timing& timingFenced) { - HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status); + HANDLE_STATUS_AIDL(status) << "fenced execution callback info failed with " << toString(status); return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced))); } diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/src/PreparedModel.cpp index 7397defb34..40c0888cda 100644 --- a/neuralnetworks/utils/adapter/src/PreparedModel.cpp +++ b/neuralnetworks/utils/adapter/src/PreparedModel.cpp @@ -36,7 +36,6 @@ #include #include #include -#include #include #include diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp index 6f07be9207..e02a202574 100644 --- a/neuralnetworks/utils/common/Android.bp +++ b/neuralnetworks/utils/common/Android.bp @@ -30,13 +30,8 @@ cc_library_static { local_include_dirs: ["include/nnapi/hal"], export_include_dirs: ["include"], cflags: ["-Wthread-safety"], - static_libs: [ - "neuralnetworks_types", - ], - shared_libs: [ - "libhidlbase", - "libbinder_ndk", - ], + static_libs: ["neuralnetworks_types"], + shared_libs: ["libbinder_ndk"], } cc_test { diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp index b66fbd17ef..b2498818f8 100644 --- a/neuralnetworks/utils/common/src/CommonUtils.cpp +++ b/neuralnetworks/utils/common/src/CommonUtils.cpp @@ -16,8 +16,6 @@ #include "CommonUtils.h" -#include "HandleError.h" - #include #include #include
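
Note on the renamed macros: HANDLE_STATUS_HIDL and HANDLE_STATUS_AIDL keep the shape of the old HANDLE_HAL_STATUS macro — an if whose empty true-branch is taken when the status converts to ErrorStatus::NONE, and a dangling "else return NN_ERROR(canonical)" that lets the call site stream an error message onto the returned error, as in HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);. The following is a minimal, self-contained sketch of that pattern only; ErrorStatus, Error, ErrorBuilder, HANDLE_STATUS_SKETCH, and prepareModelSketch are placeholder names invented for illustration and are not the real nnapi/hal declarations.

    // Sketch of the if / dangling-else-return macro pattern used by
    // HANDLE_STATUS_HIDL / HANDLE_STATUS_AIDL, with stand-in types.
    #include <iostream>
    #include <sstream>
    #include <string>

    enum class ErrorStatus { NONE, GENERAL_FAILURE };

    // Simplified stand-in for the canonical error object a callback returns.
    struct Error {
        ErrorStatus status;
        std::string message;
    };

    // Stand-in for the NN_ERROR builder: collects streamed text and converts
    // to Error when the enclosing function returns it.
    struct ErrorBuilder {
        ErrorStatus status;
        std::ostringstream stream;
        template <typename T>
        ErrorBuilder& operator<<(const T& value) {
            stream << value;
            return *this;
        }
        operator Error() { return Error{status, stream.str()}; }
    };

    // Same shape as the macro introduced by this patch: empty if-branch when
    // the status is NONE, otherwise a dangling "else return <builder>" that
    // the call site extends with operator<<.
    #define HANDLE_STATUS_SKETCH(status)              \
        if ((status) == ErrorStatus::NONE) {          \
        } else                                        \
            return ErrorBuilder{(status)}

    // Example call site, mirroring how the prepareModelCallback hunks above
    // use the macro followed by a streamed message.
    Error prepareModelSketch(ErrorStatus status) {
        HANDLE_STATUS_SKETCH(status) << "model preparation failed with status "
                                     << static_cast<int>(status);
        return Error{ErrorStatus::NONE, "ok"};
    }

    int main() {
        const Error error = prepareModelSketch(ErrorStatus::GENERAL_FAILURE);
        std::cout << error.message << '\n';  // prints the streamed failure text
        return 0;
    }

Because the else-branch ends in a plain return expression, the macro must be followed by at least one "<< ..." or a semicolon at the call site; that is why every converted call site in this series keeps the trailing streamed message intact while only the macro name changes.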