diff --git a/neuralnetworks/aidl/Android.bp b/neuralnetworks/aidl/Android.bp
index 308f89f663..0557e43a5a 100644
--- a/neuralnetworks/aidl/Android.bp
+++ b/neuralnetworks/aidl/Android.bp
@@ -15,5 +15,13 @@ aidl_interface {
         cpp: {
             enabled: false,
         },
+        ndk: {
+            apex_available: [
+                "//apex_available:platform",
+                "com.android.neuralnetworks",
+                "test_com.android.neuralnetworks",
+            ],
+            min_sdk_version: "30",
+        },
     },
 }
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
new file mode 100644
index 0000000000..56017da52d
--- /dev/null
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library_static {
+    name: "neuralnetworks_utils_hal_aidl",
+    defaults: ["neuralnetworks_utils_defaults"],
+    srcs: ["src/*"],
+    local_include_dirs: ["include/nnapi/hal/aidl/"],
+    export_include_dirs: ["include"],
+    static_libs: [
+        "neuralnetworks_types",
+        "neuralnetworks_utils_hal_common",
+    ],
+    shared_libs: [
+        "libhidlbase",
+        "android.hardware.neuralnetworks-V1-ndk_platform",
+        "libbinder_ndk",
+    ],
+}
diff --git a/neuralnetworks/aidl/utils/OWNERS b/neuralnetworks/aidl/utils/OWNERS
new file mode 100644
index 0000000000..e4feee3496
--- /dev/null
+++ b/neuralnetworks/aidl/utils/OWNERS
@@ -0,0 +1,11 @@
+# Neuralnetworks team
+butlermichael@google.com
+dgross@google.com
+galarragas@google.com
+jeanluc@google.com
+levp@google.com
+miaowang@google.com
+pszczepaniak@google.com
+slavash@google.com
+vddang@google.com
+xusongw@google.com
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
new file mode 100644
index 0000000000..35de5befd0
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_CONVERSIONS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_CONVERSIONS_H
+
+#include <aidl/android/hardware/common/NativeHandle.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
+#include <aidl/android/hardware/neuralnetworks/DataLocation.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h>
+#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/Operand.h>
+#include <aidl/android/hardware/neuralnetworks/OperandExtraParams.h>
+#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h>
+#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h>
+#include <aidl/android/hardware/neuralnetworks/OperandType.h>
+#include <aidl/android/hardware/neuralnetworks/Operation.h>
+#include <aidl/android/hardware/neuralnetworks/OperationType.h>
+#include <aidl/android/hardware/neuralnetworks/OutputShape.h>
+#include <aidl/android/hardware/neuralnetworks/PerformanceInfo.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <aidl/android/hardware/neuralnetworks/RequestArgument.h>
+#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
+#include <aidl/android/hardware/neuralnetworks/Subgraph.h>
+#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
+#include <aidl/android/hardware/neuralnetworks/Timing.h>
+
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include <vector>
+
+namespace android::nn {
+
+GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType);
+GeneralResult<OperationType> unvalidatedConvert(const aidl_hal::OperationType& operationType);
+GeneralResult<DeviceType> unvalidatedConvert(const aidl_hal::DeviceType& deviceType);
+GeneralResult<Priority> unvalidatedConvert(const aidl_hal::Priority& priority);
+GeneralResult<Capabilities> unvalidatedConvert(const aidl_hal::Capabilities& capabilities);
+GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
+        const aidl_hal::OperandPerformance& operandPerformance);
+GeneralResult<Capabilities::PerformanceInfo> unvalidatedConvert(
+        const aidl_hal::PerformanceInfo& performanceInfo);
+GeneralResult<DataLocation> unvalidatedConvert(const aidl_hal::DataLocation& location);
+GeneralResult<Operand> unvalidatedConvert(const aidl_hal::Operand& operand);
+GeneralResult<Operand::ExtraParams> unvalidatedConvert(
+        const std::optional<aidl_hal::OperandExtraParams>& optionalExtraParams);
+GeneralResult<Operand::LifeTime> unvalidatedConvert(
+        const aidl_hal::OperandLifeTime& operandLifeTime);
+GeneralResult<Operand::SymmPerChannelQuantParams> unvalidatedConvert(
+        const aidl_hal::SymmPerChannelQuantParams& symmPerChannelQuantParams);
+GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation);
+GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model);
+GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+        const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix);
+GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues);
+GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph);
+GeneralResult<OutputShape> unvalidatedConvert(const aidl_hal::OutputShape& outputShape);
+GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming);
+GeneralResult<Memory> unvalidatedConvert(const aidl_hal::Memory& memory);
+GeneralResult<Timing> unvalidatedConvert(const aidl_hal::Timing& timing);
+GeneralResult<BufferDesc> unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc);
+GeneralResult<BufferRole> unvalidatedConvert(const aidl_hal::BufferRole& bufferRole);
+GeneralResult<Request> unvalidatedConvert(const aidl_hal::Request& request);
+GeneralResult<Request::Argument> unvalidatedConvert(
+        const aidl_hal::RequestArgument& requestArgument);
+GeneralResult<Request::MemoryPool> unvalidatedConvert(
+        const aidl_hal::RequestMemoryPool& memoryPool);
+GeneralResult<ErrorStatus> unvalidatedConvert(const aidl_hal::ErrorStatus& errorStatus);
+GeneralResult<ExecutionPreference> unvalidatedConvert(
+        const aidl_hal::ExecutionPreference& executionPreference);
+GeneralResult<Extension> unvalidatedConvert(const aidl_hal::Extension& extension);
+GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
+        const aidl_hal::ExtensionOperandTypeInformation& operandTypeInformation);
+GeneralResult<SharedHandle> unvalidatedConvert(
+        const ::aidl::android::hardware::common::NativeHandle& handle);
+
+GeneralResult<ExecutionPreference> convert(
+        const aidl_hal::ExecutionPreference& executionPreference);
+GeneralResult<Memory> convert(const aidl_hal::Memory& memory);
+GeneralResult<Model> convert(const aidl_hal::Model& model);
+GeneralResult<Operand> convert(const aidl_hal::Operand& operand);
+GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType);
+GeneralResult<Priority> convert(const aidl_hal::Priority& priority);
+GeneralResult<Request::MemoryPool> convert(const aidl_hal::RequestMemoryPool& memoryPool);
+GeneralResult<Request> convert(const aidl_hal::Request& request);
+
+GeneralResult<std::vector<OutputShape>> convert(
+        const std::vector<aidl_hal::OutputShape>& outputShapes);
+GeneralResult<std::vector<Memory>> convert(const std::vector<aidl_hal::Memory>& memories);
+
+GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec);
+
+}  // namespace android::nn
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+namespace nn = ::android::nn;
+
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory& memory);
+nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
+nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus);
+
+nn::GeneralResult<Memory> convert(const nn::Memory& memory);
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
+nn::GeneralResult<std::vector<OutputShape>> convert(
+        const std::vector<nn::OutputShape>& outputShapes);
+
+nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_CONVERSIONS_H
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
new file mode 100644
index 0000000000..802e70304a
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_H
+
+#include "nnapi/hal/aidl/Conversions.h"
+
+#include <android-base/logging.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+constexpr auto kDefaultPriority = Priority::MEDIUM;
+constexpr auto kVersion = nn::Version::ANDROID_S;
+
+template <typename Type>
+nn::Result<void> validate(const Type& halObject) {
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    return {};
+}
+
+template <typename Type>
+bool valid(const Type& halObject) {
+    const auto result = utils::validate(halObject);
+    if (!result.has_value()) {
+        LOG(ERROR) << result.error();
+    }
+    return result.has_value();
+}
+
+nn::GeneralResult<Model> copyModel(const Model& model);
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_H
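The helpers above are intended to be used roughly as in the sketch below. It is illustrative only and not part of the patch; the handleModel() function and the incoming halModel value are assumptions for the example, and the headers added by this change are assumed to be included:

    // Validate an AIDL model received over binder, then convert it to canonical form.
    void handleModel(const ::aidl::android::hardware::neuralnetworks::Model& halModel) {
        namespace aidl_utils = ::aidl::android::hardware::neuralnetworks::utils;
        if (!aidl_utils::valid(halModel)) {
            return;  // valid() has already logged the validation failure.
        }
        // convert() validates and converts in one step; on success it holds an ::android::nn::Model.
        const auto canonical = ::android::nn::convert(halModel);
        if (!canonical.has_value()) {
            LOG(ERROR) << "Conversion failed: " << canonical.error().message;
        }
    }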
diff --git a/neuralnetworks/aidl/utils/src/Assertions.cpp b/neuralnetworks/aidl/utils/src/Assertions.cpp
new file mode 100644
index 0000000000..0e88091cfb
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Assertions.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/FusedActivationFunc.h>
+#include <aidl/android/hardware/neuralnetworks/IDevice.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h>
+#include <aidl/android/hardware/neuralnetworks/OperandType.h>
+#include <aidl/android/hardware/neuralnetworks/OperationType.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+
+#include <nnapi/OperandTypes.h>
+#include <nnapi/OperationTypes.h>
+#include <nnapi/Types.h>
+#include <type_traits>
+
+namespace {
+
+#define COMPARE_ENUMS_TYPES(lhsType, rhsType)                                                    \
+    static_assert(                                                                               \
+            std::is_same_v<                                                                      \
+                    std::underlying_type_t<::aidl::android::hardware::neuralnetworks::lhsType>,  \
+                    std::underlying_type_t<::android::nn::rhsType>>,                             \
+            "::aidl::android::hardware::neuralnetworks::" #lhsType                               \
+            " does not have the same underlying type as ::android::nn::" #rhsType)
+
+COMPARE_ENUMS_TYPES(OperandType, OperandType);
+COMPARE_ENUMS_TYPES(OperationType, OperationType);
+COMPARE_ENUMS_TYPES(Priority, Priority);
+COMPARE_ENUMS_TYPES(OperandLifeTime, Operand::LifeTime);
+COMPARE_ENUMS_TYPES(ErrorStatus, ErrorStatus);
+
+#undef COMPARE_ENUMS_TYPES
+
+#define COMPARE_ENUMS_FULL(lhsSymbol, rhsSymbol, lhsType, rhsType)                                \
+    static_assert(                                                                                \
+            static_cast<                                                                          \
+                    std::underlying_type_t<::aidl::android::hardware::neuralnetworks::lhsType>>(  \
+                    ::aidl::android::hardware::neuralnetworks::lhsType::lhsSymbol) ==             \
+                    static_cast<std::underlying_type_t<::android::nn::rhsType>>(                  \
+                            ::android::nn::rhsType::rhsSymbol),                                   \
+            "::aidl::android::hardware::neuralnetworks::" #lhsType "::" #lhsSymbol                \
+            " does not match ::android::nn::" #rhsType "::" #rhsSymbol)
+
+#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, symbol, OperandType, OperandType)
+
+COMPARE_ENUMS(FLOAT32);
+COMPARE_ENUMS(INT32);
+COMPARE_ENUMS(UINT32);
+COMPARE_ENUMS(TENSOR_FLOAT32);
+COMPARE_ENUMS(TENSOR_INT32);
+COMPARE_ENUMS(TENSOR_QUANT8_ASYMM);
+COMPARE_ENUMS(BOOL);
+COMPARE_ENUMS(TENSOR_QUANT16_SYMM);
+COMPARE_ENUMS(TENSOR_FLOAT16);
+COMPARE_ENUMS(TENSOR_BOOL8);
+COMPARE_ENUMS(FLOAT16);
+COMPARE_ENUMS(TENSOR_QUANT8_SYMM_PER_CHANNEL);
+COMPARE_ENUMS(TENSOR_QUANT16_ASYMM);
+COMPARE_ENUMS(TENSOR_QUANT8_SYMM);
+COMPARE_ENUMS(TENSOR_QUANT8_ASYMM_SIGNED);
+COMPARE_ENUMS(SUBGRAPH);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, symbol, OperationType, OperationType)
+
+COMPARE_ENUMS(ADD);
+COMPARE_ENUMS(AVERAGE_POOL_2D);
+COMPARE_ENUMS(CONCATENATION);
+COMPARE_ENUMS(CONV_2D);
+COMPARE_ENUMS(DEPTHWISE_CONV_2D);
+COMPARE_ENUMS(DEPTH_TO_SPACE);
+COMPARE_ENUMS(DEQUANTIZE);
+COMPARE_ENUMS(EMBEDDING_LOOKUP);
+COMPARE_ENUMS(FLOOR);
+COMPARE_ENUMS(FULLY_CONNECTED);
+COMPARE_ENUMS(HASHTABLE_LOOKUP);
+COMPARE_ENUMS(L2_NORMALIZATION);
+COMPARE_ENUMS(L2_POOL_2D);
+COMPARE_ENUMS(LOCAL_RESPONSE_NORMALIZATION);
+COMPARE_ENUMS(LOGISTIC);
+COMPARE_ENUMS(LSH_PROJECTION);
+COMPARE_ENUMS(LSTM);
+COMPARE_ENUMS(MAX_POOL_2D);
+COMPARE_ENUMS(MUL);
+COMPARE_ENUMS(RELU);
+COMPARE_ENUMS(RELU1);
+COMPARE_ENUMS(RELU6);
+COMPARE_ENUMS(RESHAPE);
+COMPARE_ENUMS(RESIZE_BILINEAR);
+COMPARE_ENUMS(RNN);
+COMPARE_ENUMS(SOFTMAX);
+COMPARE_ENUMS(SPACE_TO_DEPTH);
+COMPARE_ENUMS(SVDF);
+COMPARE_ENUMS(TANH);
+COMPARE_ENUMS(BATCH_TO_SPACE_ND);
+COMPARE_ENUMS(DIV);
+COMPARE_ENUMS(MEAN);
+COMPARE_ENUMS(PAD);
+COMPARE_ENUMS(SPACE_TO_BATCH_ND);
+COMPARE_ENUMS(SQUEEZE);
+COMPARE_ENUMS(STRIDED_SLICE);
+COMPARE_ENUMS(SUB);
+COMPARE_ENUMS(TRANSPOSE);
+COMPARE_ENUMS(ABS);
+COMPARE_ENUMS(ARGMAX);
+COMPARE_ENUMS(ARGMIN);
+COMPARE_ENUMS(AXIS_ALIGNED_BBOX_TRANSFORM);
+COMPARE_ENUMS(BIDIRECTIONAL_SEQUENCE_LSTM);
+COMPARE_ENUMS(BIDIRECTIONAL_SEQUENCE_RNN);
+COMPARE_ENUMS(BOX_WITH_NMS_LIMIT);
+COMPARE_ENUMS(CAST);
+COMPARE_ENUMS(CHANNEL_SHUFFLE);
+COMPARE_ENUMS(DETECTION_POSTPROCESSING);
+COMPARE_ENUMS(EQUAL);
+COMPARE_ENUMS(EXP);
+COMPARE_ENUMS(EXPAND_DIMS);
+COMPARE_ENUMS(GATHER);
+COMPARE_ENUMS(GENERATE_PROPOSALS);
+COMPARE_ENUMS(GREATER);
+COMPARE_ENUMS(GREATER_EQUAL);
+COMPARE_ENUMS(GROUPED_CONV_2D);
+COMPARE_ENUMS(HEATMAP_MAX_KEYPOINT);
+COMPARE_ENUMS(INSTANCE_NORMALIZATION);
+COMPARE_ENUMS(LESS);
+COMPARE_ENUMS(LESS_EQUAL);
+COMPARE_ENUMS(LOG);
+COMPARE_ENUMS(LOGICAL_AND);
+COMPARE_ENUMS(LOGICAL_NOT);
+COMPARE_ENUMS(LOGICAL_OR);
+COMPARE_ENUMS(LOG_SOFTMAX);
+COMPARE_ENUMS(MAXIMUM);
+COMPARE_ENUMS(MINIMUM);
+COMPARE_ENUMS(NEG);
+COMPARE_ENUMS(NOT_EQUAL);
+COMPARE_ENUMS(PAD_V2);
+COMPARE_ENUMS(POW);
+COMPARE_ENUMS(PRELU);
+COMPARE_ENUMS(QUANTIZE);
+COMPARE_ENUMS(QUANTIZED_16BIT_LSTM);
+COMPARE_ENUMS(RANDOM_MULTINOMIAL);
+COMPARE_ENUMS(REDUCE_ALL);
+COMPARE_ENUMS(REDUCE_ANY);
+COMPARE_ENUMS(REDUCE_MAX);
+COMPARE_ENUMS(REDUCE_MIN);
+COMPARE_ENUMS(REDUCE_PROD);
+COMPARE_ENUMS(REDUCE_SUM);
+COMPARE_ENUMS(ROI_ALIGN);
+COMPARE_ENUMS(ROI_POOLING);
+COMPARE_ENUMS(RSQRT);
+COMPARE_ENUMS(SELECT);
+COMPARE_ENUMS(SIN);
+COMPARE_ENUMS(SLICE);
+COMPARE_ENUMS(SPLIT);
+COMPARE_ENUMS(SQRT);
+COMPARE_ENUMS(TILE);
+COMPARE_ENUMS(TOPK_V2);
+COMPARE_ENUMS(TRANSPOSE_CONV_2D);
+COMPARE_ENUMS(UNIDIRECTIONAL_SEQUENCE_LSTM);
+COMPARE_ENUMS(UNIDIRECTIONAL_SEQUENCE_RNN);
+COMPARE_ENUMS(RESIZE_NEAREST_NEIGHBOR);
+COMPARE_ENUMS(QUANTIZED_LSTM);
+COMPARE_ENUMS(IF);
+COMPARE_ENUMS(WHILE);
+COMPARE_ENUMS(ELU);
+COMPARE_ENUMS(HARD_SWISH);
+COMPARE_ENUMS(FILL);
+COMPARE_ENUMS(RANK);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, symbol, Priority, Priority)
+
+COMPARE_ENUMS(LOW);
+COMPARE_ENUMS(MEDIUM);
+COMPARE_ENUMS(HIGH);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(lhsSymbol, rhsSymbol) \
+    COMPARE_ENUMS_FULL(lhsSymbol, rhsSymbol, OperandLifeTime, Operand::LifeTime)
+
+COMPARE_ENUMS(TEMPORARY_VARIABLE, TEMPORARY_VARIABLE);
+COMPARE_ENUMS(SUBGRAPH_INPUT, SUBGRAPH_INPUT);
+COMPARE_ENUMS(SUBGRAPH_OUTPUT, SUBGRAPH_OUTPUT);
+COMPARE_ENUMS(CONSTANT_COPY, CONSTANT_COPY);
+COMPARE_ENUMS(CONSTANT_POOL, CONSTANT_REFERENCE);
+COMPARE_ENUMS(NO_VALUE, NO_VALUE);
+COMPARE_ENUMS(SUBGRAPH, SUBGRAPH);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, symbol, ErrorStatus, ErrorStatus)
+
+COMPARE_ENUMS(NONE);
+COMPARE_ENUMS(DEVICE_UNAVAILABLE);
+COMPARE_ENUMS(GENERAL_FAILURE);
+COMPARE_ENUMS(OUTPUT_INSUFFICIENT_SIZE);
+COMPARE_ENUMS(INVALID_ARGUMENT);
+COMPARE_ENUMS(MISSED_DEADLINE_TRANSIENT);
+COMPARE_ENUMS(MISSED_DEADLINE_PERSISTENT);
+COMPARE_ENUMS(RESOURCE_EXHAUSTED_TRANSIENT);
+COMPARE_ENUMS(RESOURCE_EXHAUSTED_PERSISTENT);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(symbol) \
+    COMPARE_ENUMS_FULL(symbol, symbol, ExecutionPreference, ExecutionPreference)
+
+COMPARE_ENUMS(LOW_POWER);
+COMPARE_ENUMS(FAST_SINGLE_ANSWER);
+COMPARE_ENUMS(SUSTAINED_SPEED);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, symbol, DeviceType, DeviceType)
+
+COMPARE_ENUMS(OTHER);
+COMPARE_ENUMS(CPU);
+COMPARE_ENUMS(GPU);
+COMPARE_ENUMS(ACCELERATOR);
+
+#undef COMPARE_ENUMS
+
+#define COMPARE_ENUMS(symbol) \
+    COMPARE_ENUMS_FULL(symbol, symbol, FusedActivationFunc, FusedActivationFunc)
+
+COMPARE_ENUMS(NONE);
+COMPARE_ENUMS(RELU);
+COMPARE_ENUMS(RELU1);
+COMPARE_ENUMS(RELU6);
+
+#undef COMPARE_ENUMS
+
+#undef COMPARE_ENUMS_FULL
+
+#define COMPARE_CONSTANTS(halSymbol, canonicalSymbol)                      \
+    static_assert(::aidl::android::hardware::neuralnetworks::halSymbol == \
+                  ::android::nn::canonicalSymbol);
+
+COMPARE_CONSTANTS(IDevice::BYTE_SIZE_OF_CACHE_TOKEN, kByteSizeOfCacheToken);
+COMPARE_CONSTANTS(IDevice::MAX_NUMBER_OF_CACHE_FILES, kMaxNumberOfCacheFiles);
+COMPARE_CONSTANTS(IDevice::EXTENSION_TYPE_HIGH_BITS_PREFIX, kExtensionPrefixBits - 1);
+COMPARE_CONSTANTS(IDevice::EXTENSION_TYPE_LOW_BITS_TYPE, kExtensionTypeBits);
+COMPARE_CONSTANTS(IPreparedModel::DEFAULT_LOOP_TIMEOUT_DURATION_NS,
+                  operation_while::kTimeoutNsDefault);
+COMPARE_CONSTANTS(IPreparedModel::MAXIMUM_LOOP_TIMEOUT_DURATION_NS,
+                  operation_while::kTimeoutNsMaximum);
+
+#undef COMPARE_CONSTANTS
+
+}  // anonymous namespace
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
new file mode 100644
index 0000000000..0e93b02a1e
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conversions.h"
+
+#include <aidl/android/hardware/common/NativeHandle.h>
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <nnapi/OperandTypes.h>
+#include <nnapi/OperationTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#define VERIFY_NON_NEGATIVE(value) \
+    while (UNLIKELY(value < 0)) return NN_ERROR()
+
+namespace {
+
+template <typename Type>
+constexpr std::underlying_type_t<Type> underlyingType(Type value) {
+    return static_cast<std::underlying_type_t<Type>>(value);
+}
+
+constexpr auto kVersion = android::nn::Version::ANDROID_S;
+
+}  // namespace
+
+namespace android::nn {
+namespace {
+
+constexpr auto validOperandType(nn::OperandType operandType) {
+    switch (operandType) {
+        case nn::OperandType::FLOAT32:
+        case nn::OperandType::INT32:
+        case nn::OperandType::UINT32:
+        case nn::OperandType::TENSOR_FLOAT32:
+        case nn::OperandType::TENSOR_INT32:
+        case nn::OperandType::TENSOR_QUANT8_ASYMM:
+        case nn::OperandType::BOOL:
+        case nn::OperandType::TENSOR_QUANT16_SYMM:
+        case nn::OperandType::TENSOR_FLOAT16:
+        case nn::OperandType::TENSOR_BOOL8:
+        case nn::OperandType::FLOAT16:
+        case nn::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+        case nn::OperandType::TENSOR_QUANT16_ASYMM:
+        case nn::OperandType::TENSOR_QUANT8_SYMM:
+        case nn::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+        case nn::OperandType::SUBGRAPH:
+            return true;
+        case nn::OperandType::OEM:
+        case nn::OperandType::TENSOR_OEM_BYTE:
+            return false;
+    }
+    return nn::isExtension(operandType);
+}
+
+template <typename Type>
+using UnvalidatedConvertOutput =
+        std::decay_t<decltype(unvalidatedConvert(std::declval<Type>()).value())>;
+
+template <typename Type>
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
+        const std::vector<Type>& arguments) {
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
+    canonical.reserve(arguments.size());
+    for (const auto& argument : arguments) {
+        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
+    }
+    return canonical;
+}
+
+template <typename Type>
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
+        const std::vector<Type>& arguments) {
+    return unvalidatedConvertVec(arguments);
+}
+
+template <typename Type>
+GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
+    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
+    const auto maybeVersion = validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
+    if (version > kVersion) {
+        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
+    }
+    return canonical;
+}
+
+template <typename Type>
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
+        const std::vector<Type>& arguments) {
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
+    canonical.reserve(arguments.size());
+    for (const auto& argument : arguments) {
+        canonical.push_back(NN_TRY(validatedConvert(argument)));
+    }
+    return canonical;
+}
+
+}  // anonymous namespace
+
+GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
+    VERIFY_NON_NEGATIVE(underlyingType(operandType)) << "Negative operand types are not allowed.";
+    return static_cast<OperandType>(operandType);
+}
+
+GeneralResult<OperationType> unvalidatedConvert(const aidl_hal::OperationType& operationType) {
+    VERIFY_NON_NEGATIVE(underlyingType(operationType))
+            << "Negative operation types are not allowed.";
+    return static_cast<OperationType>(operationType);
+}
+
+GeneralResult<DeviceType> unvalidatedConvert(const aidl_hal::DeviceType& deviceType) {
+    return static_cast<DeviceType>(deviceType);
+}
+
+GeneralResult<Priority> unvalidatedConvert(const aidl_hal::Priority& priority) {
+    return static_cast<Priority>(priority);
+}
+
+GeneralResult<Capabilities> unvalidatedConvert(const aidl_hal::Capabilities& capabilities) {
+    const bool validOperandTypes = std::all_of(
+            capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
+            [](const aidl_hal::OperandPerformance& operandPerformance) {
+                const auto maybeType = unvalidatedConvert(operandPerformance.type);
+                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
+            });
+    if (!validOperandTypes) {
+        return NN_ERROR() << "Invalid OperandType when unvalidatedConverting OperandPerformance in "
+                             "Capabilities";
+    }
+
+    auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
+    auto table = NN_TRY(hal::utils::makeGeneralFailure(
+            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
+            nn::ErrorStatus::GENERAL_FAILURE));
+
+    return Capabilities{
+            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
+                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
+            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
+                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+            .operandPerformance = std::move(table),
+            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
+            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+    };
+}
+
+GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
+        const aidl_hal::OperandPerformance& operandPerformance) {
+    return Capabilities::OperandPerformance{
+            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
+            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+    };
+}
+
+GeneralResult<Capabilities::PerformanceInfo> unvalidatedConvert(
+        const aidl_hal::PerformanceInfo& performanceInfo) {
+    return Capabilities::PerformanceInfo{
+            .execTime = performanceInfo.execTime,
+            .powerUsage = performanceInfo.powerUsage,
+    };
+}
+
+GeneralResult<DataLocation> unvalidatedConvert(const aidl_hal::DataLocation& location) {
+    VERIFY_NON_NEGATIVE(location.poolIndex) << "DataLocation: pool index must not be negative";
+    VERIFY_NON_NEGATIVE(location.offset) << "DataLocation: offset must not be negative";
+    VERIFY_NON_NEGATIVE(location.length) << "DataLocation: length must not be negative";
+    if (location.offset > std::numeric_limits<uint32_t>::max()) {
+        return NN_ERROR() << "DataLocation: offset must be <= std::numeric_limits<uint32_t>::max()";
+    }
+    if (location.length > std::numeric_limits<uint32_t>::max()) {
+        return NN_ERROR() << "DataLocation: length must be <= std::numeric_limits<uint32_t>::max()";
+    }
+    return DataLocation{
+            .poolIndex = static_cast<uint32_t>(location.poolIndex),
+            .offset = static_cast<uint32_t>(location.offset),
+            .length = static_cast<uint32_t>(location.length),
+    };
+}
+
+GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation) {
+    return Operation{
+            .type = NN_TRY(unvalidatedConvert(operation.type)),
+            .inputs = NN_TRY(toUnsigned(operation.inputs)),
+            .outputs = NN_TRY(toUnsigned(operation.outputs)),
+    };
+}
+
+GeneralResult<Operand::LifeTime> unvalidatedConvert(
+        const aidl_hal::OperandLifeTime& operandLifeTime) {
+    return static_cast<Operand::LifeTime>(operandLifeTime);
+}
+
+GeneralResult<Operand> unvalidatedConvert(const aidl_hal::Operand& operand) {
+    return Operand{
+            .type = NN_TRY(unvalidatedConvert(operand.type)),
+            .dimensions = NN_TRY(toUnsigned(operand.dimensions)),
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
+            .location = NN_TRY(unvalidatedConvert(operand.location)),
+            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+    };
+}
+
+GeneralResult<Operand::ExtraParams> unvalidatedConvert(
+        const std::optional<aidl_hal::OperandExtraParams>& optionalExtraParams) {
+    if (!optionalExtraParams.has_value()) {
+        return Operand::NoParams{};
+    }
+    const auto& extraParams = optionalExtraParams.value();
+    using Tag = aidl_hal::OperandExtraParams::Tag;
+    switch (extraParams.getTag()) {
+        case Tag::channelQuant:
+            return unvalidatedConvert(extraParams.get<Tag::channelQuant>());
+        case Tag::extension:
+            return extraParams.get<Tag::extension>();
+    }
+    return NN_ERROR() << "Unrecognized Operand::ExtraParams tag: "
+                      << underlyingType(extraParams.getTag());
+}
+
+GeneralResult<Operand::SymmPerChannelQuantParams> unvalidatedConvert(
+        const aidl_hal::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
+    VERIFY_NON_NEGATIVE(symmPerChannelQuantParams.channelDim)
+            << "Per-channel quantization channel dimension must not be negative.";
+    return Operand::SymmPerChannelQuantParams{
+            .scales = symmPerChannelQuantParams.scales,
+            .channelDim = static_cast<uint32_t>(symmPerChannelQuantParams.channelDim),
+    };
+}
+
+GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model) {
+    return Model{
+            .main = NN_TRY(unvalidatedConvert(model.main)),
+            .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
+            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
+            .pools = NN_TRY(unvalidatedConvert(model.pools)),
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+    };
+}
+
+GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph) {
+    return Model::Subgraph{
+            .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
+            .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
+            .inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes)),
+            .outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes)),
+    };
+}
+
+GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+        const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+    return Model::ExtensionNameAndPrefix{
+            .name = extensionNameAndPrefix.name,
+            .prefix = extensionNameAndPrefix.prefix,
+    };
+}
+
+GeneralResult<Extension> unvalidatedConvert(const aidl_hal::Extension& extension) {
+    return Extension{
+            .name = extension.name,
+            .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
+    };
+}
+
+GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
+        const aidl_hal::ExtensionOperandTypeInformation& operandTypeInformation) {
+    VERIFY_NON_NEGATIVE(operandTypeInformation.byteSize)
+            << "Extension operand type byte size must not be negative";
negative"; + return Extension::OperandTypeInformation{ + .type = operandTypeInformation.type, + .isTensor = operandTypeInformation.isTensor, + .byteSize = static_cast(operandTypeInformation.byteSize), + }; +} + +GeneralResult unvalidatedConvert(const aidl_hal::OutputShape& outputShape) { + return OutputShape{ + .dimensions = NN_TRY(toUnsigned(outputShape.dimensions)), + .isSufficient = outputShape.isSufficient, + }; +} + +GeneralResult unvalidatedConvert(bool measureTiming) { + return measureTiming ? MeasureTiming::YES : MeasureTiming::NO; +} + +GeneralResult unvalidatedConvert(const aidl_hal::Memory& memory) { + VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative"; + return Memory{ + .handle = NN_TRY(unvalidatedConvert(memory.handle)), + .size = static_cast(memory.size), + .name = memory.name, + }; +} + +GeneralResult unvalidatedConvert(const std::vector& operandValues) { + return Model::OperandValues(operandValues.data(), operandValues.size()); +} + +GeneralResult unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc) { + return BufferDesc{.dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions))}; +} + +GeneralResult unvalidatedConvert(const aidl_hal::BufferRole& bufferRole) { + VERIFY_NON_NEGATIVE(bufferRole.modelIndex) << "BufferRole: modelIndex must not be negative"; + VERIFY_NON_NEGATIVE(bufferRole.ioIndex) << "BufferRole: ioIndex must not be negative"; + return BufferRole{ + .modelIndex = static_cast(bufferRole.modelIndex), + .ioIndex = static_cast(bufferRole.ioIndex), + .frequency = bufferRole.frequency, + }; +} + +GeneralResult unvalidatedConvert(const aidl_hal::Request& request) { + return Request{ + .inputs = NN_TRY(unvalidatedConvert(request.inputs)), + .outputs = NN_TRY(unvalidatedConvert(request.outputs)), + .pools = NN_TRY(unvalidatedConvert(request.pools)), + }; +} + +GeneralResult unvalidatedConvert(const aidl_hal::RequestArgument& argument) { + const auto lifetime = argument.hasNoValue ? 
+    return Request::Argument{
+            .lifetime = lifetime,
+            .location = NN_TRY(unvalidatedConvert(argument.location)),
+            .dimensions = NN_TRY(toUnsigned(argument.dimensions)),
+    };
+}
+
+GeneralResult<Request::MemoryPool> unvalidatedConvert(
+        const aidl_hal::RequestMemoryPool& memoryPool) {
+    using Tag = aidl_hal::RequestMemoryPool::Tag;
+    switch (memoryPool.getTag()) {
+        case Tag::pool:
+            return unvalidatedConvert(memoryPool.get<Tag::pool>());
+        case Tag::token: {
+            const auto token = memoryPool.get<Tag::token>();
+            VERIFY_NON_NEGATIVE(token) << "Memory pool token must not be negative";
+            return static_cast<Request::MemoryDomainToken>(token);
+        }
+    }
+    return NN_ERROR() << "Invalid Request::MemoryPool tag " << underlyingType(memoryPool.getTag());
+}
+
+GeneralResult<ErrorStatus> unvalidatedConvert(const aidl_hal::ErrorStatus& status) {
+    switch (status) {
+        case aidl_hal::ErrorStatus::NONE:
+        case aidl_hal::ErrorStatus::DEVICE_UNAVAILABLE:
+        case aidl_hal::ErrorStatus::GENERAL_FAILURE:
+        case aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+        case aidl_hal::ErrorStatus::INVALID_ARGUMENT:
+        case aidl_hal::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+        case aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+        case aidl_hal::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+        case aidl_hal::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return static_cast<ErrorStatus>(status);
+    }
+    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
+}
+
+GeneralResult<ExecutionPreference> unvalidatedConvert(
+        const aidl_hal::ExecutionPreference& executionPreference) {
+    return static_cast<ExecutionPreference>(executionPreference);
+}
+
+GeneralResult<SharedHandle> unvalidatedConvert(
+        const ::aidl::android::hardware::common::NativeHandle& aidlNativeHandle) {
+    std::vector<base::unique_fd> fds;
+    fds.reserve(aidlNativeHandle.fds.size());
+    for (const auto& fd : aidlNativeHandle.fds) {
+        int dupFd = dup(fd.get());
+        if (dupFd == -1) {
+            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
+            // here?
+ return NN_ERROR() << "Failed to dup the fd"; + } + fds.emplace_back(dupFd); + } + + return std::make_shared(Handle{ + .fds = std::move(fds), + .ints = aidlNativeHandle.ints, + }); +} + +GeneralResult convert( + const aidl_hal::ExecutionPreference& executionPreference) { + return validatedConvert(executionPreference); +} + +GeneralResult convert(const aidl_hal::Memory& operand) { + return validatedConvert(operand); +} + +GeneralResult convert(const aidl_hal::Model& model) { + return validatedConvert(model); +} + +GeneralResult convert(const aidl_hal::Operand& operand) { + return unvalidatedConvert(operand); +} + +GeneralResult convert(const aidl_hal::OperandType& operandType) { + return unvalidatedConvert(operandType); +} + +GeneralResult convert(const aidl_hal::Priority& priority) { + return validatedConvert(priority); +} + +GeneralResult convert(const aidl_hal::RequestMemoryPool& memoryPool) { + return unvalidatedConvert(memoryPool); +} + +GeneralResult convert(const aidl_hal::Request& request) { + return validatedConvert(request); +} + +GeneralResult> convert(const std::vector& operations) { + return unvalidatedConvert(operations); +} + +GeneralResult> convert(const std::vector& memories) { + return validatedConvert(memories); +} + +GeneralResult> toUnsigned(const std::vector& vec) { + if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) { + return NN_ERROR() << "Negative value passed to conversion from signed to unsigned"; + } + return std::vector(vec.begin(), vec.end()); +} + +} // namespace android::nn + +namespace aidl::android::hardware::neuralnetworks::utils { +namespace { + +template +using UnvalidatedConvertOutput = + std::decay_t()).value())>; + +template +nn::GeneralResult>> unvalidatedConvertVec( + const std::vector& arguments) { + std::vector> halObject(arguments.size()); + for (size_t i = 0; i < arguments.size(); ++i) { + halObject[i] = NN_TRY(unvalidatedConvert(arguments[i])); + } + return halObject; +} + +template +nn::GeneralResult> validatedConvert(const Type& canonical) { + const auto maybeVersion = nn::validate(canonical); + if (!maybeVersion.has_value()) { + return nn::error() << maybeVersion.error(); + } + const auto version = maybeVersion.value(); + if (version > kVersion) { + return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion; + } + return utils::unvalidatedConvert(canonical); +} + +template +nn::GeneralResult>> validatedConvert( + const std::vector& arguments) { + std::vector> halObject(arguments.size()); + for (size_t i = 0; i < arguments.size(); ++i) { + halObject[i] = NN_TRY(validatedConvert(arguments[i])); + } + return halObject; +} + +} // namespace + +nn::GeneralResult unvalidatedConvert(const nn::SharedHandle& sharedHandle) { + common::NativeHandle aidlNativeHandle; + aidlNativeHandle.fds.reserve(sharedHandle->fds.size()); + for (const auto& fd : sharedHandle->fds) { + int dupFd = dup(fd.get()); + if (dupFd == -1) { + // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return + // here? 
+ return NN_ERROR() << "Failed to dup the fd"; + } + aidlNativeHandle.fds.emplace_back(dupFd); + } + aidlNativeHandle.ints = sharedHandle->ints; + return aidlNativeHandle; +} + +nn::GeneralResult unvalidatedConvert(const nn::Memory& memory) { + if (memory.size > std::numeric_limits::max()) { + return NN_ERROR() << "Memory size doesn't fit into int64_t."; + } + return Memory{ + .handle = NN_TRY(unvalidatedConvert(memory.handle)), + .size = static_cast(memory.size), + .name = memory.name, + }; +} + +nn::GeneralResult unvalidatedConvert(const nn::ErrorStatus& errorStatus) { + switch (errorStatus) { + case nn::ErrorStatus::NONE: + case nn::ErrorStatus::DEVICE_UNAVAILABLE: + case nn::ErrorStatus::GENERAL_FAILURE: + case nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: + case nn::ErrorStatus::INVALID_ARGUMENT: + case nn::ErrorStatus::MISSED_DEADLINE_TRANSIENT: + case nn::ErrorStatus::MISSED_DEADLINE_PERSISTENT: + case nn::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: + case nn::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: + return static_cast(errorStatus); + default: + return ErrorStatus::GENERAL_FAILURE; + } +} + +nn::GeneralResult unvalidatedConvert(const nn::OutputShape& outputShape) { + return OutputShape{.dimensions = NN_TRY(toSigned(outputShape.dimensions)), + .isSufficient = outputShape.isSufficient}; +} + +nn::GeneralResult convert(const nn::Memory& memory) { + return validatedConvert(memory); +} + +nn::GeneralResult convert(const nn::ErrorStatus& errorStatus) { + return validatedConvert(errorStatus); +} + +nn::GeneralResult> convert( + const std::vector& outputShapes) { + return validatedConvert(outputShapes); +} + +nn::GeneralResult> toSigned(const std::vector& vec) { + if (!std::all_of(vec.begin(), vec.end(), + [](uint32_t v) { return v <= std::numeric_limits::max(); })) { + return NN_ERROR() << "Vector contains a value that doesn't fit into int32_t."; + } + return std::vector(vec.begin(), vec.end()); +} + +} // namespace aidl::android::hardware::neuralnetworks::utils diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp new file mode 100644 index 0000000000..04aa0e9eba --- /dev/null +++ b/neuralnetworks/aidl/utils/src/Utils.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2021 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp
new file mode 100644
index 0000000000..04aa0e9eba
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Utils.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Utils.h"
+
+#include <unistd.h>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+using ::android::nn::GeneralResult;
+
+GeneralResult<Model> copyModel(const Model& model) {
+    Model newModel{
+            .main = model.main,
+            .referenced = model.referenced,
+            .operandValues = model.operandValues,
+            .pools = {},
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+            .extensionNameToPrefix = model.extensionNameToPrefix,
+    };
+    newModel.pools.reserve(model.pools.size());
+    for (const auto& pool : model.pools) {
+        common::NativeHandle nativeHandle;
+        nativeHandle.ints = pool.handle.ints;
+        nativeHandle.fds.reserve(pool.handle.fds.size());
+        for (const auto& fd : pool.handle.fds) {
+            const int newFd = dup(fd.get());
+            if (newFd == -1) {
+                return NN_ERROR() << "Couldn't dup a file descriptor.";
+            }
+            nativeHandle.fds.emplace_back(newFd);
+        }
+        Memory memory = {
+                .handle = std::move(nativeHandle),
+                .size = pool.size,
+                .name = pool.name,
+        };
+        newModel.pools.push_back(std::move(memory));
+    }
+    return newModel;
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index b3989e5878..fef9d9cfb5 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -24,15 +24,21 @@
 #include <functional>
 #include <vector>
 
-// Shorthand
+// Shorthands
 namespace android::hardware::neuralnetworks {
 namespace hal = ::android::hardware::neuralnetworks;
 }  // namespace android::hardware::neuralnetworks
 
-// Shorthand
+// Shorthands
+namespace aidl::android::hardware::neuralnetworks {
+namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;
+}  // namespace aidl::android::hardware::neuralnetworks
+
+// Shorthands
 namespace android::nn {
 namespace hal = ::android::hardware::neuralnetworks;
-}
+namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;
+}  // namespace android::nn
 
 namespace android::hardware::neuralnetworks::utils {
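Finally, a hypothetical illustration of the aidl_hal shorthand added to CommonUtils.h above; the exampleStatusConversion() function is not part of the patch and exists only to show the alias in use:

    namespace android::nn {

    // Inside ::android::nn the new alias lets AIDL HAL types be named without the full namespace.
    GeneralResult<ErrorStatus> exampleStatusConversion(const aidl_hal::ErrorStatus& status) {
        return unvalidatedConvert(status);
    }

    }  // namespace android::nn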