Mirror of https://github.com/Evolution-X/hardware_interfaces, synced 2026-02-01 16:09:42 +00:00

Merge changes Ifeffea05,I966f65a1 am: 104192c28f am: ca40766dd6

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1944552
Change-Id: I6bc37dde0fa87402e944d4ec0c431c17c5dfa85b
Conversions.h
@@ -112,11 +112,15 @@ GeneralResult<Priority> convert(const aidl_hal::Priority& priority);
GeneralResult<Request> convert(const aidl_hal::Request& request);
GeneralResult<Timing> convert(const aidl_hal::Timing& timing);
GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle);
GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc);

GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
GeneralResult<std::vector<OutputShape>> convert(
        const std::vector<aidl_hal::OutputShape>& outputShapes);
GeneralResult<std::vector<SharedHandle>> convert(
        const std::vector<ndk::ScopedFileDescriptor>& handles);
GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles);

GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec);

@@ -129,6 +133,7 @@ namespace nn = ::android::nn;
nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> unvalidatedConvert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRole);
nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType);
nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory);
nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
@@ -154,14 +159,16 @@ nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request);
nn::GeneralResult<RequestArgument> unvalidatedConvert(const nn::Request::Argument& requestArgument);
nn::GeneralResult<RequestMemoryPool> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool);
nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalTimePoint& optionalTimePoint);
nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SyncFence& syncFence);
nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SharedHandle& handle);
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities);
nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);

nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory);
nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
@@ -172,6 +179,8 @@ nn::GeneralResult<Request> convert(const nn::Request& request);
nn::GeneralResult<Timing> convert(const nn::Timing& timing);
nn::GeneralResult<int64_t> convert(const nn::OptionalDuration& optionalDuration);
nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& optionalTimePoint);
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
nn::GeneralResult<Extension> convert(const nn::Extension& extension);

nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
nn::GeneralResult<std::vector<OutputShape>> convert(
@@ -180,6 +189,7 @@ nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
        const std::vector<nn::SharedHandle>& handles);
nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
        const std::vector<nn::SyncFence>& syncFences);
nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions);

nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);

Conversions.cpp
@@ -551,6 +551,10 @@ GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle) {
    return validatedConvert(handle);
}

GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc) {
    return validatedConvert(bufferDesc);
}

GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension) {
    return validatedConvert(extension);
}
@@ -564,6 +568,15 @@ GeneralResult<std::vector<OutputShape>> convert(
    return validatedConvert(outputShapes);
}

GeneralResult<std::vector<SharedHandle>> convert(
        const std::vector<ndk::ScopedFileDescriptor>& handles) {
    return validatedConvert(handles);
}

GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles) {
    return validatedConvert(roles);
}

GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec) {
    if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) {
        return NN_ERROR() << "Negative value passed to conversion from signed to unsigned";
@@ -576,42 +589,7 @@ GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec)
namespace aidl::android::hardware::neuralnetworks::utils {
namespace {

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject;
    halObject.reserve(arguments.size());
    for (const auto& argument : arguments) {
        halObject.push_back(NN_TRY(unvalidatedConvert(argument)));
    }
    return halObject;
}

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    return unvalidatedConvertVec(arguments);
}

template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return utils::unvalidatedConvert(canonical);
}

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
    }
    return halObject;
}
using utils::unvalidatedConvert;

// Helper template for std::visit
template <class... Ts>
@@ -721,6 +699,74 @@ nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Unknown& /*memory
            operator nn::GeneralResult<Memory>();
}

nn::GeneralResult<PerformanceInfo> unvalidatedConvert(
        const nn::Capabilities::PerformanceInfo& info) {
    return PerformanceInfo{.execTime = info.execTime, .powerUsage = info.powerUsage};
}

nn::GeneralResult<OperandPerformance> unvalidatedConvert(
        const nn::Capabilities::OperandPerformance& operandPerformance) {
    return OperandPerformance{.type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
                              .info = NN_TRY(unvalidatedConvert(operandPerformance.info))};
}

nn::GeneralResult<std::vector<OperandPerformance>> unvalidatedConvert(
        const nn::Capabilities::OperandPerformanceTable& table) {
    std::vector<OperandPerformance> operandPerformances;
    operandPerformances.reserve(table.asVector().size());
    for (const auto& operandPerformance : table.asVector()) {
        operandPerformances.push_back(NN_TRY(unvalidatedConvert(operandPerformance)));
    }
    return operandPerformances;
}

nn::GeneralResult<ExtensionOperandTypeInformation> unvalidatedConvert(
        const nn::Extension::OperandTypeInformation& info) {
    return ExtensionOperandTypeInformation{.type = info.type,
                                           .isTensor = info.isTensor,
                                           .byteSize = static_cast<int32_t>(info.byteSize)};
}

nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
    if (duration < nn::Duration::zero()) {
        return NN_ERROR() << "Unable to convert invalid (negative) duration";
    }
    constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
    const auto count = duration.count();
    return static_cast<int64_t>(std::min(count, kIntMax));
}

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject;
    halObject.reserve(arguments.size());
    for (const auto& argument : arguments) {
        halObject.push_back(NN_TRY(unvalidatedConvert(argument)));
    }
    return halObject;
}

template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return utils::unvalidatedConvert(canonical);
}

template <typename Type>
nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
    }
    return halObject;
}

}  // namespace

nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken) {
@@ -743,6 +789,19 @@ nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRol
    };
}

nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType) {
    switch (deviceType) {
        case nn::DeviceType::UNKNOWN:
            break;
        case nn::DeviceType::OTHER:
        case nn::DeviceType::CPU:
        case nn::DeviceType::GPU:
        case nn::DeviceType::ACCELERATOR:
            return static_cast<DeviceType>(deviceType);
    }
    return NN_ERROR() << "Invalid DeviceType " << deviceType;
}

nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming) {
    return measureTiming == nn::MeasureTiming::YES;
}
@@ -956,15 +1015,6 @@ nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
    };
}

nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
    if (duration < nn::Duration::zero()) {
        return NN_ERROR() << "Unable to convert invalid (negative) duration";
    }
    constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
    const auto count = duration.count();
    return static_cast<int64_t>(std::min(count, kIntMax));
}

nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration) {
    if (!optionalDuration.has_value()) {
        return kNoTiming;
@@ -989,6 +1039,23 @@ nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::Shared
    return ndk::ScopedFileDescriptor(duplicatedFd.release());
}

nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
    return Capabilities{
            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
            .operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)),
            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
    };
}

nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
    return Extension{.name = extension.name,
                     .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))};
}

nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
    return validatedConvert(cacheToken);
}
@@ -997,6 +1064,10 @@ nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
    return validatedConvert(bufferDesc);
}

nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
    return validatedConvert(deviceType);
}

nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming) {
    return validatedConvert(measureTiming);
}
@@ -1037,6 +1108,14 @@ nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& outputShapes) {
    return validatedConvert(outputShapes);
}

nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

nn::GeneralResult<Extension> convert(const nn::Extension& extension) {
    return validatedConvert(extension);
}

nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
    return validatedConvert(bufferRoles);
}
@@ -1056,6 +1135,10 @@ nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
    return validatedConvert(syncFences);
}

nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions) {
    return validatedConvert(extensions);
}

nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
    if (!std::all_of(vec.begin(), vec.end(),
                     [](uint32_t v) { return v <= std::numeric_limits<int32_t>::max(); })) {

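toUnsigned (canonical namespace) and toSigned (utils namespace, above) are deliberately checked conversions: each fails instead of silently wrapping values that do not survive the int32/uint32 crossing. A minimal sketch, not part of the change, showing how they compose with NN_TRY (assumes an AOSP build linking the AIDL conversion utilities; roundTripDims is a hypothetical name):

#include <nnapi/Result.h>
#include <nnapi/hal/aidl/Conversions.h>

#include <cstdint>
#include <vector>

namespace nn = ::android::nn;
namespace aidl_nn_utils = ::aidl::android::hardware::neuralnetworks::utils;

// Round-trips dimensions through both checked helpers. toUnsigned rejects
// negative values; toSigned rejects values above INT32_MAX. NN_TRY propagates
// either failure as this function's own GeneralResult error.
nn::GeneralResult<std::vector<int32_t>> roundTripDims(const std::vector<int32_t>& dims) {
    const std::vector<uint32_t> unsignedDims = NN_TRY(nn::toUnsigned(dims));
    return aidl_nn_utils::toSigned(unsignedDims);
}
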
neuralnetworks/utils/adapter/aidl/Android.bp (new file, 42 lines)
@@ -0,0 +1,42 @@
//
// Copyright (C) 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package {
    // See: http://go/android-license-faq
    // A large-scale-change added 'default_applicable_licenses' to import
    // all of the 'license_kinds' from "hardware_interfaces_license"
    // to get the below license kinds:
    //   SPDX-license-identifier-Apache-2.0
    default_applicable_licenses: ["hardware_interfaces_license"],
}

cc_library_static {
    name: "neuralnetworks_utils_hal_adapter_aidl",
    defaults: [
        "neuralnetworks_use_latest_utils_hal_aidl",
        "neuralnetworks_utils_defaults",
    ],
    srcs: ["src/*"],
    local_include_dirs: ["include/nnapi/hal/aidl/"],
    export_include_dirs: ["include"],
    static_libs: [
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
    ],
    shared_libs: [
        "libbinder_ndk",
    ],
}
Adapter.h (new file, 73 lines)
@@ -0,0 +1,73 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H

#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

/**
 * A self-contained unit of work to be executed.
 */
using Task = std::function<void()>;

/**
 * A type-erased executor which executes a task asynchronously.
 *
 * This executor is also provided an optional deadline, which the caller expects to be an upper
 * bound on the amount of time needed to complete the task. If needed, the Executor can retrieve
 * the Application ID (Android User ID) by calling AIBinder_getCallingUid in
 * android/binder_ibinder.h.
 */
using Executor = std::function<void(Task, ::android::nn::OptionalTimePoint)>;

/**
 * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
 *
 * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
 * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
 *
 * @param device NNAPI canonical IDevice interface object to be adapted.
 * @param executor Type-erased executor to handle executing tasks asynchronously.
 * @return AIDL NN HAL IDevice interface object.
 */
std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor);

/**
 * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
 *
 * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
 * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
 *
 * This function uses a default executor, which will execute tasks from a detached thread.
 *
 * @param device NNAPI canonical IDevice interface object to be adapted.
 * @return AIDL NN HAL IDevice interface object.
 */
std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device);

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
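As a usage sketch (not part of this change): a vendor service binary could wrap its canonical device with adapt() and register the result with the service manager. getVendorDevice and the instance suffix "example" are illustrative assumptions, not names from this commit:

#include <android/binder_manager.h>
#include <android/binder_process.h>
#include <android/binder_status.h>

#include <cstdlib>
#include <string>

#include "nnapi/hal/aidl/Adapter.h"

// Assumption: the vendor supplies a factory for its canonical device.
::android::nn::SharedDevice getVendorDevice();

int main() {
    // The one-argument adapt() uses the default detached-thread executor.
    const auto aidlDevice =
            aidl::android::hardware::neuralnetworks::adapter::adapt(getVendorDevice());

    // Register under <descriptor>/<instance>; "example" is a placeholder name.
    const std::string instance =
            std::string(aidl::android::hardware::neuralnetworks::IDevice::descriptor) +
            "/example";
    if (AServiceManager_addService(aidlDevice->asBinder().get(), instance.c_str()) != STATUS_OK) {
        return EXIT_FAILURE;
    }
    ABinderProcess_joinThreadPool();
    return EXIT_FAILURE;  // joinThreadPool is not expected to return
}
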
Buffer.h (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/Memory.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBuffer.h>

#include <memory>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

// Class that adapts nn::IBuffer to BnBuffer.
class Buffer : public BnBuffer {
  public:
    explicit Buffer(::android::nn::SharedBuffer buffer);

    ndk::ScopedAStatus copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) override;
    ndk::ScopedAStatus copyTo(const Memory& dst) override;

  private:
    const ::android::nn::SharedBuffer kBuffer;
};

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
Burst.h (new file, 72 lines)
@@ -0,0 +1,72 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H

#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android-base/thread_annotations.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBurst.h>
#include <nnapi/Types.h>

#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

// Class that adapts nn::IBurst to BnBurst.
class Burst : public BnBurst {
  public:
    // Precondition: burst != nullptr
    explicit Burst(::android::nn::SharedBurst burst);

    ndk::ScopedAStatus executeSynchronously(const Request& request,
                                            const std::vector<int64_t>& memoryIdentifierTokens,
                                            bool measureTiming, int64_t deadlineNs,
                                            int64_t loopTimeoutDurationNs,
                                            ExecutionResult* executionResult) override;
    ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override;

    class ThreadSafeMemoryCache {
      public:
        using Value =
                std::pair<::android::nn::SharedMemory, ::android::nn::IBurst::OptionalCacheHold>;

        Value add(int64_t token, const ::android::nn::SharedMemory& memory,
                  const ::android::nn::IBurst& burst) const;
        void remove(int64_t token) const;

      private:
        mutable std::mutex mMutex;
        mutable std::unordered_map<int64_t, Value> mCache GUARDED_BY(mMutex);
    };

  private:
    const ::android::nn::SharedBurst kBurst;
    const ThreadSafeMemoryCache kMemoryCache;
};

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
Device.h (new file, 83 lines)
@@ -0,0 +1,83 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H

#include "nnapi/hal/aidl/Adapter.h"

#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
#include <aidl/android/hardware/neuralnetworks/Extension.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
#include <aidl/android/hardware/neuralnetworks/Model.h>
#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IDevice.h>

#include <memory>
#include <string>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

// Class that adapts nn::IDevice to BnDevice.
class Device : public BnDevice {
  public:
    Device(::android::nn::SharedDevice device, Executor executor);

    ndk::ScopedAStatus allocate(const BufferDesc& desc,
                                const std::vector<IPreparedModelParcel>& preparedModels,
                                const std::vector<BufferRole>& inputRoles,
                                const std::vector<BufferRole>& outputRoles,
                                DeviceBuffer* buffer) override;
    ndk::ScopedAStatus getCapabilities(Capabilities* capabilities) override;
    ndk::ScopedAStatus getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) override;
    ndk::ScopedAStatus getSupportedExtensions(std::vector<Extension>* extensions) override;
    ndk::ScopedAStatus getSupportedOperations(const Model& model,
                                              std::vector<bool>* supported) override;
    ndk::ScopedAStatus getType(DeviceType* deviceType) override;
    ndk::ScopedAStatus getVersionString(std::string* version) override;
    ndk::ScopedAStatus prepareModel(
            const Model& model, ExecutionPreference preference, Priority priority,
            int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
            const std::vector<uint8_t>& token,
            const std::shared_ptr<IPreparedModelCallback>& callback) override;
    ndk::ScopedAStatus prepareModelFromCache(
            int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
            const std::vector<uint8_t>& token,
            const std::shared_ptr<IPreparedModelCallback>& callback) override;

  protected:
    const ::android::nn::SharedDevice kDevice;
    const Executor kExecutor;
};

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
PreparedModel.h (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H

#include "nnapi/hal/aidl/Adapter.h"

#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/IBurst.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Types.h>

#include <memory>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

// Class that adapts nn::IPreparedModel to BnPreparedModel.
class PreparedModel : public BnPreparedModel {
  public:
    explicit PreparedModel(::android::nn::SharedPreparedModel preparedModel);

    ndk::ScopedAStatus executeSynchronously(const Request& request, bool measureTiming,
                                            int64_t deadlineNs, int64_t loopTimeoutDurationNs,
                                            ExecutionResult* executionResult) override;
    ndk::ScopedAStatus executeFenced(const Request& request,
                                     const std::vector<ndk::ScopedFileDescriptor>& waitFor,
                                     bool measureTiming, int64_t deadlineNs,
                                     int64_t loopTimeoutDurationNs, int64_t durationNs,
                                     FencedExecutionResult* executionResult) override;
    ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override;

    ::android::nn::SharedPreparedModel getUnderlyingPreparedModel() const;

  protected:
    const ::android::nn::SharedPreparedModel kPreparedModel;
};

}  // namespace aidl::android::hardware::neuralnetworks::adapter

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
neuralnetworks/utils/adapter/aidl/src/Adapter.cpp (new file, 46 lines)
@@ -0,0 +1,46 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Adapter.h"

#include "Device.h"

#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <android/binder_interface_utils.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>
#include <thread>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::adapter {

std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor) {
    return ndk::SharedRefBase::make<Device>(std::move(device), std::move(executor));
}

std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device) {
    Executor defaultExecutor = [](Task task, ::android::nn::OptionalTimePoint /*deadline*/) {
        std::thread(std::move(task)).detach();
    };
    return adapt(std::move(device), std::move(defaultExecutor));
}

}  // namespace aidl::android::hardware::neuralnetworks::adapter
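The default executor above runs each task on a detached thread and ignores the deadline. Callers that need deterministic scheduling (for example, in tests) can pass their own Executor to the two-argument adapt(); a sketch, assuming inline execution on the calling binder thread is acceptable (adaptInline is a hypothetical helper, not part of this commit):

#include <nnapi/Types.h>
#include <nnapi/hal/aidl/Adapter.h>

#include <memory>
#include <utility>

namespace adapter = ::aidl::android::hardware::neuralnetworks::adapter;

// Wraps a canonical device with an executor that runs tasks synchronously.
// Like the default executor, it accepts the optional deadline but does not
// enforce it.
auto adaptInline(::android::nn::SharedDevice device) {
    adapter::Executor inlineExecutor = [](adapter::Task task,
                                          ::android::nn::OptionalTimePoint /*deadline*/) {
        task();  // run on the calling thread instead of detaching a new one
    };
    return adapter::adapt(std::move(device), std::move(inlineExecutor));
}
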
neuralnetworks/utils/adapter/aidl/src/Buffer.cpp (new file, 88 lines)
@@ -0,0 +1,88 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Buffer.h"

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/Memory.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/aidl/Conversions.h>

namespace aidl::android::hardware::neuralnetworks::adapter {
namespace {

template <typename Type>
auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
    auto result = nn::convert(object);
    if (!result.has_value()) {
        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}

nn::GeneralResult<std::vector<uint32_t>> inputToUnsigned(const std::vector<int32_t>& dims) {
    auto result = nn::toUnsigned(dims);
    if (!result.has_value()) {
        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}

nn::GeneralResult<void> copyTo(const nn::IBuffer& buffer, const Memory& dst) {
    const auto nnDst = NN_TRY(convertInput(dst));
    return buffer.copyTo(nnDst);
}

nn::GeneralResult<void> copyFrom(const nn::IBuffer& buffer, const Memory& src,
                                 const std::vector<int32_t>& dimensions) {
    const auto nnSrc = NN_TRY(convertInput(src));
    const auto nnDims = NN_TRY(inputToUnsigned(dimensions));
    return buffer.copyFrom(nnSrc, nnDims);
}

}  // namespace

Buffer::Buffer(nn::SharedBuffer buffer) : kBuffer(std::move(buffer)) {
    CHECK(kBuffer != nullptr);
}

ndk::ScopedAStatus Buffer::copyTo(const Memory& dst) {
    const auto result = adapter::copyTo(*kBuffer, dst);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Buffer::copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) {
    const auto result = adapter::copyFrom(*kBuffer, src, dimensions);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

}  // namespace aidl::android::hardware::neuralnetworks::adapter
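Not part of the change: the error-to-binder-status mapping in copyTo/copyFrom above is repeated almost verbatim in Burst.cpp and Device.cpp below; a shared helper along these lines could factor it out. GeneralError's fields (message, code) come from nnapi/Types.h; toScopedAStatus is a hypothetical name:

#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <android/binder_auto_utils.h>
#include <nnapi/Types.h>
#include <nnapi/hal/aidl/Conversions.h>

namespace aidl::android::hardware::neuralnetworks::adapter {

// Converts a failed nn::GeneralResult's error into a service-specific status,
// falling back to GENERAL_FAILURE when the error code itself does not convert.
inline ndk::ScopedAStatus toScopedAStatus(const ::android::nn::GeneralError& error) {
    const auto aidlCode = utils::convert(error.code).value_or(ErrorStatus::GENERAL_FAILURE);
    return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
            static_cast<int32_t>(aidlCode), error.message.c_str());
}

}  // namespace aidl::android::hardware::neuralnetworks::adapter
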
neuralnetworks/utils/adapter/aidl/src/Burst.cpp (new file, 179 lines)
@@ -0,0 +1,179 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Burst.h"

#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBurst.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/aidl/Conversions.h>
#include <nnapi/hal/aidl/Utils.h>

#include <algorithm>
#include <chrono>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <variant>

namespace aidl::android::hardware::neuralnetworks::adapter {
namespace {

using Value = Burst::ThreadSafeMemoryCache::Value;

template <typename Type>
auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
    auto result = nn::convert(object);
    if (!result.has_value()) {
        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}

nn::Duration makeDuration(int64_t durationNs) {
    return nn::Duration(std::chrono::nanoseconds(durationNs));
}

nn::GeneralResult<nn::OptionalDuration> makeOptionalDuration(int64_t durationNs) {
    if (durationNs < -1) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid duration " << durationNs;
    }
    return durationNs < 0 ? nn::OptionalDuration{} : makeDuration(durationNs);
}

nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
    if (durationNs < -1) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
    }
    return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
}

std::vector<nn::IBurst::OptionalCacheHold> ensureAllMemoriesAreCached(
        nn::Request* request, const std::vector<int64_t>& memoryIdentifierTokens,
        const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache) {
    std::vector<nn::IBurst::OptionalCacheHold> holds;
    holds.reserve(memoryIdentifierTokens.size());

    for (size_t i = 0; i < memoryIdentifierTokens.size(); ++i) {
        const auto& pool = request->pools[i];
        const auto token = memoryIdentifierTokens[i];
        constexpr int64_t kNoToken = -1;
        if (token == kNoToken || !std::holds_alternative<nn::SharedMemory>(pool)) {
            continue;
        }

        const auto& memory = std::get<nn::SharedMemory>(pool);
        auto [storedMemory, hold] = cache.add(token, memory, burst);

        request->pools[i] = std::move(storedMemory);
        holds.push_back(std::move(hold));
    }

    return holds;
}

nn::ExecutionResult<ExecutionResult> executeSynchronously(
        const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache, const Request& request,
        const std::vector<int64_t>& memoryIdentifierTokens, bool measureTiming, int64_t deadlineNs,
        int64_t loopTimeoutDurationNs) {
    if (request.pools.size() != memoryIdentifierTokens.size()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "request.pools.size() != memoryIdentifierTokens.size()";
    }
    if (!std::all_of(memoryIdentifierTokens.begin(), memoryIdentifierTokens.end(),
                     [](int64_t token) { return token >= -1; })) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid memoryIdentifierTokens";
    }

    auto nnRequest = NN_TRY(convertInput(request));
    const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
    const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));

    const auto hold = ensureAllMemoriesAreCached(&nnRequest, memoryIdentifierTokens, burst, cache);

    const auto result =
            burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);

    if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
        const auto& [message, code, outputShapes] = result.error();
        return ExecutionResult{.outputSufficientSize = false,
                               .outputShapes = utils::convert(outputShapes).value(),
                               .timing = {.timeInDriverNs = -1, .timeOnDeviceNs = -1}};
    }

    const auto& [outputShapes, timing] = NN_TRY(result);
    return ExecutionResult{.outputSufficientSize = true,
                           .outputShapes = utils::convert(outputShapes).value(),
                           .timing = utils::convert(timing).value()};
}

}  // namespace

Value Burst::ThreadSafeMemoryCache::add(int64_t token, const nn::SharedMemory& memory,
                                        const nn::IBurst& burst) const {
    std::lock_guard guard(mMutex);
    if (const auto it = mCache.find(token); it != mCache.end()) {
        return it->second;
    }
    auto hold = burst.cacheMemory(memory);
    auto [it, _] = mCache.emplace(token, std::make_pair(memory, std::move(hold)));
    return it->second;
}

void Burst::ThreadSafeMemoryCache::remove(int64_t token) const {
    std::lock_guard guard(mMutex);
    mCache.erase(token);
}

Burst::Burst(nn::SharedBurst burst) : kBurst(std::move(burst)) {
    CHECK(kBurst != nullptr);
}

ndk::ScopedAStatus Burst::executeSynchronously(const Request& request,
                                               const std::vector<int64_t>& memoryIdentifierTokens,
                                               bool measureTiming, int64_t deadlineNs,
                                               int64_t loopTimeoutDurationNs,
                                               ExecutionResult* executionResult) {
    auto result =
            adapter::executeSynchronously(*kBurst, kMemoryCache, request, memoryIdentifierTokens,
                                          measureTiming, deadlineNs, loopTimeoutDurationNs);
    if (!result.has_value()) {
        auto [message, code, _] = std::move(result).error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *executionResult = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Burst::releaseMemoryResource(int64_t memoryIdentifierToken) {
    if (memoryIdentifierToken < -1) {
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(ErrorStatus::INVALID_ARGUMENT),
                "Invalid memoryIdentifierToken");
    }
    kMemoryCache.remove(memoryIdentifierToken);
    return ndk::ScopedAStatus::ok();
}

}  // namespace aidl::android::hardware::neuralnetworks::adapter
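Illustrative only (not in this change): how a client is expected to satisfy the token contract checked above — exactly one token per request pool, with -1 (kNoToken) opting a pool out of caching, and -1 for the deadline and loop timeout meaning "none". The token value 7 and runOnce are arbitrary examples:

#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/IBurst.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android/binder_auto_utils.h>

#include <cstdint>
#include <vector>

namespace aidl_nn = ::aidl::android::hardware::neuralnetworks;

ndk::ScopedAStatus runOnce(aidl_nn::IBurst& burst, const aidl_nn::Request& request) {
    // One token per pool; -1 means "do not cache". Cache only the first pool,
    // assuming it is the one reused across executions.
    std::vector<int64_t> tokens(request.pools.size(), -1);
    if (!tokens.empty()) tokens[0] = 7;

    aidl_nn::ExecutionResult result;
    return burst.executeSynchronously(request, tokens, /*measureTiming=*/false,
                                      /*deadlineNs=*/-1, /*loopTimeoutDurationNs=*/-1, &result);
}
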
304
neuralnetworks/utils/adapter/aidl/src/Device.cpp
Normal file
304
neuralnetworks/utils/adapter/aidl/src/Device.cpp
Normal file
@@ -0,0 +1,304 @@
|
||||
/*
|
||||
* Copyright (C) 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "Device.h"
|
||||
|
||||
#include "Adapter.h"
|
||||
#include "Buffer.h"
|
||||
#include "PreparedModel.h"
|
||||
|
||||
#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Extension.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Model.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
|
||||
#include <aidl/android/hardware/neuralnetworks/Priority.h>
|
||||
#include <android-base/logging.h>
|
||||
#include <android/binder_auto_utils.h>
|
||||
#include <android/binder_interface_utils.h>
|
||||
#include <nnapi/IDevice.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/TypeUtils.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/aidl/Conversions.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace aidl::android::hardware::neuralnetworks::adapter {
|
||||
namespace {
|
||||
|
||||
template <typename Type>
|
||||
auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
|
||||
auto result = nn::convert(object);
|
||||
if (!result.has_value()) {
|
||||
result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
nn::Duration makeDuration(int64_t durationNs) {
|
||||
return nn::Duration(std::chrono::nanoseconds(durationNs));
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
|
||||
if (durationNs < -1) {
|
||||
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
|
||||
}
|
||||
return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::CacheToken> convertCacheToken(const std::vector<uint8_t>& token) {
|
||||
nn::CacheToken nnToken;
|
||||
if (token.size() != nnToken.size()) {
|
||||
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid token";
|
||||
}
|
||||
std::copy(token.begin(), token.end(), nnToken.begin());
|
||||
return nnToken;
|
||||
}
|
||||
|
||||
nn::GeneralResult<nn::SharedPreparedModel> downcast(const IPreparedModelParcel& preparedModel) {
|
||||
if (preparedModel.preparedModel == nullptr) {
|
||||
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
|
||||
}
|
||||
if (preparedModel.preparedModel->isRemote()) {
|
||||
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
|
||||
}
|
||||
|
||||
// This static_cast is safe because adapter::PreparedModel is the only class that implements
|
||||
// the IPreparedModel interface in the adapter service code.
|
||||
const auto* casted = static_cast<const PreparedModel*>(preparedModel.preparedModel.get());
|
||||
return casted->getUnderlyingPreparedModel();
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
|
||||
const std::vector<IPreparedModelParcel>& preparedModels) {
|
||||
std::vector<nn::SharedPreparedModel> canonical;
|
||||
canonical.reserve(preparedModels.size());
|
||||
for (const auto& preparedModel : preparedModels) {
|
||||
canonical.push_back(NN_TRY(downcast(preparedModel)));
|
||||
}
|
||||
return canonical;
|
||||
}
|
||||
|
||||
nn::GeneralResult<DeviceBuffer> allocate(const nn::IDevice& device, const BufferDesc& desc,
|
||||
const std::vector<IPreparedModelParcel>& preparedModels,
|
||||
const std::vector<BufferRole>& inputRoles,
|
||||
const std::vector<BufferRole>& outputRoles) {
|
||||
auto nnDesc = NN_TRY(convertInput(desc));
|
||||
auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
|
||||
auto nnInputRoles = NN_TRY(convertInput(inputRoles));
|
||||
auto nnOutputRoles = NN_TRY(convertInput(outputRoles));
|
||||
|
||||
auto buffer = NN_TRY(device.allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));
|
||||
CHECK(buffer != nullptr);
|
||||
|
||||
const nn::Request::MemoryDomainToken token = buffer->getToken();
|
||||
auto aidlBuffer = ndk::SharedRefBase::make<Buffer>(std::move(buffer));
|
||||
return DeviceBuffer{.buffer = std::move(aidlBuffer), .token = static_cast<int32_t>(token)};
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::vector<bool>> getSupportedOperations(const nn::IDevice& device,
|
||||
const Model& model) {
|
||||
const auto nnModel = NN_TRY(convertInput(model));
|
||||
return device.getSupportedOperations(nnModel);
|
||||
}
|
||||
|
||||
using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
|
||||
|
||||
std::shared_ptr<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
|
||||
if (preparedModel == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
return ndk::SharedRefBase::make<PreparedModel>(std::move(preparedModel));
|
||||
}
|
||||
|
||||
void notify(IPreparedModelCallback* callback, PrepareModelResult result) {
|
||||
if (!result.has_value()) {
|
||||
const auto& [message, status] = result.error();
|
||||
LOG(ERROR) << message;
|
||||
const auto aidlCode = utils::convert(status).value_or(ErrorStatus::GENERAL_FAILURE);
|
||||
callback->notify(aidlCode, nullptr);
|
||||
} else {
|
||||
auto preparedModel = std::move(result).value();
|
||||
auto aidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
|
||||
callback->notify(ErrorStatus::NONE, std::move(aidlPreparedModel));
|
||||
}
|
||||
}
|
||||
|
||||
nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
|
||||
const Model& model, ExecutionPreference preference,
|
||||
Priority priority, int64_t deadlineNs,
|
||||
const std::vector<ndk::ScopedFileDescriptor>& modelCache,
|
||||
const std::vector<ndk::ScopedFileDescriptor>& dataCache,
|
||||
const std::vector<uint8_t>& token,
|
||||
const std::shared_ptr<IPreparedModelCallback>& callback) {
|
||||
if (callback.get() == nullptr) {
|
||||
return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
|
||||
}
|
||||
|
||||
auto nnModel = NN_TRY(convertInput(model));
|
||||
const auto nnPreference = NN_TRY(convertInput(preference));
|
||||
const auto nnPriority = NN_TRY(convertInput(priority));
|
||||
const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
|
||||
auto nnModelCache = NN_TRY(convertInput(modelCache));
|
||||
auto nnDataCache = NN_TRY(convertInput(dataCache));
|
||||
const auto nnToken = NN_TRY(convertCacheToken(token));
|
||||
|
||||
Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
|
||||
nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
|
||||
nnToken, callback] {
|
||||
auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
|
||||
nnModelCache, nnDataCache, nnToken);
|
||||
notify(callback.get(), std::move(result));
|
||||
};
|
||||
executor(std::move(task), nnDeadline);
|
||||
|
||||
return {};
|
||||
}

nn::GeneralResult<void> prepareModelFromCache(
        const nn::SharedDevice& device, const Executor& executor, int64_t deadlineNs,
        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = NN_TRY(convertCacheToken(token));

    auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), nnDeadline);

    return {};
}

} // namespace

Device::Device(::android::nn::SharedDevice device, Executor executor)
    : kDevice(std::move(device)), kExecutor(std::move(executor)) {
    CHECK(kDevice != nullptr);
    CHECK(kExecutor != nullptr);
}

ndk::ScopedAStatus Device::allocate(const BufferDesc& desc,
                                    const std::vector<IPreparedModelParcel>& preparedModels,
                                    const std::vector<BufferRole>& inputRoles,
                                    const std::vector<BufferRole>& outputRoles,
                                    DeviceBuffer* buffer) {
    auto result = adapter::allocate(*kDevice, desc, preparedModels, inputRoles, outputRoles);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *buffer = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getCapabilities(Capabilities* capabilities) {
    *capabilities = utils::convert(kDevice->getCapabilities()).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) {
    const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded();
    *numberOfCacheFiles = NumberOfCacheFiles{.numModelCache = static_cast<int32_t>(numModelCache),
                                             .numDataCache = static_cast<int32_t>(numDataCache)};
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getSupportedExtensions(std::vector<Extension>* extensions) {
    *extensions = utils::convert(kDevice->getSupportedExtensions()).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getSupportedOperations(const Model& model,
                                                  std::vector<bool>* supported) {
    auto result = adapter::getSupportedOperations(*kDevice, model);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *supported = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getType(DeviceType* deviceType) {
    *deviceType = utils::convert(kDevice->getType()).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getVersionString(std::string* version) {
    *version = kDevice->getVersionString();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::prepareModel(const Model& model, ExecutionPreference preference,
                                        Priority priority, int64_t deadlineNs,
                                        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
                                        const std::vector<ndk::ScopedFileDescriptor>& dataCache,
                                        const std::vector<uint8_t>& token,
                                        const std::shared_ptr<IPreparedModelCallback>& callback) {
    const auto result = adapter::prepareModel(kDevice, kExecutor, model, preference, priority,
                                              deadlineNs, modelCache, dataCache, token, callback);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        callback->notify(aidlCode, nullptr);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::prepareModelFromCache(
        int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    const auto result = adapter::prepareModelFromCache(kDevice, kExecutor, deadlineNs, modelCache,
                                                       dataCache, token, callback);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        callback->notify(aidlCode, nullptr);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

} // namespace aidl::android::hardware::neuralnetworks::adapter
neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp (Normal file, 225 lines)
@@ -0,0 +1,225 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "PreparedModel.h"

#include "Burst.h"

#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/IBurst.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/aidl/Conversions.h>
#include <nnapi/hal/aidl/Utils.h>

#include <memory>
#include <utility>
#include <vector>

namespace aidl::android::hardware::neuralnetworks::adapter {
namespace {

class FencedExecutionCallback : public BnFencedExecutionCallback {
  public:
    FencedExecutionCallback(nn::ExecuteFencedInfoCallback callback)
        : kCallback(std::move(callback)) {}

    ndk::ScopedAStatus getExecutionInfo(Timing* timingLaunched, Timing* timingFenced,
                                        ErrorStatus* errorStatus) override {
        const auto result = kCallback();
        if (result.ok()) {
            const auto& [nnTimingLaunched, nnTimingFenced] = result.value();
            *timingLaunched = utils::convert(nnTimingLaunched).value();
            *timingFenced = utils::convert(nnTimingFenced).value();
            *errorStatus = ErrorStatus::NONE;
        } else {
            constexpr auto kNoTiming = Timing{.timeOnDeviceNs = -1, .timeInDriverNs = -1};
            const auto& [message, code] = result.error();
            LOG(ERROR) << "getExecutionInfo failed with " << code << ": " << message;
            const auto aidlStatus = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
            *timingLaunched = kNoTiming;
            *timingFenced = kNoTiming;
            *errorStatus = aidlStatus;
        }
        return ndk::ScopedAStatus::ok();
    }

  private:
    const nn::ExecuteFencedInfoCallback kCallback;
};

template <typename Type>
auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
    auto result = nn::convert(object);
    if (!result.has_value()) {
        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}
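convertInput() appears to exist so that every conversion failure is reported as INVALID_ARGUMENT: if a Request, Model, or handle arriving over binder cannot be converted to its canonical form, the fault lies with the caller's input, and surfacing whatever code the converter produced (often GENERAL_FAILURE) would misattribute the error to the driver.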

nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
        const std::vector<ndk::ScopedFileDescriptor>& waitFor) {
    auto handles = NN_TRY(convertInput(waitFor));

    constexpr auto valid = [](const nn::SharedHandle& handle) {
        return handle != nullptr && handle->ok();
    };
    if (!std::all_of(handles.begin(), handles.end(), valid)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid sync fence";
    }

    std::vector<nn::SyncFence> syncFences;
    syncFences.reserve(waitFor.size());
    for (auto& handle : handles) {
        syncFences.push_back(nn::SyncFence::create(std::move(handle)).value());
    }
    return syncFences;
}

nn::Duration makeDuration(int64_t durationNs) {
    return nn::Duration(std::chrono::nanoseconds(durationNs));
}

nn::GeneralResult<nn::OptionalDuration> makeOptionalDuration(int64_t durationNs) {
    if (durationNs < -1) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid duration " << durationNs;
    }
    return durationNs < 0 ? nn::OptionalDuration{} : makeDuration(durationNs);
}

nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
    if (durationNs < -1) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
    }
    return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
}
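These helpers encode the AIDL convention that optional durations and deadlines travel as int64_t nanosecond values, with -1 meaning "absent"; anything below -1 is rejected. A quick sketch of the mapping (the literal values are illustrative only):

    // -1 is the "no value" sentinel; 0 and above are real nanosecond values.
    const auto absent = makeOptionalTimePoint(-1);        // empty nn::OptionalTimePoint (in a GeneralResult)
    const auto at5ms = makeOptionalTimePoint(5'000'000);  // nn::TimePoint 5 ms past the clock epoch
    const auto error = makeOptionalDuration(-2);          // GeneralResult holding INVALID_ARGUMENT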

nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IPreparedModel& preparedModel,
                                                          const Request& request,
                                                          bool measureTiming, int64_t deadlineNs,
                                                          int64_t loopTimeoutDurationNs) {
    const auto nnRequest = NN_TRY(convertInput(request));
    const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
    const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));

    const auto result =
            preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);

    if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
        const auto& [message, code, outputShapes] = result.error();
        LOG(ERROR) << "executeSynchronously failed with " << code << ": " << message;
        return ExecutionResult{.outputSufficientSize = false,
                               .outputShapes = utils::convert(outputShapes).value(),
                               .timing = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}};
    }

    const auto& [outputShapes, timing] = NN_TRY(result);
    return ExecutionResult{.outputSufficientSize = true,
                           .outputShapes = utils::convert(outputShapes).value(),
                           .timing = utils::convert(timing).value()};
}
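Note the asymmetry above: OUTPUT_INSUFFICIENT_SIZE is reported in-band, as a normal ExecutionResult with outputSufficientSize = false plus the shapes the driver did produce, so the client can grow its output buffers and retry; every other failure propagates out of the function as an error and becomes a service-specific binder status.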

nn::GeneralResult<FencedExecutionResult> executeFenced(
        const nn::IPreparedModel& preparedModel, const Request& request,
        const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measureTiming,
        int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs) {
    const auto nnRequest = NN_TRY(convertInput(request));
    const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
    const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
    const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
    const auto nnDuration = NN_TRY(makeOptionalDuration(durationNs));

    auto [syncFence, executeFencedInfoCallback] = NN_TRY(preparedModel.executeFenced(
            nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration));

    ndk::ScopedFileDescriptor fileDescriptor;
    if (syncFence.hasFd()) {
        auto uniqueFd = NN_TRY(nn::dupFd(syncFence.getFd()));
        fileDescriptor = ndk::ScopedFileDescriptor(uniqueFd.release());
    }

    return FencedExecutionResult{.callback = ndk::SharedRefBase::make<FencedExecutionCallback>(
                                         std::move(executeFencedInfoCallback)),
                                 .syncFence = std::move(fileDescriptor)};
}
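On the client side, the returned syncFence must signal before the outputs are readable, and only then is it meaningful to ask the callback for timing. A hedged sketch of a consumer, assuming a FencedExecutionResult named result obtained from executeFenced and libsync's sync_wait() being available:

    // Hypothetical consumer of the FencedExecutionResult produced above.
    if (result.syncFence.get() != -1) {
        sync_wait(result.syncFence.get(), /*timeout=*/-1);  // block until the fence signals
    }
    Timing launched{};
    Timing fenced{};
    ErrorStatus status{};
    result.callback->getExecutionInfo(&launched, &fenced, &status);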

} // namespace

PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel)
    : kPreparedModel(std::move(preparedModel)) {
    CHECK(kPreparedModel != nullptr);
}

ndk::ScopedAStatus PreparedModel::executeSynchronously(const Request& request, bool measureTiming,
                                                       int64_t deadlineNs,
                                                       int64_t loopTimeoutDurationNs,
                                                       ExecutionResult* executionResult) {
    auto result = adapter::executeSynchronously(*kPreparedModel, request, measureTiming, deadlineNs,
                                                loopTimeoutDurationNs);
    if (!result.has_value()) {
        const auto& [message, code, _] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *executionResult = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus PreparedModel::executeFenced(
        const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
        bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
        FencedExecutionResult* executionResult) {
    auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, measureTiming,
                                         deadlineNs, loopTimeoutDurationNs, durationNs);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *executionResult = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus PreparedModel::configureExecutionBurst(std::shared_ptr<IBurst>* burst) {
    auto result = kPreparedModel->configureExecutionBurst();
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *burst = ndk::SharedRefBase::make<Burst>(std::move(result).value());
    return ndk::ScopedAStatus::ok();
}

nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
    return kPreparedModel;
}

} // namespace aidl::android::hardware::neuralnetworks::adapter
@@ -20,7 +20,6 @@
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>
#include <sys/types.h>
#include <functional>
#include <memory>

@@ -37,10 +36,12 @@ using Task = std::function<void()>;
/**
 * A type-erased executor which executes a task asynchronously.
 *
 * This executor is also provided with an Application ID (Android User ID) and an optional deadline
 * for when the caller expects is the upper bound for the amount of time to complete the task.
 * This executor is also provided an optional deadline that the caller expects to be the upper
 * bound on the amount of time needed to complete the task. If needed, the Executor can retrieve
 * the Application ID (Android User ID) by calling IPCThreadState::self()->getCallingUid() in
 * hwbinder/IPCThreadState.h.
 */
using Executor = std::function<void(Task, uid_t, nn::OptionalTimePoint)>;
using Executor = std::function<void(Task, nn::OptionalTimePoint)>;
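With the uid_t parameter dropped from the signature, an Executor that still cares about the caller's identity has to look it up itself, as the updated comment says. A minimal sketch, assuming the Executor is invoked on the binder thread (so getCallingUid() still reflects the real caller) and that the hwbinder and logging headers are in scope:

    // Sketch only: captures the calling UID before detaching the task.
    Executor executor = [](Task task, nn::OptionalTimePoint /*deadline*/) {
        const uid_t uid = ::android::hardware::IPCThreadState::self()->getCallingUid();
        LOG(INFO) << "running NN task for uid " << uid;
        std::thread(std::move(task)).detach();
    };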

/**
 * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
@@ -39,7 +39,7 @@ namespace android::hardware::neuralnetworks::adapter {
// Class that adapts nn::IPreparedModel to V1_3::IPreparedModel.
class PreparedModel final : public V1_3::IPreparedModel {
  public:
    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId);
    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor);

    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
                                      const sp<V1_0::IExecutionCallback>& callback) override;
@@ -71,7 +71,6 @@ class PreparedModel final : public V1_3::IPreparedModel {
  private:
    const nn::SharedPreparedModel kPreparedModel;
    const Executor kExecutor;
    const uid_t kUserId;
};

} // namespace android::hardware::neuralnetworks::adapter
@@ -21,7 +21,6 @@
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>
#include <sys/types.h>

#include <functional>
#include <memory>
@@ -37,7 +36,7 @@ sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor) {
}

sp<V1_3::IDevice> adapt(nn::SharedDevice device) {
    Executor defaultExecutor = [](Task task, uid_t /*uid*/, nn::OptionalTimePoint /*deadline*/) {
    Executor defaultExecutor = [](Task task, nn::OptionalTimePoint /*deadline*/) {
        std::thread(std::move(task)).detach();
    };
    return adapt(std::move(device), std::move(defaultExecutor));
@@ -28,7 +28,6 @@
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <hwbinder/IPCThreadState.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
@@ -43,7 +42,6 @@
#include <nnapi/hal/1.2/Utils.h>
#include <nnapi/hal/1.3/Conversions.h>
#include <nnapi/hal/1.3/Utils.h>
#include <sys/types.h>

#include <memory>

@@ -64,12 +62,11 @@ auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>

using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;

sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor,
                                     uid_t userId) {
sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
    if (preparedModel == nullptr) {
        return nullptr;
    }
    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor), userId);
    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
}

void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
@@ -108,15 +105,14 @@ void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
}

template <typename CallbackType>
void notify(CallbackType* callback, PrepareModelResult result, Executor executor, uid_t userId) {
void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
    if (!result.has_value()) {
        const auto [message, status] = std::move(result).error();
        LOG(ERROR) << message;
        notify(callback, status, nullptr);
    } else {
        auto preparedModel = std::move(result).value();
        auto hidlPreparedModel =
                adaptPreparedModel(std::move(preparedModel), std::move(executor), userId);
        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
        notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
    }
}
@@ -137,13 +133,12 @@ nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Execu

    auto nnModel = NN_TRY(convertInput(model));

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), userId, executor, callback] {
    Task task = [device, nnModel = std::move(nnModel), executor, callback] {
        auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                           nn::Priority::DEFAULT, {}, {}, {}, {});
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -159,13 +154,12 @@ nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const E
    auto nnModel = NN_TRY(convertInput(model));
    const auto nnPreference = NN_TRY(convertInput(preference));

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), nnPreference, userId, executor, callback] {
    Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
        auto result =
                device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -187,15 +181,14 @@ nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const E
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), nnPreference,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, userId, executor, callback] {
                 nnToken, executor, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                           nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -218,15 +211,14 @@ nn::GeneralResult<void> prepareModel_1_3(
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, userId, executor, callback] {
                 nnToken, executor, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                           nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, nnDeadline);
    executor(std::move(task), nnDeadline);

    return {};
}
@@ -245,13 +237,12 @@ nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
        auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -270,13 +261,12 @@ nn::GeneralResult<void> prepareModelFromCache_1_3(
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, nnDeadline);
    executor(std::move(task), nnDeadline);

    return {};
}
@@ -28,7 +28,6 @@
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <hwbinder/IPCThreadState.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -37,7 +36,6 @@
#include <nnapi/hal/1.2/Utils.h>
#include <nnapi/hal/1.3/Conversions.h>
#include <nnapi/hal/1.3/Utils.h>
#include <sys/types.h>

#include <memory>
#include <thread>
@@ -145,7 +143,7 @@ void notify(CallbackType* callback, ExecutionResult result) {
    }
}

nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, uid_t userId,
nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
                                const Executor& executor, const V1_0::Request& request,
                                const sp<V1_0::IExecutionCallback>& callback) {
    if (callback.get() == nullptr) {
@@ -164,12 +162,12 @@ nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, ui
        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}

nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel, uid_t userId,
nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel,
                                    const Executor& executor, const V1_0::Request& request,
                                    V1_2::MeasureTiming measure,
                                    const sp<V1_2::IExecutionCallback>& callback) {
@@ -190,12 +188,12 @@ nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel
        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}

nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel, uid_t userId,
nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel,
                                    const Executor& executor, const V1_3::Request& request,
                                    V1_2::MeasureTiming measure,
                                    const V1_3::OptionalTimePoint& deadline,
@@ -222,7 +220,7 @@ nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel
                preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), userId, nnDeadline);
    executor(std::move(task), nnDeadline);

    return {};
}
@@ -305,8 +303,8 @@ nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> ex

} // namespace

PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId)
    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)), kUserId(userId) {
PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor)
    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)) {
    CHECK(kPreparedModel != nullptr);
    CHECK(kExecutor != nullptr);
}
@@ -317,7 +315,7 @@ nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {

Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
                                                 const sp<V1_0::IExecutionCallback>& callback) {
    auto result = adapter::execute(kPreparedModel, kUserId, kExecutor, request, callback);
    auto result = adapter::execute(kPreparedModel, kExecutor, request, callback);
    if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
@@ -330,8 +328,7 @@ Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
                                                     V1_2::MeasureTiming measure,
                                                     const sp<V1_2::IExecutionCallback>& callback) {
    auto result =
            adapter::execute_1_2(kPreparedModel, kUserId, kExecutor, request, measure, callback);
    auto result = adapter::execute_1_2(kPreparedModel, kExecutor, request, measure, callback);
    if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
@@ -346,8 +343,8 @@ Return<V1_3::ErrorStatus> PreparedModel::execute_1_3(
        const V1_3::OptionalTimePoint& deadline,
        const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
        const sp<V1_3::IExecutionCallback>& callback) {
    auto result = adapter::execute_1_3(kPreparedModel, kUserId, kExecutor, request, measure,
                                       deadline, loopTimeoutDuration, callback);
    auto result = adapter::execute_1_3(kPreparedModel, kExecutor, request, measure, deadline,
                                       loopTimeoutDuration, callback);
    if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute_1_3 failed with " << code << ": " << message;