Mirror of https://github.com/Evolution-X/hardware_interfaces, synced 2026-02-02 10:05:19 +00:00
Implement NNAPI canonical interfaces am: 4b276a767b am: e91a56a78f
Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1501391

Change-Id: I7d9c31e4e9daaa82eaad9b00e71ba486e0b6e46d
@@ -20,6 +20,7 @@ cc_library_static {
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/1.0/"],
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
+        "neuralnetworks_utils_hal_common",
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h (new file, 67 lines)
@@ -0,0 +1,67 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

namespace android::hardware::neuralnetworks::V1_0::utils {

class PreparedModelCallback final : public IPreparedModelCallback,
                                    public hal::utils::IProtectedCallback {
  public:
    using Data = nn::GeneralResult<nn::SharedPreparedModel>;

    Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;

    void notifyAsDeadObject() override;

    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
  public:
    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;

    Return<void> notify(ErrorStatus status) override;

    void notifyAsDeadObject() override;

    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H
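Both callback classes bridge the asynchronous HIDL notify() calls to the synchronous get() used by the canonical interfaces, parking the result in a hal::utils::TransferValue<Data>. A minimal self-contained sketch of that one-shot hand-off primitive (a hypothetical stand-in, not the actual NNAPI utility):

#include <condition_variable>
#include <mutex>
#include <optional>
#include <utility>

// Sketch of a one-shot value transfer: the HIDL callback thread calls put(),
// the caller blocks in take() until the value arrives.
template <typename T>
class TransferValueSketch {
  public:
    void put(T value) {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            // Only the first notification wins; later calls are ignored.
            if (!mValue.has_value()) mValue.emplace(std::move(value));
        }
        mCondition.notify_all();
    }

    T take() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mValue.has_value(); });
        T value = std::move(mValue).value();
        mValue.reset();
        return value;
    }

  private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::optional<T> mValue;
};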
@@ -24,42 +24,44 @@

 namespace android::nn {

-Result<OperandType> convert(const hal::V1_0::OperandType& operandType);
-Result<OperationType> convert(const hal::V1_0::OperationType& operationType);
-Result<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime);
-Result<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus);
-Result<Capabilities::PerformanceInfo> convert(const hal::V1_0::PerformanceInfo& performanceInfo);
-Result<Capabilities> convert(const hal::V1_0::Capabilities& capabilities);
-Result<DataLocation> convert(const hal::V1_0::DataLocation& location);
-Result<Operand> convert(const hal::V1_0::Operand& operand);
-Result<Operation> convert(const hal::V1_0::Operation& operation);
-Result<Model::OperandValues> convert(const hardware::hidl_vec<uint8_t>& operandValues);
-Result<Memory> convert(const hardware::hidl_memory& memory);
-Result<Model> convert(const hal::V1_0::Model& model);
-Result<Request::Argument> convert(const hal::V1_0::RequestArgument& requestArgument);
-Result<Request> convert(const hal::V1_0::Request& request);
-Result<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status);
+GeneralResult<OperandType> convert(const hal::V1_0::OperandType& operandType);
+GeneralResult<OperationType> convert(const hal::V1_0::OperationType& operationType);
+GeneralResult<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime);
+GeneralResult<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus);
+GeneralResult<Capabilities::PerformanceInfo> convert(
+        const hal::V1_0::PerformanceInfo& performanceInfo);
+GeneralResult<Capabilities> convert(const hal::V1_0::Capabilities& capabilities);
+GeneralResult<DataLocation> convert(const hal::V1_0::DataLocation& location);
+GeneralResult<Operand> convert(const hal::V1_0::Operand& operand);
+GeneralResult<Operation> convert(const hal::V1_0::Operation& operation);
+GeneralResult<Model::OperandValues> convert(const hardware::hidl_vec<uint8_t>& operandValues);
+GeneralResult<Memory> convert(const hardware::hidl_memory& memory);
+GeneralResult<Model> convert(const hal::V1_0::Model& model);
+GeneralResult<Request::Argument> convert(const hal::V1_0::RequestArgument& requestArgument);
+GeneralResult<Request> convert(const hal::V1_0::Request& request);
+GeneralResult<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status);

 }  // namespace android::nn

 namespace android::hardware::neuralnetworks::V1_0::utils {

-nn::Result<OperandType> convert(const nn::OperandType& operandType);
-nn::Result<OperationType> convert(const nn::OperationType& operationType);
-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime);
-nn::Result<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
-nn::Result<PerformanceInfo> convert(const nn::Capabilities::PerformanceInfo& performanceInfo);
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
-nn::Result<DataLocation> convert(const nn::DataLocation& location);
-nn::Result<Operand> convert(const nn::Operand& operand);
-nn::Result<Operation> convert(const nn::Operation& operation);
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues);
-nn::Result<hidl_memory> convert(const nn::Memory& memory);
-nn::Result<Model> convert(const nn::Model& model);
-nn::Result<RequestArgument> convert(const nn::Request::Argument& requestArgument);
-nn::Result<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool);
-nn::Result<Request> convert(const nn::Request& request);
-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& status);
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType);
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime);
+nn::GeneralResult<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<PerformanceInfo> convert(
+        const nn::Capabilities::PerformanceInfo& performanceInfo);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<DataLocation> convert(const nn::DataLocation& location);
+nn::GeneralResult<Operand> convert(const nn::Operand& operand);
+nn::GeneralResult<Operation> convert(const nn::Operation& operation);
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues);
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory);
+nn::GeneralResult<Model> convert(const nn::Model& model);
+nn::GeneralResult<RequestArgument> convert(const nn::Request::Argument& requestArgument);
+nn::GeneralResult<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool);
+nn::GeneralResult<Request> convert(const nn::Request& request);
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& status);

 }  // namespace android::hardware::neuralnetworks::V1_0::utils
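The hunk above migrates every convert() from Result<Type> to GeneralResult<Type>, so a failed conversion can carry a canonical error code alongside its message. A rough self-contained sketch of the shape of such a value-or-error type (illustrative only; the real GeneralResult is defined in nnapi/Result.h):

#include <string>
#include <utility>
#include <variant>

enum class ErrorStatus { NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE, INVALID_ARGUMENT };

struct GeneralError {
    std::string message;
    ErrorStatus code = ErrorStatus::GENERAL_FAILURE;
};

// A sum type: holds either the converted object or a GeneralError describing
// why the conversion failed.
template <typename T>
class GeneralResultSketch {
  public:
    GeneralResultSketch(T value) : mState(std::move(value)) {}
    GeneralResultSketch(GeneralError error) : mState(std::move(error)) {}

    bool has_value() const { return std::holds_alternative<T>(mState); }
    const T& value() const { return std::get<T>(mState); }
    const GeneralError& error() const { return std::get<GeneralError>(mState); }

  private:
    std::variant<T, GeneralError> mState;
};

int main() {
    const GeneralResultSketch<int> ok(7);
    const GeneralResultSketch<int> bad(GeneralError{"invalid operand", ErrorStatus::INVALID_ARGUMENT});
    return ok.has_value() && !bad.has_value() ? 0 : 1;
}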
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H

#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_0::utils {

class Device final : public nn::IDevice {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
                                                                   sp<V1_0::IDevice> device);

    Device(PrivateConstructorTag tag, std::string name, nn::Capabilities capabilities,
           sp<V1_0::IDevice> device, hal::utils::DeathHandler deathHandler);

    const std::string& getName() const override;
    const std::string& getVersionString() const override;
    nn::Version getFeatureLevel() const override;
    nn::DeviceType getType() const override;
    const std::vector<nn::Extension>& getSupportedExtensions() const override;
    const nn::Capabilities& getCapabilities() const override;
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;

    nn::GeneralResult<void> wait() const override;

    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
            const nn::Model& model) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedBuffer> allocate(
            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const override;

  private:
    const std::string kName;
    const std::string kVersionString = "UNKNOWN";
    const std::vector<nn::Extension> kExtensions;
    const nn::Capabilities kCapabilities;
    const sp<V1_0::IDevice> kDevice;
    const hal::utils::DeathHandler kDeathHandler;
};

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H
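Device uses a private-constructor-tag idiom: the constructor is public so std::make_shared can reach it, yet only code that can name PrivateConstructorTag (the class itself, via create()) can actually call it, which forces all construction through the validating factory. A self-contained sketch of the idiom with a hypothetical Widget class:

#include <memory>
#include <string>
#include <utility>

class Widget {
    // Only Widget's own members can name this tag type.
    struct PrivateConstructorTag {};

  public:
    // The factory validates its arguments before any object exists.
    static std::shared_ptr<const Widget> create(std::string name) {
        if (name.empty()) return nullptr;  // real code would return an error result
        return std::make_shared<const Widget>(PrivateConstructorTag{}, std::move(name));
    }

    // Public so std::make_shared can call it, but uncallable from outside
    // because outside code cannot spell PrivateConstructorTag.
    Widget(PrivateConstructorTag /*tag*/, std::string name) : mName(std::move(name)) {}

  private:
    const std::string mName;
};

int main() {
    const auto widget = Widget::create("example");
    return widget == nullptr;
}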
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h (new file, 64 lines)

@@ -0,0 +1,64 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H

#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::V1_0::utils {

class PreparedModel final : public nn::IPreparedModel {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            sp<V1_0::IPreparedModel> preparedModel);

    PreparedModel(PrivateConstructorTag tag, sp<V1_0::IPreparedModel> preparedModel,
                  hal::utils::DeathHandler deathHandler);

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;

    std::any getUnderlyingResource() const override;

  private:
    const sp<V1_0::IPreparedModel> kPreparedModel;
    const hal::utils::DeathHandler kDeathHandler;
};

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h (new file, 31 lines)
@@ -0,0 +1,31 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <string>

namespace android::hardware::neuralnetworks::V1_0::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>

@@ -31,10 +32,14 @@ constexpr auto kVersion = nn::Version::ANDROID_OC_MR1;

 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
-    const auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return {};
 }

@@ -51,9 +56,14 @@ bool valid(const Type& halObject) {
 template <typename Type>
 decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
     auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeVersion = nn::validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return nn::error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return canonical;
 }
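Both helpers above lean on NN_TRY, which unwraps a successful result or early-returns the error from the enclosing function. A minimal self-contained sketch of that macro pattern (names illustrative; the real NN_TRY operates on the nnapi result types):

#include <iostream>
#include <string>
#include <utility>
#include <variant>

template <typename T>
using ResultSketch = std::variant<T, std::string>;  // value or error message

// Statement-expression form, as supported by GCC/Clang: yields the value, or
// propagates the error out of the enclosing function.
#define TRY_SKETCH(expr)                                      \
    ({                                                        \
        auto _result = (expr);                                \
        if (std::holds_alternative<std::string>(_result)) {   \
            return std::get<std::string>(std::move(_result)); \
        }                                                     \
        std::get<0>(std::move(_result));                      \
    })

ResultSketch<int> parsePositive(int raw) {
    if (raw <= 0) return std::string("not positive");
    return raw;
}

ResultSketch<int> doubled(int raw) {
    const int value = TRY_SKETCH(parsePositive(raw));  // early-returns on error
    return value * 2;
}

int main() {
    std::cout << std::get<int>(doubled(21)) << "\n";  // prints 42
}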
neuralnetworks/1.0/utils/src/Callbacks.cpp (new file, 97 lines)
@@ -0,0 +1,97 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Callbacks.h"

#include "Conversions.h"
#include "PreparedModel.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

#include <utility>

namespace android::hardware::neuralnetworks::V1_0::utils {
namespace {

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<IPreparedModel>& preparedModel) {
    return NN_TRY(utils::PreparedModel::create(preparedModel));
}

}  // namespace

Return<void> PreparedModelCallback::notify(ErrorStatus status,
                                           const sp<IPreparedModel>& preparedModel) {
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    return Void();
}

void PreparedModelCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}

void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
    mData.put(std::move(result));
}

// ExecutionCallback methods begin here

Return<void> ExecutionCallback::notify(ErrorStatus status) {
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal({});
    }
    return Void();
}

void ExecutionCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}

void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
    mData.put(std::move(result));
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
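Note that both notify() overrides demote an unconvertible HAL status to GENERAL_FAILURE via value_or(), so a malformed status can never abort the notification path. A tiny sketch of that fallback, using std::optional in place of the result type:

#include <iostream>
#include <optional>

enum class ErrorStatus { NONE, GENERAL_FAILURE, INVALID_ARGUMENT };

// Stand-in for validatedConvertToCanonical: fails (empty optional) on values
// outside the known enum range.
std::optional<ErrorStatus> toCanonical(int rawStatus) {
    if (rawStatus < 0 || rawStatus > 2) return std::nullopt;
    return static_cast<ErrorStatus>(rawStatus);
}

int main() {
    // An unknown raw status (99) falls back to GENERAL_FAILURE instead of failing.
    const ErrorStatus canonical = toCanonical(99).value_or(ErrorStatus::GENERAL_FAILURE);
    std::cout << (canonical == ErrorStatus::GENERAL_FAILURE) << "\n";  // prints 1
}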
@@ -52,7 +52,7 @@ template <typename Input>
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
     std::vector<ConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
@@ -63,30 +63,31 @@ Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments

 }  // anonymous namespace

-Result<OperandType> convert(const hal::V1_0::OperandType& operandType) {
+GeneralResult<OperandType> convert(const hal::V1_0::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }

-Result<OperationType> convert(const hal::V1_0::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_0::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }

-Result<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime) {
+GeneralResult<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime) {
     return static_cast<Operand::LifeTime>(lifetime);
 }

-Result<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus) {
+GeneralResult<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus) {
     return static_cast<DeviceStatus>(deviceStatus);
 }

-Result<Capabilities::PerformanceInfo> convert(const hal::V1_0::PerformanceInfo& performanceInfo) {
+GeneralResult<Capabilities::PerformanceInfo> convert(
+        const hal::V1_0::PerformanceInfo& performanceInfo) {
     return Capabilities::PerformanceInfo{
             .execTime = performanceInfo.execTime,
             .powerUsage = performanceInfo.powerUsage,
     };
 }

-Result<Capabilities> convert(const hal::V1_0::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_0::Capabilities& capabilities) {
     const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance));
     const auto float32Performance = NN_TRY(convert(capabilities.float32Performance));

@@ -100,7 +101,7 @@ Result<Capabilities> convert(const hal::V1_0::Capabilities& capabilities) {
     };
 }

-Result<DataLocation> convert(const hal::V1_0::DataLocation& location) {
+GeneralResult<DataLocation> convert(const hal::V1_0::DataLocation& location) {
     return DataLocation{
             .poolIndex = location.poolIndex,
             .offset = location.offset,
@@ -108,7 +109,7 @@ Result<DataLocation> convert(const hal::V1_0::DataLocation& location) {
     };
 }

-Result<Operand> convert(const hal::V1_0::Operand& operand) {
+GeneralResult<Operand> convert(const hal::V1_0::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -119,7 +120,7 @@ Result<Operand> convert(const hal::V1_0::Operand& operand) {
     };
 }

-Result<Operation> convert(const hal::V1_0::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_0::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -127,15 +128,15 @@ Result<Operation> convert(const hal::V1_0::Operation& operation) {
     };
 }

-Result<Model::OperandValues> convert(const hidl_vec<uint8_t>& operandValues) {
+GeneralResult<Model::OperandValues> convert(const hidl_vec<uint8_t>& operandValues) {
     return Model::OperandValues(operandValues.data(), operandValues.size());
 }

-Result<Memory> convert(const hidl_memory& memory) {
+GeneralResult<Memory> convert(const hidl_memory& memory) {
     return createSharedMemoryFromHidlMemory(memory);
 }

-Result<Model> convert(const hal::V1_0::Model& model) {
+GeneralResult<Model> convert(const hal::V1_0::Model& model) {
     auto operations = NN_TRY(convert(model.operations));

     // Verify number of consumers.
@@ -144,9 +145,9 @@ Result<Model> convert(const hal::V1_0::Model& model) {
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << model.operands[i].numberOfConsumers;
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
         }
     }

@@ -164,7 +165,7 @@ Result<Model> convert(const hal::V1_0::Model& model) {
     };
 }

-Result<Request::Argument> convert(const hal::V1_0::RequestArgument& argument) {
+GeneralResult<Request::Argument> convert(const hal::V1_0::RequestArgument& argument) {
     const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
                                               : Request::Argument::LifeTime::POOL;
     return Request::Argument{
@@ -174,7 +175,7 @@ Result<Request::Argument> convert(const hal::V1_0::RequestArgument& argument) {
     };
 }

-Result<Request> convert(const hal::V1_0::Request& request) {
+GeneralResult<Request> convert(const hal::V1_0::Request& request) {
     auto memories = NN_TRY(convert(request.pools));
     std::vector<Request::MemoryPool> pools;
     pools.reserve(memories.size());
@@ -187,7 +188,7 @@ Result<Request> convert(const hal::V1_0::Request& request) {
     };
 }

-Result<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status) {
+GeneralResult<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status) {
     switch (status) {
         case hal::V1_0::ErrorStatus::NONE:
         case hal::V1_0::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -196,7 +197,8 @@ Result<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status) {
         case hal::V1_0::ErrorStatus::INVALID_ARGUMENT:
             return static_cast<ErrorStatus>(status);
     }
-    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
+    return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+           << "Invalid ErrorStatus " << underlyingType(status);
 }

 }  // namespace android::nn
@@ -208,7 +210,7 @@ template <typename Input>
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
     hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(utils::convert(arguments[i]));
@@ -218,33 +220,35 @@ nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& argum

 }  // anonymous namespace

-nn::Result<OperandType> convert(const nn::OperandType& operandType) {
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }

-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }

-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
     if (lifetime == nn::Operand::LifeTime::POINTER) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
     return static_cast<OperandLifeTime>(lifetime);
 }

-nn::Result<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+nn::GeneralResult<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
     return static_cast<DeviceStatus>(deviceStatus);
 }

-nn::Result<PerformanceInfo> convert(const nn::Capabilities::PerformanceInfo& performanceInfo) {
+nn::GeneralResult<PerformanceInfo> convert(
+        const nn::Capabilities::PerformanceInfo& performanceInfo) {
     return PerformanceInfo{
             .execTime = performanceInfo.execTime,
             .powerUsage = performanceInfo.powerUsage,
     };
 }

-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
     return Capabilities{
             .float32Performance = NN_TRY(convert(
                     capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),
@@ -253,7 +257,7 @@ nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
     };
 }

-nn::Result<DataLocation> convert(const nn::DataLocation& location) {
+nn::GeneralResult<DataLocation> convert(const nn::DataLocation& location) {
     return DataLocation{
             .poolIndex = location.poolIndex,
             .offset = location.offset,
@@ -261,7 +265,7 @@ nn::Result<DataLocation> convert(const nn::DataLocation& location) {
     };
 }

-nn::Result<Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -273,7 +277,7 @@ nn::Result<Operand> convert(const nn::Operand& operand) {
     };
 }

-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -281,20 +285,21 @@ nn::Result<Operation> convert(const nn::Operation& operation) {
     };
 }

-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
     return hidl_vec<uint8_t>(operandValues.data(), operandValues.data() + operandValues.size());
 }

-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
     const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
     // Copy memory to force the native_handle_t to be copied.
     auto copiedMemory = hidlMemory;
     return copiedMemory;
 }

-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
     if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }

     auto operands = NN_TRY(convert(model.main.operands));
@@ -317,9 +322,10 @@ nn::Result<Model> convert(const nn::Model& model) {
     };
 }

-nn::Result<RequestArgument> convert(const nn::Request::Argument& requestArgument) {
+nn::GeneralResult<RequestArgument> convert(const nn::Request::Argument& requestArgument) {
     if (requestArgument.lifetime == nn::Request::Argument::LifeTime::POINTER) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
     }
     const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE;
     return RequestArgument{
@@ -329,13 +335,14 @@ nn::Result<RequestArgument> convert(const nn::Request::Argument& requestArgument
     };
 }

-nn::Result<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool) {
+nn::GeneralResult<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool) {
     return convert(std::get<nn::Memory>(memoryPool));
 }

-nn::Result<Request> convert(const nn::Request& request) {
+nn::GeneralResult<Request> convert(const nn::Request& request) {
     if (!hal::utils::hasNoPointerData(request)) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
     }

     return Request{
@@ -345,7 +352,7 @@ nn::Result<Request> convert(const nn::Request& request) {
     };
 }

-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& status) {
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& status) {
     switch (status) {
         case nn::ErrorStatus::NONE:
         case nn::ErrorStatus::DEVICE_UNAVAILABLE:
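The Model conversion above cross-checks each operand's declared numberOfConsumers against a count recomputed from the operations. A self-contained sketch of that recount (countNumberOfConsumers here is a hypothetical recomputation, not the nnapi helper):

#include <cstddef>
#include <iostream>
#include <vector>

struct Operation {
    std::vector<size_t> inputs;  // operand indices consumed by this operation
};

// Recompute how many operations consume each of `operandCount` operands.
std::vector<size_t> countNumberOfConsumers(size_t operandCount,
                                           const std::vector<Operation>& operations) {
    std::vector<size_t> counts(operandCount, 0);
    for (const auto& operation : operations) {
        for (const size_t index : operation.inputs) {
            ++counts[index];
        }
    }
    return counts;
}

int main() {
    const std::vector<Operation> operations = {{{0, 1}}, {{1, 2}}};
    const auto counts = countNumberOfConsumers(3, operations);
    for (const size_t count : counts) std::cout << count << " ";  // prints 1 2 1
    std::cout << "\n";
}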
neuralnetworks/1.0/utils/src/Device.cpp (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Device.h"

#include "Callbacks.h"
#include "Conversions.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_0::utils {
namespace {

nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                 << "uninitialized";
    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status);
        } else {
            result = validatedConvertToCanonical(capabilities);
        }
    };

    const auto ret = device->getCapabilities(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

}  // namespace

nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
                                                                sp<V1_0::IDevice> device) {
    if (name.empty()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_0::utils::Device::create must have non-empty name";
    }
    if (device == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_0::utils::Device::create must have non-null device";
    }

    auto capabilities = NN_TRY(initCapabilities(device.get()));

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
    return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
                                          std::move(capabilities), std::move(device),
                                          std::move(deathHandler));
}

Device::Device(PrivateConstructorTag /*tag*/, std::string name, nn::Capabilities capabilities,
               sp<V1_0::IDevice> device, hal::utils::DeathHandler deathHandler)
    : kName(std::move(name)),
      kCapabilities(std::move(capabilities)),
      kDevice(std::move(device)),
      kDeathHandler(std::move(deathHandler)) {}

const std::string& Device::getName() const {
    return kName;
}

const std::string& Device::getVersionString() const {
    return kVersionString;
}

nn::Version Device::getFeatureLevel() const {
    return nn::Version::ANDROID_OC_MR1;
}

nn::DeviceType Device::getType() const {
    return nn::DeviceType::OTHER;
}

const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
    return kExtensions;
}

const nn::Capabilities& Device::getCapabilities() const {
    return kCapabilities;
}

std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
    return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0);
}

nn::GeneralResult<void> Device::wait() const {
    const auto ret = kDevice->ping();
    return hal::utils::handleTransportError(ret);
}

nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));

    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                  << "uninitialized";
    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "getSupportedOperations failed with " << toString(status);
        } else if (supportedOperations.size() != model.main.operations.size()) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "getSupportedOperations returned vector of size "
                     << supportedOperations.size() << " but expected "
                     << model.main.operations.size();
        } else {
            result = supportedOperations;
        }
    };

    const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
        const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));

    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kDevice->prepareModel(hidlModel, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
    }

    return cb->get();
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IDevice::prepareModelFromCache not supported on 1.0 HAL service";
}

nn::GeneralResult<nn::SharedBuffer> Device::allocate(
        const nn::BufferDesc& /*desc*/,
        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
        const std::vector<nn::BufferRole>& /*inputRoles*/,
        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IDevice::allocate not supported on 1.0 HAL service";
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
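initCapabilities() above shows the standard shape for HIDL methods that report results through a callback argument: seed a result variable with an error, let the lambda overwrite it, and check the transport status of the call itself separately. A self-contained sketch of that pattern (a plain std::function stands in for the HIDL callback; names illustrative):

#include <functional>
#include <iostream>
#include <string>
#include <variant>

enum class Status { NONE, GENERAL_FAILURE };

template <typename T>
using Result = std::variant<T, std::string>;  // value or error message

// Stand-in for a HIDL method that delivers its result via a callback.
void getCapabilities(const std::function<void(Status, int)>& cb) {
    cb(Status::NONE, 42);
}

Result<int> initCapabilities() {
    // Seed with an error in case the callback is never invoked.
    Result<int> result = std::string("uninitialized");
    getCapabilities([&result](Status status, int capabilities) {
        if (status != Status::NONE) {
            result = std::string("getCapabilities failed");
        } else {
            result = capabilities;
        }
    });
    // A real implementation would also check the transport status here.
    return result;
}

int main() {
    std::cout << std::get<int>(initCapabilities()) << "\n";  // prints 42
}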
neuralnetworks/1.0/utils/src/PreparedModel.cpp (new file, 100 lines)
@@ -0,0 +1,100 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "PreparedModel.h"

#include "Callbacks.h"
#include "Conversions.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::V1_0::utils {

nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
        sp<V1_0::IPreparedModel> preparedModel) {
    if (preparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
    }

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
                                                 std::move(deathHandler));
}

PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_0::IPreparedModel> preparedModel,
                             hal::utils::DeathHandler deathHandler)
    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
        const nn::Request& request, nn::MeasureTiming /*measure*/,
        const nn::OptionalTimePoint& /*deadline*/,
        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
    // Ensure that request is ready for IPC.
    std::optional<nn::Request> maybeRequestInShared;
    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));

    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));

    const auto cb = sp<ExecutionCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kPreparedModel->execute(hidlRequest, cb);
    const auto status =
            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "execute failed with " << toString(status);
    }

    auto result = NN_TRY(cb->get());
    NN_TRY(hal::utils::makeExecutionFailure(
            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));

    return result;
}

nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
PreparedModel::executeFenced(
        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
}

std::any PreparedModel::getUnderlyingResource() const {
    sp<V1_0::IPreparedModel> resource = kPreparedModel;
    return resource;
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
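execute() above first relocates any pointer-backed request data into shared memory (flushDataFromPointerToShared), performs the IPC call, then copies output data back behind the original pointers (unflushDataFromSharedToPointer). A loose self-contained sketch of that round-trip, under the simplifying assumption that "shared memory" is just a staging buffer:

#include <cstring>
#include <iostream>
#include <vector>

// Stand-ins: a request whose output lives behind a raw pointer, and a staging
// pool playing the role of shared memory.
struct Request {
    void* output;
    size_t size;
};

int main() {
    int value = 0;
    Request request{&value, sizeof(value)};

    // "Flush": give the driver a pool it can address instead of raw pointers.
    std::vector<unsigned char> pool(request.size);

    // The driver writes its result into the pool (here: the int 7).
    const int produced = 7;
    std::memcpy(pool.data(), &produced, sizeof(produced));

    // "Unflush": copy results from the pool back behind the original pointer.
    std::memcpy(request.output, pool.data(), request.size);

    std::cout << value << "\n";  // prints 7
}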
neuralnetworks/1.0/utils/src/Service.cpp (new file, 41 lines)
@@ -0,0 +1,41 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Service.h"

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/ResilientDevice.h>

#include <string>

#include "Device.h"

namespace android::hardware::neuralnetworks::V1_0::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
    hal::utils::ResilientDevice::Factory makeDevice =
            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
        if (service == nullptr) {
            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
        }
        return Device::create(name, std::move(service));
    };

    return hal::utils::ResilientDevice::create(std::move(makeDevice));
}

}  // namespace android::hardware::neuralnetworks::V1_0::utils
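getDevice() hands ResilientDevice a factory rather than a finished device, so the wrapper can re-fetch the HAL service if it later dies; the blocking flag selects getService versus tryGetService. A self-contained sketch of that recover-by-refactory idea (hypothetical names, not the hal::utils implementation):

#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct Device {
    std::string name;
};

using Factory = std::function<std::shared_ptr<Device>(bool blocking)>;

// Wrapper that keeps the factory so it can re-create the device after death.
class ResilientDeviceSketch {
  public:
    explicit ResilientDeviceSketch(Factory factory)
        : mFactory(std::move(factory)), mDevice(mFactory(/*blocking=*/true)) {}

    // On a dead-object error, a real implementation would call this and retry.
    void recover() { mDevice = mFactory(/*blocking=*/true); }

    const std::shared_ptr<Device>& device() const { return mDevice; }

  private:
    Factory mFactory;
    std::shared_ptr<Device> mDevice;
};

int main() {
    ResilientDeviceSketch resilient(
            [](bool /*blocking*/) { return std::make_shared<Device>(Device{"nnapi-sample"}); });
    std::cout << resilient.device()->name << "\n";
}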
@@ -20,6 +20,7 @@ cc_library_static {
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/1.1/"],
     export_include_dirs: ["include"],
    cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
+        "neuralnetworks_utils_hal_common",
@@ -24,21 +24,22 @@

 namespace android::nn {

-Result<OperationType> convert(const hal::V1_1::OperationType& operationType);
-Result<Capabilities> convert(const hal::V1_1::Capabilities& capabilities);
-Result<Operation> convert(const hal::V1_1::Operation& operation);
-Result<Model> convert(const hal::V1_1::Model& model);
-Result<ExecutionPreference> convert(const hal::V1_1::ExecutionPreference& executionPreference);
+GeneralResult<OperationType> convert(const hal::V1_1::OperationType& operationType);
+GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities);
+GeneralResult<Operation> convert(const hal::V1_1::Operation& operation);
+GeneralResult<Model> convert(const hal::V1_1::Model& model);
+GeneralResult<ExecutionPreference> convert(
+        const hal::V1_1::ExecutionPreference& executionPreference);

 }  // namespace android::nn

 namespace android::hardware::neuralnetworks::V1_1::utils {

-nn::Result<OperationType> convert(const nn::OperationType& operationType);
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
-nn::Result<Operation> convert(const nn::Operation& operation);
-nn::Result<Model> convert(const nn::Model& model);
-nn::Result<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Operation> convert(const nn::Operation& operation);
+nn::GeneralResult<Model> convert(const nn::Model& model);
+nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);

 }  // namespace android::hardware::neuralnetworks::V1_1::utils
neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H

#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_1::utils {

class Device final : public nn::IDevice {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
                                                                   sp<V1_1::IDevice> device);

    Device(PrivateConstructorTag tag, std::string name, nn::Capabilities capabilities,
           sp<V1_1::IDevice> device, hal::utils::DeathHandler deathHandler);

    const std::string& getName() const override;
    const std::string& getVersionString() const override;
    nn::Version getFeatureLevel() const override;
    nn::DeviceType getType() const override;
    const std::vector<nn::Extension>& getSupportedExtensions() const override;
    const nn::Capabilities& getCapabilities() const override;
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;

    nn::GeneralResult<void> wait() const override;

    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
            const nn::Model& model) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedBuffer> allocate(
            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const override;

  private:
    const std::string kName;
    const std::string kVersionString = "UNKNOWN";
    const std::vector<nn::Extension> kExtensions;
    const nn::Capabilities kCapabilities;
    const sp<V1_1::IDevice> kDevice;
    const hal::utils::DeathHandler kDeathHandler;
};

}  // namespace android::hardware::neuralnetworks::V1_1::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H
31
neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h
Normal file
@@ -0,0 +1,31 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <string>

namespace android::hardware::neuralnetworks::V1_1::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);

} // namespace android::hardware::neuralnetworks::V1_1::utils

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H

@@ -22,6 +22,7 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>

@@ -33,10 +34,14 @@ constexpr auto kVersion = nn::Version::ANDROID_P;

template <typename Type>
nn::Result<void> validate(const Type& halObject) {
    const auto canonical = NN_TRY(nn::convert(halObject));
    const auto version = NN_TRY(nn::validate(canonical));
    const auto maybeCanonical = nn::convert(halObject);
    if (!maybeCanonical.has_value()) {
        return nn::error() << maybeCanonical.error().message;
    }
    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
    if (version > utils::kVersion) {
        return NN_ERROR() << "";
        return NN_ERROR() << "Insufficient version: " << version << " vs required "
                          << utils::kVersion;
    }
    return {};
}

@@ -53,9 +58,14 @@ bool valid(const Type& halObject) {

template <typename Type>
decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
    auto canonical = NN_TRY(nn::convert(halObject));
    const auto version = NN_TRY(nn::validate(canonical));
    const auto maybeVersion = nn::validate(canonical);
    if (!maybeVersion.has_value()) {
        return nn::error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > utils::kVersion) {
        return NN_ERROR() << "";
        return NN_ERROR() << "Insufficient version: " << version << " vs required "
                          << utils::kVersion;
    }
    return canonical;
}

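Both hunks above make the same change for the same reason: NN_TRY can only forward an error of the same result type as the enclosing function, so once nn::convert and nn::validate return differently-typed results (a GeneralResult error carries an ErrorStatus code, a plain Result error only a message string), the failure has to be unpacked and re-wrapped by hand at the boundary. A minimal, self-contained sketch of that constraint; MyResult, MyGeneralResult, and MyStatus below are hypothetical stand-ins, not the real nnapi types:

#include <optional>
#include <string>

// Stand-in for nn::Result<T>: failure carries only a message string.
template <typename T>
struct MyResult {
    std::optional<T> value;
    std::string error;
    bool has_value() const { return value.has_value(); }
};

enum class MyStatus { NONE, GENERAL_FAILURE };

// Stand-in for nn::GeneralResult<T>: failure carries a status code and a message.
template <typename T>
struct MyGeneralResult {
    std::optional<T> value;
    MyStatus status = MyStatus::NONE;
    std::string message;
};

MyResult<int> validateVersion(int version) {
    if (version < 0) return {std::nullopt, "negative version"};
    return {version, {}};
}

// A TRY-style macro could only propagate a MyGeneralResult error from here, so
// the MyResult failure is unpacked explicitly and re-wrapped with a status code,
// exactly as the hunks above do with nn::error() and NN_ERROR().
MyGeneralResult<int> checkedVersion(int version) {
    const auto maybeVersion = validateVersion(version);
    if (!maybeVersion.has_value()) {
        return {std::nullopt, MyStatus::GENERAL_FAILURE, maybeVersion.error};
    }
    return {maybeVersion.value(), MyStatus::NONE, {}};
}
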
@@ -42,7 +42,7 @@ template <typename Input>
using convertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
Result<std::vector<convertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
GeneralResult<std::vector<convertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
    std::vector<convertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {

@@ -53,11 +53,11 @@ Result<std::vector<convertOutput<Type>>> convert(const hidl_vec<Type>& arguments

} // anonymous namespace

Result<OperationType> convert(const hal::V1_1::OperationType& operationType) {
GeneralResult<OperationType> convert(const hal::V1_1::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

Result<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
    const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance));
    const auto float32Performance = NN_TRY(convert(capabilities.float32Performance));
    const auto relaxedFloat32toFloat16Performance =

@@ -73,7 +73,7 @@ Result<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
    };
}

Result<Operation> convert(const hal::V1_1::Operation& operation) {
GeneralResult<Operation> convert(const hal::V1_1::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,

@@ -81,7 +81,7 @@ Result<Operation> convert(const hal::V1_1::Operation& operation) {
    };
}

Result<Model> convert(const hal::V1_1::Model& model) {
GeneralResult<Model> convert(const hal::V1_1::Model& model) {
    auto operations = NN_TRY(convert(model.operations));

    // Verify number of consumers.

@@ -90,9 +90,9 @@ Result<Model> convert(const hal::V1_1::Model& model) {
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
                              << numberOfConsumers[i] << " but found "
                              << model.operands[i].numberOfConsumers;
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Invalid numberOfConsumers for operand " << i << ", expected "
                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
        }
    }

@@ -111,7 +111,8 @@ Result<Model> convert(const hal::V1_1::Model& model) {
    };
}

Result<ExecutionPreference> convert(const hal::V1_1::ExecutionPreference& executionPreference) {
GeneralResult<ExecutionPreference> convert(
        const hal::V1_1::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

@@ -122,20 +123,20 @@ namespace {

using utils::convert;

nn::Result<V1_0::PerformanceInfo> convert(
nn::GeneralResult<V1_0::PerformanceInfo> convert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::convert(performanceInfo);
}

nn::Result<V1_0::Operand> convert(const nn::Operand& operand) {
nn::GeneralResult<V1_0::Operand> convert(const nn::Operand& operand) {
    return V1_0::utils::convert(operand);
}

nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::convert(operandValues);
}

nn::Result<hidl_memory> convert(const nn::Memory& memory) {
nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
    return V1_0::utils::convert(memory);
}

@@ -143,7 +144,7 @@ template <typename Input>
using convertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
nn::Result<hidl_vec<convertOutput<Type>>> convert(const std::vector<Type>& arguments) {
nn::GeneralResult<hidl_vec<convertOutput<Type>>> convert(const std::vector<Type>& arguments) {
    hidl_vec<convertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(convert(arguments[i]));

@@ -153,11 +154,11 @@ nn::Result<hidl_vec<convertOutput<Type>>> convert(const std::vector<Type>& argum

} // anonymous namespace

nn::Result<OperationType> convert(const nn::OperationType& operationType) {
nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return Capabilities{
            .float32Performance = NN_TRY(convert(
                    capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),

@@ -168,7 +169,7 @@ nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
    };
}

nn::Result<Operation> convert(const nn::Operation& operation) {
nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,

@@ -176,9 +177,10 @@ nn::Result<Operation> convert(const nn::Operation& operation) {
    };
}

nn::Result<Model> convert(const nn::Model& model) {
nn::GeneralResult<Model> convert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be converted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(convert(model.main.operands));

@@ -202,7 +204,7 @@ nn::Result<Model> convert(const nn::Model& model) {
    };
}

nn::Result<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

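Taken together, the two halves of this file give a round trip between the canonical and HAL representations. A usage sketch, assuming only the headers added by this commit; roundTrip is a hypothetical helper, not part of the change:

#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.1/Conversions.h>

namespace android::hardware::neuralnetworks {

nn::GeneralResult<V1_1::Model> roundTrip(const V1_1::Model& halModel) {
    // HAL -> canonical: checks structural invariants such as numberOfConsumers.
    const nn::Model canonical = NN_TRY(nn::convert(halModel));
    // Canonical -> HAL: fails with INVALID_ARGUMENT if pointer-based memory remains.
    return V1_1::utils::convert(canonical);
}

}  // namespace android::hardware::neuralnetworks
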
202
neuralnetworks/1.1/utils/src/Device.cpp
Normal file
@@ -0,0 +1,202 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "Device.h"

#include "Conversions.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Callbacks.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_1::utils {
namespace {

nn::GeneralResult<nn::Capabilities> initCapabilities(V1_1::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                 << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status);
        } else {
            result = validatedConvertToCanonical(capabilities);
        }
    };

    const auto ret = device->getCapabilities_1_1(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

} // namespace

nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
                                                                sp<V1_1::IDevice> device) {
    if (name.empty()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_1::utils::Device::create must have non-empty name";
    }
    if (device == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_1::utils::Device::create must have non-null device";
    }

    auto capabilities = NN_TRY(initCapabilities(device.get()));

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
    return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
                                          std::move(capabilities), std::move(device),
                                          std::move(deathHandler));
}

Device::Device(PrivateConstructorTag /*tag*/, std::string name, nn::Capabilities capabilities,
               sp<V1_1::IDevice> device, hal::utils::DeathHandler deathHandler)
    : kName(std::move(name)),
      kCapabilities(std::move(capabilities)),
      kDevice(std::move(device)),
      kDeathHandler(std::move(deathHandler)) {}

const std::string& Device::getName() const {
    return kName;
}

const std::string& Device::getVersionString() const {
    return kVersionString;
}

nn::Version Device::getFeatureLevel() const {
    return nn::Version::ANDROID_P;
}

nn::DeviceType Device::getType() const {
    return nn::DeviceType::UNKNOWN;
}

const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
    return kExtensions;
}

const nn::Capabilities& Device::getCapabilities() const {
    return kCapabilities;
}

std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
    return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0);
}

nn::GeneralResult<void> Device::wait() const {
    const auto ret = kDevice->ping();
    return hal::utils::handleTransportError(ret);
}

nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));

    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                  << "uninitialized";
    auto cb = [&result, &model](V1_0::ErrorStatus status,
                                const hidl_vec<bool>& supportedOperations) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "getSupportedOperations_1_1 failed with " << toString(status);
        } else if (supportedOperations.size() != model.main.operations.size()) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "getSupportedOperations_1_1 returned vector of size "
                     << supportedOperations.size() << " but expected "
                     << model.main.operations.size();
        } else {
            result = supportedOperations;
        }
    };

    const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));
    const auto hidlPreference = NN_TRY(convert(preference));

    const auto cb = sp<V1_0::utils::PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
    }

    return cb->get();
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IDevice::prepareModelFromCache not supported on 1.1 HAL service";
}

nn::GeneralResult<nn::SharedBuffer> Device::allocate(
        const nn::BufferDesc& /*desc*/,
        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
        const std::vector<nn::BufferRole>& /*inputRoles*/,
        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IDevice::allocate not supported on 1.1 HAL service";
}

} // namespace android::hardware::neuralnetworks::V1_1::utils

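initCapabilities() and getSupportedOperations() above share one bridging pattern for HIDL's synchronous callbacks, reduced here to a sketch; IFoo, Foo, and getFoo are hypothetical names, only the pattern comes from the file:

nn::GeneralResult<Foo> queryFoo(IFoo* device) {
    // Seed the out-variable with an error so a driver that never invokes the
    // callback still yields a diagnosable failure rather than an empty value.
    nn::GeneralResult<Foo> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const Foo& foo) {
        if (status != V1_0::ErrorStatus::NONE) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "getFoo failed";
        } else {
            result = foo;  // safe: HIDL runs the callback before getFoo() returns
        }
    };
    NN_TRY(hal::utils::handleTransportError(device->getFoo(cb)));  // dead process -> error
    return result;
}
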
41
neuralnetworks/1.1/utils/src/Service.cpp
Normal file
@@ -0,0 +1,41 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "Service.h"

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/ResilientDevice.h>
#include <string>
#include "Device.h"

namespace android::hardware::neuralnetworks::V1_1::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
    hal::utils::ResilientDevice::Factory makeDevice =
            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
        if (service == nullptr) {
            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
        }
        return Device::create(name, std::move(service));
    };

    return hal::utils::ResilientDevice::create(std::move(makeDevice));
}

} // namespace android::hardware::neuralnetworks::V1_1::utils

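A usage sketch for the entry point above; the instance name "default" is illustrative, not taken from this commit:

#include <nnapi/hal/1.1/Service.h>

#include <utility>

void useDevice() {
    auto maybeDevice = android::hardware::neuralnetworks::V1_1::utils::getDevice("default");
    if (!maybeDevice.has_value()) {
        // maybeDevice.error() carries an ErrorStatus code plus a message.
        return;
    }
    android::nn::SharedDevice device = std::move(maybeDevice).value();
    // The ResilientDevice wrapper re-fetches the service through the same
    // factory lambda shown above if the driver process dies.
}
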
@@ -20,6 +20,7 @@ cc_library_static {
    srcs: ["src/*"],
    local_include_dirs: ["include/nnapi/hal/1.2/"],
    export_include_dirs: ["include"],
    cflags: ["-Wthread-safety"],
    static_libs: [
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",

76
neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
Normal file
@@ -0,0 +1,76 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Callbacks.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

namespace android::hardware::neuralnetworks::V1_2::utils {

class PreparedModelCallback final : public IPreparedModelCallback,
                                    public hal::utils::IProtectedCallback {
  public:
    using Data = nn::GeneralResult<nn::SharedPreparedModel>;

    Return<void> notify(V1_0::ErrorStatus status,
                        const sp<V1_0::IPreparedModel>& preparedModel) override;
    Return<void> notify_1_2(V1_0::ErrorStatus status,
                            const sp<IPreparedModel>& preparedModel) override;

    void notifyAsDeadObject() override;

    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
  public:
    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;

    Return<void> notify(V1_0::ErrorStatus status) override;
    Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
                            const Timing& timing) override;

    void notifyAsDeadObject() override;

    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

} // namespace android::hardware::neuralnetworks::V1_2::utils

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H

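How a caller is expected to drive these callbacks, sketched as a fragment from the V1_1 Device::prepareModel() flow earlier in this commit; the full prepareModel_1_2 argument list is elided rather than reproduced here:

const auto cb = sp<PreparedModelCallback>::make();
// protectCallback() arranges for notifyAsDeadObject() if the driver process dies.
const auto scoped = deathHandler.protectCallback(cb.get());

const auto ret = device->prepareModel_1_2(/* model, preference, caches, token, */ cb);
NN_TRY(hal::utils::handleTransportError(ret));

// Blocks until notify(), notify_1_2(), or notifyAsDeadObject() delivers a value.
nn::GeneralResult<nn::SharedPreparedModel> prepared = cb->get();
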
@@ -24,62 +24,64 @@

namespace android::nn {

Result<OperandType> convert(const hal::V1_2::OperandType& operandType);
Result<OperationType> convert(const hal::V1_2::OperationType& operationType);
Result<DeviceType> convert(const hal::V1_2::DeviceType& deviceType);
Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities);
Result<Capabilities::OperandPerformance> convert(
GeneralResult<OperandType> convert(const hal::V1_2::OperandType& operandType);
GeneralResult<OperationType> convert(const hal::V1_2::OperationType& operationType);
GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType);
GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities);
GeneralResult<Capabilities::OperandPerformance> convert(
        const hal::V1_2::Capabilities::OperandPerformance& operandPerformance);
Result<Operation> convert(const hal::V1_2::Operation& operation);
Result<Operand::SymmPerChannelQuantParams> convert(
GeneralResult<Operation> convert(const hal::V1_2::Operation& operation);
GeneralResult<Operand::SymmPerChannelQuantParams> convert(
        const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams);
Result<Operand> convert(const hal::V1_2::Operand& operand);
Result<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams);
Result<Model> convert(const hal::V1_2::Model& model);
Result<Model::ExtensionNameAndPrefix> convert(
GeneralResult<Operand> convert(const hal::V1_2::Operand& operand);
GeneralResult<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams);
GeneralResult<Model> convert(const hal::V1_2::Model& model);
GeneralResult<Model::ExtensionNameAndPrefix> convert(
        const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
Result<OutputShape> convert(const hal::V1_2::OutputShape& outputShape);
Result<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming);
Result<Timing> convert(const hal::V1_2::Timing& timing);
Result<Extension> convert(const hal::V1_2::Extension& extension);
Result<Extension::OperandTypeInformation> convert(
GeneralResult<OutputShape> convert(const hal::V1_2::OutputShape& outputShape);
GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming);
GeneralResult<Timing> convert(const hal::V1_2::Timing& timing);
GeneralResult<Extension> convert(const hal::V1_2::Extension& extension);
GeneralResult<Extension::OperandTypeInformation> convert(
        const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation);
Result<NativeHandle> convert(const hardware::hidl_handle& handle);
GeneralResult<NativeHandle> convert(const hardware::hidl_handle& handle);

Result<std::vector<Extension>> convert(const hardware::hidl_vec<hal::V1_2::Extension>& extensions);
Result<std::vector<NativeHandle>> convert(const hardware::hidl_vec<hardware::hidl_handle>& handles);
Result<std::vector<OutputShape>> convert(
GeneralResult<std::vector<Extension>> convert(
        const hardware::hidl_vec<hal::V1_2::Extension>& extensions);
GeneralResult<std::vector<NativeHandle>> convert(
        const hardware::hidl_vec<hardware::hidl_handle>& handles);
GeneralResult<std::vector<OutputShape>> convert(
        const hardware::hidl_vec<hal::V1_2::OutputShape>& outputShapes);

} // namespace android::nn

namespace android::hardware::neuralnetworks::V1_2::utils {

nn::Result<OperandType> convert(const nn::OperandType& operandType);
nn::Result<OperationType> convert(const nn::OperationType& operationType);
nn::Result<DeviceType> convert(const nn::DeviceType& deviceType);
nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
nn::Result<Capabilities::OperandPerformance> convert(
nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType);
nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
nn::GeneralResult<Capabilities::OperandPerformance> convert(
        const nn::Capabilities::OperandPerformance& operandPerformance);
nn::Result<Operation> convert(const nn::Operation& operation);
nn::Result<SymmPerChannelQuantParams> convert(
nn::GeneralResult<Operation> convert(const nn::Operation& operation);
nn::GeneralResult<SymmPerChannelQuantParams> convert(
        const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams);
nn::Result<Operand> convert(const nn::Operand& operand);
nn::Result<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams);
nn::Result<Model> convert(const nn::Model& model);
nn::Result<Model::ExtensionNameAndPrefix> convert(
nn::GeneralResult<Operand> convert(const nn::Operand& operand);
nn::GeneralResult<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams);
nn::GeneralResult<Model> convert(const nn::Model& model);
nn::GeneralResult<Model::ExtensionNameAndPrefix> convert(
        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
nn::Result<OutputShape> convert(const nn::OutputShape& outputShape);
nn::Result<MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
nn::Result<Timing> convert(const nn::Timing& timing);
nn::Result<Extension> convert(const nn::Extension& extension);
nn::Result<Extension::OperandTypeInformation> convert(
nn::GeneralResult<OutputShape> convert(const nn::OutputShape& outputShape);
nn::GeneralResult<MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Timing> convert(const nn::Timing& timing);
nn::GeneralResult<Extension> convert(const nn::Extension& extension);
nn::GeneralResult<Extension::OperandTypeInformation> convert(
        const nn::Extension::OperandTypeInformation& operandTypeInformation);
nn::Result<hidl_handle> convert(const nn::NativeHandle& handle);
nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle);

nn::Result<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions);
nn::Result<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles);
nn::Result<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions);
nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles);
nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);

} // namespace android::hardware::neuralnetworks::V1_2::utils

98
neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
Normal file
@@ -0,0 +1,98 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H

#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_2::utils {

nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device);
nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device);
nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device);
nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device);
nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
        V1_2::IDevice* device);

class Device final : public nn::IDevice {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
                                                                   sp<V1_2::IDevice> device);

    Device(PrivateConstructorTag tag, std::string name, std::string versionString,
           nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
           nn::Capabilities capabilities, std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
           sp<V1_2::IDevice> device, hal::utils::DeathHandler deathHandler);

    const std::string& getName() const override;
    const std::string& getVersionString() const override;
    nn::Version getFeatureLevel() const override;
    nn::DeviceType getType() const override;
    const std::vector<nn::Extension>& getSupportedExtensions() const override;
    const nn::Capabilities& getCapabilities() const override;
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;

    nn::GeneralResult<void> wait() const override;

    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
            const nn::Model& model) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedBuffer> allocate(
            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const override;

  private:
    const std::string kName;
    const std::string kVersionString;
    const nn::DeviceType kDeviceType;
    const std::vector<nn::Extension> kExtensions;
    const nn::Capabilities kCapabilities;
    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
    const sp<V1_2::IDevice> kDevice;
    const hal::utils::DeathHandler kDeathHandler;
};

} // namespace android::hardware::neuralnetworks::V1_2::utils

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H

70
neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
Normal file
@@ -0,0 +1,70 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H

#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::V1_2::utils {

class PreparedModel final : public nn::IPreparedModel {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            sp<V1_2::IPreparedModel> preparedModel);

    PreparedModel(PrivateConstructorTag tag, sp<V1_2::IPreparedModel> preparedModel,
                  hal::utils::DeathHandler deathHandler);

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;

    std::any getUnderlyingResource() const override;

  private:
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
            const V1_0::Request& request, MeasureTiming measure) const;
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
            const V1_0::Request& request, MeasureTiming measure) const;

    const sp<V1_2::IPreparedModel> kPreparedModel;
    const hal::utils::DeathHandler kDeathHandler;
};

} // namespace android::hardware::neuralnetworks::V1_2::utils

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H

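A usage sketch for the class above; halPreparedModel and request are assumed inputs (an sp<V1_2::IPreparedModel> and a valid canonical nn::Request), everything else is named in the header:

auto maybePreparedModel = PreparedModel::create(halPreparedModel);
if (maybePreparedModel.has_value()) {
    const std::shared_ptr<const PreparedModel>& preparedModel = maybePreparedModel.value();
    auto result = preparedModel->execute(request, nn::MeasureTiming::YES,
                                         /*deadline=*/{}, /*loopTimeoutDuration=*/{});
    if (result.has_value()) {
        const auto& [outputShapes, timing] = result.value();
        // outputShapes reports per-output dimensions and sufficiency; timing is
        // only meaningful when timing measurement was requested.
    }
}
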
31
neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h
Normal file
@@ -0,0 +1,31 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <string>

namespace android::hardware::neuralnetworks::V1_2::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);

} // namespace android::hardware::neuralnetworks::V1_2::utils

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H

@@ -22,6 +22,7 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>

@@ -38,10 +39,14 @@ constexpr auto kVersion = nn::Version::ANDROID_Q;

template <typename Type>
nn::Result<void> validate(const Type& halObject) {
    const auto canonical = NN_TRY(nn::convert(halObject));
    const auto version = NN_TRY(nn::validate(canonical));
    const auto maybeCanonical = nn::convert(halObject);
    if (!maybeCanonical.has_value()) {
        return nn::error() << maybeCanonical.error().message;
    }
    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
    if (version > utils::kVersion) {
        return NN_ERROR() << "";
        return NN_ERROR() << "Insufficient version: " << version << " vs required "
                          << utils::kVersion;
    }
    return {};
}

@@ -58,9 +63,14 @@ bool valid(const Type& halObject) {

template <typename Type>
decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
    auto canonical = NN_TRY(nn::convert(halObject));
    const auto version = NN_TRY(nn::validate(canonical));
    const auto maybeVersion = nn::validate(canonical);
    if (!maybeVersion.has_value()) {
        return nn::error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > utils::kVersion) {
        return NN_ERROR() << "";
        return NN_ERROR() << "Insufficient version: " << version << " vs required "
                          << utils::kVersion;
    }
    return canonical;
}

147
neuralnetworks/1.2/utils/src/Callbacks.cpp
Normal file
@@ -0,0 +1,147 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "Callbacks.h"

#include "Conversions.h"
#include "PreparedModel.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.0/PreparedModel.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

#include <utility>

namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<V1_0::IPreparedModel>& preparedModel) {
    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
}

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<IPreparedModel>& preparedModel) {
    return NN_TRY(utils::PreparedModel::create(preparedModel));
}

nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                     const Timing& timing) {
    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
                          NN_TRY(validatedConvertToCanonical(timing)));
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResults(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
    return hal::utils::makeExecutionFailure(
            convertExecutionGeneralResultsHelper(outputShapes, timing));
}

} // namespace

Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    return Void();
}

Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                               const sp<IPreparedModel>& preparedModel) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    return Void();
}

void PreparedModelCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}

void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
    mData.put(std::move(result));
}

// ExecutionCallback methods begin here

Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal({});
    }
    return Void();
}

Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                           const hidl_vec<OutputShape>& outputShapes,
                                           const Timing& timing) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
    }
    return Void();
}

void ExecutionCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}

void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
    mData.put(std::move(result));
}

} // namespace android::hardware::neuralnetworks::V1_2::utils

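Every notify path above funnels into TransferValue::put(), and get() blocks in take(). A minimal sketch of what such a single-use hand-off could look like; the real hal::utils::TransferValue lives in the common utils library and may differ, so OneShotValue below is a hypothetical stand-in:

#include <condition_variable>
#include <mutex>
#include <optional>
#include <utility>

template <typename T>
class OneShotValue {
  public:
    void put(T value) {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            if (mValue.has_value()) return;  // first notification wins
            mValue = std::move(value);
        }
        mCondition.notify_all();
    }

    T take() {  // blocks until put() has been called
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mValue.has_value(); });
        T value = std::move(mValue).value();
        mValue.reset();
        return value;
    }

  private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::optional<T> mValue;
};
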
@@ -26,6 +26,7 @@
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/1.0/Conversions.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
@@ -78,7 +79,7 @@ template <typename Input>
|
||||
using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
|
||||
|
||||
template <typename Type>
|
||||
Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
|
||||
GeneralResult<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
|
||||
std::vector<ConvertOutput<Type>> canonical;
|
||||
canonical.reserve(arguments.size());
|
||||
for (const auto& argument : arguments) {
|
||||
@@ -88,25 +89,25 @@ Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& argume
|
||||
}
|
||||
|
||||
template <typename Type>
|
||||
Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
|
||||
GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
|
||||
return convertVec(arguments);
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
Result<OperandType> convert(const hal::V1_2::OperandType& operandType) {
|
||||
GeneralResult<OperandType> convert(const hal::V1_2::OperandType& operandType) {
|
||||
return static_cast<OperandType>(operandType);
|
||||
}
|
||||
|
||||
Result<OperationType> convert(const hal::V1_2::OperationType& operationType) {
|
||||
GeneralResult<OperationType> convert(const hal::V1_2::OperationType& operationType) {
|
||||
return static_cast<OperationType>(operationType);
|
||||
}
|
||||
|
||||
Result<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
|
||||
GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
|
||||
return static_cast<DeviceType>(deviceType);
|
||||
}
|
||||
|
||||
Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
|
||||
GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
|
||||
const bool validOperandTypes = std::all_of(
|
||||
capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
|
||||
[](const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
|
||||
@@ -114,7 +115,7 @@ Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
|
||||
return !maybeType.has_value() ? false : validOperandType(maybeType.value());
|
||||
});
|
||||
if (!validOperandTypes) {
|
||||
return NN_ERROR()
|
||||
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
|
||||
<< "Invalid OperandType when converting OperandPerformance in Capabilities";
|
||||
}
|
||||
|
||||
@@ -124,8 +125,9 @@ Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
|
||||
NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
|
||||
auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance));
|
||||
|
||||
auto table =
|
||||
NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
|
||||
auto table = NN_TRY(hal::utils::makeGeneralFailure(
|
||||
Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
|
||||
nn::ErrorStatus::GENERAL_FAILURE));
|
||||
|
||||
return Capabilities{
|
||||
.relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
|
||||
@@ -134,7 +136,7 @@ Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
|
||||
};
|
||||
}
|
||||
|
||||
Result<Capabilities::OperandPerformance> convert(
|
||||
GeneralResult<Capabilities::OperandPerformance> convert(
|
||||
const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
|
||||
return Capabilities::OperandPerformance{
|
||||
.type = NN_TRY(convert(operandPerformance.type)),
|
||||
@@ -142,7 +144,7 @@ Result<Capabilities::OperandPerformance> convert(
|
||||
};
|
||||
}
|
||||
|
||||
Result<Operation> convert(const hal::V1_2::Operation& operation) {
|
||||
GeneralResult<Operation> convert(const hal::V1_2::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
@@ -150,7 +152,7 @@ Result<Operation> convert(const hal::V1_2::Operation& operation) {
    };
}

Result<Operand::SymmPerChannelQuantParams> convert(
GeneralResult<Operand::SymmPerChannelQuantParams> convert(
        const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    return Operand::SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
@@ -158,7 +160,7 @@ Result<Operand::SymmPerChannelQuantParams> convert(
    };
}

Result<Operand> convert(const hal::V1_2::Operand& operand) {
GeneralResult<Operand> convert(const hal::V1_2::Operand& operand) {
    return Operand{
            .type = NN_TRY(convert(operand.type)),
            .dimensions = operand.dimensions,
@@ -170,7 +172,7 @@ Result<Operand> convert(const hal::V1_2::Operand& operand) {
    };
}

Result<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams) {
GeneralResult<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams) {
    using Discriminator = hal::V1_2::Operand::ExtraParams::hidl_discriminator;
    switch (extraParams.getDiscriminator()) {
        case Discriminator::none:
@@ -180,11 +182,12 @@ Result<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extr
        case Discriminator::extension:
            return extraParams.extension();
    }
    return NN_ERROR() << "Unrecognized Operand::ExtraParams discriminator: "
                      << underlyingType(extraParams.getDiscriminator());
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "Unrecognized Operand::ExtraParams discriminator: "
           << underlyingType(extraParams.getDiscriminator());
}

Result<Model> convert(const hal::V1_2::Model& model) {
GeneralResult<Model> convert(const hal::V1_2::Model& model) {
    auto operations = NN_TRY(convert(model.operations));

    // Verify number of consumers.
@@ -193,9 +196,9 @@ Result<Model> convert(const hal::V1_2::Model& model) {
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
                              << numberOfConsumers[i] << " but found "
                              << model.operands[i].numberOfConsumers;
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Invalid numberOfConsumers for operand " << i << ", expected "
                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
        }
    }

@@ -215,7 +218,7 @@ Result<Model> convert(const hal::V1_2::Model& model) {
    };
}

Result<Model::ExtensionNameAndPrefix> convert(
GeneralResult<Model::ExtensionNameAndPrefix> convert(
        const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
@@ -223,29 +226,29 @@ Result<Model::ExtensionNameAndPrefix> convert(
    };
}

Result<OutputShape> convert(const hal::V1_2::OutputShape& outputShape) {
GeneralResult<OutputShape> convert(const hal::V1_2::OutputShape& outputShape) {
    return OutputShape{
            .dimensions = outputShape.dimensions,
            .isSufficient = outputShape.isSufficient,
    };
}

Result<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {
GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {
    return static_cast<MeasureTiming>(measureTiming);
}

Result<Timing> convert(const hal::V1_2::Timing& timing) {
GeneralResult<Timing> convert(const hal::V1_2::Timing& timing) {
    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
}

Result<Extension> convert(const hal::V1_2::Extension& extension) {
GeneralResult<Extension> convert(const hal::V1_2::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(convert(extension.operandTypes)),
    };
}

Result<Extension::OperandTypeInformation> convert(
GeneralResult<Extension::OperandTypeInformation> convert(
        const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation) {
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
@@ -254,20 +257,21 @@ Result<Extension::OperandTypeInformation> convert(
    };
}

Result<NativeHandle> convert(const hidl_handle& handle) {
GeneralResult<NativeHandle> convert(const hidl_handle& handle) {
    auto* cloned = native_handle_clone(handle.getNativeHandle());
    return ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
}

Result<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
GeneralResult<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
    return convertVec(extensions);
}

Result<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
GeneralResult<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
    return convertVec(handles);
}

Result<std::vector<OutputShape>> convert(const hidl_vec<hal::V1_2::OutputShape>& outputShapes) {
GeneralResult<std::vector<OutputShape>> convert(
        const hidl_vec<hal::V1_2::OutputShape>& outputShapes) {
    return convertVec(outputShapes);
}
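For orientation, a minimal sketch (not part of this change) of how these GeneralResult-returning converters compose: NN_TRY either unwraps a successful value or returns the error immediately, so a failure anywhere in a nested convert propagates to the caller with its ErrorStatus and message intact. The caller name below is hypothetical.

// Hypothetical caller, sketching how NN_TRY short-circuits on conversion failure.
nn::GeneralResult<nn::Model> toCanonicalModel(const hal::V1_2::Model& halModel) {
    // On failure, NN_TRY returns convert()'s error (status + message) from here.
    nn::Model canonical = NN_TRY(nn::convert(halModel));
    return canonical;
}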
@@ -278,24 +282,24 @@ namespace {

using utils::convert;

nn::Result<V1_0::OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
nn::GeneralResult<V1_0::OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
    return V1_0::utils::convert(lifetime);
}

nn::Result<V1_0::PerformanceInfo> convert(
nn::GeneralResult<V1_0::PerformanceInfo> convert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::convert(performanceInfo);
}

nn::Result<V1_0::DataLocation> convert(const nn::DataLocation& location) {
nn::GeneralResult<V1_0::DataLocation> convert(const nn::DataLocation& location) {
    return V1_0::utils::convert(location);
}

nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::convert(operandValues);
}

nn::Result<hidl_memory> convert(const nn::Memory& memory) {
nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
    return V1_0::utils::convert(memory);
}

@@ -303,7 +307,7 @@ template <typename Input>
using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
    hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(convert(arguments[i]));
@@ -312,22 +316,23 @@ nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& ar
}

template <typename Type>
nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
    return convertVec(arguments);
}

nn::Result<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
nn::GeneralResult<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
    return Operand::ExtraParams{};
}

nn::Result<Operand::ExtraParams> makeExtraParams(
nn::GeneralResult<Operand::ExtraParams> makeExtraParams(
        const nn::Operand::SymmPerChannelQuantParams& channelQuant) {
    Operand::ExtraParams ret;
    ret.channelQuant(NN_TRY(convert(channelQuant)));
    return ret;
}

nn::Result<Operand::ExtraParams> makeExtraParams(const nn::Operand::ExtensionParams& extension) {
nn::GeneralResult<Operand::ExtraParams> makeExtraParams(
        const nn::Operand::ExtensionParams& extension) {
    Operand::ExtraParams ret;
    ret.extension(extension);
    return ret;
@@ -335,28 +340,29 @@ nn::Result<Operand::ExtraParams> makeExtraParams(const nn::Operand::ExtensionPar

}  // anonymous namespace

nn::Result<OperandType> convert(const nn::OperandType& operandType) {
nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

nn::Result<OperationType> convert(const nn::OperationType& operationType) {
nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

nn::Result<DeviceType> convert(const nn::DeviceType& deviceType) {
nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
    switch (deviceType) {
        case nn::DeviceType::UNKNOWN:
            return NN_ERROR() << "Invalid DeviceType UNKNOWN";
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid DeviceType UNKNOWN";
        case nn::DeviceType::OTHER:
        case nn::DeviceType::CPU:
        case nn::DeviceType::GPU:
        case nn::DeviceType::ACCELERATOR:
            return static_cast<DeviceType>(deviceType);
    }
    return NN_ERROR() << "Invalid DeviceType " << underlyingType(deviceType);
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "Invalid DeviceType " << underlyingType(deviceType);
}

nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
    operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
    std::copy_if(capabilities.operandPerformance.asVector().begin(),
@@ -375,7 +381,7 @@ nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
    };
}

nn::Result<Capabilities::OperandPerformance> convert(
nn::GeneralResult<Capabilities::OperandPerformance> convert(
        const nn::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(convert(operandPerformance.type)),
@@ -383,7 +389,7 @@ nn::Result<Capabilities::OperandPerformance> convert(
    };
}

nn::Result<Operation> convert(const nn::Operation& operation) {
nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
@@ -391,7 +397,7 @@ nn::Result<Operation> convert(const nn::Operation& operation) {
    };
}

nn::Result<SymmPerChannelQuantParams> convert(
nn::GeneralResult<SymmPerChannelQuantParams> convert(
        const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    return SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
@@ -399,7 +405,7 @@ nn::Result<SymmPerChannelQuantParams> convert(
    };
}

nn::Result<Operand> convert(const nn::Operand& operand) {
nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
    return Operand{
            .type = NN_TRY(convert(operand.type)),
            .dimensions = operand.dimensions,
@@ -412,13 +418,14 @@ nn::Result<Operand> convert(const nn::Operand& operand) {
    };
}

nn::Result<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
nn::GeneralResult<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
    return std::visit([](const auto& x) { return makeExtraParams(x); }, extraParams);
}

nn::Result<Model> convert(const nn::Model& model) {
nn::GeneralResult<Model> convert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be converted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(convert(model.main.operands));
@@ -443,7 +450,7 @@ nn::Result<Model> convert(const nn::Model& model) {
    };
}

nn::Result<Model::ExtensionNameAndPrefix> convert(
nn::GeneralResult<Model::ExtensionNameAndPrefix> convert(
        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
@@ -451,27 +458,27 @@ nn::Result<Model::ExtensionNameAndPrefix> convert(
    };
}

nn::Result<OutputShape> convert(const nn::OutputShape& outputShape) {
nn::GeneralResult<OutputShape> convert(const nn::OutputShape& outputShape) {
    return OutputShape{.dimensions = outputShape.dimensions,
                       .isSufficient = outputShape.isSufficient};
}

nn::Result<MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
nn::GeneralResult<MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
    return static_cast<MeasureTiming>(measureTiming);
}

nn::Result<Timing> convert(const nn::Timing& timing) {
nn::GeneralResult<Timing> convert(const nn::Timing& timing) {
    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
}

nn::Result<Extension> convert(const nn::Extension& extension) {
nn::GeneralResult<Extension> convert(const nn::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(convert(extension.operandTypes)),
    };
}

nn::Result<Extension::OperandTypeInformation> convert(
nn::GeneralResult<Extension::OperandTypeInformation> convert(
        const nn::Extension::OperandTypeInformation& operandTypeInformation) {
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
@@ -480,22 +487,22 @@ nn::Result<Extension::OperandTypeInformation> convert(
    };
}

nn::Result<hidl_handle> convert(const nn::NativeHandle& handle) {
nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle) {
    const auto hidlHandle = hidl_handle(handle->handle());
    // Copy memory to force the native_handle_t to be copied.
    auto copiedHandle = hidlHandle;
    return copiedHandle;
}

nn::Result<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
    return convertVec(extensions);
}

nn::Result<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
    return convertVec(handles);
}

nn::Result<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes) {
nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes) {
    return convertVec(outputShapes);
}
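The convertVec helper above is the workhorse behind every vector overload: it converts element by element and short-circuits on the first failure. A hedged sketch of the same shape with concrete types (the caller name is hypothetical):

// Sketch of the element-wise pattern convertVec implements (hypothetical caller).
nn::GeneralResult<hidl_vec<OutputShape>> toHalShapes(
        const std::vector<nn::OutputShape>& shapes) {
    hidl_vec<OutputShape> halShapes(shapes.size());
    for (size_t i = 0; i < shapes.size(); ++i) {
        halShapes[i] = NN_TRY(convert(shapes[i]));  // first failing element aborts the loop
    }
    return halShapes;
}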
318
neuralnetworks/1.2/utils/src/Device.cpp
Normal file
@@ -0,0 +1,318 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Device.h"

#include "Callbacks.h"
#include "Conversions.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.1/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_2::utils {

nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<std::string> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                            << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status);
        } else {
            result = versionString;
        }
    };

    const auto ret = device->getVersionString(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<nn::DeviceType> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                               << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status);
        } else {
            result = nn::convert(deviceType);
        }
    };

    const auto ret = device->getType(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<std::vector<nn::Extension>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status);
        } else {
            result = nn::convert(extensions);
        }
    };

    const auto ret = device->getSupportedExtensions(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                 << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status);
        } else {
            result = validatedConvertToCanonical(capabilities);
        }
    };

    const auto ret = device->getCapabilities_1_2(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
        V1_2::IDevice* device) {
    CHECK(device != nullptr);

    nn::GeneralResult<std::pair<uint32_t, uint32_t>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache,
                              uint32_t numDataCache) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "getNumberOfCacheFilesNeeded failed with " << toString(status);
        } else {
            result = std::make_pair(numModelCache, numDataCache);
        }
    };

    const auto ret = device->getNumberOfCacheFilesNeeded(cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
                                                                sp<V1_2::IDevice> device) {
    if (name.empty()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_2::utils::Device::create must have non-empty name";
    }
    if (device == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_2::utils::Device::create must have non-null device";
    }

    auto versionString = NN_TRY(initVersionString(device.get()));
    const auto deviceType = NN_TRY(initDeviceType(device.get()));
    auto extensions = NN_TRY(initExtensions(device.get()));
    auto capabilities = NN_TRY(initCapabilities(device.get()));
    const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get()));

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
    return std::make_shared<const Device>(
            PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType,
            std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
            std::move(device), std::move(deathHandler));
}

Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString,
               nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
               nn::Capabilities capabilities,
               std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, sp<V1_2::IDevice> device,
               hal::utils::DeathHandler deathHandler)
    : kName(std::move(name)),
      kVersionString(std::move(versionString)),
      kDeviceType(deviceType),
      kExtensions(std::move(extensions)),
      kCapabilities(std::move(capabilities)),
      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded),
      kDevice(std::move(device)),
      kDeathHandler(std::move(deathHandler)) {}

const std::string& Device::getName() const {
    return kName;
}

const std::string& Device::getVersionString() const {
    return kVersionString;
}

nn::Version Device::getFeatureLevel() const {
    return nn::Version::ANDROID_Q;
}

nn::DeviceType Device::getType() const {
    return kDeviceType;
}

const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
    return kExtensions;
}

const nn::Capabilities& Device::getCapabilities() const {
    return kCapabilities;
}

std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
    return kNumberOfCacheFilesNeeded;
}

nn::GeneralResult<void> Device::wait() const {
    const auto ret = kDevice->ping();
    return hal::utils::handleTransportError(ret);
}

nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));

    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                  << "uninitialized";
    auto cb = [&result, &model](V1_0::ErrorStatus status,
                                const hidl_vec<bool>& supportedOperations) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "getSupportedOperations_1_2 failed with " << toString(status);
        } else if (supportedOperations.size() != model.main.operations.size()) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "getSupportedOperations_1_2 returned vector of size "
                     << supportedOperations.size() << " but expected "
                     << model.main.operations.size();
        } else {
            result = supportedOperations;
        }
    };

    const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));
    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
    const auto hidlModelCache = NN_TRY(convert(modelCache));
    const auto hidlDataCache = NN_TRY(convert(dataCache));
    const auto hidlToken = token;

    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
                                               hidlDataCache, hidlToken, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status);
    }

    return cb->get();
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    const auto hidlModelCache = NN_TRY(convert(modelCache));
    const auto hidlDataCache = NN_TRY(convert(dataCache));
    const auto hidlToken = token;

    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status);
    }

    return cb->get();
}

nn::GeneralResult<nn::SharedBuffer> Device::allocate(
        const nn::BufferDesc& /*desc*/,
        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
        const std::vector<nn::BufferRole>& /*inputRoles*/,
        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IDevice::allocate not supported on 1.2 HAL service";
}

}  // namespace android::hardware::neuralnetworks::V1_2::utils
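Each init* helper above follows the same HIDL idiom: seed a GeneralResult with an "uninitialized" error, let the synchronous HIDL callback overwrite it, and check the transport-level Return<> separately. A minimal sketch of the shape, where getSomething and SomeValue are hypothetical stand-ins rather than real 1.2 APIs:

// Sketch of the synchronous-callback idiom used by the init* helpers.
// getSomething and SomeValue are hypothetical stand-ins, not real 1.2 APIs.
nn::GeneralResult<SomeValue> initSomething(V1_2::IDevice* device) {
    nn::GeneralResult<SomeValue> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const SomeValue& value) {
        if (status != V1_0::ErrorStatus::NONE) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "getSomething failed with " << toString(status);
        } else {
            result = value;
        }
    };
    NN_TRY(hal::utils::handleTransportError(device->getSomething(cb)));
    return result;  // the callback's value, or the seeded/overwritten error
}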
161
neuralnetworks/1.2/utils/src/PreparedModel.cpp
Normal file
@@ -0,0 +1,161 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "PreparedModel.h"

#include "Callbacks.h"
#include "Conversions.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {

nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionResultsHelper(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
                          NN_TRY(validatedConvertToCanonical(timing)));
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
        const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
}

}  // namespace

nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
        sp<V1_2::IPreparedModel> preparedModel) {
    if (preparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
    }

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
                                                 std::move(deathHandler));
}

PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_2::IPreparedModel> preparedModel,
                             hal::utils::DeathHandler deathHandler)
    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const {
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
                              const Timing& timing) {
        if (status != V1_0::ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
        } else {
            result = convertExecutionResults(outputShapes, timing);
        }
    };

    const auto ret = kPreparedModel->executeSynchronously(request, measure, cb);
    NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));

    return result;
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming measure) const {
    const auto cb = sp<ExecutionCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
    const auto status =
            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "execute failed with " << toString(status);
    }

    return cb->get();
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalTimePoint& /*deadline*/,
        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
    // Ensure that request is ready for IPC.
    std::optional<nn::Request> maybeRequestInShared;
    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));

    const auto hidlRequest =
            NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared)));
    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const bool preferSynchronous = true;

    // Execute synchronously if allowed.
    if (preferSynchronous) {
        result = executeSynchronously(hidlRequest, hidlMeasure);
    }

    // Run asynchronous execution if execution has not already completed.
    if (!result.has_value()) {
        result = executeAsynchronously(hidlRequest, hidlMeasure);
    }

    // Flush output buffers if the execution was successful.
    if (result.has_value()) {
        NN_TRY(hal::utils::makeExecutionFailure(
                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
    }

    return result;
}

nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
PreparedModel::executeFenced(
        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
}

std::any PreparedModel::getUnderlyingResource() const {
    sp<V1_0::IPreparedModel> resource = kPreparedModel;
    return resource;
}

}  // namespace android::hardware::neuralnetworks::V1_2::utils
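Stripped of the conversion plumbing, the execute path above always tries the synchronous IPreparedModel entry point first and only falls back to the callback-based execute_1_2 when no value was produced. A simplified restatement (not additional code in the change):

// Simplified restatement of the fallback in PreparedModel::execute above.
auto result = executeSynchronously(hidlRequest, hidlMeasure);
if (!result.has_value()) {
    // Synchronous execution did not complete; retry through the asynchronous path.
    result = executeAsynchronously(hidlRequest, hidlMeasure);
}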
41
neuralnetworks/1.2/utils/src/Service.cpp
Normal file
@@ -0,0 +1,41 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Service.h"

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/ResilientDevice.h>
#include <string>
#include "Device.h"

namespace android::hardware::neuralnetworks::V1_2::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
    hal::utils::ResilientDevice::Factory makeDevice =
            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
        if (service == nullptr) {
            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
        }
        return Device::create(name, std::move(service));
    };

    return hal::utils::ResilientDevice::create(std::move(makeDevice));
}

}  // namespace android::hardware::neuralnetworks::V1_2::utils
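getDevice wires a factory lambda into ResilientDevice, which re-invokes it (in blocking mode) to reconnect whenever the underlying HAL service dies. A hedged sketch of a call site; "example-driver" is a made-up service name:

// Hypothetical caller resolving a named 1.2 NNAPI service through the resilient wrapper.
nn::GeneralResult<nn::SharedDevice> getExampleDevice() {
    return android::hardware::neuralnetworks::V1_2::utils::getDevice("example-driver");
}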
@@ -20,6 +20,7 @@ cc_library_static {
    srcs: ["src/*"],
    local_include_dirs: ["include/nnapi/hal/1.3/"],
    export_include_dirs: ["include"],
    cflags: ["-Wthread-safety"],
    static_libs: [
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
52
neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H

#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>

namespace android::hardware::neuralnetworks::V1_3::utils {

class Buffer final : public nn::IBuffer {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const Buffer>> create(
            sp<V1_3::IBuffer> buffer, nn::Request::MemoryDomainToken token);

    Buffer(PrivateConstructorTag tag, sp<V1_3::IBuffer> buffer,
           nn::Request::MemoryDomainToken token);

    nn::Request::MemoryDomainToken getToken() const override;

    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
                                     const nn::Dimensions& dimensions) const override;

  private:
    const sp<V1_3::IBuffer> kBuffer;
    const nn::Request::MemoryDomainToken kToken;
};

}  // namespace android::hardware::neuralnetworks::V1_3::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H
83
neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
Normal file
@@ -0,0 +1,83 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Callbacks.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

namespace android::hardware::neuralnetworks::V1_3::utils {

class PreparedModelCallback final : public IPreparedModelCallback,
                                    public hal::utils::IProtectedCallback {
  public:
    using Data = nn::GeneralResult<nn::SharedPreparedModel>;

    Return<void> notify(V1_0::ErrorStatus status,
                        const sp<V1_0::IPreparedModel>& preparedModel) override;
    Return<void> notify_1_2(V1_0::ErrorStatus status,
                            const sp<V1_2::IPreparedModel>& preparedModel) override;
    Return<void> notify_1_3(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;

    void notifyAsDeadObject() override;

    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
  public:
    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;

    Return<void> notify(V1_0::ErrorStatus status) override;
    Return<void> notify_1_2(V1_0::ErrorStatus status,
                            const hidl_vec<V1_2::OutputShape>& outputShapes,
                            const V1_2::Timing& timing) override;
    Return<void> notify_1_3(ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
                            const V1_2::Timing& timing) override;

    void notifyAsDeadObject() override;

    Data get();

  private:
    void notifyInternal(Data result);

    hal::utils::TransferValue<Data> mData;
};

}  // namespace android::hardware::neuralnetworks::V1_3::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H
@@ -25,54 +25,54 @@

namespace android::nn {

Result<OperandType> convert(const hal::V1_3::OperandType& operandType);
Result<OperationType> convert(const hal::V1_3::OperationType& operationType);
Result<Priority> convert(const hal::V1_3::Priority& priority);
Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities);
Result<Capabilities::OperandPerformance> convert(
GeneralResult<OperandType> convert(const hal::V1_3::OperandType& operandType);
GeneralResult<OperationType> convert(const hal::V1_3::OperationType& operationType);
GeneralResult<Priority> convert(const hal::V1_3::Priority& priority);
GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities);
GeneralResult<Capabilities::OperandPerformance> convert(
        const hal::V1_3::Capabilities::OperandPerformance& operandPerformance);
Result<Operation> convert(const hal::V1_3::Operation& operation);
Result<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime);
Result<Operand> convert(const hal::V1_3::Operand& operand);
Result<Model> convert(const hal::V1_3::Model& model);
Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph);
Result<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
Result<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole);
Result<Request> convert(const hal::V1_3::Request& request);
Result<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool);
Result<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
Result<OptionalTimeoutDuration> convert(
GeneralResult<Operation> convert(const hal::V1_3::Operation& operation);
GeneralResult<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime);
GeneralResult<Operand> convert(const hal::V1_3::Operand& operand);
GeneralResult<Model> convert(const hal::V1_3::Model& model);
GeneralResult<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph);
GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
GeneralResult<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole);
GeneralResult<Request> convert(const hal::V1_3::Request& request);
GeneralResult<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool);
GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
GeneralResult<OptionalTimeoutDuration> convert(
        const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration);
Result<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);

Result<std::vector<BufferRole>> convert(
GeneralResult<std::vector<BufferRole>> convert(
        const hardware::hidl_vec<hal::V1_3::BufferRole>& bufferRoles);

}  // namespace android::nn

namespace android::hardware::neuralnetworks::V1_3::utils {

nn::Result<OperandType> convert(const nn::OperandType& operandType);
nn::Result<OperationType> convert(const nn::OperationType& operationType);
nn::Result<Priority> convert(const nn::Priority& priority);
nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
nn::Result<Capabilities::OperandPerformance> convert(
nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType);
nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
nn::GeneralResult<Priority> convert(const nn::Priority& priority);
nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
nn::GeneralResult<Capabilities::OperandPerformance> convert(
        const nn::Capabilities::OperandPerformance& operandPerformance);
nn::Result<Operation> convert(const nn::Operation& operation);
nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime);
nn::Result<Operand> convert(const nn::Operand& operand);
nn::Result<Model> convert(const nn::Model& model);
nn::Result<Subgraph> convert(const nn::Model::Subgraph& subgraph);
nn::Result<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
nn::Result<BufferRole> convert(const nn::BufferRole& bufferRole);
nn::Result<Request> convert(const nn::Request& request);
nn::Result<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool);
nn::Result<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint);
nn::Result<OptionalTimeoutDuration> convert(
nn::GeneralResult<Operation> convert(const nn::Operation& operation);
nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime);
nn::GeneralResult<Operand> convert(const nn::Operand& operand);
nn::GeneralResult<Model> convert(const nn::Model& model);
nn::GeneralResult<Subgraph> convert(const nn::Model::Subgraph& subgraph);
nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<BufferRole> convert(const nn::BufferRole& bufferRole);
nn::GeneralResult<Request> convert(const nn::Request& request);
nn::GeneralResult<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool);
nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint);
nn::GeneralResult<OptionalTimeoutDuration> convert(
        const nn::OptionalTimeoutDuration& optionalTimeoutDuration);
nn::Result<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);

nn::Result<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);

}  // namespace android::hardware::neuralnetworks::V1_3::utils
91
neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
Normal file
@@ -0,0 +1,91 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H

#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_3::utils {

class Device final : public nn::IDevice {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
                                                                   sp<V1_3::IDevice> device);

    Device(PrivateConstructorTag tag, std::string name, std::string versionString,
           nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
           nn::Capabilities capabilities, std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
           sp<V1_3::IDevice> device, hal::utils::DeathHandler deathHandler);

    const std::string& getName() const override;
    const std::string& getVersionString() const override;
    nn::Version getFeatureLevel() const override;
    nn::DeviceType getType() const override;
    const std::vector<nn::Extension>& getSupportedExtensions() const override;
    const nn::Capabilities& getCapabilities() const override;
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;

    nn::GeneralResult<void> wait() const override;

    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
            const nn::Model& model) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedBuffer> allocate(
            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const override;

  private:
    const std::string kName;
    const std::string kVersionString;
    const nn::DeviceType kDeviceType;
    const std::vector<nn::Extension> kExtensions;
    const nn::Capabilities kCapabilities;
    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
    const sp<V1_3::IDevice> kDevice;
    const hal::utils::DeathHandler kDeathHandler;
};

}  // namespace android::hardware::neuralnetworks::V1_3::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H
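As in the 1.2 utilities, the private PrivateConstructorTag keeps Device::create the only way to obtain an instance while still letting std::make_shared reach a technically-public constructor. The idiom in general form (a generic sketch, not this class):

// Generic sketch of the PrivateConstructorTag idiom used by these classes.
#include <memory>

class Widget {
    struct PrivateConstructorTag {};  // code outside the class cannot name this type

  public:
    static std::shared_ptr<const Widget> create() {
        // make_shared may call the public constructor because the tag is supplied here.
        return std::make_shared<const Widget>(PrivateConstructorTag{});
    }
    explicit Widget(PrivateConstructorTag /*tag*/) {}
};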
@@ -0,0 +1,71 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H

#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::V1_3::utils {

class PreparedModel final : public nn::IPreparedModel {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            sp<V1_3::IPreparedModel> preparedModel);

    PreparedModel(PrivateConstructorTag tag, sp<V1_3::IPreparedModel> preparedModel,
                  hal::utils::DeathHandler deathHandler);

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;

    std::any getUnderlyingResource() const override;

  private:
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
            const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
            const OptionalTimeoutDuration& loopTimeoutDuration) const;
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
            const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
            const OptionalTimeoutDuration& loopTimeoutDuration) const;

    const sp<V1_3::IPreparedModel> kPreparedModel;
    const hal::utils::DeathHandler kDeathHandler;
};

}  // namespace android::hardware::neuralnetworks::V1_3::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H
31
neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <string>

namespace android::hardware::neuralnetworks::V1_3::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);

}  // namespace android::hardware::neuralnetworks::V1_3::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H
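A hedged usage sketch of this entry point, assuming a driver registered under the instance name "example-driver" (the name is hypothetical):

// Hedged sketch: resolve a V1_3 driver by instance name and query it through
// the canonical nn::IDevice interface, which hides the HIDL version underneath.
void probe() {
    auto maybeDevice = V1_3::utils::getDevice("example-driver");
    if (!maybeDevice.has_value()) return;
    const nn::SharedDevice device = maybeDevice.value();
    // e.g. device->getFeatureLevel(), device->getCapabilities(), ...
}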
@@ -22,6 +22,7 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>
@@ -35,10 +36,14 @@ constexpr auto kVersion = nn::Version::ANDROID_R;

template <typename Type>
nn::Result<void> validate(const Type& halObject) {
-    const auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
    if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
    }
    return {};
}
@@ -55,9 +60,14 @@ bool valid(const Type& halObject) {
template <typename Type>
decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
    auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeVersion = nn::validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return nn::error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
    if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
    }
    return canonical;
}
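These helpers avoid NN_TRY where the callee's error type differs from the caller's. A hedged sketch of the intended calling convention, using V1_3::Request as the example input:

// Hedged sketch: convert-and-version-check an incoming HIDL object in one
// step. Fails if conversion fails or if the object requires a feature level
// newer than kVersion (Android R for the 1.3 utils).
nn::GeneralResult<nn::Request> toCanonical(const V1_3::Request& halRequest) {
    return validatedConvertToCanonical(halRequest);
}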
93
neuralnetworks/1.3/utils/src/Buffer.cpp
Normal file
@@ -0,0 +1,93 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Buffer.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/HandleError.h>

#include "Conversions.h"
#include "Utils.h"

#include <memory>
#include <utility>

namespace android::hardware::neuralnetworks::V1_3::utils {

nn::GeneralResult<std::shared_ptr<const Buffer>> Buffer::create(
        sp<V1_3::IBuffer> buffer, nn::Request::MemoryDomainToken token) {
    if (buffer == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_3::utils::Buffer::create must have non-null buffer";
    }
    if (token == static_cast<nn::Request::MemoryDomainToken>(0)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_3::utils::Buffer::create must have non-zero token";
    }

    return std::make_shared<const Buffer>(PrivateConstructorTag{}, std::move(buffer), token);
}

Buffer::Buffer(PrivateConstructorTag /*tag*/, sp<V1_3::IBuffer> buffer,
               nn::Request::MemoryDomainToken token)
    : kBuffer(std::move(buffer)), kToken(token) {
    CHECK(kBuffer != nullptr);
    CHECK(kToken != static_cast<nn::Request::MemoryDomainToken>(0));
}

nn::Request::MemoryDomainToken Buffer::getToken() const {
    return kToken;
}

nn::GeneralResult<void> Buffer::copyTo(const nn::Memory& dst) const {
    const auto hidlDst = NN_TRY(V1_0::utils::convert(dst));

    const auto ret = kBuffer->copyTo(hidlDst);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status);
    }

    return {};
}

nn::GeneralResult<void> Buffer::copyFrom(const nn::Memory& src,
                                         const nn::Dimensions& dimensions) const {
    const auto hidlSrc = NN_TRY(V1_0::utils::convert(src));
    const auto hidlDimensions = hidl_vec<uint32_t>(dimensions);

    const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status);
    }

    return {};
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils
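A hedged usage sketch of the adapter above, assuming halBuffer and sharedMemory were obtained elsewhere (both names hypothetical):

// Hedged sketch: wrap a driver-allocated IBuffer and copy its contents out.
void readBack(sp<V1_3::IBuffer> halBuffer, const nn::Memory& sharedMemory) {
    auto maybeBuffer = utils::Buffer::create(
            std::move(halBuffer), static_cast<nn::Request::MemoryDomainToken>(1));
    if (!maybeBuffer.has_value()) return;  // null buffer or zero token is rejected

    // copyTo() surfaces transport and driver errors as canonical ErrorStatus.
    const auto result = maybeBuffer.value()->copyTo(sharedMemory);
    if (!result.has_value()) {
        // result.error() carries the canonical status and a message.
    }
}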
184
neuralnetworks/1.3/utils/src/Callbacks.cpp
Normal file
@@ -0,0 +1,184 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Callbacks.h"

#include "Conversions.h"
#include "PreparedModel.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.0/PreparedModel.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/PreparedModel.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

#include <utility>

namespace android::hardware::neuralnetworks::V1_3::utils {
namespace {

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<V1_0::IPreparedModel>& preparedModel) {
    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
}

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<V1_2::IPreparedModel>& preparedModel) {
    return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel));
}

nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<IPreparedModel>& preparedModel) {
    return NN_TRY(utils::PreparedModel::create(preparedModel));
}

nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
                                     const V1_2::Timing& timing) {
    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
                          NN_TRY(validatedConvertToCanonical(timing)));
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResults(const hidl_vec<V1_2::OutputShape>& outputShapes,
                               const V1_2::Timing& timing) {
    return hal::utils::makeExecutionFailure(
            convertExecutionGeneralResultsHelper(outputShapes, timing));
}

}  // namespace

Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    return Void();
}

Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                               const sp<V1_2::IPreparedModel>& preparedModel) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    return Void();
}

Return<void> PreparedModelCallback::notify_1_3(ErrorStatus status,
                                               const sp<IPreparedModel>& preparedModel) {
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
    } else if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
    } else {
        notifyInternal(convertPreparedModel(preparedModel));
    }
    return Void();
}

void PreparedModelCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}

void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
    mData.put(std::move(result));
}

// ExecutionCallback methods begin here

Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal({});
    }
    return Void();
}

Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
                                           const V1_2::Timing& timing) {
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
    }
    return Void();
}

Return<void> ExecutionCallback::notify_1_3(ErrorStatus status,
                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
                                           const V1_2::Timing& timing) {
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
    } else {
        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
    }
    return Void();
}

void ExecutionCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}

ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}

void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
    mData.put(std::move(result));
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils
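These callbacks bridge HIDL's one-shot notifications to blocking waits: a binder thread delivers the result exactly once via notify*()/put(), and the requesting thread blocks in get()/take() until it arrives; notifyAsDeadObject() feeds the same channel so a crashed driver unblocks the waiter with DEAD_OBJECT instead of hanging. A hedged sketch of the wait side (argument values are placeholders, not the library's required defaults):

// Hedged sketch: launch an asynchronous prepare and park until the driver
// (or the death handler) notifies the callback.
nn::GeneralResult<nn::SharedPreparedModel> prepareAndWait(const sp<V1_3::IDevice>& device,
                                                          const Model& hidlModel) {
    const auto cb = sp<PreparedModelCallback>::make();
    const auto ret = device->prepareModel_1_3(
            hidlModel, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, Priority::MEDIUM,
            /*deadline=*/{}, /*modelCache=*/{}, /*dataCache=*/{}, /*token=*/{}, cb);
    NN_TRY(hal::utils::handleTransportError(ret));  // transport failure short-circuits
    return cb->get();                               // blocks until notify*() fires
}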
@@ -27,6 +27,7 @@
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>

#include <algorithm>
#include <chrono>
@@ -79,7 +80,7 @@ template <typename Input>
using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
    std::vector<ConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
@@ -89,25 +90,25 @@ Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& argume
}

template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
    return convertVec(arguments);
}

}  // anonymous namespace

-Result<OperandType> convert(const hal::V1_3::OperandType& operandType) {
+GeneralResult<OperandType> convert(const hal::V1_3::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

-Result<OperationType> convert(const hal::V1_3::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_3::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

-Result<Priority> convert(const hal::V1_3::Priority& priority) {
+GeneralResult<Priority> convert(const hal::V1_3::Priority& priority) {
    return static_cast<Priority>(priority);
}

-Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
    const bool validOperandTypes = std::all_of(
            capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
            [](const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
@@ -115,13 +116,14 @@ Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
            });
    if (!validOperandTypes) {
-        return NN_ERROR()
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "Invalid OperandType when converting OperandPerformance in Capabilities";
    }

    auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance));
-    auto table =
-            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+    auto table = NN_TRY(hal::utils::makeGeneralFailure(
+            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
+            nn::ErrorStatus::GENERAL_FAILURE));

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar =
@@ -134,7 +136,7 @@ Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
    };
}

-Result<Capabilities::OperandPerformance> convert(
+GeneralResult<Capabilities::OperandPerformance> convert(
        const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(convert(operandPerformance.type)),
@@ -142,7 +144,7 @@ Result<Capabilities::OperandPerformance> convert(
    };
}

-Result<Operation> convert(const hal::V1_3::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_3::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
@@ -150,11 +152,11 @@ Result<Operation> convert(const hal::V1_3::Operation& operation) {
    };
}

-Result<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime) {
+GeneralResult<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime) {
    return static_cast<Operand::LifeTime>(operandLifeTime);
}

-Result<Operand> convert(const hal::V1_3::Operand& operand) {
+GeneralResult<Operand> convert(const hal::V1_3::Operand& operand) {
    return Operand{
            .type = NN_TRY(convert(operand.type)),
            .dimensions = operand.dimensions,
@@ -166,7 +168,7 @@ Result<Operand> convert(const hal::V1_3::Operand& operand) {
    };
}

-Result<Model> convert(const hal::V1_3::Model& model) {
+GeneralResult<Model> convert(const hal::V1_3::Model& model) {
    return Model{
            .main = NN_TRY(convert(model.main)),
            .referenced = NN_TRY(convert(model.referenced)),
@@ -177,7 +179,7 @@ Result<Model> convert(const hal::V1_3::Model& model) {
    };
}

-Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
+GeneralResult<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
    auto operations = NN_TRY(convert(subgraph.operations));

    // Verify number of consumers.
@@ -186,9 +188,10 @@ Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
    CHECK(subgraph.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < subgraph.operands.size(); ++i) {
        if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << subgraph.operands[i].numberOfConsumers;
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found "
+                   << subgraph.operands[i].numberOfConsumers;
        }
    }

@@ -200,11 +203,11 @@ Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
    };
}

-Result<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {
+GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {
    return BufferDesc{.dimensions = bufferDesc.dimensions};
}

-Result<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
+GeneralResult<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
    return BufferRole{
            .modelIndex = bufferRole.modelIndex,
            .ioIndex = bufferRole.ioIndex,
@@ -212,7 +215,7 @@ Result<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
    };
}

-Result<Request> convert(const hal::V1_3::Request& request) {
+GeneralResult<Request> convert(const hal::V1_3::Request& request) {
    return Request{
            .inputs = NN_TRY(convert(request.inputs)),
            .outputs = NN_TRY(convert(request.outputs)),
@@ -220,7 +223,7 @@ Result<Request> convert(const hal::V1_3::Request& request) {
    };
}

-Result<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool) {
+GeneralResult<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool) {
    using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
    switch (memoryPool.getDiscriminator()) {
        case Discriminator::hidlMemory:
@@ -228,15 +231,16 @@ Result<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memory
        case Discriminator::token:
            return static_cast<Request::MemoryDomainToken>(memoryPool.token());
    }
-    return NN_ERROR() << "Invalid Request::MemoryPool discriminator "
-                      << underlyingType(memoryPool.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid Request::MemoryPool discriminator "
+           << underlyingType(memoryPool.getDiscriminator());
}

-Result<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
+GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
    constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count();
-    const auto makeTimePoint = [](uint64_t count) -> Result<OptionalTimePoint> {
+    const auto makeTimePoint = [](uint64_t count) -> GeneralResult<OptionalTimePoint> {
        if (count > kTimePointMaxCount) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Unable to convert OptionalTimePoint because the count exceeds the max";
        }
        const auto nanoseconds = std::chrono::nanoseconds{count};
@@ -250,16 +254,17 @@ Result<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTi
        case Discriminator::nanosecondsSinceEpoch:
            return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch());
    }
-    return NN_ERROR() << "Invalid OptionalTimePoint discriminator "
-                      << underlyingType(optionalTimePoint.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid OptionalTimePoint discriminator "
+           << underlyingType(optionalTimePoint.getDiscriminator());
}

-Result<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalTimeoutDuration> convert(
        const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
    constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count();
-    const auto makeTimeoutDuration = [](uint64_t count) -> Result<OptionalTimeoutDuration> {
+    const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult<OptionalTimeoutDuration> {
        if (count > kTimeoutDurationMaxCount) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Unable to convert OptionalTimeoutDuration because the count exceeds the max";
        }
        return TimeoutDuration{count};
@@ -272,11 +277,12 @@ Result<OptionalTimeoutDuration> convert(
        case Discriminator::nanoseconds:
            return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds());
    }
-    return NN_ERROR() << "Invalid OptionalTimeoutDuration discriminator "
-                      << underlyingType(optionalTimeoutDuration.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid OptionalTimeoutDuration discriminator "
+           << underlyingType(optionalTimeoutDuration.getDiscriminator());
}

-Result<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
+GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
    switch (status) {
        case hal::V1_3::ErrorStatus::NONE:
        case hal::V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -289,10 +295,11 @@ Result<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
        case hal::V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return static_cast<ErrorStatus>(status);
    }
-    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid ErrorStatus " << underlyingType(status);
}

-Result<std::vector<BufferRole>> convert(
+GeneralResult<std::vector<BufferRole>> convert(
        const hardware::hidl_vec<hal::V1_3::BufferRole>& bufferRoles) {
    return convertVec(bufferRoles);
}
@@ -304,32 +311,32 @@ namespace {

using utils::convert;

-nn::Result<V1_0::PerformanceInfo> convert(
+nn::GeneralResult<V1_0::PerformanceInfo> convert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::convert(performanceInfo);
}

-nn::Result<V1_0::DataLocation> convert(const nn::DataLocation& dataLocation) {
+nn::GeneralResult<V1_0::DataLocation> convert(const nn::DataLocation& dataLocation) {
    return V1_0::utils::convert(dataLocation);
}

-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::convert(operandValues);
}

-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
    return V1_0::utils::convert(memory);
}

-nn::Result<V1_0::RequestArgument> convert(const nn::Request::Argument& argument) {
+nn::GeneralResult<V1_0::RequestArgument> convert(const nn::Request::Argument& argument) {
    return V1_0::utils::convert(argument);
}

-nn::Result<V1_2::Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
+nn::GeneralResult<V1_2::Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
    return V1_2::utils::convert(extraParams);
}

-nn::Result<V1_2::Model::ExtensionNameAndPrefix> convert(
+nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> convert(
        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return V1_2::utils::convert(extensionNameAndPrefix);
}
@@ -338,7 +345,7 @@ template <typename Input>
using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;

template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
    hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(convert(arguments[i]));
@@ -347,42 +354,41 @@ nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& ar
}

template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
    return convertVec(arguments);
}

-nn::Result<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
    Request::MemoryPool ret;
    ret.hidlMemory(NN_TRY(convert(memory)));
    return ret;
}

-nn::Result<Request::MemoryPool> makeMemoryPool(const nn::Request::MemoryDomainToken& token) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Request::MemoryDomainToken& token) {
    Request::MemoryPool ret;
    ret.token(underlyingType(token));
    return ret;
}

-nn::Result<Request::MemoryPool> makeMemoryPool(
-        const std::shared_ptr<const nn::IBuffer>& /*buffer*/) {
-    return NN_ERROR() << "Unable to make memory pool from IBuffer";
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::SharedBuffer& /*buffer*/) {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to make memory pool from IBuffer";
}

}  // anonymous namespace

-nn::Result<OperandType> convert(const nn::OperandType& operandType) {
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

-nn::Result<Priority> convert(const nn::Priority& priority) {
+nn::GeneralResult<Priority> convert(const nn::Priority& priority) {
    return static_cast<Priority>(priority);
}

-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
    operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
    std::copy_if(capabilities.operandPerformance.asVector().begin(),
@@ -403,7 +409,7 @@ nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
    };
}

-nn::Result<Capabilities::OperandPerformance> convert(
+nn::GeneralResult<Capabilities::OperandPerformance> convert(
        const nn::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(convert(operandPerformance.type)),
@@ -411,7 +417,7 @@ nn::Result<Capabilities::OperandPerformance> convert(
    };
}

-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
    return Operation{
            .type = NN_TRY(convert(operation.type)),
            .inputs = operation.inputs,
@@ -419,14 +425,15 @@ nn::Result<Operation> convert(const nn::Operation& operation) {
    };
}

-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime) {
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime) {
    if (operandLifeTime == nn::Operand::LifeTime::POINTER) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
    }
    return static_cast<OperandLifeTime>(operandLifeTime);
}

-nn::Result<Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
    return Operand{
            .type = NN_TRY(convert(operand.type)),
            .dimensions = operand.dimensions,
@@ -439,9 +446,10 @@ nn::Result<Operand> convert(const nn::Operand& operand) {
    };
}

-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
    }

    return Model{
@@ -454,7 +462,7 @@ nn::Result<Model> convert(const nn::Model& model) {
    };
}

-nn::Result<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
+nn::GeneralResult<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
    auto operands = NN_TRY(convert(subgraph.operands));

    // Update number of consumers.
@@ -473,11 +481,11 @@ nn::Result<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
    };
}

-nn::Result<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
+nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
    return BufferDesc{.dimensions = bufferDesc.dimensions};
}

-nn::Result<BufferRole> convert(const nn::BufferRole& bufferRole) {
+nn::GeneralResult<BufferRole> convert(const nn::BufferRole& bufferRole) {
    return BufferRole{
            .modelIndex = bufferRole.modelIndex,
            .ioIndex = bufferRole.ioIndex,
@@ -485,9 +493,10 @@ nn::Result<BufferRole> convert(const nn::BufferRole& bufferRole) {
    };
}

-nn::Result<Request> convert(const nn::Request& request) {
+nn::GeneralResult<Request> convert(const nn::Request& request) {
    if (!hal::utils::hasNoPointerData(request)) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
    }

    return Request{
@@ -497,30 +506,31 @@ nn::Result<Request> convert(const nn::Request& request) {
    };
}

-nn::Result<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool) {
+nn::GeneralResult<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool) {
    return std::visit([](const auto& o) { return makeMemoryPool(o); }, memoryPool);
}

-nn::Result<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint) {
+nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint) {
    OptionalTimePoint ret;
    if (optionalTimePoint.has_value()) {
        const auto count = optionalTimePoint.value().time_since_epoch().count();
        if (count < 0) {
-            return NN_ERROR() << "Unable to convert OptionalTimePoint because time since epoch "
-                              "count is negative";
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Unable to convert OptionalTimePoint because time since epoch count is "
+                      "negative";
        }
        ret.nanosecondsSinceEpoch(count);
    }
    return ret;
}

-nn::Result<OptionalTimeoutDuration> convert(
+nn::GeneralResult<OptionalTimeoutDuration> convert(
        const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
    OptionalTimeoutDuration ret;
    if (optionalTimeoutDuration.has_value()) {
        const auto count = optionalTimeoutDuration.value().count();
        if (count < 0) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Unable to convert OptionalTimeoutDuration because count is negative";
        }
        ret.nanoseconds(count);
@@ -528,7 +538,7 @@ nn::Result<OptionalTimeoutDuration> convert(
    return ret;
}

-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
    switch (errorStatus) {
        case nn::ErrorStatus::NONE:
        case nn::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -545,7 +555,7 @@ nn::Result<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
    }
}

-nn::Result<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
+nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
    return convertVec(bufferRoles);
}
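The sweep above is mechanical: every conversion now returns a GeneralResult, so failures carry a canonical nn::ErrorStatus alongside the message. A hedged sketch of the calling convention this enables, assuming GeneralError exposes message and code fields as used elsewhere in this change:

// Hedged sketch: forward both the message and the status code to the caller,
// so e.g. INVALID_ARGUMENT (pointer-based memory) stays distinguishable from
// GENERAL_FAILURE (malformed HAL object).
nn::GeneralResult<nn::Model> importModel(const hal::V1_3::Model& halModel) {
    auto result = nn::convert(halModel);
    if (!result.has_value()) {
        return NN_ERROR(result.error().code) << result.error().message;
    }
    return result;
}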
269
neuralnetworks/1.3/utils/src/Device.cpp
Normal file
@@ -0,0 +1,269 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Device.h"

#include "Buffer.h"
#include "Callbacks.h"
#include "Conversions.h"
#include "PreparedModel.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.1/Conversions.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/Device.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>

#include <any>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::V1_3::utils {
namespace {

nn::GeneralResult<hidl_vec<sp<IPreparedModel>>> convert(
        const std::vector<nn::SharedPreparedModel>& preparedModels) {
    hidl_vec<sp<IPreparedModel>> hidlPreparedModels(preparedModels.size());
    for (size_t i = 0; i < preparedModels.size(); ++i) {
        std::any underlyingResource = preparedModels[i]->getUnderlyingResource();
        if (const auto* hidlPreparedModel =
                    std::any_cast<sp<IPreparedModel>>(&underlyingResource)) {
            hidlPreparedModels[i] = *hidlPreparedModel;
        } else {
            return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                   << "Unable to convert from nn::IPreparedModel to V1_3::IPreparedModel";
        }
    }
    return hidlPreparedModels;
}

nn::GeneralResult<nn::SharedBuffer> convert(
        nn::GeneralResult<std::shared_ptr<const Buffer>> result) {
    return NN_TRY(std::move(result));
}

}  // namespace

nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
                                                                sp<V1_3::IDevice> device) {
    if (name.empty()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_3::utils::Device::create must have non-empty name";
    }
    if (device == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_3::utils::Device::create must have non-null device";
    }

    auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get()));
    const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get()));
    auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get()));
    auto capabilities = NN_TRY(V1_2::utils::initCapabilities(device.get()));
    const auto numberOfCacheFilesNeeded =
            NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get()));

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
    return std::make_shared<const Device>(
            PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType,
            std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
            std::move(device), std::move(deathHandler));
}

Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString,
               nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
               nn::Capabilities capabilities,
               std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, sp<V1_3::IDevice> device,
               hal::utils::DeathHandler deathHandler)
    : kName(std::move(name)),
      kVersionString(std::move(versionString)),
      kDeviceType(deviceType),
      kExtensions(std::move(extensions)),
      kCapabilities(std::move(capabilities)),
      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded),
      kDevice(std::move(device)),
      kDeathHandler(std::move(deathHandler)) {}

const std::string& Device::getName() const {
    return kName;
}

const std::string& Device::getVersionString() const {
    return kVersionString;
}

nn::Version Device::getFeatureLevel() const {
    return nn::Version::ANDROID_R;
}

nn::DeviceType Device::getType() const {
    return kDeviceType;
}

const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
    return kExtensions;
}

const nn::Capabilities& Device::getCapabilities() const {
    return kCapabilities;
}

std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
    return kNumberOfCacheFilesNeeded;
}

nn::GeneralResult<void> Device::wait() const {
    const auto ret = kDevice->ping();
    return hal::utils::handleTransportError(ret);
}

nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));

    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                  << "uninitialized";
    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical)
                     << "IDevice::getSupportedOperations_1_3 failed with " << toString(status);
        } else if (supportedOperations.size() != model.main.operations.size()) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                     << "IDevice::getSupportedOperations_1_3 returned vector of size "
                     << supportedOperations.size() << " but expected "
                     << model.main.operations.size();
        } else {
            result = supportedOperations;
        }
    };

    const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    // Ensure that model is ready for IPC.
    std::optional<nn::Model> maybeModelInShared;
    const nn::Model& modelInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));

    const auto hidlModel = NN_TRY(convert(modelInShared));
    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
    const auto hidlPriority = NN_TRY(convert(priority));
    const auto hidlDeadline = NN_TRY(convert(deadline));
    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
    const auto hidlToken = token;

    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret =
            kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
                                      hidlModelCache, hidlDataCache, hidlToken, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
    }

    return cb->get();
}

nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    const auto hidlDeadline = NN_TRY(convert(deadline));
    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
    const auto hidlToken = token;

    const auto cb = sp<PreparedModelCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
                                                        hidlToken, cb);
    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
    }

    return cb->get();
}

nn::GeneralResult<nn::SharedBuffer> Device::allocate(
        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
        const std::vector<nn::BufferRole>& inputRoles,
        const std::vector<nn::BufferRole>& outputRoles) const {
    const auto hidlDesc = NN_TRY(convert(desc));
    const auto hidlPreparedModels = NN_TRY(convert(preparedModels));
    const auto hidlInputRoles = NN_TRY(convert(inputRoles));
    const auto hidlOutputRoles = NN_TRY(convert(outputRoles));

    nn::GeneralResult<nn::SharedBuffer> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                                                 << "uninitialized";
    auto cb = [&result](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status);
        } else if (buffer == nullptr) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr";
        } else if (token == 0) {
            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)";
        } else {
            result = convert(
                    Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token)));
        }
    };

    const auto ret =
            kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb);
    NN_TRY(hal::utils::handleTransportError(ret));

    return result;
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils
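From the caller's side the prepare path above is fully synchronous. A hedged sketch of the round trip through the canonical interface, with a hypothetical model:

// Hedged sketch: prepare a model through the canonical Device adapter.
// Device::prepareModel blocks until the driver's IPreparedModelCallback
// fires (or the binder object dies).
nn::GeneralResult<nn::SharedPreparedModel> prepare(const nn::SharedDevice& device,
                                                   const nn::Model& model) {
    return device->prepareModel(model, nn::ExecutionPreference::FAST_SINGLE_ANSWER,
                                nn::Priority::MEDIUM, /*deadline=*/{},
                                /*modelCache=*/{}, /*dataCache=*/{},
                                /*token=*/nn::CacheToken{});
}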
267
neuralnetworks/1.3/utils/src/PreparedModel.cpp
Normal file
@@ -0,0 +1,267 @@
/*
|
||||
* Copyright (C) 2020 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "PreparedModel.h"
|
||||
|
||||
#include "Callbacks.h"
|
||||
#include "Conversions.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android/hardware/neuralnetworks/1.0/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.1/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.2/types.h>
|
||||
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
|
||||
#include <android/hardware/neuralnetworks/1.3/types.h>
|
||||
#include <nnapi/IPreparedModel.h>
|
||||
#include <nnapi/Result.h>
|
||||
#include <nnapi/Types.h>
|
||||
#include <nnapi/hal/1.2/Conversions.h>
|
||||
#include <nnapi/hal/CommonUtils.h>
|
||||
#include <nnapi/hal/HandleError.h>
|
||||
#include <nnapi/hal/ProtectCallback.h>
|
||||
|
||||
#include <memory>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace android::hardware::neuralnetworks::V1_3::utils {
|
||||
namespace {
|
||||
|
||||
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
|
||||
convertExecutionResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
|
||||
const V1_2::Timing& timing) {
|
||||
return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
|
||||
NN_TRY(validatedConvertToCanonical(timing)));
|
||||
}
|
||||
|
||||
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
|
||||
const hidl_vec<V1_2::OutputShape>& outputShapes, const V1_2::Timing& timing) {
|
||||
return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
|
||||
}
|
||||
|
||||
nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
|
||||
const std::vector<nn::SyncFence>& syncFences) {
|
||||
hidl_vec<hidl_handle> handles(syncFences.size());
|
||||
for (size_t i = 0; i < syncFences.size(); ++i) {
|
||||
handles[i] = NN_TRY(V1_2::utils::convert(syncFences[i].getHandle()));
|
||||
}
|
||||
return handles;
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
|
||||
const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
|
||||
return std::make_pair(NN_TRY(validatedConvertToCanonical(timingLaunched)),
|
||||
NN_TRY(validatedConvertToCanonical(timingFenced)));
|
||||
}
|
||||
|
||||
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
|
||||
convertExecuteFencedResults(const hidl_handle& syncFence,
|
||||
const sp<IFencedExecutionCallback>& callback) {
|
||||
auto resultSyncFence = nn::SyncFence::createAsSignaled();
|
||||
if (syncFence.getNativeHandle() != nullptr) {
|
||||
auto nativeHandle = NN_TRY(validatedConvertToCanonical(syncFence));
|
||||
resultSyncFence = NN_TRY(hal::utils::makeGeneralFailure(
|
||||
nn::SyncFence::create(std::move(nativeHandle)), nn::ErrorStatus::GENERAL_FAILURE));
|
||||
}
|
||||
|
||||
if (callback == nullptr) {
|
||||
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
|
||||
}
|
||||
|
||||
// Create callback which can be used to retrieve the execution error status and timings.
|
||||
nn::ExecuteFencedInfoCallback resultCallback =
|
||||
[callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
|
||||
nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> result =
|
||||
NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
|
||||
auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched,
|
||||
const V1_2::Timing& timingFenced) {
|
||||
if (status != ErrorStatus::NONE) {
|
||||
const auto canonical = validatedConvertToCanonical(status).value_or(
|
||||
nn::ErrorStatus::GENERAL_FAILURE);
|
||||
result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status);
|
||||
} else {
|
||||
result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced);
|
||||
}
|
||||
};
|
||||
|
||||
const auto ret = callback->getExecutionInfo(cb);
|
||||
NN_TRY(hal::utils::handleTransportError(ret));
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
        sp<V1_3::IPreparedModel> preparedModel) {
    if (preparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
    }

    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
                                                 std::move(deathHandler));
}

PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_3::IPreparedModel> preparedModel,
                             hal::utils::DeathHandler deathHandler)
    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
                                    const OptionalTimePoint& deadline,
                                    const OptionalTimeoutDuration& loopTimeoutDuration) const {
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const auto cb = [&result](ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
                              const V1_2::Timing& timing) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
        } else {
            result = convertExecutionResults(outputShapes, timing);
        }
    };

    const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
                                                              loopTimeoutDuration, cb);
    NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));

    return result;
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming measure,
                                     const OptionalTimePoint& deadline,
                                     const OptionalTimeoutDuration& loopTimeoutDuration) const {
    const auto cb = sp<ExecutionCallback>::make();
    const auto scoped = kDeathHandler.protectCallback(cb.get());

    const auto ret =
            kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
    const auto status =
            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
    if (status != ErrorStatus::NONE) {
        const auto canonical =
                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
    }

    return cb->get();
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
        const nn::Request& request, nn::MeasureTiming measure,
        const nn::OptionalTimePoint& deadline,
        const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
    // Ensure that request is ready for IPC.
    std::optional<nn::Request> maybeRequestInShared;
    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));

    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
    const auto hidlMeasure =
            NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure)));
    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
    const auto hidlLoopTimeoutDuration =
            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    const bool preferSynchronous = true;

    // Execute synchronously if allowed.
    if (preferSynchronous) {
        result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
                                      hidlLoopTimeoutDuration);
    }

    // Fall back to asynchronous execution if the synchronous path has not already completed.
    if (!result.has_value()) {
        result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
                                       hidlLoopTimeoutDuration);
    }

    // Flush output buffers if the execution was successful.
    if (result.has_value()) {
        NN_TRY(hal::utils::makeExecutionFailure(
                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
    }

    return result;
}

nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
                             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
                             const nn::OptionalTimeoutDuration& loopTimeoutDuration,
                             const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
    // Ensure that request is ready for IPC.
    std::optional<nn::Request> maybeRequestInShared;
    const nn::Request& requestInShared =
            NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));

    const auto hidlRequest = NN_TRY(convert(requestInShared));
    const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
    const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure));
    const auto hidlDeadline = NN_TRY(convert(deadline));
    const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
    const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> result =
            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
    auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence,
                        const sp<IFencedExecutionCallback>& callback) {
        if (status != ErrorStatus::NONE) {
            const auto canonical =
                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
            result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status);
        } else {
            result = convertExecuteFencedResults(syncFence, callback);
        }
    };

    const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
                                                   hidlDeadline, hidlLoopTimeoutDuration,
                                                   hidlTimeoutDurationAfterFence, cb);
    NN_TRY(hal::utils::handleTransportError(ret));
    auto [syncFence, callback] = NN_TRY(std::move(result));

    // If executeFenced required the request memory to be moved into shared memory, block here
    // until the fenced execution has completed and flush the memory back.
    if (maybeRequestInShared.has_value()) {
        const auto state = syncFence.syncWait({});
        if (state != nn::SyncFence::FenceState::SIGNALED) {
            return NN_ERROR() << "syncWait failed with " << state;
        }
        NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
    }

    return std::make_pair(std::move(syncFence), std::move(callback));
}

std::any PreparedModel::getUnderlyingResource() const {
    sp<V1_3::IPreparedModel> resource = kPreparedModel;
    return resource;
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils
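
A caller-side orientation sketch (editorial, not part of this change): through the canonical nn::IPreparedModel interface, the synchronous/asynchronous split above is an implementation detail. Assuming `preparedModel` is the nn::SharedPreparedModel obtained from the corresponding Device::prepareModel:

// Hedged sketch; `run` and its arguments are illustrative names.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> run(
        const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
    // No deadline, no loop timeout, timing measurement disabled.
    return preparedModel->execute(request, nn::MeasureTiming::NO, /*deadline=*/{},
                                  /*loopTimeoutDuration=*/{});
}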

41
neuralnetworks/1.3/utils/src/Service.cpp
Normal file
@@ -0,0 +1,41 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Service.h"

#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/ResilientDevice.h>
#include <string>
#include "Device.h"

namespace android::hardware::neuralnetworks::V1_3::utils {

nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
    hal::utils::ResilientDevice::Factory makeDevice =
            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
        if (service == nullptr) {
            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
        }
        return Device::create(name, std::move(service));
    };

    return hal::utils::ResilientDevice::create(std::move(makeDevice));
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils
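
A hedged usage sketch (the instance name "sample-driver" is hypothetical): the device returned here is already wrapped in a ResilientDevice, so later calls transparently reconnect after a driver restart.

// Illustrative only; not part of this change.
nn::GeneralResult<nn::SharedDevice> exampleLookup() {
    return getDevice("sample-driver");
}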

@@ -20,6 +20,7 @@ cc_library_static {
    srcs: ["src/*"],
    local_include_dirs: ["include/nnapi/hal"],
    export_include_dirs: ["include"],
    cflags: ["-Wthread-safety"],
    static_libs: [
        "neuralnetworks_types",
    ],

@@ -19,6 +19,7 @@

#include <nnapi/Result.h>
#include <nnapi/Types.h>
+#include <functional>
#include <vector>

// Shorthand
@@ -42,14 +43,16 @@ bool hasNoPointerData(const nn::Model& model);
bool hasNoPointerData(const nn::Request& request);

// Relocate pointer-based data to shared memory.
-nn::Result<nn::Model> flushDataFromPointerToShared(const nn::Model& model);
-nn::Result<nn::Request> flushDataFromPointerToShared(const nn::Request& request);
+nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
+        const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
+        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);

// Undoes `flushDataFromPointerToShared` on a Request object. More specifically,
// `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed
// Request object back to the output pointer-based memory in the original Request object.
-nn::Result<void> unflushDataFromSharedToPointer(const nn::Request& request,
-                                                const nn::Request& requestInShared);
+nn::GeneralResult<void> unflushDataFromSharedToPointer(
+        const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared);

std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
                                             const std::vector<nn::Operation>& operations);

101
neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
Normal file
@@ -0,0 +1,101 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android/hidl/base/1.0/IBase.h>
#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

namespace android::hardware::neuralnetworks::utils {

template <typename Type>
nn::GeneralResult<Type> handleTransportError(const hardware::Return<Type>& ret) {
    if (ret.isDeadObject()) {
        return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT)
               << "Return<>::isDeadObject returned true: " << ret.description();
    }
    if (!ret.isOk()) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "Return<>::isOk returned false: " << ret.description();
    }
    return ret;
}

template <>
inline nn::GeneralResult<void> handleTransportError(const hardware::Return<void>& ret) {
    if (ret.isDeadObject()) {
        return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT)
               << "Return<>::isDeadObject returned true: " << ret.description();
    }
    if (!ret.isOk()) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "Return<>::isOk returned false: " << ret.description();
    }
    return {};
}

template <typename Type>
nn::GeneralResult<Type> makeGeneralFailure(nn::Result<Type> result, nn::ErrorStatus status) {
    if (!result.has_value()) {
        return nn::error(status) << std::move(result).error();
    }
    return std::move(result).value();
}

template <>
inline nn::GeneralResult<void> makeGeneralFailure(nn::Result<void> result, nn::ErrorStatus status) {
    if (!result.has_value()) {
        return nn::error(status) << std::move(result).error();
    }
    return {};
}

template <typename Type>
nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::ErrorStatus status) {
    if (!result.has_value()) {
        return nn::error(status) << std::move(result).error();
    }
    return std::move(result).value();
}

template <>
inline nn::ExecutionResult<void> makeExecutionFailure(nn::Result<void> result,
                                                      nn::ErrorStatus status) {
    if (!result.has_value()) {
        return nn::error(status) << std::move(result).error();
    }
    return {};
}

template <typename Type>
nn::ExecutionResult<Type> makeExecutionFailure(nn::GeneralResult<Type> result) {
    if (!result.has_value()) {
        const auto [message, status] = std::move(result).error();
        return nn::error(status) << message;
    }
    return std::move(result).value();
}

template <>
inline nn::ExecutionResult<void> makeExecutionFailure(nn::GeneralResult<void> result) {
    if (!result.has_value()) {
        const auto [message, status] = std::move(result).error();
        return nn::error(status) << message;
    }
    return {};
}

}  // namespace android::hardware::neuralnetworks::utils
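
A minimal sketch of how these helpers compose (editorial; IBase::ping() is a standard HIDL method, and the function name is illustrative):

// Convert a HIDL transport result into a canonical GeneralResult.
nn::GeneralResult<void> checkTransport(const sp<hidl::base::V1_0::IBase>& object) {
    const hardware::Return<void> ret = object->ping();
    NN_TRY(handleTransportError(ret));  // maps failures to DEAD_OBJECT or GENERAL_FAILURE
    return {};
}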

@@ -0,0 +1,90 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H

#include <android-base/scopeguard.h>
#include <android-base/thread_annotations.h>
#include <android/hidl/base/1.0/IBase.h>
#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <functional>
#include <mutex>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

class IProtectedCallback {
  public:
    /**
     * Marks this object as a dead object.
     */
    virtual void notifyAsDeadObject() = 0;

    // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers.
    // E.g., std::unique_ptr<IProtectedCallback>.
    virtual ~IProtectedCallback() = default;

  protected:
    // Protect the non-destructor special member functions to prevent object slicing.
    IProtectedCallback() = default;
    IProtectedCallback(const IProtectedCallback&) = default;
    IProtectedCallback(IProtectedCallback&&) noexcept = default;
    IProtectedCallback& operator=(const IProtectedCallback&) = default;
    IProtectedCallback& operator=(IProtectedCallback&&) noexcept = default;
};

// Thread safe class
class DeathRecipient final : public hidl_death_recipient {
  public:
    void serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) override;
    // Precondition: `killable` must be non-null.
    void add(IProtectedCallback* killable) const;
    // Precondition: `killable` must be non-null.
    void remove(IProtectedCallback* killable) const;

  private:
    mutable std::mutex mMutex;
    mutable std::vector<IProtectedCallback*> mObjects GUARDED_BY(mMutex);
};

class DeathHandler final {
  public:
    static nn::GeneralResult<DeathHandler> create(sp<hidl::base::V1_0::IBase> object);

    DeathHandler(const DeathHandler&) = delete;
    DeathHandler(DeathHandler&&) noexcept = default;
    DeathHandler& operator=(const DeathHandler&) = delete;
    DeathHandler& operator=(DeathHandler&&) noexcept = delete;
    ~DeathHandler();

    using Cleanup = std::function<void()>;
    // Precondition: `killable` must be non-null.
    [[nodiscard]] base::ScopeGuard<Cleanup> protectCallback(IProtectedCallback* killable) const;

  private:
    DeathHandler(sp<hidl::base::V1_0::IBase> object, sp<DeathRecipient> deathRecipient);

    sp<hidl::base::V1_0::IBase> kObject;
    sp<DeathRecipient> kDeathRecipient;
};

}  // namespace android::hardware::neuralnetworks::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
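
The intended pattern, mirroring the V1_3::utils::PreparedModel code earlier in this change: the callback stays registered only while the binder call is in flight, and a driver death flips it to the dead-object state instead of leaving the waiter blocked forever.

// Hedged fragment; `deathHandler`, `preparedModel`, and the arguments are
// assumed to exist in the caller's scope.
const auto cb = sp<V1_3::utils::ExecutionCallback>::make();
const auto scoped = deathHandler.protectCallback(cb.get());
const auto ret = preparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
NN_TRY(makeExecutionFailure(handleTransportError(ret)));
return cb->get();  // unblocked by notify() or, on driver death, notifyAsDeadObject()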

@@ -0,0 +1,62 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H

#include <android-base/thread_annotations.h>
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

class ResilientBuffer final : public nn::IBuffer {
    struct PrivateConstructorTag {};

  public:
    using Factory = std::function<nn::GeneralResult<nn::SharedBuffer>(bool blocking)>;

    static nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> create(Factory makeBuffer);

    explicit ResilientBuffer(PrivateConstructorTag tag, Factory makeBuffer,
                             nn::SharedBuffer buffer);

    nn::SharedBuffer getBuffer() const;
    nn::SharedBuffer recover(const nn::IBuffer* failingBuffer, bool blocking) const;

    nn::Request::MemoryDomainToken getToken() const override;

    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;

    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
                                     const nn::Dimensions& dimensions) const override;

  private:
    const Factory kMakeBuffer;
    mutable std::mutex mMutex;
    mutable nn::SharedBuffer mBuffer GUARDED_BY(mMutex);
};

}  // namespace android::hardware::neuralnetworks::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H

107
neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
Normal file
@@ -0,0 +1,107 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H

#include <android-base/thread_annotations.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

class ResilientDevice final : public nn::IDevice,
                              public std::enable_shared_from_this<ResilientDevice> {
    struct PrivateConstructorTag {};

  public:
    using Factory = std::function<nn::GeneralResult<nn::SharedDevice>(bool blocking)>;

    static nn::GeneralResult<std::shared_ptr<const ResilientDevice>> create(Factory makeDevice);

    explicit ResilientDevice(PrivateConstructorTag tag, Factory makeDevice, std::string name,
                             std::string versionString, std::vector<nn::Extension> extensions,
                             nn::Capabilities capabilities, nn::SharedDevice device);

    nn::SharedDevice getDevice() const;
    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const;

    const std::string& getName() const override;
    const std::string& getVersionString() const override;
    nn::Version getFeatureLevel() const override;
    nn::DeviceType getType() const override;
    const std::vector<nn::Extension>& getSupportedExtensions() const override;
    const nn::Capabilities& getCapabilities() const override;
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;

    nn::GeneralResult<void> wait() const override;

    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
            const nn::Model& model) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache,
            const nn::CacheToken& token) const override;

    nn::GeneralResult<nn::SharedBuffer> allocate(
            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const override;

  private:
    nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
            bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
            nn::Priority priority, nn::OptionalTimePoint deadline,
            const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
            bool blocking, nn::OptionalTimePoint deadline,
            const std::vector<nn::NativeHandle>& modelCache,
            const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
    nn::GeneralResult<nn::SharedBuffer> allocateInternal(
            bool blocking, const nn::BufferDesc& desc,
            const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const;

    const Factory kMakeDevice;
    const std::string kName;
    const std::string kVersionString;
    const std::vector<nn::Extension> kExtensions;
    const nn::Capabilities kCapabilities;
    mutable std::mutex mMutex;
    mutable nn::SharedDevice mDevice GUARDED_BY(mMutex);
};

}  // namespace android::hardware::neuralnetworks::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H

@@ -0,0 +1,70 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H

#include <android-base/thread_annotations.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

class ResilientPreparedModel final : public nn::IPreparedModel {
    struct PrivateConstructorTag {};

  public:
    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>(bool blocking)>;

    static nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> create(
            Factory makePreparedModel);

    explicit ResilientPreparedModel(PrivateConstructorTag tag, Factory makePreparedModel,
                                    nn::SharedPreparedModel preparedModel);

    nn::SharedPreparedModel getPreparedModel() const;
    nn::SharedPreparedModel recover(const nn::IPreparedModel* failingPreparedModel,
                                    bool blocking) const;

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;

    std::any getUnderlyingResource() const override;

  private:
    const Factory kMakePreparedModel;
    mutable std::mutex mMutex;
    mutable nn::SharedPreparedModel mPreparedModel GUARDED_BY(mMutex);
};

}  // namespace android::hardware::neuralnetworks::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H

@@ -0,0 +1,66 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H

#include <android-base/thread_annotations.h>

#include <condition_variable>
#include <mutex>
#include <optional>

namespace android::hardware::neuralnetworks::utils {

// This class is thread safe.
template <typename Type>
class TransferValue final {
  public:
    void put(Type object) const;
    [[nodiscard]] Type take() const;

  private:
    mutable std::mutex mMutex;
    mutable std::condition_variable mCondition;
    mutable std::optional<Type> mObject GUARDED_BY(mMutex);
};

// template implementation

template <typename Type>
void TransferValue<Type>::put(Type object) const {
    {
        std::lock_guard guard(mMutex);
        // Immediately return if value already exists.
        if (mObject.has_value()) return;
        mObject.emplace(std::move(object));
    }
    mCondition.notify_all();
}

template <typename Type>
Type TransferValue<Type>::take() const {
    std::unique_lock lock(mMutex);
    base::ScopedLockAssertion lockAssertion(mMutex);
    mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); });
    std::optional<Type> object;
    std::swap(object, mObject);
    return std::move(object).value();
}

}  // namespace android::hardware::neuralnetworks::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
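
A minimal sketch of the intended hand-off (editorial): put() typically runs on a binder thread while take() blocks on the application thread, and the value crosses exactly once.

// Illustrative only; function name is hypothetical.
#include <thread>

int transferExample() {
    android::hardware::neuralnetworks::utils::TransferValue<int> value;
    std::thread producer([&value] { value.put(42); });
    const int received = value.take();  // blocks until the producer has run
    producer.join();
    return received;  // 42
}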

@@ -16,6 +16,8 @@

#include "CommonUtils.h"

+#include "HandleError.h"
+
#include <android-base/logging.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
@@ -25,6 +27,7 @@

#include <algorithm>
#include <any>
#include <functional>
+#include <optional>
#include <variant>
#include <vector>
@@ -111,8 +114,18 @@ bool hasNoPointerData(const nn::Request& request) {
    return hasNoPointerData(request.inputs) && hasNoPointerData(request.outputs);
}

-nn::Result<nn::Model> flushDataFromPointerToShared(const nn::Model& model) {
-    auto modelInShared = model;
+nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
+        const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut) {
+    CHECK(model != nullptr);
+    CHECK(maybeModelInSharedOut != nullptr);
+
+    if (hasNoPointerData(*model)) {
+        return *model;
+    }
+
+    // Make a copy of the model in order to make modifications. The modified model is returned to
+    // the caller through `maybeModelInSharedOut` if the function succeeds.
+    nn::Model modelInShared = *model;

    nn::ConstantMemoryBuilder memoryBuilder(modelInShared.pools.size());
    copyPointersToSharedMemory(&modelInShared.main, &memoryBuilder);
@@ -126,11 +139,22 @@ nn::Result<nn::Model> flushDataFromPointerToShared(const nn::Model& model) {
        modelInShared.pools.push_back(std::move(memory));
    }

-    return modelInShared;
+    *maybeModelInSharedOut = modelInShared;
+    return **maybeModelInSharedOut;
}

-nn::Result<nn::Request> flushDataFromPointerToShared(const nn::Request& request) {
-    auto requestInShared = request;
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
+        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
+    CHECK(request != nullptr);
+    CHECK(maybeRequestInSharedOut != nullptr);
+
+    if (hasNoPointerData(*request)) {
+        return *request;
+    }
+
+    // Make a copy of the request in order to make modifications. The modified request is returned
+    // to the caller through `maybeRequestInSharedOut` if the function succeeds.
+    nn::Request requestInShared = *request;

    // Change input pointers to shared memory.
    nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
@@ -171,15 +195,17 @@ nn::Result<nn::Request> flushDataFromPointerToShared(const nn::Request& request)
        requestInShared.pools.push_back(std::move(memory));
    }

-    return requestInShared;
+    *maybeRequestInSharedOut = requestInShared;
+    return **maybeRequestInSharedOut;
}

-nn::Result<void> unflushDataFromSharedToPointer(const nn::Request& request,
-                                                const nn::Request& requestInShared) {
-    if (requestInShared.pools.empty() ||
-        !std::holds_alternative<nn::Memory>(requestInShared.pools.back())) {
+nn::GeneralResult<void> unflushDataFromSharedToPointer(
+        const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
+    if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
+        !std::holds_alternative<nn::Memory>(maybeRequestInShared->pools.back())) {
        return {};
    }
+    const auto& requestInShared = *maybeRequestInShared;

    // Map the memory.
    const auto& outputMemory = std::get<nn::Memory>(requestInShared.pools.back());
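
A hedged sketch of the round trip these helpers enable, mirroring PreparedModel::execute above: pointer-backed buffers are staged into a single shared-memory pool before IPC, and output data is copied back only once the execution is known to have finished.

// Fragment; `request` is assumed to be an nn::Request in the caller's scope.
std::optional<nn::Request> maybeRequestInShared;
const nn::Request& requestInShared =
        NN_TRY(flushDataFromPointerToShared(&request, &maybeRequestInShared));
// ... send `requestInShared` across HIDL and wait for the execution ...
NN_TRY(unflushDataFromSharedToPointer(request, maybeRequestInShared));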

95
neuralnetworks/utils/common/src/ProtectCallback.cpp
Normal file
@@ -0,0 +1,95 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ProtectCallback.h"

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
#include <android-base/thread_annotations.h>
#include <android/hidl/base/1.0/IBase.h>
#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
#include <nnapi/hal/HandleError.h>

#include <algorithm>
#include <functional>
#include <mutex>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

void DeathRecipient::serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) {
    std::lock_guard guard(mMutex);
    std::for_each(mObjects.begin(), mObjects.end(),
                  [](IProtectedCallback* killable) { killable->notifyAsDeadObject(); });
}

void DeathRecipient::add(IProtectedCallback* killable) const {
    CHECK(killable != nullptr);
    std::lock_guard guard(mMutex);
    mObjects.push_back(killable);
}

void DeathRecipient::remove(IProtectedCallback* killable) const {
    CHECK(killable != nullptr);
    std::lock_guard guard(mMutex);
    const auto removedIter = std::remove(mObjects.begin(), mObjects.end(), killable);
    mObjects.erase(removedIter);
}

nn::GeneralResult<DeathHandler> DeathHandler::create(sp<hidl::base::V1_0::IBase> object) {
    if (object == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "utils::DeathHandler::create must have non-null object";
    }
    auto deathRecipient = sp<DeathRecipient>::make();

    const auto ret = object->linkToDeath(deathRecipient, /*cookie=*/0);
    const bool success = NN_TRY(handleTransportError(ret));
    if (!success) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IBase::linkToDeath returned false";
    }

    return DeathHandler(std::move(object), std::move(deathRecipient));
}

DeathHandler::DeathHandler(sp<hidl::base::V1_0::IBase> object, sp<DeathRecipient> deathRecipient)
    : kObject(std::move(object)), kDeathRecipient(std::move(deathRecipient)) {
    CHECK(kObject != nullptr);
    CHECK(kDeathRecipient != nullptr);
}

DeathHandler::~DeathHandler() {
    if (kObject != nullptr && kDeathRecipient != nullptr) {
        const auto ret = kObject->unlinkToDeath(kDeathRecipient);
        const auto maybeSuccess = handleTransportError(ret);
        if (!maybeSuccess.has_value()) {
            LOG(ERROR) << maybeSuccess.error().message;
        } else if (!maybeSuccess.value()) {
            LOG(ERROR) << "IBase::unlinkToDeath returned false";
        }
    }
}

[[nodiscard]] base::ScopeGuard<DeathHandler::Cleanup> DeathHandler::protectCallback(
        IProtectedCallback* killable) const {
    CHECK(killable != nullptr);
    kDeathRecipient->add(killable);
    return base::make_scope_guard(
            [deathRecipient = kDeathRecipient, killable] { deathRecipient->remove(killable); });
}

}  // namespace android::hardware::neuralnetworks::utils

75
neuralnetworks/utils/common/src/ResilientBuffer.cpp
Normal file
@@ -0,0 +1,75 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ResilientBuffer.h"

#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> ResilientBuffer::create(
        Factory makeBuffer) {
    if (makeBuffer == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "utils::ResilientBuffer::create must have non-empty makeBuffer";
    }
    auto buffer = NN_TRY(makeBuffer(/*blocking=*/true));
    CHECK(buffer != nullptr);
    return std::make_shared<const ResilientBuffer>(PrivateConstructorTag{}, std::move(makeBuffer),
                                                   std::move(buffer));
}

ResilientBuffer::ResilientBuffer(PrivateConstructorTag /*tag*/, Factory makeBuffer,
                                 nn::SharedBuffer buffer)
    : kMakeBuffer(std::move(makeBuffer)), mBuffer(std::move(buffer)) {
    CHECK(kMakeBuffer != nullptr);
    CHECK(mBuffer != nullptr);
}

nn::SharedBuffer ResilientBuffer::getBuffer() const {
    std::lock_guard guard(mMutex);
    return mBuffer;
}

nn::SharedBuffer ResilientBuffer::recover(const nn::IBuffer* /*failingBuffer*/,
                                          bool /*blocking*/) const {
    std::lock_guard guard(mMutex);
    return mBuffer;
}

nn::Request::MemoryDomainToken ResilientBuffer::getToken() const {
    return getBuffer()->getToken();
}

nn::GeneralResult<void> ResilientBuffer::copyTo(const nn::Memory& dst) const {
    return getBuffer()->copyTo(dst);
}

nn::GeneralResult<void> ResilientBuffer::copyFrom(const nn::Memory& src,
                                                  const nn::Dimensions& dimensions) const {
    return getBuffer()->copyFrom(src, dimensions);
}

}  // namespace android::hardware::neuralnetworks::utils

236
neuralnetworks/utils/common/src/ResilientDevice.cpp
Normal file
@@ -0,0 +1,236 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ResilientDevice.h"

#include "ResilientBuffer.h"
#include "ResilientPreparedModel.h"

#include <android-base/logging.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

namespace android::hardware::neuralnetworks::utils {
namespace {

template <typename FnType>
auto protect(const ResilientDevice& resilientDevice, const FnType& fn, bool blocking)
        -> decltype(fn(*resilientDevice.getDevice())) {
    auto device = resilientDevice.getDevice();
    auto result = fn(*device);

    // Immediately return if device is not dead.
    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
        return result;
    }

    device = resilientDevice.recover(device.get(), blocking);
    return fn(*device);
}

}  // namespace

nn::GeneralResult<std::shared_ptr<const ResilientDevice>> ResilientDevice::create(
        Factory makeDevice) {
    if (makeDevice == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "utils::ResilientDevice::create must have non-empty makeDevice";
    }
    auto device = NN_TRY(makeDevice(/*blocking=*/true));
    CHECK(device != nullptr);

    auto name = device->getName();
    auto versionString = device->getVersionString();
    auto extensions = device->getSupportedExtensions();
    auto capabilities = device->getCapabilities();

    return std::make_shared<ResilientDevice>(PrivateConstructorTag{}, std::move(makeDevice),
                                             std::move(name), std::move(versionString),
                                             std::move(extensions), std::move(capabilities),
                                             std::move(device));
}

ResilientDevice::ResilientDevice(PrivateConstructorTag /*tag*/, Factory makeDevice,
                                 std::string name, std::string versionString,
                                 std::vector<nn::Extension> extensions,
                                 nn::Capabilities capabilities, nn::SharedDevice device)
    : kMakeDevice(std::move(makeDevice)),
      kName(std::move(name)),
      kVersionString(std::move(versionString)),
      kExtensions(std::move(extensions)),
      kCapabilities(std::move(capabilities)),
      mDevice(std::move(device)) {
    CHECK(kMakeDevice != nullptr);
    CHECK(mDevice != nullptr);
}

nn::SharedDevice ResilientDevice::getDevice() const {
    std::lock_guard guard(mMutex);
    return mDevice;
}

nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool blocking) const {
    std::lock_guard guard(mMutex);

    // Another caller updated the failing device.
    if (mDevice.get() != failingDevice) {
        return mDevice;
    }

    auto maybeDevice = kMakeDevice(blocking);
    if (!maybeDevice.has_value()) {
        const auto& [message, code] = maybeDevice.error();
        LOG(ERROR) << "Failed to recover dead device with error " << code << ": " << message;
        return mDevice;
    }
    auto device = std::move(maybeDevice).value();

    // TODO(b/173081926): Instead of CHECKing to ensure the cache has not been changed, return an
    // invalid/"null" IDevice object that always fails.
    CHECK_EQ(kName, device->getName());
    CHECK_EQ(kVersionString, device->getVersionString());
    CHECK(kExtensions == device->getSupportedExtensions());
    CHECK_EQ(kCapabilities, device->getCapabilities());

    mDevice = std::move(device);
    return mDevice;
}

const std::string& ResilientDevice::getName() const {
    return kName;
}

const std::string& ResilientDevice::getVersionString() const {
    return kVersionString;
}

nn::Version ResilientDevice::getFeatureLevel() const {
    return getDevice()->getFeatureLevel();
}

nn::DeviceType ResilientDevice::getType() const {
    return getDevice()->getType();
}

const std::vector<nn::Extension>& ResilientDevice::getSupportedExtensions() const {
    return kExtensions;
}

const nn::Capabilities& ResilientDevice::getCapabilities() const {
    return kCapabilities;
}

std::pair<uint32_t, uint32_t> ResilientDevice::getNumberOfCacheFilesNeeded() const {
    return getDevice()->getNumberOfCacheFilesNeeded();
}

nn::GeneralResult<void> ResilientDevice::wait() const {
    const auto fn = [](const nn::IDevice& device) { return device.wait(); };
    return protect(*this, fn, /*blocking=*/true);
}

nn::GeneralResult<std::vector<bool>> ResilientDevice::getSupportedOperations(
        const nn::Model& model) const {
    const auto fn = [&model](const nn::IDevice& device) {
        return device.getSupportedOperations(model);
    };
    return protect(*this, fn, /*blocking=*/false);
}

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    auto self = shared_from_this();
    ResilientPreparedModel::Factory makePreparedModel =
            [device = std::move(self), model, preference, priority, deadline, modelCache,
             dataCache, token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
        return device->prepareModelInternal(blocking, model, preference, priority, deadline,
                                            modelCache, dataCache, token);
    };
    return ResilientPreparedModel::create(std::move(makePreparedModel));
}

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    auto self = shared_from_this();
    ResilientPreparedModel::Factory makePreparedModel =
            [device = std::move(self), deadline, modelCache, dataCache,
             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
        return device->prepareModelFromCacheInternal(blocking, deadline, modelCache, dataCache,
                                                     token);
    };
    return ResilientPreparedModel::create(std::move(makePreparedModel));
}

nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocate(
        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
        const std::vector<nn::BufferRole>& inputRoles,
        const std::vector<nn::BufferRole>& outputRoles) const {
    auto self = shared_from_this();
    ResilientBuffer::Factory makeBuffer =
            [device = std::move(self), desc, preparedModels, inputRoles,
             outputRoles](bool blocking) -> nn::GeneralResult<nn::SharedBuffer> {
        return device->allocateInternal(blocking, desc, preparedModels, inputRoles, outputRoles);
    };
    return ResilientBuffer::create(std::move(makeBuffer));
}

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
        bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
        nn::Priority priority, nn::OptionalTimePoint deadline,
        const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
                     token](const nn::IDevice& device) {
        return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
                                   token);
    };
    return protect(*this, fn, blocking);
}

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
        bool blocking, nn::OptionalTimePoint deadline,
        const std::vector<nn::NativeHandle>& modelCache,
        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
    const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
        return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
    };
    return protect(*this, fn, blocking);
}

nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocateInternal(
        bool blocking, const nn::BufferDesc& desc,
        const std::vector<nn::SharedPreparedModel>& preparedModels,
        const std::vector<nn::BufferRole>& inputRoles,
        const std::vector<nn::BufferRole>& outputRoles) const {
    const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) {
        return device.allocate(desc, preparedModels, inputRoles, outputRoles);
    };
    return protect(*this, fn, blocking);
}

}  // namespace android::hardware::neuralnetworks::utils
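
The recovery contract in brief: protect() runs fn once against the cached device, and only a DEAD_OBJECT failure triggers a single kMakeDevice() retry; every other error propagates unchanged. A hedged illustration, valid only within this translation unit:

// `resilientDevice` is assumed to be a std::shared_ptr<const ResilientDevice>.
const auto fn = [](const nn::IDevice& device) { return device.wait(); };
auto result = protect(*resilientDevice, fn, /*blocking=*/true);  // at most one retry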

85
neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
Normal file
@@ -0,0 +1,85 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ResilientPreparedModel.h"

#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <functional>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

namespace android::hardware::neuralnetworks::utils {

nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> ResilientPreparedModel::create(
        Factory makePreparedModel) {
    if (makePreparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "utils::ResilientPreparedModel::create must have non-empty makePreparedModel";
    }
    auto preparedModel = NN_TRY(makePreparedModel(/*blocking=*/true));
    CHECK(preparedModel != nullptr);
    return std::make_shared<ResilientPreparedModel>(
            PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel));
}

ResilientPreparedModel::ResilientPreparedModel(PrivateConstructorTag /*tag*/,
                                               Factory makePreparedModel,
                                               nn::SharedPreparedModel preparedModel)
    : kMakePreparedModel(std::move(makePreparedModel)), mPreparedModel(std::move(preparedModel)) {
    CHECK(kMakePreparedModel != nullptr);
    CHECK(mPreparedModel != nullptr);
}

nn::SharedPreparedModel ResilientPreparedModel::getPreparedModel() const {
    std::lock_guard guard(mMutex);
    return mPreparedModel;
}

nn::SharedPreparedModel ResilientPreparedModel::recover(
        const nn::IPreparedModel* /*failingPreparedModel*/, bool /*blocking*/) const {
    std::lock_guard guard(mMutex);
    return mPreparedModel;
}

nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
                                const nn::OptionalTimePoint& deadline,
                                const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
    return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration);
}

nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
ResilientPreparedModel::executeFenced(
        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
        const nn::OptionalTimeoutDuration& loopTimeoutDuration,
        const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
    return getPreparedModel()->executeFenced(request, waitFor, measure, deadline,
                                             loopTimeoutDuration, timeoutDurationAfterFence);
}

std::any ResilientPreparedModel::getUnderlyingResource() const {
    return getPreparedModel()->getUnderlyingResource();
}

}  // namespace android::hardware::neuralnetworks::utils
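
A hedged sketch of the normal construction path: clients obtain this wrapper indirectly through ResilientDevice::prepareModel rather than calling create() themselves. The preference and priority values below are illustrative.

// Fragment; `resilientDevice` and `model` are assumed to exist.
auto preparedModel = NN_TRY(resilientDevice->prepareModel(
        model, nn::ExecutionPreference::FAST_SINGLE_ANSWER, nn::Priority::MEDIUM,
        /*deadline=*/{}, /*modelCache=*/{}, /*dataCache=*/{}, /*token=*/{}));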

36
neuralnetworks/utils/service/Android.bp
Normal file
@@ -0,0 +1,36 @@
//
// Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

cc_library_static {
    name: "neuralnetworks_utils_hal_service",
    defaults: ["neuralnetworks_utils_defaults"],
    srcs: ["src/*"],
    local_include_dirs: ["include/nnapi/hal"],
    export_include_dirs: ["include"],
    static_libs: [
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_1_0",
        "neuralnetworks_utils_hal_1_1",
        "neuralnetworks_utils_hal_1_2",
        "neuralnetworks_utils_hal_1_3",
    ],
    shared_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
        "android.hardware.neuralnetworks@1.3",
    ],
}

31
neuralnetworks/utils/service/include/nnapi/hal/Service.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H

#include <nnapi/IDevice.h>
#include <nnapi/Types.h>
#include <memory>
#include <vector>

namespace android::nn::hal {

std::vector<nn::SharedDevice> getDevices();

}  // namespace android::nn::hal

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
|
||||
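A minimal consumer sketch for this header, assuming the caller links against the new
neuralnetworks_utils_hal_service library and android-base logging; the helper name and loop
body are illustrative, not part of this change:

    #include <android-base/logging.h>
    #include <nnapi/hal/Service.h>

    #include <vector>

    void logAvailableDevices() {  // hypothetical helper
        const std::vector<android::nn::SharedDevice> devices = android::nn::hal::getDevices();
        for (const auto& device : devices) {
            // Canonical nn::IDevice exposes identity queries such as getName().
            LOG(INFO) << "NNAPI device: " << device->getName();
        }
    }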
94
neuralnetworks/utils/service/src/Service.cpp
Normal file
@@ -0,0 +1,94 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Service.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hidl/manager/1.2/IServiceManager.h>
#include <hidl/ServiceManagement.h>
#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Service.h>
#include <nnapi/hal/1.1/Service.h>
#include <nnapi/hal/1.2/Service.h>
#include <nnapi/hal/1.3/Service.h>

#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_set>
#include <vector>

namespace android::hardware::neuralnetworks::service {
namespace {

using getDeviceFn = std::add_pointer_t<nn::GeneralResult<nn::SharedDevice>(const std::string&)>;

void getDevicesForVersion(const std::string& descriptor, getDeviceFn getDevice,
                          std::vector<nn::SharedDevice>* devices,
                          std::unordered_set<std::string>* registeredDevices) {
    CHECK(devices != nullptr);
    CHECK(registeredDevices != nullptr);

    const auto names = getAllHalInstanceNames(descriptor);
    for (const auto& name : names) {
        if (const auto [it, unregistered] = registeredDevices->insert(name); unregistered) {
            auto maybeDevice = getDevice(name);
            if (maybeDevice.has_value()) {
                auto device = std::move(maybeDevice).value();
                CHECK(device != nullptr);
                devices->push_back(std::move(device));
            } else {
                LOG(ERROR) << "getDevice(" << name << ") failed with " << maybeDevice.error().code
                           << ": " << maybeDevice.error().message;
            }
        }
    }
}

std::vector<nn::SharedDevice> getDevices() {
    std::vector<nn::SharedDevice> devices;
    std::unordered_set<std::string> registeredDevices;

    getDevicesForVersion(V1_3::IDevice::descriptor, &V1_3::utils::getDevice, &devices,
                         &registeredDevices);
    getDevicesForVersion(V1_2::IDevice::descriptor, &V1_2::utils::getDevice, &devices,
                         &registeredDevices);
    getDevicesForVersion(V1_1::IDevice::descriptor, &V1_1::utils::getDevice, &devices,
                         &registeredDevices);
    getDevicesForVersion(V1_0::IDevice::descriptor, &V1_0::utils::getDevice, &devices,
                         &registeredDevices);

    return devices;
}

}  // namespace
}  // namespace android::hardware::neuralnetworks::service

namespace android::nn::hal {

std::vector<nn::SharedDevice> getDevices() {
    return hardware::neuralnetworks::service::getDevices();
}

}  // namespace android::nn::hal
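Two details of getDevices() are worth spelling out. The versions are walked from 1.3 down to
1.0, so a driver registered under several HAL versions is wrapped once, at the richest
interface it supports; and the deduplication hinges on std::unordered_set::insert returning an
{iterator, bool} pair whose bool is true only for a first insertion. A self-contained sketch of
that first-wins pattern (the instance names here are illustrative, not from this change):

    #include <iostream>
    #include <string>
    #include <unordered_set>
    #include <vector>

    int main() {
        // Instance names as they might be reported while enumerating descriptors
        // from V1_3 first, then V1_2: "default" shows up under both versions.
        const std::vector<std::string> reported = {"default", "sample-driver", "default"};

        std::unordered_set<std::string> registered;
        for (const auto& name : reported) {
            // insert(...).second is true only the first time a name is seen,
            // so the later (lower-version) "default" is skipped.
            if (registered.insert(name).second) {
                std::cout << "registering " << name << '\n';
            }
        }
        return 0;
    }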