mirror of
https://github.com/Evolution-X/hardware_interfaces
synced 2026-02-01 16:50:18 +00:00
This CL copies information from packages/modules/NeuralNetworks/runtime/VersionedInterfaces.cpp and modifies the description to be more appropriate for the NN HAL utility code. Specific sections added to the README: * "HIDL Interface Lifetimes across Processes" * "Protecting Asynchronous Calls across HIDL" Bug: 170289677 Test: mma Change-Id: Id381895535d708b627f4746687b4d12e16560639
146 lines
5.5 KiB
C++
146 lines
5.5 KiB
C++
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#include "Callbacks.h"
|
|
|
|
#include "Conversions.h"
|
|
#include "PreparedModel.h"
|
|
#include "Utils.h"
|
|
|
|
#include <android/hardware/neuralnetworks/1.0/types.h>
|
|
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
|
|
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
|
|
#include <android/hardware/neuralnetworks/1.2/types.h>
|
|
#include <nnapi/IPreparedModel.h>
|
|
#include <nnapi/Result.h>
|
|
#include <nnapi/Types.h>
|
|
#include <nnapi/hal/1.0/Conversions.h>
|
|
#include <nnapi/hal/1.0/PreparedModel.h>
|
|
#include <nnapi/hal/CommonUtils.h>
|
|
#include <nnapi/hal/HandleError.h>
|
|
#include <nnapi/hal/ProtectCallback.h>
|
|
#include <nnapi/hal/TransferValue.h>
|
|
|
|
#include <utility>
|
|
|
|
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
|
|
// lifetimes across processes and for protecting asynchronous calls across HIDL.
|
|
|
|
namespace android::hardware::neuralnetworks::V1_2::utils {
|
|
namespace {
|
|
|
|
// Wraps a V1_0 prepared-model handle in the canonical adapter type, or
// returns a GeneralError if the adapter cannot be created.
nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<V1_0::IPreparedModel>& preparedModel) {
    auto wrappedModel = NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
    return wrappedModel;
}
|
|
|
|
// Wraps a V1_2 prepared-model handle in the canonical adapter type, or
// returns a GeneralError if the adapter cannot be created.
nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
        const sp<IPreparedModel>& preparedModel) {
    auto wrappedModel = NN_TRY(utils::PreparedModel::create(preparedModel));
    return wrappedModel;
}
|
|
|
|
// Converts the HIDL output shapes and timing to their canonical counterparts,
// or returns a GeneralError if either conversion fails.
//
// NOTE: the two NN_TRY invocations are deliberately sequenced as separate
// statements. NN_TRY expands to control flow (an early return on error), and
// placing two of them inside a single function-call expression leaves their
// evaluation order unspecified.
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                     const Timing& timing) {
    auto canonicalShapes = NN_TRY(nn::convert(outputShapes));
    auto canonicalTiming = NN_TRY(nn::convert(timing));
    return std::make_pair(std::move(canonicalShapes), std::move(canonicalTiming));
}
|
|
|
|
// Same as the helper above, but maps any conversion failure into the
// ExecutionResult error space (no output shapes available on failure).
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResults(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
    auto generalResult = convertExecutionGeneralResultsHelper(outputShapes, timing);
    return hal::utils::makeExecutionFailure(std::move(generalResult));
}
|
|
|
|
} // namespace
|
|
|
|
// HIDL callback for IDevice::prepareModel (1.0 signature). Converts the
// result to the canonical format and hands it to the thread blocked in get().
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    // Service reported a failure: forward the canonical error code.
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonicalError = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonicalError)
                       << "preparedModel failed with " << toString(status));
        return Void();
    }
    // A success status with no model is a broken service contract.
    if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
        return Void();
    }
    notifyInternal(convertPreparedModel(preparedModel));
    return Void();
}
|
|
|
|
// HIDL callback for IDevice::prepareModel_1_2. Mirrors notify() but receives
// the V1_2 prepared-model interface.
Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                               const sp<IPreparedModel>& preparedModel) {
    // Service reported a failure: forward the canonical error code.
    if (status != V1_0::ErrorStatus::NONE) {
        const auto canonicalError = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonicalError)
                       << "preparedModel failed with " << toString(status));
        return Void();
    }
    // A success status with no model is a broken service contract.
    if (preparedModel == nullptr) {
        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                       << "Returned preparedModel is nullptr");
        return Void();
    }
    notifyInternal(convertPreparedModel(preparedModel));
    return Void();
}
|
|
|
|
// Invoked when the remote service dies before delivering a result (see the
// README pointer at the top of this file); unblocks any waiter in get() with
// a DEAD_OBJECT error instead of hanging forever.
void PreparedModelCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
|
|
|
|
// Retrieves the prepare result delivered via notify*/notifyAsDeadObject.
// mData is a hal::utils::TransferValue; take() presumably blocks until a
// value has been put and consumes it -- confirm against TransferValue.h.
PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}
|
|
|
|
// Stores the result (success or error) so the caller blocked in get() can
// retrieve it. All notify paths funnel through here.
void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
    mData.put(std::move(result));
}
|
|
|
|
// ExecutionCallback methods begin here
|
|
|
|
// HIDL callback for IPreparedModel::execute (1.0 signature). A 1.0 execution
// carries no output shapes or timing, so success produces an empty result.
Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
    if (status == V1_0::ErrorStatus::NONE) {
        notifyInternal({});
    } else {
        const auto canonicalError = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonicalError) << "execute failed with " << toString(status));
    }
    return Void();
}
|
|
|
|
// HIDL callback for IPreparedModel::execute_1_2. On success, converts the
// returned output shapes and timing to the canonical representation.
Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                           const hidl_vec<OutputShape>& outputShapes,
                                           const Timing& timing) {
    if (status == V1_0::ErrorStatus::NONE) {
        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
    } else {
        const auto canonicalError = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
        notifyInternal(NN_ERROR(canonicalError) << "execute failed with " << toString(status));
    }
    return Void();
}
|
|
|
|
// Invoked when the remote service dies before delivering a result (see the
// README pointer at the top of this file); unblocks any waiter in get() with
// a DEAD_OBJECT error instead of hanging forever.
void ExecutionCallback::notifyAsDeadObject() {
    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
|
|
|
|
// Retrieves the execution result delivered via notify*/notifyAsDeadObject.
// mData is a hal::utils::TransferValue; take() presumably blocks until a
// value has been put and consumes it -- confirm against TransferValue.h.
ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}
|
|
|
|
// Stores the result (success or error) so the caller blocked in get() can
// retrieve it. All notify paths funnel through here.
void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
    mData.put(std::move(result));
}
|
|
|
|
} // namespace android::hardware::neuralnetworks::V1_2::utils
|