Files
hardware_interfaces/neuralnetworks/1.1/utils/src/Conversions.cpp
Michael Butler 7fd03c265e Cleanup NN callback error handling
This CL introduces a new templated class CallbackValue to handle HIDL
"return value" callbacks in a terser and more readable way.

This CL also introduces a new macro HANDLE_HAL_STATUS to return from the
current function when an error is present with the ability to append a
more descriptive error message.

Finally, this CL changes the behavior of synchronous executions. Prior
to this CL, IPreparedModel fell back to an asynchronous execution if the
synchronous execution was allowed and failed. This change instead
returns a failure if synchronous execution is allowed and fails.

Bug: 173084343
Test: mma
Change-Id: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707
Merged-In: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707
(cherry picked from commit 98ed9baf5d)
2020-12-21 21:09:37 -08:00

291 lines
10 KiB
C++

/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Conversions.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <algorithm>
#include <functional>
#include <iterator>
#include <type_traits>
#include <utility>
namespace {
constexpr auto kVersion = android::nn::Version::ANDROID_P;
} // namespace
namespace android::nn {
namespace {

using hardware::hidl_vec;

// Deduces the canonical type produced by calling unvalidatedConvert on a
// value of type Input (e.g. hal::V1_1::Operation -> nn::Operation).
template <typename Input>
using unvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

// Converts a hidl_vec of HAL types to a std::vector of the corresponding
// canonical types, failing on the first element that cannot be converted.
template <typename Type>
GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const hidl_vec<Type>& arguments) {
    std::vector<unvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
    }
    return canonical;
}

// Converts a HAL object to its canonical form and then validates the result,
// failing if the canonical object requires a feature level newer than
// kVersion.
template <typename Type>
decltype(nn::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& halObject) {
    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
    const auto maybeVersion = validate(canonical);
    if (!maybeVersion.has_value()) {
        return error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > kVersion) {
        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
    }
    return canonical;
}

}  // anonymous namespace
// Converts a 1.1 HAL operation type to the canonical enum with a direct cast;
// no range checking is performed here (the convert() entry points validate).
GeneralResult<OperationType> unvalidatedConvert(const hal::V1_1::OperationType& operationType) {
    const auto canonicalType = static_cast<OperationType>(operationType);
    return canonicalType;
}
// Converts 1.1 HAL capabilities to the canonical representation by expanding
// the three coarse performance figures into a per-operand-type table.
GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_1::Capabilities& capabilities) {
    const auto quant8Perf = NN_TRY(unvalidatedConvert(capabilities.quantized8Performance));
    const auto fp32Perf = NN_TRY(unvalidatedConvert(capabilities.float32Performance));
    const auto relaxedPerf =
            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16Performance));

    // Expand the coarse performance info into the per-operand-type table in a
    // manner consistent with how Android P interpreted 1.1 capabilities.
    auto operandPerformance =
            hal::utils::makeQuantized8PerformanceConsistentWithP(fp32Perf, quant8Perf);

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = relaxedPerf,
            .relaxedFloat32toFloat16PerformanceTensor = relaxedPerf,
            .operandPerformance = std::move(operandPerformance),
    };
}
// Converts a 1.1 HAL operation to canonical form. Only the operation type
// needs an explicit conversion; the operand indexes carry over directly.
GeneralResult<Operation> unvalidatedConvert(const hal::V1_1::Operation& operation) {
    const auto type = NN_TRY(unvalidatedConvert(operation.type));
    return Operation{
            .type = type,
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}
// Converts a 1.1 HAL model to the canonical representation, rejecting models
// whose per-operand numberOfConsumers fields disagree with the counts implied
// by the operation list.
GeneralResult<Model> unvalidatedConvert(const hal::V1_1::Model& model) {
    auto convertedOperations = NN_TRY(unvalidatedConvert(model.operations));

    // Each 1.1 operand declares how many operations consume it; recompute the
    // counts from the operations and fail if any declaration disagrees.
    const auto consumerCounts =
            hal::utils::countNumberOfConsumers(model.operands.size(), convertedOperations);
    CHECK(model.operands.size() == consumerCounts.size());
    for (size_t index = 0; index < model.operands.size(); ++index) {
        const auto declared = model.operands[index].numberOfConsumers;
        if (declared != consumerCounts[index]) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Invalid numberOfConsumers for operand " << index << ", expected "
                   << consumerCounts[index] << " but found " << declared;
        }
    }

    // A 1.1 model has a single graph, which becomes the canonical main subgraph.
    auto mainSubgraph = Model::Subgraph{
            .operands = NN_TRY(unvalidatedConvert(model.operands)),
            .operations = std::move(convertedOperations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
    };
    return Model{
            .main = std::move(mainSubgraph),
            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
            .pools = NN_TRY(unvalidatedConvert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
    };
}
// Converts a 1.1 HAL execution preference to the canonical enum with a direct
// cast; no range checking is performed here (convert() validates).
GeneralResult<ExecutionPreference> unvalidatedConvert(
        const hal::V1_1::ExecutionPreference& executionPreference) {
    const auto canonicalPreference = static_cast<ExecutionPreference>(executionPreference);
    return canonicalPreference;
}
// The convert() entry points below convert and additionally validate that the
// result is expressible at kVersion (see validatedConvert above).

GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

GeneralResult<Model> convert(const hal::V1_1::Model& model) {
    return validatedConvert(model);
}

GeneralResult<ExecutionPreference> convert(
        const hal::V1_1::ExecutionPreference& executionPreference) {
    return validatedConvert(executionPreference);
}
} // namespace android::nn
namespace android::hardware::neuralnetworks::V1_1::utils {
namespace {

// Bring the public V1_1::utils overloads into scope so the templated vector
// conversion below can dispatch to them as well as to the file-local
// overloads.
using utils::unvalidatedConvert;

// The conversions below delegate to the 1.0 utilities, which handle the HAL
// representations these types share with the 1.0 interface.
nn::GeneralResult<V1_0::PerformanceInfo> unvalidatedConvert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::unvalidatedConvert(performanceInfo);
}

nn::GeneralResult<V1_0::Operand> unvalidatedConvert(const nn::Operand& operand) {
    return V1_0::utils::unvalidatedConvert(operand);
}

nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
        const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::unvalidatedConvert(operandValues);
}

nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Memory& memory) {
    return V1_0::utils::unvalidatedConvert(memory);
}

// Deduces the HAL type produced by calling unvalidatedConvert on a value of
// type Input (e.g. nn::Operation -> V1_1::Operation).
template <typename Input>
using unvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

// Converts a std::vector of canonical types to a hidl_vec of the
// corresponding HAL types, failing on the first element that cannot be
// converted.
template <typename Type>
nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
    }
    return halObject;
}

// Validates that the canonical object is expressible at kVersion before
// converting it to its HAL representation.
template <typename Type>
decltype(utils::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& canonical) {
    const auto maybeVersion = nn::validate(canonical);
    if (!maybeVersion.has_value()) {
        return nn::error() << maybeVersion.error();
    }
    const auto version = maybeVersion.value();
    if (version > kVersion) {
        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
    }
    return utils::unvalidatedConvert(canonical);
}

}  // anonymous namespace
// Converts a canonical operation type to the 1.1 HAL enum with a direct cast;
// validation happens in the convert() entry points, not here.
nn::GeneralResult<OperationType> unvalidatedConvert(const nn::OperationType& operationType) {
    const auto halType = static_cast<OperationType>(operationType);
    return halType;
}
// Converts canonical capabilities to the 1.1 HAL representation. The 1.1
// struct only reports three coarse performance figures, so the relevant
// entries are picked out of the canonical per-operand-type table.
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
    const auto float32Performance = NN_TRY(unvalidatedConvert(
            capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32)));
    const auto quantized8Performance = NN_TRY(unvalidatedConvert(
            capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM)));
    const auto relaxedPerformance =
            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
    return Capabilities{
            .float32Performance = float32Performance,
            .quantized8Performance = quantized8Performance,
            .relaxedFloat32toFloat16Performance = relaxedPerformance,
    };
}
// Converts a canonical operation to the 1.1 HAL representation. Only the
// operation type needs an explicit conversion; the operand indexes carry
// over directly.
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
    const auto halType = NN_TRY(unvalidatedConvert(operation.type));
    return Operation{
            .type = halType,
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}
// Converts a canonical model to the 1.1 HAL representation.
// Fails with INVALID_ARGUMENT if the model still references pointer-based
// operand data, which has no representation in the HIDL model.
// Fix: corrected the typo "Mdoel" -> "Model" in the error message.
nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be unvalidatedConverted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(unvalidatedConvert(model.main.operands));

    // Update number of consumers: the 1.1 Operand struct carries an explicit
    // numberOfConsumers field, so recompute it from the main subgraph's
    // operation list.
    const auto numberOfConsumers =
            hal::utils::countNumberOfConsumers(operands.size(), model.main.operations);
    CHECK(operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < operands.size(); ++i) {
        operands[i].numberOfConsumers = numberOfConsumers[i];
    }

    // A 1.1 model holds a single graph, taken from the canonical main subgraph.
    return Model{
            .operands = std::move(operands),
            .operations = NN_TRY(unvalidatedConvert(model.main.operations)),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
            .pools = NN_TRY(unvalidatedConvert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
    };
}
// Converts a canonical execution preference to the 1.1 HAL enum with a direct
// cast; validation happens in the convert() entry point, not here.
nn::GeneralResult<ExecutionPreference> unvalidatedConvert(
        const nn::ExecutionPreference& executionPreference) {
    const auto halPreference = static_cast<ExecutionPreference>(executionPreference);
    return halPreference;
}
// The convert() entry points below validate that the canonical object is
// expressible at kVersion before converting (see validatedConvert above).

nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

nn::GeneralResult<Model> convert(const nn::Model& model) {
    return validatedConvert(model);
}

nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
    return validatedConvert(executionPreference);
}

// DeviceStatus, Request, and ErrorStatus keep their 1.0 HAL representations,
// so their conversions delegate to the 1.0 utilities.

nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
    return V1_0::utils::convert(deviceStatus);
}

nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
    return V1_0::utils::convert(request);
}

nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
    return V1_0::utils::convert(status);
}
} // namespace android::hardware::neuralnetworks::V1_1::utils