Implement partial canonical Burst in NN util code

This CL adds a simple implementation of IBurst that dispatches calls to
an IPreparedModel object and changes
IPreparedModel::configureExecutionBurst to return this new object
(instead of returning an error).

This CL additionally defines an InvalidBurst class that returns errors
whenever it is used and a ResilientBurst class to recover an IBurst
object when it has died.

Bug: 177267324
Test: mma
Change-Id: I4c7e7ff4e6559aeb5e62c4fa02f2e751fef9d87d
This commit is contained in:
Michael Butler
2020-12-18 20:53:55 -08:00
parent b6a7ed5d5f
commit 44f324fb0d
14 changed files with 400 additions and 10 deletions

View File

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_BURST_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_BURST_H
#include <nnapi/IBurst.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <optional>
#include <utility>
// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.
namespace android::hardware::neuralnetworks::V1_0::utils {
// Class that adapts nn::IPreparedModel to nn::IBurst.
class Burst final : public nn::IBurst {
// Tag type restricts direct construction to create(); callers must use the factory.
struct PrivateConstructorTag {};
public:
// Factory: validates that preparedModel is non-null, then wraps it in a Burst.
// Returns GENERAL_FAILURE when preparedModel is null.
static nn::GeneralResult<std::shared_ptr<const Burst>> create(
nn::SharedPreparedModel preparedModel);
// Prefer create(); public only so std::make_shared can invoke it (gated by the tag).
Burst(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel);
// No-op in this partial implementation: nothing is cached and a null hold is returned.
OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;
// Dispatches the execution straight to the wrapped prepared model.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure) const override;
private:
// Non-null after construction (checked in the constructor).
const nn::SharedPreparedModel kPreparedModel;
};
} // namespace android::hardware::neuralnetworks::V1_0::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_BURST_H

View File

@@ -35,7 +35,8 @@
namespace android::hardware::neuralnetworks::V1_0::utils {
// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel {
class PreparedModel final : public nn::IPreparedModel,
public std::enable_shared_from_this<PreparedModel> {
struct PrivateConstructorTag {};
public:

View File

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Burst.h"
#include <android-base/logging.h>
#include <nnapi/IBurst.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <optional>
#include <utility>
namespace android::hardware::neuralnetworks::V1_0::utils {
// Factory for the Burst adapter: validates the prepared model before wrapping it.
// Returns GENERAL_FAILURE when the prepared model is null.
nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
        nn::SharedPreparedModel preparedModel) {
    if (preparedModel != nullptr) {
        // Valid input: construct via the private-tag constructor.
        return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(preparedModel));
    }
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "V1_0::utils::Burst::create must have non-null preparedModel";
}
// Private-tag constructor; prefer create(), which performs the null check and
// returns an error instead of aborting.
Burst::Burst(PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel)
: kPreparedModel(std::move(preparedModel)) {
// Defensive re-check; create() already guarantees a non-null prepared model.
CHECK(kPreparedModel != nullptr);
}
// No memory caching in this partial burst implementation: the memory argument is
// ignored and a null cache hold is returned.
Burst::OptionalCacheHold Burst::cacheMemory(const nn::Memory& /*memory*/) const {
return nullptr;
}
// Forwards a burst execution directly to the wrapped prepared model.
// NOTE(review): the two trailing `{}` arguments are passed as empty/default values --
// presumably an optional deadline and loop-timeout duration; confirm against the
// nn::IPreparedModel::execute signature.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
const nn::Request& request, nn::MeasureTiming measure) const {
return kPreparedModel->execute(request, measure, {}, {});
}
} // namespace android::hardware::neuralnetworks::V1_0::utils

View File

@@ -16,6 +16,7 @@
#include "PreparedModel.h"
#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
#include "Utils.h"
@@ -91,7 +92,7 @@ PreparedModel::executeFenced(const nn::Request& /*request*/,
}
// Creates a burst controller that forwards each execution to this prepared model.
// Relies on PreparedModel inheriting std::enable_shared_from_this, so this must only be
// called on a shared_ptr-managed instance.
// Fix: removed the leftover `return NN_ERROR(...) << "Not yet implemented";` line, which
// made the real implementation unreachable dead code.
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
    return Burst::create(shared_from_this());
}
std::any PreparedModel::getUnderlyingResource() const {

View File

@@ -36,7 +36,8 @@
namespace android::hardware::neuralnetworks::V1_2::utils {
// Class that adapts V1_2::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel {
class PreparedModel final : public nn::IPreparedModel,
public std::enable_shared_from_this<PreparedModel> {
struct PrivateConstructorTag {};
public:

View File

@@ -27,6 +27,7 @@
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Burst.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
@@ -118,7 +119,7 @@ PreparedModel::executeFenced(const nn::Request& /*request*/,
}
// Creates a burst controller (reusing the V1_0 adapter) that forwards each execution to
// this prepared model. Relies on std::enable_shared_from_this, so this must only be called
// on a shared_ptr-managed instance.
// Fix: removed the leftover `return NN_ERROR(...) << "Not yet implemented";` line, which
// made the real implementation unreachable dead code.
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
    return V1_0::utils::Burst::create(shared_from_this());
}
std::any PreparedModel::getUnderlyingResource() const {

View File

@@ -35,7 +35,8 @@
namespace android::hardware::neuralnetworks::V1_3::utils {
// Class that adapts V1_3::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel {
class PreparedModel final : public nn::IPreparedModel,
public std::enable_shared_from_this<PreparedModel> {
struct PrivateConstructorTag {};
public:

View File

@@ -29,6 +29,7 @@
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Burst.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
@@ -198,7 +199,7 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
}
// Creates a burst controller (reusing the V1_0 adapter) that forwards each execution to
// this prepared model. Relies on std::enable_shared_from_this, so this must only be called
// on a shared_ptr-managed instance.
// Fix: removed the leftover `return NN_ERROR(...) << "Not yet implemented";` line, which
// made the real implementation unreachable dead code.
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
    return V1_0::utils::Burst::create(shared_from_this());
}
std::any PreparedModel::getUnderlyingResource() const {

View File

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BURST_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BURST_H
#include <nnapi/IBurst.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <optional>
#include <utility>
namespace android::hardware::neuralnetworks::utils {
// An IBurst stub that fails whenever it is used; returned in place of a real burst when one
// cannot be created (e.g. when the owning prepared model is invalid).
class InvalidBurst final : public nn::IBurst {
public:
// Returns a null cache hold; nothing is cached.
OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;
// Always fails with an "InvalidBurst" error.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure) const override;
};
} // namespace android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BURST_H

View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BURST_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BURST_H
#include <android-base/thread_annotations.h>
#include <nnapi/IBurst.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <utility>
namespace android::hardware::neuralnetworks::utils {
// Wraps an nn::IBurst and re-creates it via a stored factory when the underlying object
// dies (DEAD_OBJECT), making burst usage resilient to driver process death. The held burst
// is guarded by a mutex and may be swapped by recover() at any time.
class ResilientBurst final : public nn::IBurst,
public std::enable_shared_from_this<ResilientBurst> {
// Tag type restricts direct construction to create(); callers must use the factory.
struct PrivateConstructorTag {};
public:
// Callable used to (re)create the underlying burst object.
using Factory = std::function<nn::GeneralResult<nn::SharedBurst>()>;
// Validates makeBurst (must be non-empty), eagerly creates the first burst, and returns
// the resilient wrapper. Fails with INVALID_ARGUMENT on an empty factory.
static nn::GeneralResult<std::shared_ptr<const ResilientBurst>> create(Factory makeBurst);
// Prefer create(); public only so std::make_shared can invoke it (gated by the tag).
ResilientBurst(PrivateConstructorTag tag, Factory makeBurst, nn::SharedBurst burst);
// Returns a snapshot of the currently-held burst (taken under the lock).
nn::SharedBurst getBurst() const;
// If the held burst is still failingBurst, replaces it with a freshly created one;
// otherwise returns the replacement another caller already installed.
nn::GeneralResult<nn::SharedBurst> recover(const nn::IBurst* failingBurst) const;
// IBurst methods; execute() retries once via recover() when the call fails with
// DEAD_OBJECT.
OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure) const override;
private:
// Factory used to re-create the burst after death; non-empty after construction.
const Factory kMakeBurst;
mutable std::mutex mMutex;
// Current burst object; non-null after construction, replaced by recover().
mutable nn::SharedBurst mBurst GUARDED_BY(mMutex);
};
} // namespace android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BURST_H

View File

@@ -30,7 +30,8 @@
namespace android::hardware::neuralnetworks::utils {
class ResilientPreparedModel final : public nn::IPreparedModel {
class ResilientPreparedModel final : public nn::IPreparedModel,
public std::enable_shared_from_this<ResilientPreparedModel> {
struct PrivateConstructorTag {};
public:
@@ -62,6 +63,9 @@ class ResilientPreparedModel final : public nn::IPreparedModel {
std::any getUnderlyingResource() const override;
private:
bool isValidInternal() const EXCLUDES(mMutex);
nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
const Factory kMakePreparedModel;
mutable std::mutex mMutex;
mutable nn::SharedPreparedModel mPreparedModel GUARDED_BY(mMutex);

View File

@@ -0,0 +1,38 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "InvalidBurst.h"
#include <nnapi/IBurst.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <optional>
#include <utility>
namespace android::hardware::neuralnetworks::utils {
// An invalid burst caches nothing: the memory argument is ignored and a null hold is
// returned.
InvalidBurst::OptionalCacheHold InvalidBurst::cacheMemory(const nn::Memory& /*memory*/) const {
return nullptr;
}
// Execution on an invalid burst always fails; both arguments are ignored.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute(
const nn::Request& /*request*/, nn::MeasureTiming /*measure*/) const {
return NN_ERROR() << "InvalidBurst";
}
} // namespace android::hardware::neuralnetworks::utils

View File

@@ -0,0 +1,109 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ResilientBurst.h"
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IBurst.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <utility>
namespace android::hardware::neuralnetworks::utils {
namespace {
// Invokes `fn` on the resilient burst's current IBurst. If the call fails specifically with
// DEAD_OBJECT, attempts to recover the burst (re-creating it via the stored factory) and
// retries `fn` exactly once on the replacement. Any other failure is returned unchanged.
template <typename FnType>
auto protect(const ResilientBurst& resilientBurst, const FnType& fn)
-> decltype(fn(*resilientBurst.getBurst())) {
auto burst = resilientBurst.getBurst();
auto result = fn(*burst);
// Immediately return if burst is not dead.
if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
return result;
}
// Attempt recovery and return if it fails.
auto maybeBurst = resilientBurst.recover(burst.get());
if (!maybeBurst.has_value()) {
// Combine the original execution error with the recovery error so neither is lost;
// the original error's code and output shapes are preserved.
auto [resultErrorMessage, resultErrorCode, resultOutputShapes] = std::move(result).error();
const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBurst.error();
return nn::error(resultErrorCode, std::move(resultOutputShapes))
<< resultErrorMessage << ", and failed to recover dead burst object with error "
<< recoveryErrorCode << ": " << recoveryErrorMessage;
}
// Recovery succeeded: retry once on the replacement burst.
burst = std::move(maybeBurst).value();
return fn(*burst);
}
} // namespace
// Factory for the resilient wrapper: rejects an empty factory, eagerly creates the first
// burst (propagating any creation failure), and returns the wrapper holding both.
nn::GeneralResult<std::shared_ptr<const ResilientBurst>> ResilientBurst::create(Factory makeBurst) {
    if (makeBurst == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "utils::ResilientBurst::create must have non-empty makeBurst";
    }
    // Create the initial burst up front so a broken factory fails fast.
    auto initialBurst = NN_TRY(makeBurst());
    CHECK(initialBurst != nullptr);
    return std::make_shared<ResilientBurst>(PrivateConstructorTag{}, std::move(makeBurst),
                                            std::move(initialBurst));
}
// Private-tag constructor; prefer create(), which validates the arguments and returns an
// error instead of aborting.
ResilientBurst::ResilientBurst(PrivateConstructorTag /*tag*/, Factory makeBurst,
nn::SharedBurst burst)
: kMakeBurst(std::move(makeBurst)), mBurst(std::move(burst)) {
// Defensive re-checks; create() already guarantees both are non-null.
CHECK(kMakeBurst != nullptr);
CHECK(mBurst != nullptr);
}
// Returns a snapshot of the currently-held burst, taken under the lock. The caller may use
// the returned shared_ptr without holding mMutex; recover() may concurrently install a
// replacement, which is why protect() compares pointers before retrying.
nn::SharedBurst ResilientBurst::getBurst() const {
std::lock_guard guard(mMutex);
return mBurst;
}
// Re-creates the underlying burst after it has died. The pointer comparison makes recovery
// idempotent under contention: only the first caller holding the dead object triggers a
// re-create; later callers simply receive the already-installed replacement.
nn::GeneralResult<nn::SharedBurst> ResilientBurst::recover(const nn::IBurst* failingBurst) const {
    std::lock_guard guard(mMutex);
    if (mBurst.get() == failingBurst) {
        // Still holding the failing object: build a replacement via the stored factory,
        // propagating any creation failure to the caller.
        mBurst = NN_TRY(kMakeBurst());
    }
    return mBurst;
}
// Delegates memory caching to the currently-held burst.
// NOTE(review): the returned hold is tied to that specific burst instance; nothing here
// re-establishes the cache entry if the burst is later replaced by recover() -- confirm
// whether callers must re-cache after recovery.
ResilientBurst::OptionalCacheHold ResilientBurst::cacheMemory(const nn::Memory& memory) const {
return getBurst()->cacheMemory(memory);
}
// Runs the execution through protect(), which recovers the burst and retries once if the
// underlying object has died (DEAD_OBJECT).
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute(
        const nn::Request& request, nn::MeasureTiming measure) const {
    return protect(*this, [&request, measure](const nn::IBurst& burst) {
        return burst.execute(request, measure);
    });
}
} // namespace android::hardware::neuralnetworks::utils

View File

@@ -16,6 +16,9 @@
#include "ResilientPreparedModel.h"
#include "InvalidBurst.h"
#include "ResilientBurst.h"
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IPreparedModel.h>
@@ -125,14 +128,34 @@ ResilientPreparedModel::executeFenced(const nn::Request& request,
}
// Creates a burst object for this resilient prepared model.
// Fix: the flattened diff left the removed lines interleaved with the new code -- an
// unclosed lambda before `#if 0` and an unreachable `return protect(*this, fn);` inside
// the disabled branch -- leaving the function malformed. Reconstructed the intended body.
// NOTE(review): the `#if 0` branch is an intentionally disabled alternative that would
// wrap the burst in a ResilientBurst (re-created via shared_from_this on failure); the
// active path delegates to configureExecutionBurstInternal().
nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
#if 0
    auto self = shared_from_this();
    ResilientBurst::Factory makeBurst =
            [preparedModel = std::move(self)]() -> nn::GeneralResult<nn::SharedBurst> {
        return preparedModel->configureExecutionBurst();
    };
    return ResilientBurst::create(std::move(makeBurst));
#else
    return configureExecutionBurstInternal();
#endif
}
// Exposes the underlying resource of whichever prepared model is active at call time; the
// result may be stale if the prepared model is subsequently recovered.
std::any ResilientPreparedModel::getUnderlyingResource() const {
return getPreparedModel()->getUnderlyingResource();
}
// Placeholder validity check: currently always reports the prepared model as valid, so
// configureExecutionBurstInternal() never takes its InvalidBurst fallback path.
bool ResilientPreparedModel::isValidInternal() const {
return true;
}
// Configures a burst on the current prepared model via protect() -- a file-local helper
// (defined earlier in this file, presumably analogous to ResilientBurst's protect) that
// recovers the prepared model and retries on DEAD_OBJECT. If the prepared model is
// known-invalid, returns an InvalidBurst whose every call fails.
nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurstInternal() const {
if (!isValidInternal()) {
return std::make_shared<const InvalidBurst>();
}
const auto fn = [](const nn::IPreparedModel& preparedModel) {
return preparedModel.configureExecutionBurst();
};
return protect(*this, fn);
}
} // namespace android::hardware::neuralnetworks::utils