Merge changes from topic "aosp-nnapi-reusable-execution-canonical" am: 43ae2ecfe4
Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1701932
Change-Id: If503fe78b3281a7a6feffdceacff45fb1b0c93bb
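This merge pulls reusable-execution support into the canonical burst utilities: nn::IBurst gains createReusableExecution(), and each burst wrapper (V1_0, the V1_2 FMQ ExecutionBurstController, the AIDL Burst, InvalidBurst, and ResilientBurst) can now hand back an nn::IExecution whose compute() re-runs the same request without re-validating, re-converting, or re-serializing it. A minimal caller-side sketch follows; it assumes a valid nn::SharedBurst obtained elsewhere (for example from IPreparedModel::configureExecutionBurst), uses only the signatures introduced by this change, and the helper name and namespace alias are illustrative rather than part of the commit.

#include <nnapi/IBurst.h>
#include <nnapi/IExecution.h>
#include <nnapi/Types.h>

#include <utility>

namespace nn = ::android::nn;  // assumed alias, mirroring the one used throughout these utils

// Hypothetical helper: create the reusable execution once, then compute repeatedly.
inline void runRepeatedly(const nn::SharedBurst& burst, const nn::Request& request) {
    // Validation, relocation into shared memory, and memory-cache slot lookup are paid
    // once here rather than on every IBurst::execute() call.
    auto maybeExecution = burst->createReusableExecution(request, nn::MeasureTiming::NO,
                                                         /*loopTimeoutDuration=*/{});
    if (!maybeExecution.has_value()) {
        return;  // maybeExecution.error() describes the failure
    }
    const nn::SharedExecution execution = std::move(maybeExecution).value();

    // compute() may be called any number of times against the cached request packet.
    for (int i = 0; i < 3; ++i) {
        const auto result = execution->compute(/*deadline=*/{});
        if (!result.has_value()) {
            break;  // on success, result.value() holds {outputShapes, timing}
        }
    }
}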
@@ -48,6 +48,10 @@ class Burst final : public nn::IBurst {
             const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration) const override;

+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
   private:
     const nn::SharedPreparedModel kPreparedModel;
 };
@@ -55,4 +55,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
     return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
 }

+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
@@ -28,9 +28,11 @@
 #include <fmq/MessageQueue.h>
 #include <hidl/MQDescriptor.h>
 #include <nnapi/IBurst.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/ProtectCallback.h>

 #include <atomic>
@@ -51,14 +53,14 @@ namespace android::hardware::neuralnetworks::V1_2::utils {
  * across FMQ, making it appear to the runtime as a regular synchronous inference. Additionally,
  * this class manages the burst's memory cache.
  */
-class ExecutionBurstController final : public nn::IBurst {
+class ExecutionBurstController final
+    : public nn::IBurst,
+      public std::enable_shared_from_this<ExecutionBurstController> {
     struct PrivateConstructorTag {};

   public:
-    using FallbackFunction =
-            std::function<nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>(
-                    const nn::Request&, nn::MeasureTiming, const nn::OptionalTimePoint&,
-                    const nn::OptionalDuration&)>;
+    using FallbackFunction = std::function<
+            nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>()>;

     /**
      * NN runtime memory cache.
@@ -154,10 +156,10 @@ class ExecutionBurstController final : public nn::IBurst {
      * @return ExecutionBurstController Execution burst controller object.
      */
     static nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> create(
-            const sp<IPreparedModel>& preparedModel, FallbackFunction fallback,
+            nn::SharedPreparedModel preparedModel, const sp<IPreparedModel>& hidlPreparedModel,
             std::chrono::microseconds pollingTimeWindow);

-    ExecutionBurstController(PrivateConstructorTag tag, FallbackFunction fallback,
+    ExecutionBurstController(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
                              std::unique_ptr<RequestChannelSender> requestChannelSender,
                              std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
                              sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
@@ -173,9 +175,21 @@ class ExecutionBurstController final : public nn::IBurst {
                        const nn::OptionalTimePoint& deadline,
                        const nn::OptionalDuration& loopTimeoutDuration) const override;

+    // See IBurst::createReusableExecution for information on this method.
+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+    // If fallback is not nullptr, this method will invoke the fallback function to try another
+    // execution path if the packet could not be sent. Otherwise, failing to send the packet will
+    // result in an error.
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+            const std::vector<FmqRequestDatum>& requestPacket,
+            const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const;
+
   private:
     mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
-    const FallbackFunction kFallback;
+    const nn::SharedPreparedModel kPreparedModel;
     const std::unique_ptr<RequestChannelSender> mRequestChannelSender;
     const std::unique_ptr<ResultChannelReceiver> mResultChannelReceiver;
     const sp<ExecutionBurstCallback> mBurstCallback;
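One detail worth calling out in the header change above: FallbackFunction is now a zero-argument closure, so executeInternal() can retry a failed FMQ send without knowing the request, measure, deadline, or loop timeout. The caller captures those itself, which is exactly what the new execute() body does later in this diff. Below is a self-contained sketch of the pattern; all names are illustrative and not taken from the commit.

#include <functional>
#include <optional>
#include <string>

// Stand-in for the zero-argument FallbackFunction; the result type is simplified.
using Fallback = std::function<std::optional<std::string>()>;

// Sketch: the transport layer only knows how to retry, not what the arguments were.
std::optional<std::string> sendOrFallback(bool packetSent, const Fallback& fallback) {
    if (packetSent) {
        return "fast-path result";
    }
    if (fallback) {
        return fallback();  // caller-supplied slow path, e.g. preparedModel->execute(...)
    }
    return std::nullopt;  // no fallback: surface the send failure to the caller
}

// The caller builds the closure over its own arguments, mirroring execute() below:
//   const auto fallback = [&] { return slowPathExecute(request, measure, deadline, timeout); };
//   return sendOrFallback(trySendPacket(packet), fallback);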
@@ -28,6 +28,7 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 #include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
@@ -50,6 +51,35 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {

+class BurstExecution final : public nn::IExecution,
+                             public std::enable_shared_from_this<BurstExecution> {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
+            std::shared_ptr<const ExecutionBurstController> controller,
+            std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+            std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+
+    BurstExecution(PrivateConstructorTag tag,
+                   std::shared_ptr<const ExecutionBurstController> controller,
+                   std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+                   std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+            const nn::OptionalTimePoint& deadline) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+            const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+  private:
+    const std::shared_ptr<const ExecutionBurstController> kController;
+    const std::vector<FmqRequestDatum> kRequest;
+    const hal::utils::RequestRelocation kRelocation;
+    const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
+};
+
 nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
         V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
     HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status "
@@ -209,10 +239,10 @@ Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
 // ExecutionBurstController methods

 nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
-        const sp<V1_2::IPreparedModel>& preparedModel, FallbackFunction fallback,
+        nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
         std::chrono::microseconds pollingTimeWindow) {
     // check inputs
-    if (preparedModel == nullptr) {
+    if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
         return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
     }

@@ -236,7 +266,7 @@ nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurs
     auto cb = hal::utils::CallbackValue(executionBurstResultCallback);

     // configure burst
-    const Return<void> ret = preparedModel->configureExecutionBurst(
+    const Return<void> ret = hidlPreparedModel->configureExecutionBurst(
             burstCallback, *requestChannelDescriptor, *resultChannelDescriptor, cb);
     HANDLE_TRANSPORT_FAILURE(ret);

@@ -250,18 +280,18 @@ nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurs

     // make and return controller
     return std::make_shared<const ExecutionBurstController>(
-            PrivateConstructorTag{}, std::move(fallback), std::move(requestChannelSender),
+            PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
             std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
             std::move(memoryCache), std::move(deathHandler));
 }

 ExecutionBurstController::ExecutionBurstController(
-        PrivateConstructorTag /*tag*/, FallbackFunction fallback,
+        PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
         std::unique_ptr<RequestChannelSender> requestChannelSender,
         std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
         sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
         std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
-    : kFallback(std::move(fallback)),
+    : kPreparedModel(std::move(preparedModel)),
       mRequestChannelSender(std::move(requestChannelSender)),
       mResultChannelReceiver(std::move(resultChannelReceiver)),
       mBurstCallback(std::move(callback)),
@@ -283,26 +313,96 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
     // systraces. Note that the first point we can begin collecting systraces in
     // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
     // ExecutionBurstServer collects systraces at different points in the code.
-    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");

     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
     if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
         version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
-        if (kFallback) {
-            return kFallback(request, measure, deadline, loopTimeoutDuration);
-        }
-        return NN_ERROR() << "Request object has features not supported by IBurst::execute";
+        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     }

     // ensure that request is ready for IPC
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
                     &request, &maybeRequestInShared, &relocation)));

     // clear pools field of request, as they will be provided via slots
-    const auto requestWithoutPools =
-            nn::Request{.inputs = request.inputs, .outputs = request.outputs, .pools = {}};
+    const auto requestWithoutPools = nn::Request{
+            .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
     auto hidlRequest = NN_TRY(
             hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));

+    std::vector<int32_t> slots;
+    std::vector<OptionalCacheHold> holds;
+    slots.reserve(requestInShared.pools.size());
+    holds.reserve(requestInShared.pools.size());
+    for (const auto& memoryPool : requestInShared.pools) {
+        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
+        slots.push_back(slot);
+        holds.push_back(std::move(hold));
+    }
+
+    // send request packet
+    const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
+    const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
+        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+    };
+    return executeInternal(requestPacket, relocation, fallback);
+}
+
+// See IBurst::createReusableExecution for information on this method.
+nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");
+
+    // if the request is valid but of a higher version than what's supported in burst execution,
+    // fall back to another execution path
+    if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
+        version > nn::Version::ANDROID_Q) {
+        // fallback to another execution path if the packet could not be sent
+        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+    }
+
+    // ensure that request is ready for IPC
+    std::optional<nn::Request> maybeRequestInShared;
+    hal::utils::RequestRelocation relocation;
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, &maybeRequestInShared, &relocation));
+
+    // clear pools field of request, as they will be provided via slots
+    const auto requestWithoutPools = nn::Request{
+            .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
+    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
+    const auto hidlMeasure = NN_TRY(convert(measure));
+
+    std::vector<int32_t> slots;
+    std::vector<OptionalCacheHold> holds;
+    slots.reserve(requestInShared.pools.size());
+    holds.reserve(requestInShared.pools.size());
+    for (const auto& memoryPool : requestInShared.pools) {
+        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
+        slots.push_back(slot);
+        holds.push_back(std::move(hold));
+    }
+
+    const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
+    return BurstExecution::create(shared_from_this(), std::move(requestPacket),
+                                  std::move(relocation), std::move(holds));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
+                                          const hal::utils::RequestRelocation& relocation,
+                                          FallbackFunction fallback) const {
+    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
+                 "ExecutionBurstController::executeInternal");
+
     // Ensure that at most one execution is in flight at any given time.
     const bool alreadyInFlight = mExecutionInFlight.test_and_set();
     if (alreadyInFlight) {
@@ -310,22 +410,16 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
     }
     const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); });

-    std::vector<int32_t> slots;
-    std::vector<OptionalCacheHold> holds;
-    slots.reserve(request.pools.size());
-    holds.reserve(request.pools.size());
-    for (const auto& memoryPool : request.pools) {
-        auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
-        slots.push_back(slot);
-        holds.push_back(std::move(hold));
+    if (relocation.input) {
+        relocation.input->flush();
     }

     // send request packet
-    const auto sendStatus = mRequestChannelSender->send(hidlRequest, hidlMeasure, slots);
+    const auto sendStatus = mRequestChannelSender->sendPacket(requestPacket);
     if (!sendStatus.ok()) {
         // fallback to another execution path if the packet could not be sent
-        if (kFallback) {
-            return kFallback(request, measure, deadline, loopTimeoutDuration);
+        if (fallback) {
+            return fallback();
         }
         return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
     }
@@ -333,7 +427,47 @@ ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming
     // get result packet
     const auto [status, outputShapes, timing] =
             NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
+
+    if (relocation.output) {
+        relocation.output->flush();
+    }
     return executionCallback(status, outputShapes, timing);
 }

+nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
+        std::shared_ptr<const ExecutionBurstController> controller,
+        std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+        std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
+    if (controller == nullptr) {
+        return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
+    }
+
+    return std::make_shared<const BurstExecution>(PrivateConstructorTag{}, std::move(controller),
+                                                  std::move(request), std::move(relocation),
+                                                  std::move(cacheHolds));
+}
+
+BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
+                               std::shared_ptr<const ExecutionBurstController> controller,
+                               std::vector<FmqRequestDatum> request,
+                               hal::utils::RequestRelocation relocation,
+                               std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
+    : kController(std::move(controller)),
+      kRequest(std::move(request)),
+      kRelocation(std::move(relocation)),
+      kCacheHolds(std::move(cacheHolds)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
+        const nn::OptionalTimePoint& /*deadline*/) const {
+    return kController->executeInternal(kRequest, kRelocation, /*fallback=*/nullptr);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+                              const nn::OptionalTimePoint& /*deadline*/,
+                              const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IExecution::computeFenced is not supported on burst object";
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
@@ -158,7 +158,7 @@ nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() cons
         return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     };
     const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
-    return ExecutionBurstController::create(kPreparedModel, std::move(fallback), pollingTimeWindow);
+    return ExecutionBurstController::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
 }

 std::any PreparedModel::getUnderlyingResource() const {
@@ -255,7 +255,7 @@ nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() cons
         return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     };
     const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
-    return V1_2::utils::ExecutionBurstController::create(kPreparedModel, std::move(fallback),
+    return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
                                                          pollingTimeWindow);
 }

@@ -38,7 +38,7 @@
 namespace aidl::android::hardware::neuralnetworks::utils {

 // Class that adapts aidl_hal::IBurst to nn::IBurst.
-class Burst final : public nn::IBurst {
+class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst> {
     struct PrivateConstructorTag {};

   public:
@@ -100,6 +100,16 @@ class Burst final : public nn::IBurst {
             const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration) const override;

+    // See IBurst::createReusableExecution for information.
+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+            const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+            bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+            const hal::utils::RequestRelocation& relocation) const;
+
   private:
     mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
     const std::shared_ptr<aidl_hal::IBurst> kBurst;
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android/binder_auto_utils.h>
 #include <nnapi/IBurst.h>
+#include <nnapi/IExecution.h>
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
@@ -35,6 +36,39 @@
 namespace aidl::android::hardware::neuralnetworks::utils {
 namespace {

+class BurstExecution final : public nn::IExecution,
+                             public std::enable_shared_from_this<BurstExecution> {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
+            std::shared_ptr<const Burst> burst, Request request,
+            std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+            hal::utils::RequestRelocation relocation,
+            std::vector<Burst::OptionalCacheHold> cacheHolds);
+
+    BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> burst, Request request,
+                   std::vector<int64_t> memoryIdentifierTokens, bool measure,
+                   int64_t loopTimeoutDuration, hal::utils::RequestRelocation relocation,
+                   std::vector<Burst::OptionalCacheHold> cacheHolds);
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+            const nn::OptionalTimePoint& deadline) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+            const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+  private:
+    const std::shared_ptr<const Burst> kBurst;
+    const Request kRequest;
+    const std::vector<int64_t>& kMemoryIdentifierTokens;
+    const bool kMeasure;
+    const int64_t kLoopTimeoutDuration;
+    const hal::utils::RequestRelocation kRelocation;
+    const std::vector<Burst::OptionalCacheHold> kCacheHolds;
+};
+
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
         const std::vector<OutputShape>& outputShapes, const Timing& timing) {
     return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
@@ -139,13 +173,6 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& deadline,
         const nn::OptionalDuration& loopTimeoutDuration) const {
-    // Ensure that at most one execution is in flight at any given time.
-    const bool alreadyInFlight = mExecutionInFlight.test_and_set();
-    if (alreadyInFlight) {
-        return NN_ERROR() << "IBurst already has an execution in flight";
-    }
-    const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
-
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -161,9 +188,9 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::

     std::vector<int64_t> memoryIdentifierTokens;
     std::vector<OptionalCacheHold> holds;
-    memoryIdentifierTokens.reserve(request.pools.size());
-    holds.reserve(request.pools.size());
-    for (const auto& memoryPool : request.pools) {
+    memoryIdentifierTokens.reserve(requestInShared.pools.size());
+    holds.reserve(requestInShared.pools.size());
+    for (const auto& memoryPool : requestInShared.pools) {
         if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
             if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
                 auto& [identifier, hold] = *cached;
@@ -174,16 +201,30 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
         }
         memoryIdentifierTokens.push_back(-1);
     }
-    CHECK_EQ(request.pools.size(), memoryIdentifierTokens.size());
+    CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());

+    return executeInternal(aidlRequest, memoryIdentifierTokens, aidlMeasure, aidlDeadline,
+                           aidlLoopTimeoutDuration, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
+        const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, bool measure,
+        int64_t deadline, int64_t loopTimeoutDuration,
+        const hal::utils::RequestRelocation& relocation) const {
+    // Ensure that at most one execution is in flight at any given time.
+    const bool alreadyInFlight = mExecutionInFlight.test_and_set();
+    if (alreadyInFlight) {
+        return NN_ERROR() << "IBurst already has an execution in flight";
+    }
+    const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
+
     if (relocation.input) {
         relocation.input->flush();
     }

     ExecutionResult executionResult;
-    const auto ret =
-            kBurst->executeSynchronously(aidlRequest, memoryIdentifierTokens, aidlMeasure,
-                                         aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+    const auto ret = kBurst->executeSynchronously(request, memoryIdentifierTokens, measure,
+                                                  deadline, loopTimeoutDuration, &executionResult);
     HANDLE_ASTATUS(ret) << "execute failed";
     if (!executionResult.outputSufficientSize) {
         auto canonicalOutputShapes =
@@ -200,4 +241,82 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
     return std::make_pair(std::move(outputShapes), timing);
 }

+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    // Ensure that request is ready for IPC.
+    std::optional<nn::Request> maybeRequestInShared;
+    hal::utils::RequestRelocation relocation;
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, &maybeRequestInShared, &relocation));
+
+    auto aidlRequest = NN_TRY(convert(requestInShared));
+    const auto aidlMeasure = NN_TRY(convert(measure));
+    const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+
+    std::vector<int64_t> memoryIdentifierTokens;
+    std::vector<OptionalCacheHold> holds;
+    memoryIdentifierTokens.reserve(requestInShared.pools.size());
+    holds.reserve(requestInShared.pools.size());
+    for (const auto& memoryPool : requestInShared.pools) {
+        if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
+            if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
+                auto& [identifier, hold] = *cached;
+                memoryIdentifierTokens.push_back(identifier);
+                holds.push_back(std::move(hold));
+                continue;
+            }
+        }
+        memoryIdentifierTokens.push_back(-1);
+    }
+    CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
+
+    return BurstExecution::create(shared_from_this(), std::move(aidlRequest),
+                                  std::move(memoryIdentifierTokens), aidlMeasure,
+                                  aidlLoopTimeoutDuration, std::move(relocation), std::move(holds));
+}
+
+nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
+        std::shared_ptr<const Burst> burst, Request request,
+        std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+        hal::utils::RequestRelocation relocation,
+        std::vector<Burst::OptionalCacheHold> cacheHolds) {
+    if (burst == nullptr) {
+        return NN_ERROR() << "aidl::utils::BurstExecution::create must have non-null burst";
+    }
+
+    return std::make_shared<const BurstExecution>(
+            PrivateConstructorTag{}, std::move(burst), std::move(request),
+            std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, std::move(relocation),
+            std::move(cacheHolds));
+}
+
+BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<const Burst> burst,
+                               Request request, std::vector<int64_t> memoryIdentifierTokens,
+                               bool measure, int64_t loopTimeoutDuration,
+                               hal::utils::RequestRelocation relocation,
+                               std::vector<Burst::OptionalCacheHold> cacheHolds)
+    : kBurst(std::move(burst)),
+      kRequest(std::move(request)),
+      kMemoryIdentifierTokens(std::move(memoryIdentifierTokens)),
+      kMeasure(measure),
+      kLoopTimeoutDuration(loopTimeoutDuration),
+      kRelocation(std::move(relocation)),
+      kCacheHolds(std::move(cacheHolds)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
+        const nn::OptionalTimePoint& deadline) const {
+    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
+                                   kLoopTimeoutDuration, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+                              const nn::OptionalTimePoint& /*deadline*/,
+                              const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IExecution::computeFenced is not supported on burst object";
+}
+
 }  // namespace aidl::android::hardware::neuralnetworks::utils
@@ -35,6 +35,10 @@ class InvalidBurst final : public nn::IBurst {
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 };

 }  // namespace android::hardware::neuralnetworks::utils
@@ -51,7 +51,16 @@ class ResilientBurst final : public nn::IBurst,
             const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration) const override;

+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
   private:
+    bool isValidInternal() const EXCLUDES(mMutex);
+    nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const;
+
     const Factory kMakeBurst;
     mutable std::mutex mMutex;
     mutable nn::SharedBurst mBurst GUARDED_BY(mMutex);
@@ -38,4 +38,10 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Invalid
     return NN_ERROR() << "InvalidBurst";
 }

+nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
+        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+    return NN_ERROR() << "InvalidBurst";
+}
+
 }  // namespace android::hardware::neuralnetworks::utils
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>
 #include <android-base/thread_annotations.h>
 #include <nnapi/IBurst.h>
+#include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
@@ -29,6 +30,9 @@
 #include <optional>
 #include <utility>

+#include "InvalidExecution.h"
+#include "ResilientExecution.h"
+
 namespace android::hardware::neuralnetworks::utils {
 namespace {

@@ -46,11 +50,11 @@ auto protect(const ResilientBurst& resilientBurst, const FnType& fn)
     // Attempt recovery and return if it fails.
     auto maybeBurst = resilientBurst.recover(burst.get());
     if (!maybeBurst.has_value()) {
-        auto [resultErrorMessage, resultErrorCode, resultOutputShapes] = std::move(result).error();
-        const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBurst.error();
-        return nn::error(resultErrorCode, std::move(resultOutputShapes))
-               << resultErrorMessage << ", and failed to recover dead burst object with error "
-               << recoveryErrorCode << ": " << recoveryErrorMessage;
+        const auto& [message, code] = maybeBurst.error();
+        std::ostringstream oss;
+        oss << ", and failed to recover dead burst object with error " << code << ": " << message;
+        result.error().message += oss.str();
+        return result;
     }
     burst = std::move(maybeBurst).value();

@@ -109,4 +113,35 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Resilie
     return protect(*this, fn);
 }

+nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+    auto self = shared_from_this();
+    ResilientExecution::Factory makeExecution =
+            [burst = std::move(self), request, measure, loopTimeoutDuration] {
+        return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    };
+    return ResilientExecution::create(std::move(makeExecution));
+#else
+    return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    if (!isValidInternal()) {
+        return std::make_shared<const InvalidExecution>();
+    }
+    const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
+        return burst.createReusableExecution(request, measure, loopTimeoutDuration);
+    };
+    return protect(*this, fn);
+}
+
+bool ResilientBurst::isValidInternal() const {
+    return true;
+}
+
 }  // namespace android::hardware::neuralnetworks::utils