Files
hardware_interfaces/neuralnetworks/1.0/utils/src/Burst.cpp
Xusong Wang b2e8085ce2 Introduce reusable burst to canonical interface -- HAL.
This CL modifies the canonical interface for reusable burst executions:
- Add new method IBurst::createExecution

A reusable burst execution will not fall back to another execution
path if sending the request packet fails. The behavior of single-shot
burst executions remains unchanged.

Additionally, this CL enables pointer -> shared memory conversion in
the 1.2/1.3 burst implementations.

Bug: 184073769
Test: NNT_static
Test: neuralnetworks_utils_hal_1_0_test
Test: neuralnetworks_utils_hal_1_1_test
Test: neuralnetworks_utils_hal_1_2_test
Test: neuralnetworks_utils_hal_1_3_test
Test: neuralnetworks_utils_hal_common_test
Change-Id: Iaac81668d247c2cb76d70e6abbd10f00b397b19f
Merged-In: Iaac81668d247c2cb76d70e6abbd10f00b397b19f
(cherry picked from commit ead6d37ae9)
2021-05-10 15:22:08 -07:00
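
For orientation, the sketch below shows how a client of the canonical
interface might drive the reusable path this CL introduces. It is
illustrative only: runTwice is a made-up helper, and it assumes that
preparedModel is a valid nn::SharedPreparedModel, that request is a fully
specified nn::Request, that the canonical IPreparedModel exposes
configureExecutionBurst(), and that nn::IExecution exposes a
compute(deadline) entry point.

#include <nnapi/IBurst.h>
#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Types.h>

namespace nn = ::android::nn;

// Illustrative sketch, not part of this CL.
bool runTwice(const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
    // A burst groups executions issued against the same prepared model.
    const auto burstResult = preparedModel->configureExecutionBurst();
    if (!burstResult.has_value()) return false;
    const nn::SharedBurst& burst = burstResult.value();

    // New with this CL: build the request-specific execution object once...
    const auto executionResult = burst->createReusableExecution(
            request, nn::MeasureTiming::NO, /*loopTimeoutDuration=*/{});
    if (!executionResult.has_value()) return false;
    const nn::SharedExecution& execution = executionResult.value();

    // ...then compute with it repeatedly without re-sending the request each time.
    for (int i = 0; i < 2; ++i) {
        if (!execution->compute(/*deadline=*/{}).has_value()) return false;
    }
    return true;
}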


/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Burst.h"
#include <android-base/logging.h>
#include <nnapi/IBurst.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <memory>
#include <optional>
#include <utility>
namespace android::hardware::neuralnetworks::V1_0::utils {
nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
nn::SharedPreparedModel preparedModel) {
if (preparedModel == nullptr) {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
<< "V1_0::utils::Burst::create must have non-null preparedModel";
}
return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(preparedModel));
}
Burst::Burst(PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel)
: kPreparedModel(std::move(preparedModel)) {
CHECK(kPreparedModel != nullptr);
}
Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& /*memory*/) const {
return nullptr;
}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const {
return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
}
nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalDuration& loopTimeoutDuration) const {
return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
}
} // namespace android::hardware::neuralnetworks::V1_0::utils
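
A minimal usage sketch for this adapter (illustrative only; it assumes
preparedModel is a valid nn::SharedPreparedModel, request is a fully
specified nn::Request, and the V1_0::utils and nn namespaces are
reachable from the calling context):

// Illustrative only: wrap a canonical prepared model in the 1.0 burst adapter.
const auto burstResult = V1_0::utils::Burst::create(preparedModel);
if (burstResult.has_value()) {
    const std::shared_ptr<const V1_0::utils::Burst>& burst = burstResult.value();
    // Single-shot path: forwards to the prepared model's execute().
    const auto oneShot = burst->execute(request, nn::MeasureTiming::NO,
                                        /*deadline=*/{}, /*loopTimeoutDuration=*/{});
    // Reusable path: forwards to the prepared model's createReusableExecution().
    const auto reusable = burst->createReusableExecution(request, nn::MeasureTiming::NO,
                                                         /*loopTimeoutDuration=*/{});
}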