Add dynamic interface casting to NN utility code

Prior to this CL, the NN utility code would always use the IPreparedModel
at the static type declared by IPreparedModelCallback::notify*. This
means that an IPreparedModel returned with dynamic type V1_X but static
type V1_Y would be used by the utility code as a V1_Y::IPreparedModel.
This CL adds dynamic casting, such that an IPreparedModel returned with
dynamic type V1_X but static type V1_Y is dynamically cast to V1_X and
used as a V1_X::IPreparedModel.
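
For illustration, a minimal sketch of the casting pattern involved (the
helper name maybeUpgrade is hypothetical; castFrom and withDefault are
the HIDL calls the diffs below actually use):

    // Hypothetical helper: upgrade a V1_0 handle to V1_2 when possible.
    // castFrom queries the remote object's dynamic type at runtime and
    // yields the newer interface only if the object implements it.
    sp<V1_2::IPreparedModel> maybeUpgrade(const sp<V1_0::IPreparedModel>& preparedModel) {
        return V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr);
    }
    // A nullptr result means the object is only a V1_0::IPreparedModel.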

This CL also adds the utility functions
V1_[0123]::utils::convertFromNonCanonical, which convert from one
non-canonical type to another non-canonical type by using the canonical
types as an intermediate conversion "hop."
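
A hedged usage sketch (the v1_0Status variable is assumed; the V1_3
callback below performs exactly this conversion under NN_TRY): a
V1_0::ErrorStatus hops through the canonical nn::ErrorStatus on its way
to V1_3::ErrorStatus.

    // V1_0::ErrorStatus -> nn::ErrorStatus -> V1_3::ErrorStatus
    const V1_0::ErrorStatus v1_0Status = V1_0::ErrorStatus::NONE;  // assumed input
    const nn::GeneralResult<V1_3::ErrorStatus> v1_3Status =
            V1_3::utils::convertFromNonCanonical(v1_0Status);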

Bug: 178180472
Test: mma
Change-Id: I709b2a8944af2cc78b089aade55df1e2ab7b40cc
Merged-In: I709b2a8944af2cc78b089aade55df1e2ab7b40cc
(cherry picked from commit 49b5e4ebea)
Authored by Michael Butler on 2021-02-01 18:16:14 -08:00
Committed by Lev Proleev
parent b755e019fd, commit e5e67024c6
6 changed files with 51 additions and 3 deletions

@@ -44,6 +44,12 @@ bool valid(const Type& halObject) {
     return result.has_value();
 }
 
+template <typename Type>
+auto convertFromNonCanonical(const Type& nonCanonicalObject)
+        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
+    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_H

@@ -47,6 +47,12 @@ bool valid(const Type& halObject) {
     return result.has_value();
 }
 
+template <typename Type>
+auto convertFromNonCanonical(const Type& nonCanonicalObject)
+        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
+    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_H

@@ -54,6 +54,12 @@ bool valid(const Type& halObject) {
     return result.has_value();
 }
 
+template <typename Type>
+auto convertFromNonCanonical(const Type& nonCanonicalObject)
+        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
+    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_H

@@ -43,6 +43,15 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
+    if (const auto dynamicPreparedModel =
+                V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
+        return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
+    }
+    return V1_0::utils::prepareModelCallback(status, preparedModel);
+}
+
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                      const Timing& timing) {
@@ -72,7 +81,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executi
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const sp<V1_0::IPreparedModel>& preparedModel) {
-    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
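
In effect, a prepared model that arrives through the V1_0-typed notify()
but whose dynamic type implements V1_2::IPreparedModel is now wrapped at
V1_2. A hedged sketch of the behavior (the 'model' variable and its
provenance are assumed for illustration):

    // Assumed: 'model' is statically sp<V1_0::IPreparedModel>, but the
    // remote object also implements V1_2::IPreparedModel.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    callback->notify(V1_0::ErrorStatus::NONE, model);
    // The anonymous helper above casts to V1_2 first, so the stored
    // nn::SharedPreparedModel is backed by the V1_2 interface.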

@@ -49,6 +49,12 @@ bool valid(const Type& halObject) {
     return result.has_value();
 }
 
+template <typename Type>
+auto convertFromNonCanonical(const Type& nonCanonicalObject)
+        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
+    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_H

@@ -28,6 +28,7 @@
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Callbacks.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.0/PreparedModel.h>
 #include <nnapi/hal/1.2/Callbacks.h>
@@ -46,6 +47,20 @@
 namespace android::hardware::neuralnetworks::V1_3::utils {
 namespace {
 
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
+    if (const auto dynamicPreparedModel =
+                V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
+        const auto currentVersionStatus = NN_TRY(convertFromNonCanonical(status));
+        return V1_3::utils::prepareModelCallback(currentVersionStatus, dynamicPreparedModel);
+    }
+    if (const auto dynamicPreparedModel =
+                V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
+        return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
+    }
+    return V1_0::utils::prepareModelCallback(status, preparedModel);
+}
+
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
                                      const V1_2::Timing& timing) {
@@ -82,13 +97,13 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executi
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const sp<V1_0::IPreparedModel>& preparedModel) {
-    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                                const sp<V1_2::IPreparedModel>& preparedModel) {
-    mData.put(V1_2::utils::prepareModelCallback(status, preparedModel));
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
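
The V1_3 helper tries the interfaces newest-first: V1_3, then V1_2, then
the plain V1_0 wrapper, converting the V1_0 status up to V1_3 via
convertFromNonCanonical along the way. A hedged sketch (the 'model'
variable is assumed for illustration):

    // Assumed: 'model' arrives as sp<V1_0::IPreparedModel> but the
    // remote object implements V1_3::IPreparedModel.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    callback->notify(V1_0::ErrorStatus::NONE, model);
    // The V1_3 castFrom succeeds, the status hops through the canonical
    // nn::ErrorStatus to V1_3::ErrorStatus, and the V1_2/V1_0 fallbacks
    // are never tried.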