Mirror of https://github.com/Evolution-X/hardware_interfaces (synced 2026-02-01 11:36:00 +00:00)
Merge changes from topics "executeFenced_tests", "nnapi-decouple-1.2"
* changes:
  NNAPI VTS: decouple 1.2 tests from 1.3 types
  Add tests to make sure executeFenced validate unspecified output shapes
  Add more tests exercising IPreparedModel::executeFenced API
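At a glance, the contract the executeFenced changes pin down: a fenced execution signals completion through a sync fence and has no channel for reporting output shapes afterwards, so a request whose output operands are not fully specified must be rejected up front. A minimal sketch of that rule, using simplified stand-ins for the HAL types (all names below are illustrative, not from the source):

```cpp
#include <cstdint>
#include <vector>

// Simplified stand-ins for the HAL types (illustrative only).
enum class ErrorStatus { NONE, INVALID_ARGUMENT };

struct Operand {
    // Per NNAPI conventions, a dimension of 0 means "unspecified"; an
    // empty vector here stands for unknown rank (tensor operands only).
    std::vector<uint32_t> dimensions;
};

static bool isFullySpecified(const Operand& operand) {
    if (operand.dimensions.empty()) return false;  // unknown rank
    for (uint32_t d : operand.dimensions) {
        if (d == 0) return false;  // unknown extent
    }
    return true;
}

// The rule the new tests pin down: executeFenced must fail with
// INVALID_ARGUMENT when any output shape is not fully specified.
ErrorStatus validateFencedOutputs(const std::vector<Operand>& outputOperands) {
    for (const Operand& output : outputOperands) {
        if (!isFullySpecified(output)) return ErrorStatus::INVALID_ARGUMENT;
    }
    return ErrorStatus::NONE;
}
```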
@@ -28,7 +28,7 @@ cc_library_static {
     ],
     header_libs: [
         "libbase_headers",
-    ]
+    ],
 }

 cc_test {
@@ -39,9 +39,9 @@ cc_test {
         "CompilationCachingTests.cpp",
         "GeneratedTestHarness.cpp",
         "TestAssertions.cpp",
+        "ValidateBurst.cpp",
         "ValidateModel.cpp",
         "ValidateRequest.cpp",
-        "ValidateBurst.cpp",
         "VtsHalNeuralnetworks.cpp",
     ],
     local_include_dirs: ["include"],
@@ -50,18 +50,17 @@ cc_test {
         "libnativewindow",
     ],
     static_libs: [
+        "VtsHalNeuralNetworksV1_0_utils",
+        "VtsHalNeuralNetworksV1_2Callbacks",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
-        "android.hardware.neuralnetworks@1.3",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
         "libneuralnetworks_utils",
-        "VtsHalNeuralNetworksV1_0_utils",
-        "VtsHalNeuralNetworksV1_2Callbacks",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
@@ -71,5 +70,8 @@ cc_test {
     header_libs: [
         "libneuralnetworks_headers",
     ],
-    test_suites: ["general-tests", "vts-core"],
+    test_suites: [
+        "general-tests",
+        "vts-core",
+    ],
 }
@@ -32,7 +32,6 @@
 #include "GeneratedTestHarness.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"

 // Forward declaration of the mobilenet generated test models in
@@ -43,7 +43,6 @@
 #include "ExecutionBurstController.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"

 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -273,7 +272,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
             int n;
             std::tie(n, outputShapes, timing, std::ignore) =
                     controller->compute(request, testConfig.measureTiming, keys);
-            executionStatus = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
+            executionStatus = nn::legacyConvertResultCodeToErrorStatus(n);

             break;
         }
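This substitution, repeated in several hunks below, drops the round-trip through 1.3 types (`nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n))`) in favor of a helper that maps the `int` result code straight to a 1.0 `ErrorStatus`. A hedged sketch of what such a mapping plausibly does, keyed off the `ANEURALNETWORKS_*` result codes from `NeuralNetworks.h`; the real helper lives in the shared VTS utils and its exact case set may differ:

```cpp
// Sketch of a result-code-to-ErrorStatus mapping (assumed behavior).
// Numeric values follow NeuralNetworks.h: NO_ERROR=0, BAD_DATA=4,
// OUTPUT_INSUFFICIENT_SIZE=8, UNAVAILABLE_DEVICE=9.
enum class ErrorStatus {  // mirrors V1_0::ErrorStatus
    NONE,
    DEVICE_UNAVAILABLE,
    GENERAL_FAILURE,
    OUTPUT_INSUFFICIENT_SIZE,
    INVALID_ARGUMENT,
};

ErrorStatus legacyConvertResultCodeToErrorStatus(int resultCode) {
    switch (resultCode) {
        case 0 /* ANEURALNETWORKS_NO_ERROR */:
            return ErrorStatus::NONE;
        case 4 /* ANEURALNETWORKS_BAD_DATA */:
            return ErrorStatus::INVALID_ARGUMENT;
        case 8 /* ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE */:
            return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        case 9 /* ANEURALNETWORKS_UNAVAILABLE_DEVICE */:
            return ErrorStatus::DEVICE_UNAVAILABLE;
        default:  // every other failure collapses to a general failure
            return ErrorStatus::GENERAL_FAILURE;
    }
}
```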
@@ -23,7 +23,6 @@
 #include "ExecutionBurstServer.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 #include "Utils.h"

 #include <android-base/logging.h>
 #include <chrono>
@@ -296,8 +295,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusRegular =
-            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
+    const ErrorStatus statusRegular = nn::legacyConvertResultCodeToErrorStatus(nRegular);
     EXPECT_FALSE(fallbackRegular);

     // skip test if regular burst output isn't useful for testing a failure
@@ -313,7 +311,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusSmall = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
+    const ErrorStatus statusSmall = nn::legacyConvertResultCodeToErrorStatus(nSmall);
     EXPECT_NE(ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
@@ -22,7 +22,6 @@
 #include "ExecutionBurstController.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"

 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -107,7 +106,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&

     // execute and verify
     const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
-    const ErrorStatus status = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
+    const ErrorStatus status = nn::legacyConvertResultCodeToErrorStatus(n);
     EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
     EXPECT_EQ(outputShapes.size(), 0);
     EXPECT_TRUE(badTiming(timing));
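The `badTiming()` checks in these validation tests assert that a failed execution reports no timing. A sketch consistent with how the tests use it, assuming the `UINT64_MAX` "not measured" sentinel the HAL's `Timing` struct uses:

```cpp
#include <cstdint>

// Stand-in for V1_2::Timing; in the HAL both durations are in
// microseconds, with UINT64_MAX meaning "not measured".
struct Timing {
    uint64_t timeOnDevice;
    uint64_t timeInDriver;
};

// A failed or unmeasured execution must report no timing at all.
bool badTiming(Timing timing) {
    return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
}
```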
@@ -626,21 +626,28 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             ErrorStatus result;
             hidl_handle syncFenceHandle;
             sp<IFencedExecutionCallback> fencedCallback;
-            Return<void> ret = preparedModel->executeFenced(
-                    request, {}, testConfig.measureTiming, {}, loopTimeoutDuration, {},
-                    [&result, &syncFenceHandle, &fencedCallback](
-                            ErrorStatus error, const hidl_handle& handle,
-                            const sp<IFencedExecutionCallback>& callback) {
-                        result = error;
-                        syncFenceHandle = handle;
-                        fencedCallback = callback;
-                    });
+            auto callbackFunc = [&result, &syncFenceHandle, &fencedCallback](
+                                        ErrorStatus error, const hidl_handle& handle,
+                                        const sp<IFencedExecutionCallback>& callback) {
+                result = error;
+                syncFenceHandle = handle;
+                fencedCallback = callback;
+            };
+            Return<void> ret =
+                    preparedModel->executeFenced(request, {}, testConfig.measureTiming, {},
+                                                 loopTimeoutDuration, {}, callbackFunc);
             ASSERT_TRUE(ret.isOk());
             if (result != ErrorStatus::NONE) {
                 ASSERT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
                 ASSERT_EQ(fencedCallback, nullptr);
-                executionStatus = ErrorStatus::GENERAL_FAILURE;
+                executionStatus = result;
             } else if (syncFenceHandle.getNativeHandle()) {
+                // If a sync fence is returned, try start another run waiting for the sync fence.
+                ret = preparedModel->executeFenced(request, {syncFenceHandle},
+                                                   testConfig.measureTiming, {},
+                                                   loopTimeoutDuration, {}, callbackFunc);
+                ASSERT_TRUE(ret.isOk());
+                ASSERT_EQ(result, ErrorStatus::NONE);
                 waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
             }
             if (result == ErrorStatus::NONE) {
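The `waitForSyncFence()` call above blocks on the fence FD that `executeFenced` returned before the outputs are checked. A sketch of such a wait via libsync's `sync_wait()`; the infinite timeout here is an assumption, and the real helper may bound it:

```cpp
#include <android/sync.h>  // sync_wait(), from libsync

#include <gtest/gtest.h>

// Block until the sync fence signals. sync_wait() takes a fence FD and
// a timeout in milliseconds; a negative timeout waits indefinitely, and
// a non-negative return value indicates the fence signaled.
void waitForSyncFence(int syncFd) {
    constexpr int kInfiniteTimeout = -1;  // assumption: wait forever
    ASSERT_GE(syncFd, 0);
    ASSERT_GE(sync_wait(syncFd, kInfiniteTimeout), 0);
}
```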
@@ -656,9 +663,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
                 }
             }

-            // The driver is allowed to reject executeFenced, and if they do, we should skip.
-            if ((testConfig.outputType != OutputType::FULLY_SPECIFIED ||
-                 testConfig.executor == Executor::FENCED) &&
+            if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
                 executionStatus == ErrorStatus::GENERAL_FAILURE) {
                 if (skipped != nullptr) {
                     *skipped = true;
@@ -691,12 +696,22 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
                                outputShapes.size() == testModel.main.outputIndexes.size());
                 break;
             case OutputType::UNSPECIFIED:
+                if (testConfig.executor == Executor::FENCED) {
+                    // For Executor::FENCED, the output shape must be fully specified.
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                    return;
+                }
                 // If the model output operands are not fully specified, outputShapes must have
                 // the same number of elements as the number of outputs.
                 ASSERT_EQ(ErrorStatus::NONE, executionStatus);
                 ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
                 break;
             case OutputType::INSUFFICIENT:
+                if (testConfig.executor == Executor::FENCED) {
+                    // For Executor::FENCED, the output shape must be fully specified.
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                    return;
+                }
                 ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
                 ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
                 ASSERT_FALSE(outputShapes[0].isSufficient);
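Condensing the switch above: with the FENCED executor now covered by the unspecified and insufficient output cases, the expected status per combination can be restated as below. This is a hypothetical helper for illustration only; the real checks stay inline in the test:

```cpp
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
enum class Executor { ASYNC, SYNC, BURST, FENCED };
enum class ErrorStatus { NONE, OUTPUT_INSUFFICIENT_SIZE, INVALID_ARGUMENT };

// Fenced execution rejects anything but fully specified output shapes
// up front; the other executors report per-output shape results.
ErrorStatus expectedExecutionStatus(OutputType outputType, Executor executor) {
    if (executor == Executor::FENCED && outputType != OutputType::FULLY_SPECIFIED) {
        return ErrorStatus::INVALID_ARGUMENT;
    }
    return outputType == OutputType::INSUFFICIENT ? ErrorStatus::OUTPUT_INSUFFICIENT_SIZE
                                                  : ErrorStatus::NONE;
}
```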
@@ -739,12 +754,12 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         case TestKind::DYNAMIC_SHAPE: {
             outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
-            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
         } break;
         case TestKind::MEMORY_DOMAIN: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {MeasureTiming::NO};
-            executorList = {Executor::ASYNC, Executor::SYNC};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
             memoryType = MemoryType::DEVICE;
         } break;
         case TestKind::FENCED_COMPUTE: {
@@ -921,8 +936,13 @@ INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel
 INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });

-INSTANTIATE_GENERATED_TEST(FencedComputeTest,
-                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
+    return !testModel.expectFailure &&
+           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
+                       [&testModel](uint32_t index) {
+                           return testModel.main.operands[index].data.size() > 0;
+                       });
+});
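The tightened filter above only instantiates FencedComputeTest for models whose every output operand carries reference data, presumably because an output without golden data cannot be checked after a fenced run. The same predicate rendered standalone, with minimal stand-ins for the generated-test-harness types (illustrative only):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Minimal stand-ins for the test-harness types (illustrative only).
struct TestOperand {
    std::vector<uint8_t> data;  // golden reference bytes
};
struct TestSubgraph {
    std::vector<TestOperand> operands;
    std::vector<uint32_t> outputIndexes;
};
struct TestModel {
    bool expectFailure;
    TestSubgraph main;
};

// Accept a model only if it should succeed and every output has data.
bool acceptForFencedCompute(const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(),
                       testModel.main.outputIndexes.end(), [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
}
```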

 INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
     return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
@@ -23,7 +23,6 @@
 #include "ExecutionBurstServer.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 #include "Utils.h"

 #include <android-base/logging.h>
 #include <chrono>
@@ -302,8 +301,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const V1_0::ErrorStatus statusRegular =
-            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
+    const V1_0::ErrorStatus statusRegular = nn::legacyConvertResultCodeToErrorStatus(nRegular);
     EXPECT_FALSE(fallbackRegular);

     // skip test if regular burst output isn't useful for testing a failure
@@ -319,8 +317,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const V1_0::ErrorStatus statusSmall =
-            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
+    const V1_0::ErrorStatus statusSmall = nn::legacyConvertResultCodeToErrorStatus(nSmall);
     EXPECT_NE(V1_0::ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));