diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 31a1a819ee..7c1faeef68 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -28,7 +28,7 @@ cc_library_static {
     ],
     header_libs: [
         "libbase_headers",
-    ]
+    ],
 }
 
 cc_test {
@@ -39,9 +39,9 @@ cc_test {
         "CompilationCachingTests.cpp",
         "GeneratedTestHarness.cpp",
         "TestAssertions.cpp",
+        "ValidateBurst.cpp",
         "ValidateModel.cpp",
         "ValidateRequest.cpp",
-        "ValidateBurst.cpp",
         "VtsHalNeuralnetworks.cpp",
     ],
     local_include_dirs: ["include"],
@@ -50,18 +50,17 @@ cc_test {
         "libnativewindow",
     ],
     static_libs: [
+        "VtsHalNeuralNetworksV1_0_utils",
+        "VtsHalNeuralNetworksV1_2Callbacks",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
-        "android.hardware.neuralnetworks@1.3",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
         "libneuralnetworks_utils",
-        "VtsHalNeuralNetworksV1_0_utils",
-        "VtsHalNeuralNetworksV1_2Callbacks",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
@@ -71,5 +70,8 @@ cc_test {
     header_libs: [
         "libneuralnetworks_headers",
     ],
-    test_suites: ["general-tests", "vts-core"],
+    test_suites: [
+        "general-tests",
+        "vts-core",
+    ],
 }
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 10dec791cf..449b8f369d 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -32,7 +32,6 @@
 #include "GeneratedTestHarness.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
-#include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
 // Forward declaration of the mobilenet generated test models in
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 4c8fede8b2..3ab01351e9 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -43,7 +43,6 @@
 #include "ExecutionBurstController.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
-#include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -273,7 +272,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
             int n;
             std::tie(n, outputShapes, timing, std::ignore) =
                     controller->compute(request, testConfig.measureTiming, keys);
-            executionStatus = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
+            executionStatus = nn::legacyConvertResultCodeToErrorStatus(n);
 
             break;
         }
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index ec9629bccb..cc9d8048d0 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -23,7 +23,6 @@
 #include "ExecutionBurstServer.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
-#include "Utils.h"
 
 #include <android-base/logging.h>
 #include <cstring>
@@ -296,8 +295,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusRegular =
-            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
+    const ErrorStatus statusRegular = nn::legacyConvertResultCodeToErrorStatus(nRegular);
     EXPECT_FALSE(fallbackRegular);
 
     // skip test if regular burst output isn't useful for testing a failure
@@ -313,7 +311,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusSmall = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
+    const ErrorStatus statusSmall = nn::legacyConvertResultCodeToErrorStatus(nSmall);
     EXPECT_NE(ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 7b5ff9b8e4..8498cb041b 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -22,7 +22,6 @@
 #include "ExecutionBurstController.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
-#include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -107,7 +106,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
 
         // execute and verify
        const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
-        const ErrorStatus status = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
+        const ErrorStatus status = nn::legacyConvertResultCodeToErrorStatus(n);
         EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
         EXPECT_EQ(outputShapes.size(), 0);
         EXPECT_TRUE(badTiming(timing));
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 8c9393b030..83a8d94ba5 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -626,21 +626,28 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
             ErrorStatus result;
             hidl_handle syncFenceHandle;
             sp<IFencedExecutionCallback> fencedCallback;
-            Return<void> ret = preparedModel->executeFenced(
-                    request, {}, testConfig.measureTiming, {}, loopTimeoutDuration, {},
-                    [&result, &syncFenceHandle, &fencedCallback](
-                            ErrorStatus error, const hidl_handle& handle,
-                            const sp<IFencedExecutionCallback>& callback) {
-                        result = error;
-                        syncFenceHandle = handle;
-                        fencedCallback = callback;
-                    });
+            auto callbackFunc = [&result, &syncFenceHandle, &fencedCallback](
+                                        ErrorStatus error, const hidl_handle& handle,
+                                        const sp<IFencedExecutionCallback>& callback) {
+                result = error;
+                syncFenceHandle = handle;
+                fencedCallback = callback;
+            };
+            Return<void> ret =
+                    preparedModel->executeFenced(request, {}, testConfig.measureTiming, {},
+                                                 loopTimeoutDuration, {}, callbackFunc);
             ASSERT_TRUE(ret.isOk());
             if (result != ErrorStatus::NONE) {
                 ASSERT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
                 ASSERT_EQ(fencedCallback, nullptr);
-                executionStatus = ErrorStatus::GENERAL_FAILURE;
+                executionStatus = result;
             } else if (syncFenceHandle.getNativeHandle()) {
+                // If a sync fence is returned, try to start another run waiting for the sync fence.
+                ret = preparedModel->executeFenced(request, {syncFenceHandle},
+                                                   testConfig.measureTiming, {},
+                                                   loopTimeoutDuration, {}, callbackFunc);
+                ASSERT_TRUE(ret.isOk());
+                ASSERT_EQ(result, ErrorStatus::NONE);
                 waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
             }
             if (result == ErrorStatus::NONE) {
@@ -656,9 +663,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         }
     }
 
-    // The driver is allowed to reject executeFenced, and if they do, we should skip.
-    if ((testConfig.outputType != OutputType::FULLY_SPECIFIED ||
-         testConfig.executor == Executor::FENCED) &&
+    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
         executionStatus == ErrorStatus::GENERAL_FAILURE) {
         if (skipped != nullptr) {
             *skipped = true;
@@ -691,12 +696,22 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
                         outputShapes.size() == testModel.main.outputIndexes.size());
             break;
         case OutputType::UNSPECIFIED:
+            if (testConfig.executor == Executor::FENCED) {
+                // For Executor::FENCED, the output shape must be fully specified.
+                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                return;
+            }
             // If the model output operands are not fully specified, outputShapes must have
             // the same number of elements as the number of outputs.
             ASSERT_EQ(ErrorStatus::NONE, executionStatus);
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             break;
         case OutputType::INSUFFICIENT:
+            if (testConfig.executor == Executor::FENCED) {
+                // For Executor::FENCED, the output shape must be fully specified.
+                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                return;
+            }
             ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
             ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
             ASSERT_FALSE(outputShapes[0].isSufficient);
@@ -739,12 +754,12 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
         case TestKind::DYNAMIC_SHAPE: {
             outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
-            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
         } break;
         case TestKind::MEMORY_DOMAIN: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {MeasureTiming::NO};
-            executorList = {Executor::ASYNC, Executor::SYNC};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
             memoryType = MemoryType::DEVICE;
         } break;
         case TestKind::FENCED_COMPUTE: {
@@ -921,8 +936,13 @@ INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel
 INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
-INSTANTIATE_GENERATED_TEST(FencedComputeTest,
-                           [](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
+    return !testModel.expectFailure &&
+           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
+                       [&testModel](uint32_t index) {
+                           return testModel.main.operands[index].data.size() > 0;
+                       });
+});
 
 INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
     return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
index 6ff9dfd3a8..aecb7b79c4 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
@@ -23,7 +23,6 @@
 #include "ExecutionBurstServer.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
-#include "Utils.h"
 
 #include <android-base/logging.h>
 #include <cstring>
@@ -302,8 +301,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const V1_0::ErrorStatus statusRegular =
-            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
+    const V1_0::ErrorStatus statusRegular = nn::legacyConvertResultCodeToErrorStatus(nRegular);
     EXPECT_FALSE(fallbackRegular);
 
     // skip test if regular burst output isn't useful for testing a failure
@@ -319,8 +317,7 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const V1_0::ErrorStatus statusSmall =
-            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
+    const V1_0::ErrorStatus statusSmall = nn::legacyConvertResultCodeToErrorStatus(nSmall);
     EXPECT_NE(V1_0::ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));