diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp index b8111492f0..88837db349 100644 --- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp +++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp @@ -603,7 +603,9 @@ void EvaluatePreparedModel(const sp& device, const sp& } } - if (testConfig.outputType != OutputType::FULLY_SPECIFIED && + // The driver is allowed to reject executeFenced, and if they do, we should skip. + if ((testConfig.outputType != OutputType::FULLY_SPECIFIED || + testConfig.executor == Executor::FENCED) && executionStatus == ErrorStatus::GENERAL_FAILURE) { if (skipped != nullptr) { *skipped = true; @@ -674,7 +676,7 @@ void EvaluatePreparedModel(const sp& device, const sp& case TestKind::GENERAL: { outputTypesList = {OutputType::FULLY_SPECIFIED}; measureTimingList = {MeasureTiming::NO, MeasureTiming::YES}; - executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED}; + executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST}; } break; case TestKind::DYNAMIC_SHAPE: { outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT}; @@ -687,6 +689,11 @@ void EvaluatePreparedModel(const sp& device, const sp& executorList = {Executor::ASYNC, Executor::SYNC}; memoryType = MemoryType::DEVICE; } break; + case TestKind::FENCED_COMPUTE: { + outputTypesList = {OutputType::FULLY_SPECIFIED}; + measureTimingList = {MeasureTiming::NO, MeasureTiming::YES}; + executorList = {Executor::FENCED}; + } break; case TestKind::QUANTIZATION_COUPLING: { LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel"; return; @@ -748,7 +755,8 @@ void Execute(const sp& device, const TestModel& testModel, TestKind tes switch (testKind) { case TestKind::GENERAL: case TestKind::DYNAMIC_SHAPE: - case TestKind::MEMORY_DOMAIN: { + case TestKind::MEMORY_DOMAIN: + case TestKind::FENCED_COMPUTE: { createPreparedModel(device, model, &preparedModel); if 
(preparedModel == nullptr) return; EvaluatePreparedModel(device, preparedModel, testModel, testKind); @@ -811,6 +819,9 @@ class DynamicOutputShapeTest : public GeneratedTest {}; // Tag for the memory domain tests class MemoryDomainTest : public GeneratedTest {}; +// Tag for the fenced compute tests +class FencedComputeTest : public GeneratedTest {}; + // Tag for the dynamic output shape tests class QuantizationCouplingTest : public GeneratedTest {}; @@ -826,6 +837,10 @@ TEST_P(MemoryDomainTest, Test) { Execute(kDevice, kTestModel, /*testKind=*/TestKind::MEMORY_DOMAIN); } +TEST_P(FencedComputeTest, Test) { + Execute(kDevice, kTestModel, /*testKind=*/TestKind::FENCED_COMPUTE); +} + TEST_P(QuantizationCouplingTest, Test) { Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING); } @@ -840,6 +855,9 @@ INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel INSTANTIATE_GENERATED_TEST(MemoryDomainTest, [](const TestModel& testModel) { return !testModel.expectFailure; }); +INSTANTIATE_GENERATED_TEST(FencedComputeTest, + [](const TestModel& testModel) { return !testModel.expectFailure; }); + INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) { return testModel.hasQuant8CoupledOperands() && testModel.operations.size() == 1; }); diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h index fe695b471d..e597fac7cf 100644 --- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h +++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h @@ -65,6 +65,8 @@ enum class TestKind { DYNAMIC_SHAPE, // Same as GENERAL but use device memories for inputs and outputs MEMORY_DOMAIN, + // Same as GENERAL but use executeFenced for execution + FENCED_COMPUTE, // Tests if quantized model with TENSOR_QUANT8_ASYMM produces the same result // (OK/SKIPPED/FAILED) as the model with all such tensors converted to // TENSOR_QUANT8_ASYMM_SIGNED. 
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp index 28cc8ffe65..c84f5b70e7 100644 --- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp +++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp @@ -140,26 +140,14 @@ void validateExecuteFenced(const sp& preparedModel, const Reques preparedModel->executeFenced(request, {hidl_handle(nullptr)}, V1_2::MeasureTiming::NO, [](ErrorStatus error, const hidl_handle& handle, const sp& callback) { - ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error); + // TODO: fix this once sample driver impl is merged. + if (error != ErrorStatus::DEVICE_UNAVAILABLE) { + ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error); + } ASSERT_EQ(handle.getNativeHandle(), nullptr); ASSERT_EQ(callback, nullptr); }); ASSERT_TRUE(ret_null.isOk()); - - native_handle_t* nativeHandle = native_handle_create(1, 0); - ASSERT_NE(nullptr, nativeHandle); - nativeHandle->data[0] = -1; - hidl_handle hidlHandle; - hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true); - Return ret_invalid = - preparedModel->executeFenced(request, {hidlHandle}, V1_2::MeasureTiming::NO, - [](ErrorStatus error, const hidl_handle& handle, - const sp& callback) { - ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error); - ASSERT_EQ(handle.getNativeHandle(), nullptr); - ASSERT_EQ(callback, nullptr); - }); - ASSERT_TRUE(ret_invalid.isOk()); } void validateEverything(const sp& device, const Model& model, const Request& request,