From af5e03a692e58761527c592caf8d9c69bb2bf31b Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Fri, 13 Apr 2018 17:55:03 -0700
Subject: [PATCH] Relax NeuralNetwork's VTS positive and negative base tests

There are some NN VTS tests that assume a service is able to generate a
model consisting only of a floating point add operation. However, some
drivers do not support floating point operations. This CL relaxes the
test requirements to allow a test to be skipped if the service does not
support floating point add.

Bug: 77227504
Test: mma
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: https://drive.google.com/open?id=1P-rEzUMK0EoWSrOFuXy46EubpwC3A1cs
Change-Id: Ied49c96af975eff734377b7b7445c6f5e63e117e
(cherry picked from commit 25d9dd7346d995b40e1d370eb9d12bb7e3f03396)
---
 .../vts/functional/GeneratedTestHarness.cpp   | 16 ++--
 .../VtsHalNeuralnetworksV1_0TargetTest.cpp    | 88 ++++++++++++-------
 .../VtsHalNeuralnetworksV1_0TargetTest.h      |  2 -
 3 files changed, 61 insertions(+), 45 deletions(-)

diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index d740b5f53c..e879510998 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -72,37 +72,30 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
     Model model = create_model();
 
     // see if service can handle model
-    ErrorStatus supportedStatus;
     bool fullySupportsModel = false;
     Return<void> supportedCall = device->getSupportedOperations(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            supportedStatus = status;
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
             ASSERT_NE(0ul, supported.size());
             fullySupportsModel =
                 std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
         });
     ASSERT_TRUE(supportedCall.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
 
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    if (fullySupportsModel) {
-        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    } else {
-        EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
-                    prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
-    }
 
     // early termination if vendor service cannot fully prepare model
-    if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel.get());
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                      "prepare model that it does not support.";
@@ -111,6 +104,7 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
                   << std::endl;
         return;
     }
+    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
     int example_no = 1;
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index b99e20e3b4..381487fdb7 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -69,26 +69,51 @@ void NeuralnetworksHidlTest::SetUp() {
 
 void NeuralnetworksHidlTest::TearDown() {}
 
-sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut() {
+static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
     Model model = createValidTestModel();
 
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    if (preparedModelCallback == nullptr) {
-        return nullptr;
-    }
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
-        return nullptr;
-    }
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            ASSERT_NE(0ul, supported.size());
+            fullySupportsModel =
+                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+        });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
 
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
-        return nullptr;
-    }
+    *preparedModel = preparedModelCallback->getPreparedModel();
 
-    return preparedModel;
+    // The getSupportedOperations call returns a list of operations that are
+    // guaranteed not to fail if prepareModel is called, and
+    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ] Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
 }
 
 // create device test
@@ -149,18 +174,8 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
 
 // prepare simple model positive test
 TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
-    Model model = createValidTestModel();
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    EXPECT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    doPrepareModelShortcut(device, &preparedModel);
 }
 
 // prepare simple model negative test 1
@@ -201,8 +216,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
     std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
     const uint32_t OUTPUT = 1;
 
-    sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
-    ASSERT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
 
     Request request = createValidTestRequest();
     auto postWork = [&] {
@@ -235,8 +253,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
 
 // execute simple graph negative test 1
 TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
-    sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
-    ASSERT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
 
     Request request = createInvalidTestRequest1();
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
@@ -252,8 +273,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
 
 // execute simple graph negative test 2
 TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
-    sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
-    ASSERT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
 
     Request request = createInvalidTestRequest2();
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
index 5cd209ae62..78d04d6b27 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
@@ -74,8 +74,6 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
     void SetUp() override;
     void TearDown() override;
 
-    sp<IPreparedModel> doPrepareModelShortcut();
-
    sp<IDevice> device;
 };
 
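
For reviewers who want the new control flow at a glance: doPrepareModelShortcut now reports the prepared model through an output parameter because GTest's ASSERT_* macros only abort the current function, so callers wrap the call in ASSERT_NO_FATAL_FAILURE and then check for a null prepared model to decide whether to skip the rest of the test. The sketch below is a minimal, self-contained illustration of that "query support, try to prepare, tolerate failure only for unsupported models" pattern; FakeDriver, FakeModel, FakePreparedModel, and prepareOrSkip are hypothetical stand-ins, not NNAPI HAL or VTS types.

// Illustrative sketch only -- FakeDriver, FakeModel, FakePreparedModel, and
// prepareOrSkip are invented stand-ins, not types from the NNAPI HAL.
#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

struct FakeModel {};          // stands in for a single-operation float ADD model
struct FakePreparedModel {};  // stands in for a prepared model handle

// Stand-in for a vendor driver that may not support floating point operations.
struct FakeDriver {
    bool supportsFloatAdd = false;

    // Per-operation support vector, analogous to a getSupportedOperations query.
    std::vector<bool> getSupportedOperations(const FakeModel&) const {
        return {supportsFloatAdd};
    }

    // Analogous to prepareModel: may fail for models it does not support.
    std::unique_ptr<FakePreparedModel> prepareModel(const FakeModel&) const {
        if (!supportsFloatAdd) return nullptr;
        return std::make_unique<FakePreparedModel>();
    }
};

// Mirrors the relaxed test logic: a preparation failure is tolerated only when
// the driver already reported that it does not fully support the model.
std::unique_ptr<FakePreparedModel> prepareOrSkip(const FakeDriver& driver,
                                                 const FakeModel& model) {
    const std::vector<bool> supported = driver.getSupportedOperations(model);
    const bool fullySupportsModel =
        std::all_of(supported.begin(), supported.end(), [](bool v) { return v; });

    std::unique_ptr<FakePreparedModel> prepared = driver.prepareModel(model);
    if (!fullySupportsModel && prepared == nullptr) {
        std::printf("skipping: driver cannot prepare a model it does not support\n");
        return nullptr;  // caller treats nullptr as "skip the rest of the test"
    }
    // A fully supported model must prepare successfully.
    if (prepared == nullptr) {
        std::printf("error: driver failed to prepare a fully supported model\n");
    }
    return prepared;
}

int main() {
    FakeDriver driver;  // supportsFloatAdd defaults to false
    FakeModel floatAddModel;
    if (prepareOrSkip(driver, floatAddModel) == nullptr) {
        return 0;  // a test body would return early here, as the patched tests do
    }
    // ... the execution half of the test would run here ...
    return 0;
}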