Relax NeuralNetwork's VTS positive and negative base tests

am: af5e03a692

Change-Id: Ia9830b7bbd39d692b93265fbeb1f6a395f0b2988
This commit is contained in:
Michael Butler
2018-04-25 16:51:12 -07:00
committed by android-build-merger
3 changed files with 61 additions and 45 deletions

View File

@@ -72,37 +72,30 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
Model model = create_model();
// see if service can handle model
ErrorStatus supportedStatus;
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
supportedStatus = status;
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (fullySupportsModel) {
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
} else {
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
}
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
@@ -111,6 +104,7 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
<< std::endl;
return;
}
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
int example_no = 1;

View File

@@ -69,26 +69,51 @@ void NeuralnetworksHidlTest::SetUp() {
void NeuralnetworksHidlTest::TearDown() {}
sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut() {
static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
ASSERT_NE(nullptr, preparedModel);
Model model = createValidTestModel();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
if (preparedModelCallback == nullptr) {
return nullptr;
}
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
return nullptr;
}
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
return nullptr;
}
*preparedModel = preparedModelCallback->getPreparedModel();
return preparedModel;
// The getSupportedOperations call returns a list of operations that are
// guaranteed not to fail if prepareModel is called, and
// 'fullySupportsModel' is true if and only if the entire model is guaranteed.
// If a driver has any doubt that it can prepare an operation, it must
// return false. So here, if a driver isn't sure if it can support an
// operation, but reports that it successfully prepared the model, the test
// can continue.
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel->get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
return;
}
ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel->get());
}
// create device test
@@ -149,18 +174,8 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
Model model = createValidTestModel();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
EXPECT_NE(nullptr, preparedModel.get());
sp<IPreparedModel> preparedModel;
doPrepareModelShortcut(device, &preparedModel);
}
// prepare simple model negative test 1
@@ -201,8 +216,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
const uint32_t OUTPUT = 1;
sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
ASSERT_NE(nullptr, preparedModel.get());
sp<IPreparedModel> preparedModel;
ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
if (preparedModel == nullptr) {
return;
}
Request request = createValidTestRequest();
auto postWork = [&] {
@@ -235,8 +253,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
ASSERT_NE(nullptr, preparedModel.get());
sp<IPreparedModel> preparedModel;
ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
if (preparedModel == nullptr) {
return;
}
Request request = createInvalidTestRequest1();
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
@@ -252,8 +273,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
ASSERT_NE(nullptr, preparedModel.get());
sp<IPreparedModel> preparedModel;
ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
if (preparedModel == nullptr) {
return;
}
Request request = createInvalidTestRequest2();
sp<ExecutionCallback> executionCallback = new ExecutionCallback();

View File

@@ -74,8 +74,6 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
void SetUp() override;
void TearDown() override;
sp<IPreparedModel> doPrepareModelShortcut();
sp<IDevice> device;
};