hardware_interfaces/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
Slava Shklyaev · 73ee79dafa
Refactor NNAPI VTS to remove unreasonable dependence between versions
To make it easier to create the next version of NNAPI, this change
removes the following nonsensical dependencies:
- NNAPI 1.0 VTS depends on NNAPI 1.1 and 1.2
- NNAPI 1.1 VTS depends on NNAPI 1.2

In particular, I made the following changes:
- split GeneratedTestHarness.cpp into three separate implementations,
- created a restricted version of Callbacks.h for 1.0 and 1.1,
- removed the dependency on frameworks/ml/nn/HalInterfaces.h,
- refactored Android.bp files for more autonomy between 1.0, 1.1, and 1.2,
- consolidated some common code into Utils.h,
- created structure for sharing code between VTS versions (VtsHalNeuralNetworksV1_0_utils).

Bug: 74827824
Bug: 124462414
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_1CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_1TargetTest
Change-Id: I4243d0b5e574255cef1070850f4d0a284f65f54e
Merged-In: I4243d0b5e574255cef1070850f4d0a284f65f54e
(cherry picked from commit 1d6b465997)
2019-07-19 14:00:29 +01:00
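One concrete example of the code sharing described above is hidl_vec_removeAt, which the test below pulls in from "1.0/Utils.h" to drop individual inputs and outputs from a Request. The following is only a minimal sketch of what such a shared helper might look like; the actual signature and implementation in 1.0/Utils.h may differ, and the templated form and CHECK macros here are illustrative assumptions:

#include <android-base/logging.h>
#include <hidl/HidlSupport.h>

#include <algorithm>
#include <cstdint>
#include <utility>

// Hypothetical sketch of a shared VTS helper: removes the element at `index`
// from a hidl_vec by rebuilding the vector without it, since hidl_vec does
// not provide an erase() operation.
template <typename Type>
void hidl_vec_removeAt(android::hardware::hidl_vec<Type>* vec, uint32_t index) {
    CHECK(vec != nullptr);
    CHECK_LT(index, vec->size());
    android::hardware::hidl_vec<Type> newVec;
    newVec.resize(vec->size() - 1);
    std::copy(vec->begin(), vec->begin() + index, newVec.begin());
    std::copy(vec->begin() + index + 1, vec->end(), newVec.begin() + index);
    *vec = std::move(newVec);
}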


/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_2 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
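// Returns true when the driver reported no timing information, i.e. both
// timeOnDevice and timeInDriver are set to UINT64_MAX.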
static bool badTiming(Timing timing) {
return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
}
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
// mutation to the request does not leave this function.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
Request request, const std::function<void(Request*)>& mutation) {
mutation(&request);
// We'd like to test both with timing requested and without timing
// requested. Rather than running each test both ways, we'll decide whether
// to request timing by hashing the message. We do not use std::hash because
// it is not guaranteed stable across executions.
char hash = 0;
for (auto c : message) {
hash ^= c;
}
MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
// asynchronous
{
SCOPED_TRACE(message + " [execute_1_2]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executeLaunchStatus =
preparedModel->execute_1_2(request, measure, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
executionCallback->wait();
ErrorStatus executionReturnStatus = executionCallback->getStatus();
const auto& outputShapes = executionCallback->getOutputShapes();
Timing timing = executionCallback->getTiming();
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
ASSERT_EQ(outputShapes.size(), 0);
ASSERT_TRUE(badTiming(timing));
}
// synchronous
{
SCOPED_TRACE(message + " [executeSynchronously]");
Return<void> executeStatus = preparedModel->executeSynchronously(
request, measure,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));
});
ASSERT_TRUE(executeStatus.isOk());
}
// burst
{
SCOPED_TRACE(message + " [burst]");
// create burst
std::shared_ptr<::android::nn::ExecutionBurstController> burst =
::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
ASSERT_NE(nullptr, burst.get());
// create memory keys
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
// execute and verify
ErrorStatus error;
std::vector<OutputShape> outputShapes;
Timing timing;
std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));
// additional burst testing
if (request.pools.size() > 0) {
// valid free
burst->freeMemory(keys.front());
// negative test: invalid free of unknown (blank) memory
burst->freeMemory(intptr_t{});
// negative test: double free of memory
burst->freeMemory(keys.front());
}
}
}
///////////////////////// REMOVE INPUT ////////////////////////////////////
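// Removes each input from the request in turn and expects every execution
// path to reject the resulting request as invalid.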
static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
for (size_t input = 0; input < request.inputs.size(); ++input) {
const std::string message = "removeInput: removed input " + std::to_string(input);
validate(preparedModel, message, request,
[input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
}
}
///////////////////////// REMOVE OUTPUT ////////////////////////////////////
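// Removes each output from the request in turn and expects every execution
// path to reject the resulting request as invalid.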
static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
for (size_t output = 0; output < request.outputs.size(); ++output) {
const std::string message = "removeOutput: removed Output " + std::to_string(output);
validate(preparedModel, message, request,
[output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
}
}
///////////////////////////// ENTRY POINT //////////////////////////////////
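// Builds a Request for each golden example: one shared memory pool holds all
// inputs and another holds all outputs, with per-operand offsets recorded in
// the RequestArguments. Returns an empty vector if allocation or mapping fails.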
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
std::vector<Request> requests;
for (auto& example : examples) {
const MixedTyped& inputs = example.operands.first;
const MixedTyped& outputs = example.operands.second;
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
// Compute the offset of each input within the input pool (the first starts at 0)
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
}
}
// Go through all outputs, initialize RequestArgument descriptors
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
// Compute the offset of each output within the output pool (the first starts at 0)
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
return {};
}
// map pool
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
if (inputMemory == nullptr) {
return {};
}
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
if (inputPtr == nullptr) {
return {};
}
// initialize pool
inputMemory->update();
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
}
return requests;
}
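// Applies each mutation-based validation test (removed input, removed output)
// to every request.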
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
// validate each request
for (const Request& request : requests) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
}
}
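// Checks that each request fails outright on the synchronous execution path,
// without any mutation applied.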
void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
for (const Request& request : requests) {
SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
Return<void> executeStatus = preparedModel->executeSynchronously(
request, MeasureTiming::NO,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
ASSERT_NE(ErrorStatus::NONE, error);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));
});
ASSERT_TRUE(executeStatus.isOk());
}
}
} // namespace functional
} // namespace vts
} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android