Merge changes from topic "nnapi-cp-test-model"

* changes:
  Modify 1.2 VTS tests to consume test struct directly.
  Modify 1.1 VTS tests to consume test struct directly.
  Modify 1.0 VTS tests to consume test struct directly.
This commit is contained in:
Treehugger Robot
2019-08-27 22:11:33 +00:00
committed by Gerrit Code Review
25 changed files with 803 additions and 1252 deletions

View File

@@ -32,12 +32,11 @@ cc_library_static {
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
}
@@ -60,13 +59,12 @@ cc_defaults {
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
test_suites: ["general-tests"],
}

View File

@@ -15,6 +15,7 @@
*/
#include "GeneratedTestHarness.h"
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
@@ -28,6 +29,7 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <gtest/gtest.h>
#include <iostream>
namespace android {
@@ -36,6 +38,7 @@ namespace neuralnetworks {
namespace V1_0 {
namespace generated_tests {
using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
@@ -45,137 +48,111 @@ using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
// Builds a V1_0 HIDL Model from the test-harness TestModel description.
//
// Operand data with CONSTANT_COPY lifetime is packed into Model.operandValues;
// CONSTANT_REFERENCE data is packed into a single shared-memory pool (pool 0).
// Offsets within each region are assigned in operand-index order, with each
// entry padded out via TestBuffer::alignedSize().
Model createModel(const TestModel& testModel) {
    const size_t operandCount = testModel.operands.size();

    // Translate operands, assigning a DataLocation to every constant operand.
    hidl_vec<Operand> operands(operandCount);
    size_t copiedBytes = 0;  // running size of the inline operandValues region
    size_t pooledBytes = 0;  // running size of the shared-memory pool
    for (uint32_t index = 0; index < operandCount; index++) {
        const auto& testOperand = testModel.operands[index];
        DataLocation location = {};
        switch (testOperand.lifetime) {
            case TestOperandLifeTime::CONSTANT_COPY:
                location = {.poolIndex = 0,
                            .offset = static_cast<uint32_t>(copiedBytes),
                            .length = static_cast<uint32_t>(testOperand.data.size())};
                copiedBytes += testOperand.data.alignedSize();
                break;
            case TestOperandLifeTime::CONSTANT_REFERENCE:
                location = {.poolIndex = 0,
                            .offset = static_cast<uint32_t>(pooledBytes),
                            .length = static_cast<uint32_t>(testOperand.data.size())};
                pooledBytes += testOperand.data.alignedSize();
                break;
            default:
                // Non-constant operands carry no data location.
                break;
        }
        operands[index] = {.type = static_cast<OperandType>(testOperand.type),
                           .dimensions = testOperand.dimensions,
                           .numberOfConsumers = testOperand.numberOfConsumers,
                           .scale = testOperand.scale,
                           .zeroPoint = testOperand.zeroPoint,
                           .lifetime = static_cast<OperandLifeTime>(testOperand.lifetime),
                           .location = location};
    }

    // Translate operations one-to-one.
    hidl_vec<Operation> operations(testModel.operations.size());
    for (uint32_t index = 0; index < testModel.operations.size(); index++) {
        const TestOperation& testOperation = testModel.operations[index];
        operations[index] = {.type = static_cast<OperationType>(testOperation.type),
                             .inputs = testOperation.inputs,
                             .outputs = testOperation.outputs};
    }

    // Pack CONSTANT_COPY data into the inline operandValues blob at the
    // offsets assigned above.
    hidl_vec<uint8_t> operandValues(copiedBytes);
    for (uint32_t index = 0; index < operandCount; index++) {
        const auto& testOperand = testModel.operands[index];
        if (testOperand.lifetime != TestOperandLifeTime::CONSTANT_COPY) continue;
        const uint8_t* source = testOperand.data.get<uint8_t>();
        std::copy(source, source + testOperand.data.size(),
                  operandValues.data() + operands[index].location.offset);
    }

    // Pack CONSTANT_REFERENCE data into one shared-memory pool, if any.
    hidl_vec<hidl_memory> pools;
    if (pooledBytes > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(pooledBytes));
        CHECK_NE(pools[0].size(), 0u);
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* poolPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(poolPtr != nullptr);
        for (uint32_t index = 0; index < operandCount; index++) {
            const auto& testOperand = testModel.operands[index];
            if (testOperand.lifetime != TestOperandLifeTime::CONSTANT_REFERENCE) continue;
            const uint8_t* source = testOperand.data.get<uint8_t>();
            std::copy(source, source + testOperand.data.size(),
                      poolPtr + operands[index].location.offset);
        }
    }

    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools)};
}
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples, float fpAtol,
float fpRtol) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
const Request request = createRequest(testModel);
int example_no = 1;
for (auto& example : examples) {
SCOPED_TRACE(example_no++);
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
// Launch execution.
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
CHECK(inputs.float16Operands.empty()) << "float16 is not supported in 1.0";
// Retrieve execution status.
executionCallback->wait();
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
// Compute offset for inputs 1 and so on
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
}
}
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(request);
MixedTyped test; // holding test results
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
ASSERT_NE(0ull, pools[INPUT].size());
ASSERT_NE(0ull, pools[OUTPUT].size());
// load data
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
ASSERT_NE(nullptr, inputMemory.get());
ASSERT_NE(nullptr, outputMemory.get());
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
ASSERT_NE(nullptr, inputPtr);
ASSERT_NE(nullptr, outputPtr);
inputMemory->update();
outputMemory->update();
// Go through all inputs, copy the values
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
outputMemory->commit();
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// validate results
outputMemory->read();
copy_back(&test, outputs_info, outputPtr);
outputMemory->commit();
// Filter out don't cares
MixedTyped filtered_golden = filter(golden, is_ignored);
MixedTyped filtered_test = filter(test, is_ignored);
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
}
// We want "close-enough" results.
checkResults(testModel, outputs);
}
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
Model model = create_model();
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
Model model = createModel(testModel);
// see if service can handle model
bool fullySupportsModel = false;
@@ -190,7 +167,6 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -213,8 +189,7 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
EvaluatePreparedModel(preparedModel, testModel);
}
} // namespace generated_tests

View File

@@ -26,10 +26,9 @@ namespace neuralnetworks {
namespace V1_0 {
namespace generated_tests {
using ::test_helper::MixedTypedExample;
Model createModel(const ::test_helper::TestModel& testModel);
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
void Execute(const sp<V1_0::IDevice>& device, const ::test_helper::TestModel& testModel);
} // namespace generated_tests
} // namespace V1_0

View File

@@ -14,20 +14,11 @@
* limitations under the License.
*/
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
namespace android::hardware::neuralnetworks::V1_0::generated_tests {
using namespace android::hardware::neuralnetworks::V1_0::vts::functional;

View File

@@ -14,45 +14,108 @@
* limitations under the License.
*/
#include "GeneratedTestHarness.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <cstring>
#include <map>
#include <algorithm>
#include <vector>
namespace android {
namespace hardware {
namespace neuralnetworks {
using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::DataLocation;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::test_helper::for_each;
using ::test_helper::MixedTyped;
using ::android::hidl::memory::V1_0::IMemory;
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
char* src) {
for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
char* begin = src + ra[index].location.offset;
memcpy(m.data(), begin, ra[index].location.length);
});
constexpr uint32_t kInputPoolIndex = 0;
constexpr uint32_t kOutputPoolIndex = 1;
// Builds a V1_0 HIDL Request from the TestModel description.
//
// Two shared-memory pools are created: kInputPoolIndex holds all input data
// (copied in here) and kOutputPoolIndex reserves space for the outputs.
// Offsets within each pool are assigned in index order, with each entry padded
// out via TestBuffer::alignedSize().
Request createRequest(const TestModel& testModel) {
    // Describe the inputs; an operand with no data is an omitted input.
    hidl_vec<RequestArgument> inputs(testModel.inputIndexes.size());
    size_t totalInputBytes = 0;
    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
        const auto& operand = testModel.operands[testModel.inputIndexes[i]];
        if (operand.data.size() == 0) {
            inputs[i] = {.hasNoValue = true};
            continue;
        }
        const DataLocation location = {.poolIndex = kInputPoolIndex,
                                       .offset = static_cast<uint32_t>(totalInputBytes),
                                       .length = static_cast<uint32_t>(operand.data.size())};
        totalInputBytes += operand.data.alignedSize();
        inputs[i] = {.hasNoValue = false, .location = location, .dimensions = {}};
    }

    // Describe the outputs.
    hidl_vec<RequestArgument> outputs(testModel.outputIndexes.size());
    size_t totalOutputBytes = 0;
    for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
        const auto& operand = testModel.operands[testModel.outputIndexes[i]];
        // A zero-sized output still gets a one-byte buffer: zero-sized tensors
        // are only supported internally to the driver, or reported in output
        // shapes, so a client must not pre-specify a zero-sized model output.
        // Otherwise there would be two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);
        const DataLocation location = {.poolIndex = kOutputPoolIndex,
                                       .offset = static_cast<uint32_t>(totalOutputBytes),
                                       .length = static_cast<uint32_t>(bufferSize)};
        totalOutputBytes +=
                operand.data.size() == 0 ? TestBuffer::kAlignment : operand.data.alignedSize();
        outputs[i] = {.hasNoValue = false, .location = location, .dimensions = {}};
    }

    // Allocate the two memory pools and copy the input data into the input pool.
    hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(totalInputBytes),
                                   nn::allocateSharedMemory(totalOutputBytes)};
    CHECK_NE(pools[kInputPoolIndex].size(), 0u);
    CHECK_NE(pools[kOutputPoolIndex].size(), 0u);
    sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex]);
    CHECK(inputMemory.get() != nullptr);
    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
    CHECK(inputPtr != nullptr);
    for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
        const auto& operand = testModel.operands[testModel.inputIndexes[i]];
        if (operand.data.size() == 0) continue;
        const uint8_t* source = operand.data.get<uint8_t>();
        std::copy(source, source + operand.data.size(), inputPtr + inputs[i].location.offset);
    }

    return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
copy_back_(&dst->float32Operands, ra, src);
copy_back_(&dst->int32Operands, ra, src);
copy_back_(&dst->quant8AsymmOperands, ra, src);
copy_back_(&dst->quant16SymmOperands, ra, src);
copy_back_(&dst->float16Operands, ra, src);
copy_back_(&dst->bool8Operands, ra, src);
copy_back_(&dst->quant8ChannelOperands, ra, src);
copy_back_(&dst->quant16AsymmOperands, ra, src);
copy_back_(&dst->quant8SymmOperands, ra, src);
static_assert(9 == MixedTyped::kNumTypes,
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
std::vector<TestBuffer> getOutputBuffers(const Request& request) {
sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
CHECK(outputMemory.get() != nullptr);
uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
CHECK(outputPtr != nullptr);
// Copy out output results.
std::vector<TestBuffer> outputBuffers;
for (const auto& output : request.outputs) {
outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
}
return outputBuffers;
}
} // namespace neuralnetworks

View File

@@ -16,13 +16,7 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
@@ -33,10 +27,6 @@ namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -102,103 +92,10 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
///////////////////////////// ENTRY POINT //////////////////////////////////
// Builds one V1_0 Request per MixedTypedExample. For each example, two
// shared-memory pools are allocated (index 0 for inputs, index 1 for outputs),
// input data is copied into the input pool, and RequestArguments with
// pool-relative offsets are produced. Returns an empty vector if any
// allocation or mapping step fails.
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
    // Pool indices within each request's `pools` vector.
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;
    std::vector<Request> requests;
    for (const MixedTypedExample& example : examples) {
        const MixedTyped& inputs = example.operands.first;
        const MixedTyped& outputs = example.operands.second;
        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;
        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        // for_all visits every operand in the MixedTyped; `s` is the operand's
        // byte size and `index` its position. A zero-sized input is treated as
        // an omitted (hasNoValue) argument.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                    .location = {.poolIndex = INPUT,
                                 .offset = 0,
                                 .length = static_cast<uint32_t>(s)},
                    .dimensions = {},
            };
            RequestArgument arg_empty = {
                    .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        // (offsets are assigned sequentially; omitted inputs still advance the
        // running offset by their recorded length, which is zero here).
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }
        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                    .location = {.poolIndex = OUTPUT,
                                 .offset = 0,
                                 .length = static_cast<uint32_t>(s)},
                    .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        // Allocate one pool sized for all inputs and one for all outputs.
        // Any failure below aborts the whole batch with an empty result.
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }
        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }
        // initialize pool
        // update()/commit() bracket the CPU write to the shared memory region.
        inputMemory->update();
        // Copy each input's bytes (`p` points at the data, `s` is its size) to
        // its assigned offset in the input pool.
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();
        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }
    return requests;
}
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
// validate each request
for (const Request& request : requests) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
}
void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
const Request& request) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
}
} // namespace functional

View File

@@ -121,7 +121,7 @@ void NeuralnetworksHidlTest::TearDown() {
::testing::VtsHalHidlTargetTestBase::TearDown();
}
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
void ValidationTest::validateEverything(const Model& model, const Request& request) {
validateModel(model);
// create IPreparedModel
@@ -131,7 +131,7 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<Re
return;
}
validateRequests(preparedModel, requests);
validateRequest(preparedModel, request);
}
} // namespace functional

View File

@@ -63,12 +63,11 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
protected:
void validateEverything(const Model& model, const std::vector<Request>& request);
void validateEverything(const Model& model, const Request& request);
private:
void validateModel(const Model& model);
void validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
};
// Tag for the generated tests

View File

@@ -26,8 +26,11 @@ namespace android {
namespace hardware {
namespace neuralnetworks {
void copy_back(::test_helper::MixedTyped* dst, const std::vector<V1_0::RequestArgument>& ra,
char* src);
// Create HIDL Request from the TestModel struct.
V1_0::Request createRequest(const ::test_helper::TestModel& testModel);
// After execution, copy out output results from the output memory pool.
std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and

View File

@@ -34,13 +34,12 @@ cc_defaults {
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
test_suites: ["general-tests"],
}

View File

@@ -24,6 +24,7 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <gtest/gtest.h>
#include <iostream>
#include "1.0/Callbacks.h"
@@ -37,8 +38,13 @@ namespace neuralnetworks {
namespace V1_1 {
namespace generated_tests {
using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::DataLocation;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
@@ -47,144 +53,112 @@ using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
// Builds a V1_1 HIDL Model from the test-harness TestModel description.
// Identical to the V1_0 translation except that the V1_1 Model additionally
// carries relaxComputationFloat32toFloat16, taken from testModel.isRelaxed.
// CONSTANT_COPY operand data is packed into Model.operandValues;
// CONSTANT_REFERENCE data goes into a single shared-memory pool, with offsets
// assigned in operand-index order and padded via TestBuffer::alignedSize().
Model createModel(const TestModel& testModel) {
    // Model operands.
    hidl_vec<Operand> operands(testModel.operands.size());
    size_t constCopySize = 0, constRefSize = 0;
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        DataLocation loc = {};
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constCopySize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constCopySize += op.data.alignedSize();
        } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
            loc = {.poolIndex = 0,
                   .offset = static_cast<uint32_t>(constRefSize),
                   .length = static_cast<uint32_t>(op.data.size())};
            constRefSize += op.data.alignedSize();
        }
        operands[i] = {.type = static_cast<OperandType>(op.type),
                       .dimensions = op.dimensions,
                       .numberOfConsumers = op.numberOfConsumers,
                       .scale = op.scale,
                       .zeroPoint = op.zeroPoint,
                       .lifetime = static_cast<OperandLifeTime>(op.lifetime),
                       .location = loc};
    }
    // Model operations.
    hidl_vec<Operation> operations(testModel.operations.size());
    std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
                   [](const TestOperation& op) -> Operation {
                       return {.type = static_cast<OperationType>(op.type),
                               .inputs = op.inputs,
                               .outputs = op.outputs};
                   });
    // Constant copies.
    // Copy CONSTANT_COPY data to the offsets assigned in the first loop.
    hidl_vec<uint8_t> operandValues(constCopySize);
    for (uint32_t i = 0; i < testModel.operands.size(); i++) {
        const auto& op = testModel.operands[i];
        if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
            const uint8_t* begin = op.data.get<uint8_t>();
            const uint8_t* end = begin + op.data.size();
            std::copy(begin, end, operandValues.data() + operands[i].location.offset);
        }
    }
    // Shared memory.
    // Only allocated when at least one operand has CONSTANT_REFERENCE lifetime.
    hidl_vec<hidl_memory> pools;
    if (constRefSize > 0) {
        hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
        CHECK_NE(pools[0].size(), 0u);
        // load data
        sp<IMemory> mappedMemory = mapMemory(pools[0]);
        CHECK(mappedMemory.get() != nullptr);
        uint8_t* mappedPtr =
                reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
        CHECK(mappedPtr != nullptr);
        for (uint32_t i = 0; i < testModel.operands.size(); i++) {
            const auto& op = testModel.operands[i];
            if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                const uint8_t* begin = op.data.get<uint8_t>();
                const uint8_t* end = begin + op.data.size();
                std::copy(begin, end, mappedPtr + operands[i].location.offset);
            }
        }
    }
    return {.operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = testModel.inputIndexes,
            .outputIndexes = testModel.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, float fpAtol, float fpRtol) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
const Request request = createRequest(testModel);
int example_no = 1;
for (auto& example : examples) {
SCOPED_TRACE(example_no++);
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
// Launch execution.
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
const bool hasFloat16Inputs = !inputs.float16Operands.empty();
if (hasRelaxedFloat32Model || hasFloat16Inputs) {
// TODO: Adjust the error limit based on testing.
// If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
fpAtol = 5.0f * 0.0009765625f;
// Set the relative tolerance to be 5ULP of the corresponding FP precision.
fpRtol = 5.0f * 0.0009765625f;
}
// Retrieve execution status.
executionCallback->wait();
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
// Compute offset for inputs 1 and so on
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
}
}
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(request);
MixedTyped test; // holding test results
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
ASSERT_NE(0ull, pools[INPUT].size());
ASSERT_NE(0ull, pools[OUTPUT].size());
// load data
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
ASSERT_NE(nullptr, inputMemory.get());
ASSERT_NE(nullptr, outputMemory.get());
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
ASSERT_NE(nullptr, inputPtr);
ASSERT_NE(nullptr, outputPtr);
inputMemory->update();
outputMemory->update();
// Go through all inputs, copy the values
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
outputMemory->commit();
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// validate results
outputMemory->read();
copy_back(&test, outputs_info, outputPtr);
outputMemory->commit();
// Filter out don't cares
MixedTyped filtered_golden = filter(golden, is_ignored);
MixedTyped filtered_test = filter(test, is_ignored);
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
}
// We want "close-enough" results.
checkResults(testModel, outputs);
}
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
Model model = create_model();
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
Model model = createModel(testModel);
// see if service can handle model
bool fullySupportsModel = false;
@@ -199,7 +173,6 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
@@ -223,8 +196,7 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f);
EvaluatePreparedModel(preparedModel, testModel);
}
} // namespace generated_tests

View File

@@ -18,9 +18,6 @@
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <functional>
#include <vector>
#include "TestHarness.h"
namespace android {
@@ -29,9 +26,9 @@ namespace neuralnetworks {
namespace V1_1 {
namespace generated_tests {
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
std::function<bool(int)> is_ignored,
const std::vector<::test_helper::MixedTypedExample>& examples);
Model createModel(const ::test_helper::TestModel& testModel);
void Execute(const sp<V1_1::IDevice>& device, const ::test_helper::TestModel& testModel);
} // namespace generated_tests
} // namespace V1_1

View File

@@ -14,20 +14,11 @@
* limitations under the License.
*/
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
namespace android::hardware::neuralnetworks::V1_1::generated_tests {
using namespace android::hardware::neuralnetworks::V1_1::vts::functional;

View File

@@ -16,14 +16,8 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
@@ -35,13 +29,8 @@ namespace functional {
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_1::IPreparedModel;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::for_all;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -87,103 +76,10 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
///////////////////////////// ENTRY POINT //////////////////////////////////
// Builds one fully-populated V1_0::Request per MixedTypedExample.
//
// For each example this function:
//   1. Fills in RequestArgument metadata (pool index, byte offset, byte length)
//      for every input and output operand, packing operands back-to-back into
//      two shared memory pools (pool INPUT for inputs, pool OUTPUT for outputs).
//   2. Allocates both pools, maps the input pool, and copies the example's
//      input data into it at the computed offsets.
//
// Returns an empty vector if any shared-memory allocation or mapping fails, so
// callers must treat {} as "could not build requests", not "no examples".
//
// NOTE(review): the for_all callback is presumably invoked once per operand
// with (operand index, data pointer, byte size) — confirm against the
// TestHarness.h definition; the offset bookkeeping below relies on it.
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
    // Pool indices referenced by the RequestArgument locations below.
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;
    std::vector<Request> requests;
    for (auto& example : examples) {
        const MixedTyped& inputs = example.operands.first;
        const MixedTyped& outputs = example.operands.second;
        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;
        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                    .location = {.poolIndex = INPUT,
                                 .offset = 0,
                                 .length = static_cast<uint32_t>(s)},
                    .dimensions = {},
            };
            RequestArgument arg_empty = {
                    .hasNoValue = true,
            };
            // A zero-sized input is represented as an omitted (hasNoValue) operand.
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                // Omitted operands keep offset 0 but still advance the cursor
                // by their (zero) length.
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }
        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                    .location = {.poolIndex = OUTPUT,
                                 .offset = 0,
                                 .length = static_cast<uint32_t>(s)},
                    .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        // Bail out (rather than assert) on allocation failure: this helper is
        // used outside gtest assertions, so failure is reported via {}.
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }
        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }
        // initialize pool
        // update()/commit() bracket the CPU write to the shared memory region.
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();
        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }
    return requests;
}
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
// validate each request
for (const Request& request : requests) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
}
// Runs the negative request-validation cases against an already-prepared
// model. removeInputTest/removeOutputTest are file-local helpers (defined
// above, outside this excerpt) that exercise the driver with a request
// mutated to drop one input/output at a time.
void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
                                     const Request& request) {
    removeInputTest(preparedModel, request);
    removeOutputTest(preparedModel, request);
}
} // namespace functional

View File

@@ -122,7 +122,7 @@ void NeuralnetworksHidlTest::TearDown() {
::testing::VtsHalHidlTargetTestBase::TearDown();
}
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
void ValidationTest::validateEverything(const Model& model, const Request& request) {
validateModel(model);
// create IPreparedModel
@@ -132,7 +132,7 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<Re
return;
}
validateRequests(preparedModel, requests);
validateRequest(preparedModel, request);
}
} // namespace functional

View File

@@ -72,12 +72,11 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
protected:
void validateEverything(const Model& model, const std::vector<Request>& request);
void validateEverything(const Model& model, const Request& request);
private:
void validateModel(const Model& model);
void validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
};
// Tag for the generated tests

View File

@@ -37,13 +37,12 @@ cc_defaults {
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
test_suites: ["general-tests"],
}
@@ -75,8 +74,8 @@ cc_test {
srcs: [
"BasicTests.cpp",
":VtsHalNeuralNetworksV1_2_all_generated_V1_2_tests",
":VtsHalNeuralNetworksV1_2_mobilenets",
"CompilationCachingTests.cpp",
":VtsHalNeuralNetworksV1_2_mobilenets", // CompilationCachingTests depend on MobileNets.
"ValidateBurst.cpp",
],
}

View File

@@ -35,22 +35,14 @@
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_2 {
// Forward declaration of the mobilenet generated test models in
// frameworks/ml/nn/runtime/test/generated/.
namespace generated_tests::mobilenet_224_gender_basic_fixed {
Model createTestModel();
const ::test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_224_gender_basic_fixed
} // namespace android::hardware::neuralnetworks::V1_2
namespace generated_tests::mobilenet_224_gender_basic_fixed {
std::vector<test_helper::MixedTypedExample>& get_examples();
} // namespace generated_tests::mobilenet_224_gender_basic_fixed
namespace android::hardware::neuralnetworks::V1_2::generated_tests::mobilenet_quantized {
Model createTestModel();
} // namespace android::hardware::neuralnetworks::V1_2::generated_tests::mobilenet_quantized
namespace generated_tests::mobilenet_quantized {
std::vector<test_helper::MixedTypedExample>& get_examples();
const ::test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_quantized
namespace android {
@@ -60,49 +52,23 @@ namespace V1_2 {
namespace vts {
namespace functional {
using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
namespace float32_model {
constexpr auto createTestModel = ::android::hardware::neuralnetworks::V1_2::generated_tests::
mobilenet_224_gender_basic_fixed::createTestModel;
constexpr auto get_examples = ::generated_tests::mobilenet_224_gender_basic_fixed::get_examples;
// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
// This function assumes the operation is always ADD.
// Builds the single example for the float32 large chained-ADD model: a scalar
// input of 1.0f and the expected scalar output of 1.0f + len (each of the len
// chained ADD operations contributes +1).
// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
// This function assumes the operation is always ADD.
std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
    const float expectedOutput = static_cast<float>(len) + 1.0f;
    MixedTypedExample example = {
            .operands = {// Input
                         {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
                         // Output
                         {.operandDimensions = {{0, {1}}},
                          .float32Operands = {{0, {expectedOutput}}}}}};
    return {example};
}
constexpr auto get_test_model = ::generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
} // namespace float32_model
namespace quant8_model {
constexpr auto createTestModel = ::android::hardware::neuralnetworks::V1_2::generated_tests::
mobilenet_quantized::createTestModel;
constexpr auto get_examples = ::generated_tests::mobilenet_quantized::get_examples;
// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
// This function assumes the operation is always ADD.
// Builds the single example for the quant8 large chained-ADD model: a scalar
// input of 1 and the expected scalar output of 1 + len (modulo 256, since the
// operand is an 8-bit quantized value).
// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
// This function assumes the operation is always ADD.
std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
    const uint8_t expectedOutput = static_cast<uint8_t>(1 + len);
    MixedTypedExample example = {
            .operands = {// Input
                         {.operandDimensions = {{0, {1}}}, .quant8AsymmOperands = {{0, {1}}}},
                         // Output
                         {.operandDimensions = {{0, {1}}},
                          .quant8AsymmOperands = {{0, {expectedOutput}}}}}};
    return {example};
}
constexpr auto get_test_model = ::generated_tests::mobilenet_quantized::get_test_model;
} // namespace quant8_model
@@ -155,39 +121,34 @@ void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups,
// [1] [1] [1] [1]
//
// This function assumes the operation is either ADD or MUL.
template <typename CppType, OperandType operandType>
Model createLargeTestModelImpl(OperationType op, uint32_t len) {
EXPECT_TRUE(op == OperationType::ADD || op == OperationType::MUL);
template <typename CppType, TestOperandType operandType>
TestModel createLargeTestModelImpl(TestOperationType op, uint32_t len) {
EXPECT_TRUE(op == TestOperationType::ADD || op == TestOperationType::MUL);
// Model operations and operands.
std::vector<Operation> operations(len);
std::vector<Operand> operands(len * 2 + 2);
// The constant buffer pool. This contains the activation scalar, followed by the
// per-operation constant operands.
std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(CppType));
std::vector<TestOperation> operations(len);
std::vector<TestOperand> operands(len * 2 + 2);
// The activation scalar, value = 0.
operands[0] = {
.type = OperandType::INT32,
.type = TestOperandType::INT32,
.dimensions = {},
.numberOfConsumers = len,
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::CONSTANT_COPY,
.location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
.lifetime = TestOperandLifeTime::CONSTANT_COPY,
.data = TestBuffer::createFromVector<int32_t>({0}),
};
memset(operandValues.data(), 0, sizeof(int32_t));
// The buffer value of the constant second operand. The logical value is always 1.0f.
CppType bufferValue;
// The scale of the first and second operand.
float scale1, scale2;
if (operandType == OperandType::TENSOR_FLOAT32) {
if (operandType == TestOperandType::TENSOR_FLOAT32) {
bufferValue = 1.0f;
scale1 = 0.0f;
scale2 = 0.0f;
} else if (op == OperationType::ADD) {
} else if (op == TestOperationType::ADD) {
bufferValue = 1;
scale1 = 1.0f;
scale2 = 1.0f;
@@ -211,9 +172,9 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
.numberOfConsumers = 1,
.scale = scale1,
.zeroPoint = 0,
.lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
: OperandLifeTime::TEMPORARY_VARIABLE),
.location = {},
.lifetime = (i == 0 ? TestOperandLifeTime::MODEL_INPUT
: TestOperandLifeTime::TEMPORARY_VARIABLE),
.data = (i == 0 ? TestBuffer::createFromVector<CppType>({1}) : TestBuffer()),
};
// The second operation input, value = 1.
@@ -223,13 +184,9 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
.numberOfConsumers = 1,
.scale = scale2,
.zeroPoint = 0,
.lifetime = OperandLifeTime::CONSTANT_COPY,
.location = {.poolIndex = 0,
.offset = static_cast<uint32_t>(i * sizeof(CppType) + sizeof(int32_t)),
.length = sizeof(CppType)},
.lifetime = TestOperandLifeTime::CONSTANT_COPY,
.data = TestBuffer::createFromVector<CppType>({bufferValue}),
};
memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(CppType), &bufferValue,
sizeof(CppType));
// The operation. All operations share the same activation scalar.
// The output operand is created as an input in the next iteration of the loop, in the case
@@ -242,6 +199,10 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
};
}
// For TestOperationType::ADD, output = 1 + 1 * len = len + 1
// For TestOperationType::MUL, output = 1 * 1 ^ len = 1
CppType outputResult = static_cast<CppType>(op == TestOperationType::ADD ? len + 1u : 1u);
// The model output.
operands.back() = {
.type = operandType,
@@ -249,21 +210,16 @@ Model createLargeTestModelImpl(OperationType op, uint32_t len) {
.numberOfConsumers = 0,
.scale = scale1,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_OUTPUT,
.location = {},
.lifetime = TestOperandLifeTime::MODEL_OUTPUT,
.data = TestBuffer::createFromVector<CppType>({outputResult}),
};
const std::vector<uint32_t> inputIndexes = {1};
const std::vector<uint32_t> outputIndexes = {len * 2 + 1};
const std::vector<hidl_memory> pools = {};
return {
.operands = operands,
.operations = operations,
.inputIndexes = inputIndexes,
.outputIndexes = outputIndexes,
.operandValues = operandValues,
.pools = pools,
.operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = {1},
.outputIndexes = {len * 2 + 1},
.isRelaxed = false,
};
}
@@ -332,35 +288,21 @@ class CompilationCachingTestBase : public NeuralnetworksHidlTest {
// Model and examples creators. According to kOperandType, the following methods will return
// either float32 model/examples or the quant8 variant.
Model createTestModel() {
TestModel createTestModel() {
if (kOperandType == OperandType::TENSOR_FLOAT32) {
return float32_model::createTestModel();
return float32_model::get_test_model();
} else {
return quant8_model::createTestModel();
return quant8_model::get_test_model();
}
}
std::vector<MixedTypedExample> get_examples() {
TestModel createLargeTestModel(OperationType op, uint32_t len) {
if (kOperandType == OperandType::TENSOR_FLOAT32) {
return float32_model::get_examples();
return createLargeTestModelImpl<float, TestOperandType::TENSOR_FLOAT32>(
static_cast<TestOperationType>(op), len);
} else {
return quant8_model::get_examples();
}
}
Model createLargeTestModel(OperationType op, uint32_t len) {
if (kOperandType == OperandType::TENSOR_FLOAT32) {
return createLargeTestModelImpl<float, OperandType::TENSOR_FLOAT32>(op, len);
} else {
return createLargeTestModelImpl<uint8_t, OperandType::TENSOR_QUANT8_ASYMM>(op, len);
}
}
std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
if (kOperandType == OperandType::TENSOR_FLOAT32) {
return float32_model::getLargeModelExamples(len);
} else {
return quant8_model::getLargeModelExamples(len);
return createLargeTestModelImpl<uint8_t, TestOperandType::TENSOR_QUANT8_ASYMM>(
static_cast<TestOperationType>(op), len);
}
}
@@ -482,8 +424,9 @@ class CompilationCachingTest : public CompilationCachingTestBase,
TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
sp<IPreparedModel> preparedModel = nullptr;
// Save the compilation to cache.
@@ -491,7 +434,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModel, modelCache, dataCache);
saveModelToCache(model, modelCache, dataCache);
}
// Retrieve preparedModel from cache.
@@ -516,15 +459,15 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
}
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
}
TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
sp<IPreparedModel> preparedModel = nullptr;
// Save the compilation to cache.
@@ -545,7 +488,7 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
sizeof(dummyBytes));
}
saveModelToCache(testModel, modelCache, dataCache);
saveModelToCache(model, modelCache, dataCache);
}
// Retrieve preparedModel from cache.
@@ -579,15 +522,15 @@ TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
}
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
}
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
// Test with number of model cache files greater than mNumModelCache.
{
@@ -598,12 +541,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache.pop_back();
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -625,12 +566,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache.push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -651,12 +590,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache.pop_back();
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -678,12 +615,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache.push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -698,15 +633,16 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
// Save the compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModel, modelCache, dataCache);
saveModelToCache(model, modelCache, dataCache);
}
// Test with number of model cache files greater than mNumModelCache.
@@ -778,8 +714,9 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
// Go through each handle in model cache, test with NumFd greater than 1.
for (uint32_t i = 0; i < mNumModelCache; i++) {
@@ -790,12 +727,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache[i].pop_back();
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -817,12 +752,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mModelCache[i].push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -843,12 +776,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache[i].pop_back();
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -870,12 +801,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
mDataCache[i].push_back(tmp);
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -890,15 +819,16 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
// Save the compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModel, modelCache, dataCache);
saveModelToCache(model, modelCache, dataCache);
}
// Go through each handle in model cache, test with NumFd greater than 1.
@@ -970,8 +900,9 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
@@ -983,12 +914,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
createCacheHandles(mDataCache, dataCacheMode, &dataCache);
modelCacheMode[i] = AccessMode::READ_WRITE;
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -1008,12 +937,10 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
createCacheHandles(mDataCache, dataCacheMode, &dataCache);
dataCacheMode[i] = AccessMode::READ_WRITE;
sp<IPreparedModel> preparedModel = nullptr;
saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
get_examples(),
testModel.relaxComputationFloat32toFloat16,
generated_tests::EvaluatePreparedModel(preparedModel, testModel,
/*testDynamicOutputShape=*/false);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
@@ -1028,8 +955,9 @@ TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
@@ -1038,7 +966,7 @@ TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModel, modelCache, dataCache);
saveModelToCache(model, modelCache, dataCache);
}
// Go through each handle in model cache, test with invalid access mode.
@@ -1106,12 +1034,14 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
if (checkEarlyTermination(testModelMul)) return;
const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
if (checkEarlyTermination(testModelAdd)) return;
const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
const Model modelMul = generated_tests::createModel(testModelMul);
if (checkEarlyTermination(modelMul)) return;
const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
const Model modelAdd = generated_tests::createModel(testModelAdd);
if (checkEarlyTermination(modelAdd)) return;
// Save the testModelMul compilation to cache.
// Save the modelMul compilation to cache.
auto modelCacheMul = mModelCache;
for (auto& cache : modelCacheMul) {
cache[0].append("_mul");
@@ -1120,15 +1050,15 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModelMul, modelCache, dataCache);
saveModelToCache(modelMul, modelCache, dataCache);
}
// Use a different token for testModelAdd.
// Use a different token for modelAdd.
mToken[0]++;
// This test is probabilistic, so we run it multiple times.
for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
// Save the testModelAdd compilation to cache.
// Save the modelAdd compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
@@ -1136,7 +1066,7 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
// Spawn a thread to copy the cache content concurrently while saving to cache.
std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
saveModelToCache(testModelAdd, modelCache, dataCache);
saveModelToCache(modelAdd, modelCache, dataCache);
thread.join();
}
@@ -1155,11 +1085,8 @@ TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
generated_tests::EvaluatePreparedModel(
preparedModel, [](int) { return false; },
getLargeModelExamples(kLargeModelSize),
testModelAdd.relaxComputationFloat32toFloat16,
/*testDynamicOutputShape=*/false);
generated_tests::EvaluatePreparedModel(preparedModel, testModelAdd,
/*testDynamicOutputShape=*/false);
}
}
}
@@ -1169,12 +1096,14 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
if (checkEarlyTermination(testModelMul)) return;
const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
if (checkEarlyTermination(testModelAdd)) return;
const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
const Model modelMul = generated_tests::createModel(testModelMul);
if (checkEarlyTermination(modelMul)) return;
const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
const Model modelAdd = generated_tests::createModel(testModelAdd);
if (checkEarlyTermination(modelAdd)) return;
// Save the testModelMul compilation to cache.
// Save the modelMul compilation to cache.
auto modelCacheMul = mModelCache;
for (auto& cache : modelCacheMul) {
cache[0].append("_mul");
@@ -1183,20 +1112,20 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModelMul, modelCache, dataCache);
saveModelToCache(modelMul, modelCache, dataCache);
}
// Use a different token for testModelAdd.
// Use a different token for modelAdd.
mToken[0]++;
// This test is probabilistic, so we run it multiple times.
for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
// Save the testModelAdd compilation to cache.
// Save the modelAdd compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModelAdd, modelCache, dataCache);
saveModelToCache(modelAdd, modelCache, dataCache);
}
// Retrieve preparedModel from cache.
@@ -1218,11 +1147,8 @@ TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
generated_tests::EvaluatePreparedModel(
preparedModel, [](int) { return false; },
getLargeModelExamples(kLargeModelSize),
testModelAdd.relaxComputationFloat32toFloat16,
/*testDynamicOutputShape=*/false);
generated_tests::EvaluatePreparedModel(preparedModel, testModelAdd,
/*testDynamicOutputShape=*/false);
}
}
}
@@ -1232,12 +1158,14 @@ TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
if (checkEarlyTermination(testModelMul)) return;
const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
if (checkEarlyTermination(testModelAdd)) return;
const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
const Model modelMul = generated_tests::createModel(testModelMul);
if (checkEarlyTermination(modelMul)) return;
const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
const Model modelAdd = generated_tests::createModel(testModelAdd);
if (checkEarlyTermination(modelAdd)) return;
// Save the testModelMul compilation to cache.
// Save the modelMul compilation to cache.
auto modelCacheMul = mModelCache;
for (auto& cache : modelCacheMul) {
cache[0].append("_mul");
@@ -1246,21 +1174,21 @@ TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModelMul, modelCache, dataCache);
saveModelToCache(modelMul, modelCache, dataCache);
}
// Use a different token for testModelAdd.
// Use a different token for modelAdd.
mToken[0]++;
// Save the testModelAdd compilation to cache.
// Save the modelAdd compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModelAdd, modelCache, dataCache);
saveModelToCache(modelAdd, modelCache, dataCache);
}
// Replace the model cache of testModelAdd with testModelMul.
// Replace the model cache of modelAdd with modelMul.
copyCacheFiles(modelCacheMul, mModelCache);
// Retrieve the preparedModel from cache, expect failure.
@@ -1336,15 +1264,16 @@ class CompilationCachingSecurityTest
// The modifier accepts one pointer argument "skip" as the returning value, indicating
// whether the test should be skipped or not.
void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
const TestModel& testModel = createTestModel();
const Model model = generated_tests::createModel(testModel);
if (checkEarlyTermination(model)) return;
// Save the compilation to cache.
{
hidl_vec<hidl_handle> modelCache, dataCache;
createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
saveModelToCache(testModel, modelCache, dataCache);
saveModelToCache(model, modelCache, dataCache);
}
bool skip = false;

View File

@@ -31,7 +31,10 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <numeric>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
@@ -46,7 +49,10 @@ namespace neuralnetworks {
namespace V1_2 {
namespace generated_tests {
using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::DataLocation;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
@@ -60,29 +66,122 @@ using ::android::hardware::neuralnetworks::V1_2::Timing;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::for_each;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
static bool isZeroSized(const MixedTyped& example, uint32_t index) {
for (auto i : example.operandDimensions.at(index)) {
if (i == 0) return true;
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
size_t constCopySize = 0, constRefSize = 0;
for (uint32_t i = 0; i < testModel.operands.size(); i++) {
const auto& op = testModel.operands[i];
DataLocation loc = {};
if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
loc = {.poolIndex = 0,
.offset = static_cast<uint32_t>(constCopySize),
.length = static_cast<uint32_t>(op.data.size())};
constCopySize += op.data.alignedSize();
} else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
loc = {.poolIndex = 0,
.offset = static_cast<uint32_t>(constRefSize),
.length = static_cast<uint32_t>(op.data.size())};
constRefSize += op.data.alignedSize();
}
Operand::ExtraParams extraParams;
if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
extraParams.channelQuant(SymmPerChannelQuantParams{
.scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
}
operands[i] = {.type = static_cast<OperandType>(op.type),
.dimensions = op.dimensions,
.numberOfConsumers = op.numberOfConsumers,
.scale = op.scale,
.zeroPoint = op.zeroPoint,
.lifetime = static_cast<OperandLifeTime>(op.lifetime),
.location = loc,
.extraParams = std::move(extraParams)};
}
return false;
// Model operations.
hidl_vec<Operation> operations(testModel.operations.size());
std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
[](const TestOperation& op) -> Operation {
return {.type = static_cast<OperationType>(op.type),
.inputs = op.inputs,
.outputs = op.outputs};
});
// Constant copies.
hidl_vec<uint8_t> operandValues(constCopySize);
for (uint32_t i = 0; i < testModel.operands.size(); i++) {
const auto& op = testModel.operands[i];
if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
const uint8_t* begin = op.data.get<uint8_t>();
const uint8_t* end = begin + op.data.size();
std::copy(begin, end, operandValues.data() + operands[i].location.offset);
}
}
// Shared memory.
hidl_vec<hidl_memory> pools = {};
if (constRefSize > 0) {
hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
CHECK_NE(pools[0].size(), 0u);
// load data
sp<IMemory> mappedMemory = mapMemory(pools[0]);
CHECK(mappedMemory.get() != nullptr);
uint8_t* mappedPtr =
reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
CHECK(mappedPtr != nullptr);
for (uint32_t i = 0; i < testModel.operands.size(); i++) {
const auto& op = testModel.operands[i];
if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
const uint8_t* begin = op.data.get<uint8_t>();
const uint8_t* end = begin + op.data.size();
std::copy(begin, end, mappedPtr + operands[i].location.offset);
}
}
}
return {.operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = testModel.inputIndexes,
.outputIndexes = testModel.outputIndexes,
.operandValues = std::move(operandValues),
.pools = std::move(pools),
.relaxComputationFloat32toFloat16 = testModel.isRelaxed};
}
static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
return byteSize > 1u;
}
static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
auto& length = request->outputs[outputIndex].location.length;
ASSERT_GT(length, 1u);
length -= 1u;
}
static void makeOutputDimensionsUnspecified(Model* model) {
for (auto i : model->outputIndexes) {
auto& dims = model->operands[i].dimensions;
std::fill(dims.begin(), dims.end(), 0);
}
}
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
return preparedModel->execute_1_2(request, measure, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
hidl_vec<OutputShape>* outputShapes,
Timing* timing) {
@@ -105,294 +204,168 @@ static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
}
enum class Executor { ASYNC, SYNC, BURST };
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
const float kDefaultAtol = 1e-5f;
const float kDefaultRtol = 1e-5f;
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
Executor executor, MeasureTiming measure, OutputType outputType) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
int example_no = 1;
for (auto& example : examples) {
SCOPED_TRACE(example_no++);
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
Request request = createRequest(testModel);
if (outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
const bool hasFloat16Inputs = !inputs.float16Operands.empty();
if (hasRelaxedFloat32Model || hasFloat16Inputs) {
// TODO: Adjust the error limit based on testing.
// If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
fpAtol = 5.0f * 0.0009765625f;
// Set the relative tolerance to be 5ULP of the corresponding FP precision.
fpRtol = 5.0f * 0.0009765625f;
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
switch (executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executionLaunchStatus =
ExecutePreparedModel(preparedModel, request, measure, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
executionStatus = executionCallback->getStatus();
outputShapes = executionCallback->getOutputShapes();
timing = executionCallback->getTiming();
break;
}
case Executor::SYNC: {
SCOPED_TRACE("synchronous");
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
// Compute offset for inputs 1 and so on
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
// execute
Return<ErrorStatus> executionReturnStatus =
ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
break;
}
case Executor::BURST: {
SCOPED_TRACE("burst");
// create burst
const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
CreateBurst(preparedModel);
ASSERT_NE(nullptr, controller.get());
// create memory keys
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
}
MixedTyped test; // holding test results
// execute burst
std::tie(executionStatus, outputShapes, timing) =
controller->compute(request, measure, keys);
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
bool sizeLargerThanOne = true;
for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
if (index == 0) {
// On OutputType::INSUFFICIENT, set the output operand with index 0 with
// buffer size one byte less than needed.
if (outputType == OutputType::INSUFFICIENT) {
if (s > 1 && !isZeroSized(golden, index)) {
s -= 1;
} else {
sizeLargerThanOne = false;
}
}
}
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
// If output0 does not have size larger than one byte,
// we can not provide an insufficient buffer
if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
ASSERT_NE(0ull, pools[INPUT].size());
ASSERT_NE(0ull, pools[OUTPUT].size());
// load data
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
ASSERT_NE(nullptr, inputMemory.get());
ASSERT_NE(nullptr, outputMemory.get());
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
ASSERT_NE(nullptr, inputPtr);
ASSERT_NE(nullptr, outputPtr);
inputMemory->update();
outputMemory->update();
// Go through all inputs, copy the values
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
outputMemory->commit();
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
switch (executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
ExecutePreparedModel(preparedModel, request, measure, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
executionStatus = executionCallback->getStatus();
outputShapes = executionCallback->getOutputShapes();
timing = executionCallback->getTiming();
break;
}
case Executor::SYNC: {
SCOPED_TRACE("synchronous");
// execute
Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
preparedModel, request, measure, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
break;
}
case Executor::BURST: {
SCOPED_TRACE("burst");
// create burst
const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
CreateBurst(preparedModel);
ASSERT_NE(nullptr, controller.get());
// create memory keys
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
// execute burst
std::tie(executionStatus, outputShapes, timing) =
controller->compute(request, measure, keys);
break;
}
}
if (outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"execute model that it does not support."
<< std::endl;
GTEST_SKIP();
}
if (measure == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
}
}
switch (outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be either
// either empty, or have the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_TRUE(outputShapes.size() == 0 ||
outputShapes.size() == test.operandDimensions.size());
break;
case OutputType::UNSPECIFIED:
// If the model output operands are not fully specified, outputShapes must have
// the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
break;
case OutputType::INSUFFICIENT:
ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
ASSERT_FALSE(outputShapes[0].isSufficient);
return;
}
// Go through all outputs, overwrite output dimensions with returned output shapes
if (outputShapes.size() > 0) {
for_each<uint32_t>(test.operandDimensions,
[&outputShapes](int idx, std::vector<uint32_t>& dim) {
dim = outputShapes[idx].dimensions;
});
}
// validate results
outputMemory->read();
copy_back(&test, outputs_info, outputPtr);
outputMemory->commit();
// Filter out don't cares
MixedTyped filtered_golden = filter(golden, is_ignored);
MixedTyped filtered_test = filter(test, is_ignored);
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
if (example.expectedMultinomialDistributionTolerance > 0) {
expectMultinomialDistributionWithinTolerance(test, example);
break;
}
}
}
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
OutputType outputType) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
kDefaultRtol, executor, measure, outputType);
if (outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"execute model that it does not support."
<< std::endl;
GTEST_SKIP();
}
if (measure == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
}
}
switch (outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be either
// either empty, or have the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_TRUE(outputShapes.size() == 0 ||
outputShapes.size() == testModel.outputIndexes.size());
break;
case OutputType::UNSPECIFIED:
// If the model output operands are not fully specified, outputShapes must have
// the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
break;
case OutputType::INSUFFICIENT:
ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
ASSERT_FALSE(outputShapes[0].isSufficient);
return;
}
// Go through all outputs, check returned output shapes.
for (uint32_t i = 0; i < outputShapes.size(); i++) {
EXPECT_TRUE(outputShapes[i].isSufficient);
const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
const std::vector<uint32_t> actual = outputShapes[i].dimensions;
EXPECT_EQ(expect, actual);
}
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
}
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
bool testDynamicOutputShape) {
if (testDynamicOutputShape) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::INSUFFICIENT);
} else {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
OutputType::FULLY_SPECIFIED);
}
}
@@ -411,7 +384,6 @@ void PrepareModel(const sp<IDevice>& device, const Model& model,
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
@@ -438,17 +410,18 @@ void PrepareModel(const sp<IDevice>& device, const Model& model,
ASSERT_NE(nullptr, preparedModel->get());
}
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
bool testDynamicOutputShape) {
Model model = create_model();
void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
Model model = createModel(testModel);
if (testDynamicOutputShape) {
makeOutputDimensionsUnspecified(&model);
}
sp<IPreparedModel> preparedModel = nullptr;
PrepareModel(device, model, &preparedModel);
if (preparedModel == nullptr) {
GTEST_SKIP();
}
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
}
} // namespace generated_tests

View File

@@ -30,18 +30,15 @@ namespace neuralnetworks {
namespace V1_2 {
namespace generated_tests {
using ::test_helper::MixedTypedExample;
Model createModel(const ::test_helper::TestModel& testModel);
void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
sp<V1_2::IPreparedModel>* preparedModel);
void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
const ::test_helper::TestModel& testModel, bool testDynamicOutputShape);
void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
void Execute(const sp<V1_2::IDevice>& device, const ::test_helper::TestModel& testModel,
bool testDynamicOutputShape = false);
} // namespace generated_tests

View File

@@ -14,21 +14,11 @@
* limitations under the License.
*/
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
std::vector<Request> createRequests(const std::vector<::test_helper::MixedTypedExample>& examples);
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
namespace android::hardware::neuralnetworks::V1_2::generated_tests {
using namespace ::android::hardware::neuralnetworks::V1_2::vts::functional;

View File

@@ -238,7 +238,7 @@ static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver*
///////////////////////// BURST VALIDATION TESTS ////////////////////////////////////
static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
const Request& request) {
// create burst
std::unique_ptr<RequestChannelSender> sender;
std::unique_ptr<ResultChannelReceiver> receiver;
@@ -249,35 +249,32 @@ static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
ASSERT_NE(nullptr, receiver.get());
ASSERT_NE(nullptr, context.get());
// validate each request
for (const Request& request : requests) {
// load memory into callback slots
std::vector<intptr_t> keys;
keys.reserve(request.pools.size());
std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
[](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
// load memory into callback slots
std::vector<intptr_t> keys;
keys.reserve(request.pools.size());
std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
[](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
// ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
// subsequent slot validation testing)
ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
return slot != std::numeric_limits<int32_t>::max();
}));
// ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
// subsequent slot validation testing)
ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
return slot != std::numeric_limits<int32_t>::max();
}));
// serialize the request
const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
// serialize the request
const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
// validations
removeDatumTest(sender.get(), receiver.get(), serialized);
addDatumTest(sender.get(), receiver.get(), serialized);
mutateDatumTest(sender.get(), receiver.get(), serialized);
}
// validations
removeDatumTest(sender.get(), receiver.get(), serialized);
addDatumTest(sender.get(), receiver.get(), serialized);
mutateDatumTest(sender.get(), receiver.get(), serialized);
}
// This test validates that when the Result message size exceeds length of the
// result FMQ, the service instance gracefully fails and returns an error.
static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
const Request& request) {
// create regular burst
std::shared_ptr<ExecutionBurstController> controllerRegular;
ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
@@ -290,43 +287,40 @@ static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
ASSERT_NE(nullptr, controllerSmall.get());
// validate each request
for (const Request& request : requests) {
// load memory into callback slots
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
// collect serialized result by running regular burst
const auto [statusRegular, outputShapesRegular, timingRegular] =
controllerRegular->compute(request, MeasureTiming::NO, keys);
// skip test if regular burst output isn't useful for testing a failure
// caused by having too small of a length for the result FMQ
const std::vector<FmqResultDatum> serialized =
::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
if (statusRegular != ErrorStatus::NONE ||
serialized.size() <= kExecutionBurstChannelSmallLength) {
continue;
}
// by this point, execution should fail because the result channel isn't
// large enough to return the serialized result
const auto [statusSmall, outputShapesSmall, timingSmall] =
controllerSmall->compute(request, MeasureTiming::NO, keys);
EXPECT_NE(ErrorStatus::NONE, statusSmall);
EXPECT_EQ(0u, outputShapesSmall.size());
EXPECT_TRUE(badTiming(timingSmall));
// load memory into callback slots
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
// collect serialized result by running regular burst
const auto [statusRegular, outputShapesRegular, timingRegular] =
controllerRegular->compute(request, MeasureTiming::NO, keys);
// skip test if regular burst output isn't useful for testing a failure
// caused by having too small of a length for the result FMQ
const std::vector<FmqResultDatum> serialized =
::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
if (statusRegular != ErrorStatus::NONE ||
serialized.size() <= kExecutionBurstChannelSmallLength) {
return;
}
// by this point, execution should fail because the result channel isn't
// large enough to return the serialized result
const auto [statusSmall, outputShapesSmall, timingSmall] =
controllerSmall->compute(request, MeasureTiming::NO, keys);
EXPECT_NE(ErrorStatus::NONE, statusSmall);
EXPECT_EQ(0u, outputShapesSmall.size());
EXPECT_TRUE(badTiming(timingSmall));
}
///////////////////////////// ENTRY POINT //////////////////////////////////
void ValidationTest::validateBurst(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, requests));
ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, requests));
const Request& request) {
ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, request));
ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request));
}
} // namespace functional

View File

@@ -16,14 +16,9 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
@@ -35,12 +30,7 @@ namespace V1_2 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -161,119 +151,23 @@ static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Requ
///////////////////////////// ENTRY POINT //////////////////////////////////
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
std::vector<Request> requests;
for (auto& example : examples) {
const MixedTyped& inputs = example.operands.first;
const MixedTyped& outputs = example.operands.second;
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
// Compute offset for inputs 1 and so on
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
}
}
// Go through all outputs, initialize RequestArgument descriptors
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
return {};
}
// map pool
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
if (inputMemory == nullptr) {
return {};
}
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
if (inputPtr == nullptr) {
return {};
}
// initialize pool
inputMemory->update();
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
}
return requests;
}
void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
// validate each request
for (const Request& request : requests) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
}
void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
const Request& request) {
removeInputTest(preparedModel, request);
removeOutputTest(preparedModel, request);
}
void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
for (const Request& request : requests) {
SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
Return<void> executeStatus = preparedModel->executeSynchronously(
request, MeasureTiming::NO,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
ASSERT_NE(ErrorStatus::NONE, error);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));
});
ASSERT_TRUE(executeStatus.isOk());
}
const Request& request) {
SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
Return<void> executeStatus = preparedModel->executeSynchronously(
request, MeasureTiming::NO,
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
ASSERT_NE(ErrorStatus::NONE, error);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));
});
ASSERT_TRUE(executeStatus.isOk());
}
} // namespace functional

View File

@@ -126,7 +126,7 @@ void NeuralnetworksHidlTest::TearDown() {
::testing::VtsHalHidlTargetTestBase::TearDown();
}
void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
void ValidationTest::validateEverything(const Model& model, const Request& request) {
validateModel(model);
// create IPreparedModel
@@ -136,11 +136,11 @@ void ValidationTest::validateEverything(const Model& model, const std::vector<Re
return;
}
validateRequests(preparedModel, requests);
validateBurst(preparedModel, requests);
validateRequest(preparedModel, request);
validateBurst(preparedModel, request);
}
void ValidationTest::validateFailure(const Model& model, const std::vector<Request>& requests) {
void ValidationTest::validateFailure(const Model& model, const Request& request) {
// TODO: Should this always succeed?
// What if the invalid input is part of the model (i.e., a parameter).
validateModel(model);
@@ -151,7 +151,7 @@ void ValidationTest::validateFailure(const Model& model, const std::vector<Reque
return;
}
validateRequestFailure(preparedModel, requests);
validateRequestFailure(preparedModel, request);
}
sp<IPreparedModel> getPreparedModel_1_2(

View File

@@ -68,20 +68,16 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
sp<IDevice> device;
};
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
protected:
void validateEverything(const Model& model, const std::vector<Request>& requests);
void validateFailure(const Model& model, const std::vector<Request>& requests);
void validateEverything(const Model& model, const Request& request);
void validateFailure(const Model& model, const Request& request);
private:
void validateModel(const Model& model);
void validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateBurst(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request);
};
// Tag for the generated tests