Add VTS tests for reusable execution. am: 72e06c2843

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1954276

Change-Id: I6c99102846cb87257ce45d59552102ab2ee4f837
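This change extends the NNAPI VTS harness to cover reusable executions: an IExecution object created once from an IPreparedModel and then computed from repeatedly. The generated tests gain a `reusable` dimension in their parameter sweep, and the request-validation tests gain a `validateReusableExecution` helper. Below is a minimal sketch of the call pattern under test, using only names that appear in the diff (`createReusableExecution`, `executeSynchronously`, `kNoDeadline`), with error handling trimmed to the happy path; it is an illustration, not the full VTS logic.

    // Sketch of the reusable-execution pattern exercised by these tests.
    // The request, timing flag, and loop timeout are bound once at creation
    // time; the same IExecution is then computed from more than once.
    std::shared_ptr<IExecution> execution;
    auto status = preparedModel->createReusableExecution(
            request, /*measureTiming=*/false, loopTimeoutDurationNs, &execution);
    ASSERT_TRUE(status.isOk());
    ASSERT_NE(nullptr, execution.get());

    for (int pass = 0; pass < 2; ++pass) {
        ExecutionResult result;
        ASSERT_TRUE(execution->executeSynchronously(kNoDeadline, &result).isOk());
    }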
@@ -58,25 +58,52 @@ struct TestConfig {
     bool measureTiming;
     OutputType outputType;
     MemoryType memoryType;
+    bool reusable;
     // `reportSkipping` indicates if a test should print an info message in case
     // it is skipped. The field is set to true by default and is set to false in
     // quantization coupling tests to suppress skipping a test
     bool reportSkipping;
-    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType)
+    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+               bool reusable)
         : executor(executor),
           measureTiming(measureTiming),
           outputType(outputType),
           memoryType(memoryType),
+          reusable(reusable),
           reportSkipping(true) {}
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
-               bool reportSkipping)
+               bool reusable, bool reportSkipping)
         : executor(executor),
           measureTiming(measureTiming),
          outputType(outputType),
          memoryType(memoryType),
+          reusable(reusable),
           reportSkipping(reportSkipping) {}
 };
 
+std::string toString(OutputType type) {
+    switch (type) {
+        case OutputType::FULLY_SPECIFIED:
+            return "FULLY_SPECIFIED";
+        case OutputType::UNSPECIFIED:
+            return "UNSPECIFIED";
+        case OutputType::INSUFFICIENT:
+            return "INSUFFICIENT";
+        case OutputType::MISSED_DEADLINE:
+            return "MISSED_DEADLINE";
+    }
+}
+
+std::string toString(const TestConfig& config) {
+    std::stringstream ss;
+    ss << "TestConfig{.executor=" << toString(config.executor)
+       << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
+       << ", .outputType=" << toString(config.outputType)
+       << ", .memoryType=" << toString(config.memoryType)
+       << ", .reusable=" << (config.reusable ? "true" : "false") << "}";
+    return ss.str();
+}
+
 enum class IOType { INPUT, OUTPUT };
 
 class DeviceMemoryAllocator {
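The `toString` helpers added above are not dead weight: a later hunk wires them into `SCOPED_TRACE` so that when one combination in the parameter sweep fails, gtest reports which `TestConfig` was active. A short usage sketch, assuming the surrounding VTS types; the trace call is the point here:

    // Sketch: each sweep iteration labels its assertions with the active
    // config, so a failure reports e.g. "TestConfig{.executor=SYNC,
    // .measureTiming=false, .outputType=FULLY_SPECIFIED,
    // .memoryType=ASHMEM, .reusable=true}".
    const TestConfig testConfig(Executor::SYNC, /*measureTiming=*/false,
                                OutputType::FULLY_SPECIFIED, MemoryType::ASHMEM,
                                /*reusable=*/true);
    SCOPED_TRACE(toString(testConfig));
    EvaluatePreparedModel(device, preparedModel, testModel, testConfig);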
@@ -558,209 +585,241 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
         loopTimeoutDurationNs = 1 * kMillisecond;
     }
 
-    ErrorStatus executionStatus;
-    std::vector<OutputShape> outputShapes;
-    Timing timing = kNoTiming;
-    switch (testConfig.executor) {
-        case Executor::SYNC: {
-            SCOPED_TRACE("synchronous");
-
-            ExecutionResult executionResult;
-            // execute
-            const auto ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
-                                                                 kNoDeadline, loopTimeoutDurationNs,
-                                                                 &executionResult);
-            ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
-                    << ret.getDescription();
-            if (ret.isOk()) {
-                executionStatus = executionResult.outputSufficientSize
-                                          ? ErrorStatus::NONE
-                                          : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
-                outputShapes = std::move(executionResult.outputShapes);
-                timing = executionResult.timing;
-            } else {
-                executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
-            }
-            break;
-        }
-        case Executor::BURST: {
-            SCOPED_TRACE("burst");
-
-            // create burst
-            std::shared_ptr<IBurst> burst;
-            auto ret = preparedModel->configureExecutionBurst(&burst);
-            ASSERT_TRUE(ret.isOk()) << ret.getDescription();
-            ASSERT_NE(nullptr, burst.get());
-
-            // associate a unique slot with each memory pool
-            int64_t currentSlot = 0;
-            std::vector<int64_t> slots;
-            slots.reserve(request.pools.size());
-            for (const auto& pool : request.pools) {
-                if (pool.getTag() == RequestMemoryPool::Tag::pool) {
-                    slots.push_back(currentSlot++);
-                } else {
-                    EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
-                    slots.push_back(-1);
-                }
-            }
-
-            ExecutionResult executionResult;
-            // execute
-            ret = burst->executeSynchronously(request, slots, testConfig.measureTiming, kNoDeadline,
-                                              loopTimeoutDurationNs, &executionResult);
-            ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
-                    << ret.getDescription();
-            if (ret.isOk()) {
-                executionStatus = executionResult.outputSufficientSize
-                                          ? ErrorStatus::NONE
-                                          : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
-                outputShapes = std::move(executionResult.outputShapes);
-                timing = executionResult.timing;
-            } else {
-                executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
-            }
-
-            // Mark each slot as unused after the execution. This is unnecessary because the burst
-            // is freed after this scope ends, but this is here to test the functionality.
-            for (int64_t slot : slots) {
-                ret = burst->releaseMemoryResource(slot);
-                ASSERT_TRUE(ret.isOk()) << ret.getDescription();
-            }
-
-            break;
-        }
-        case Executor::FENCED: {
-            SCOPED_TRACE("fenced");
-            ErrorStatus result = ErrorStatus::NONE;
-            FencedExecutionResult executionResult;
-            auto ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
-                                                    kNoDeadline, loopTimeoutDurationNs, kNoDuration,
-                                                    &executionResult);
-            ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
-                    << ret.getDescription();
-            if (!ret.isOk()) {
-                result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
-                executionStatus = result;
-            } else if (executionResult.syncFence.get() != -1) {
-                std::vector<ndk::ScopedFileDescriptor> waitFor;
-                auto dupFd = dup(executionResult.syncFence.get());
-                ASSERT_NE(dupFd, -1);
-                waitFor.emplace_back(dupFd);
-                // If a sync fence is returned, try start another run waiting for the sync fence.
-                ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
-                                                   kNoDeadline, loopTimeoutDurationNs, kNoDuration,
-                                                   &executionResult);
-                ASSERT_TRUE(ret.isOk());
-                waitForSyncFence(executionResult.syncFence.get());
-            }
-            if (result == ErrorStatus::NONE) {
-                ASSERT_NE(executionResult.callback, nullptr);
-                Timing timingFenced;
-                auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
-                                                                      &executionStatus);
-                ASSERT_TRUE(ret.isOk());
-            }
-            break;
-        }
-        default: {
-            FAIL() << "Unsupported execution mode for AIDL interface.";
-        }
-    }
-
-    if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
-        executionStatus == ErrorStatus::GENERAL_FAILURE) {
-        if (skipped != nullptr) {
-            *skipped = true;
-        }
-        if (!testConfig.reportSkipping) {
-            return;
-        }
-        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                     "execute model that it does not support.";
-        std::cout << "[          ] Early termination of test because vendor service cannot "
-                     "execute model that it does not support."
-                  << std::endl;
-        GTEST_SKIP();
-    }
-    if (!testConfig.measureTiming) {
-        EXPECT_EQ(timing, kNoTiming);
-    } else {
-        if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
-            EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
-        }
-    }
-
-    switch (testConfig.outputType) {
-        case OutputType::FULLY_SPECIFIED:
-            if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
-                // Executor::FENCED does not support zero-sized output.
-                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
-                return;
-            }
-            // If the model output operands are fully specified, outputShapes must be either
-            // either empty, or have the same number of elements as the number of outputs.
-            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
-            ASSERT_TRUE(outputShapes.size() == 0 ||
-                        outputShapes.size() == testModel.main.outputIndexes.size());
-            break;
-        case OutputType::UNSPECIFIED:
-            if (testConfig.executor == Executor::FENCED) {
-                // For Executor::FENCED, the output shape must be fully specified.
-                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
-                return;
-            }
-            // If the model output operands are not fully specified, outputShapes must have
-            // the same number of elements as the number of outputs.
-            ASSERT_EQ(ErrorStatus::NONE, executionStatus);
-            ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
-            break;
-        case OutputType::INSUFFICIENT:
-            if (testConfig.executor == Executor::FENCED) {
-                // For Executor::FENCED, the output shape must be fully specified.
-                ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
-                return;
-            }
-            ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
-            ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
-            // Check that all returned output dimensions are at least as fully specified as the
-            // union of the information about the corresponding operand in the model and in the
-            // request. In this test, all model outputs have known rank with all dimensions
-            // unspecified, and no dimensional information is provided in the request.
-            for (uint32_t i = 0; i < outputShapes.size(); i++) {
-                ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
-                const auto& actual = outputShapes[i].dimensions;
-                const auto& golden =
-                        testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
-                ASSERT_EQ(actual.size(), golden.size());
-                for (uint32_t j = 0; j < actual.size(); j++) {
-                    if (actual[j] == 0) continue;
-                    EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
-                }
-            }
-            return;
-        case OutputType::MISSED_DEADLINE:
-            ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
-                        executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
-                    << "executionStatus = " << executionStatus;
-            return;
-    }
-
-    // Go through all outputs, check returned output shapes.
-    for (uint32_t i = 0; i < outputShapes.size(); i++) {
-        EXPECT_TRUE(outputShapes[i].isSufficient);
-        const auto& expect = testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
-        const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
-        ASSERT_TRUE(unsignedActual.has_value());
-        const std::vector<uint32_t>& actual = unsignedActual.value();
-        EXPECT_EQ(expect, actual);
-    }
-
-    // Retrieve execution results.
-    const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
-
-    // We want "close-enough" results.
-    checkResults(testModel, outputs);
+    std::shared_ptr<IExecution> execution;
+    if (testConfig.reusable) {
+        const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
+                                                                loopTimeoutDurationNs, &execution);
+        ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
+        ASSERT_NE(nullptr, execution.get());
+    }
+
+    const auto executeAndCheckResults = [&preparedModel, &execution, &testConfig, &testModel,
+                                         &context, &request, loopTimeoutDurationNs, skipped]() {
+        ErrorStatus executionStatus;
+        std::vector<OutputShape> outputShapes;
+        Timing timing = kNoTiming;
+        switch (testConfig.executor) {
+            case Executor::SYNC: {
+                SCOPED_TRACE("synchronous");
+
+                ExecutionResult executionResult;
+                // execute
+                ::ndk::ScopedAStatus ret;
+                if (testConfig.reusable) {
+                    ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+                } else {
+                    ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
+                                                              kNoDeadline, loopTimeoutDurationNs,
+                                                              &executionResult);
+                }
+                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
+                        << ret.getDescription();
+                if (ret.isOk()) {
+                    executionStatus = executionResult.outputSufficientSize
+                                              ? ErrorStatus::NONE
+                                              : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+                    outputShapes = std::move(executionResult.outputShapes);
+                    timing = executionResult.timing;
+                } else {
+                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
+                }
+                break;
+            }
+            case Executor::BURST: {
+                SCOPED_TRACE("burst");
+
+                // create burst
+                std::shared_ptr<IBurst> burst;
+                auto ret = preparedModel->configureExecutionBurst(&burst);
+                ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+                ASSERT_NE(nullptr, burst.get());
+
+                // associate a unique slot with each memory pool
+                int64_t currentSlot = 0;
+                std::vector<int64_t> slots;
+                slots.reserve(request.pools.size());
+                for (const auto& pool : request.pools) {
+                    if (pool.getTag() == RequestMemoryPool::Tag::pool) {
+                        slots.push_back(currentSlot++);
+                    } else {
+                        EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
+                        slots.push_back(-1);
+                    }
+                }
+
+                ExecutionResult executionResult;
+                // execute
+                ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+                                                  kNoDeadline, loopTimeoutDurationNs,
+                                                  &executionResult);
+                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
+                        << ret.getDescription();
+                if (ret.isOk()) {
+                    executionStatus = executionResult.outputSufficientSize
+                                              ? ErrorStatus::NONE
+                                              : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+                    outputShapes = std::move(executionResult.outputShapes);
+                    timing = executionResult.timing;
+                } else {
+                    executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
+                }
+
+                // Mark each slot as unused after the execution. This is unnecessary because the
+                // burst is freed after this scope ends, but this is here to test the functionality.
+                for (int64_t slot : slots) {
+                    ret = burst->releaseMemoryResource(slot);
+                    ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+                }
+
+                break;
+            }
+            case Executor::FENCED: {
+                SCOPED_TRACE("fenced");
+                ErrorStatus result = ErrorStatus::NONE;
+                FencedExecutionResult executionResult;
+                ::ndk::ScopedAStatus ret;
+                if (testConfig.reusable) {
+                    ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+                } else {
+                    ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
+                                                       kNoDeadline, loopTimeoutDurationNs,
+                                                       kNoDuration, &executionResult);
+                }
+                ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
+                        << ret.getDescription();
+                if (!ret.isOk()) {
+                    result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
+                    executionStatus = result;
+                } else if (executionResult.syncFence.get() != -1) {
+                    std::vector<ndk::ScopedFileDescriptor> waitFor;
+                    auto dupFd = dup(executionResult.syncFence.get());
+                    ASSERT_NE(dupFd, -1);
+                    waitFor.emplace_back(dupFd);
+                    // If a sync fence is returned, try start another run waiting for the sync
+                    // fence.
+                    ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
+                                                       kNoDeadline, loopTimeoutDurationNs,
+                                                       kNoDuration, &executionResult);
+                    ASSERT_TRUE(ret.isOk());
+                    waitForSyncFence(executionResult.syncFence.get());
+                }
+                if (result == ErrorStatus::NONE) {
+                    ASSERT_NE(executionResult.callback, nullptr);
+                    Timing timingFenced;
+                    auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
+                                                                          &executionStatus);
+                    ASSERT_TRUE(ret.isOk());
+                }
+                break;
+            }
+            default: {
+                FAIL() << "Unsupported execution mode for AIDL interface.";
+            }
+        }
+
+        if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
+            executionStatus == ErrorStatus::GENERAL_FAILURE) {
+            if (skipped != nullptr) {
+                *skipped = true;
+            }
+            if (!testConfig.reportSkipping) {
+                return;
+            }
+            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                         "execute model that it does not support.";
+            std::cout << "[          ] Early termination of test because vendor service cannot "
+                         "execute model that it does not support."
+                      << std::endl;
+            GTEST_SKIP();
+        }
+        if (!testConfig.measureTiming) {
+            EXPECT_EQ(timing, kNoTiming);
+        } else {
+            if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
+                EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
+            }
+        }
+
+        switch (testConfig.outputType) {
+            case OutputType::FULLY_SPECIFIED:
+                if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
+                    // Executor::FENCED does not support zero-sized output.
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                    return;
+                }
+                // If the model output operands are fully specified, outputShapes must be either
+                // either empty, or have the same number of elements as the number of outputs.
+                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+                ASSERT_TRUE(outputShapes.size() == 0 ||
+                            outputShapes.size() == testModel.main.outputIndexes.size());
+                break;
+            case OutputType::UNSPECIFIED:
+                if (testConfig.executor == Executor::FENCED) {
+                    // For Executor::FENCED, the output shape must be fully specified.
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                    return;
+                }
+                // If the model output operands are not fully specified, outputShapes must have
+                // the same number of elements as the number of outputs.
+                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+                ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
+                break;
+            case OutputType::INSUFFICIENT:
+                if (testConfig.executor == Executor::FENCED) {
+                    // For Executor::FENCED, the output shape must be fully specified.
+                    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+                    return;
+                }
+                ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+                ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
+                // Check that all returned output dimensions are at least as fully specified as the
+                // union of the information about the corresponding operand in the model and in the
+                // request. In this test, all model outputs have known rank with all dimensions
+                // unspecified, and no dimensional information is provided in the request.
+                for (uint32_t i = 0; i < outputShapes.size(); i++) {
+                    ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
+                    const auto& actual = outputShapes[i].dimensions;
+                    const auto& golden =
+                            testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
+                    ASSERT_EQ(actual.size(), golden.size());
+                    for (uint32_t j = 0; j < actual.size(); j++) {
+                        if (actual[j] == 0) continue;
+                        EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
+                    }
+                }
+                return;
+            case OutputType::MISSED_DEADLINE:
+                ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+                            executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
+                        << "executionStatus = " << executionStatus;
+                return;
+        }
+
+        // Go through all outputs, check returned output shapes.
+        for (uint32_t i = 0; i < outputShapes.size(); i++) {
+            EXPECT_TRUE(outputShapes[i].isSufficient);
+            const auto& expect =
+                    testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
+            const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
+            ASSERT_TRUE(unsignedActual.has_value());
+            const std::vector<uint32_t>& actual = unsignedActual.value();
+            EXPECT_EQ(expect, actual);
+        }
+
+        // Retrieve execution results.
+        const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
+
+        // We want "close-enough" results.
+        checkResults(testModel, outputs);
+    };
+
+    executeAndCheckResults();
+
+    // For reusable execution tests, run the execution twice.
+    if (testConfig.reusable) {
+        SCOPED_TRACE("Second execution");
+        executeAndCheckResults();
+    }
 }
 
 void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
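The bulk of the hunk above is an indentation shift: the old function body moves into the `executeAndCheckResults` lambda so that the same checks can run once for single-shot executions and twice for reusable ones. The lambda returns void, which gtest's `ASSERT_*` macros require to compile (they expand to a bare `return;`), and each `return` now abandons a single execution attempt instead of the whole test. A stripped-down sketch of the dispatch-and-repeat pattern, with all VTS-specific checking elided:

    // Sketch: dispatch to the reusable or single-shot path, then repeat the
    // whole checked body for reusable executions.
    const auto executeAndCheckResults = [&]() {
        ExecutionResult executionResult;
        ::ndk::ScopedAStatus ret;
        if (testConfig.reusable) {
            ret = execution->executeSynchronously(kNoDeadline, &executionResult);
        } else {
            ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                      kNoDeadline, loopTimeoutDurationNs,
                                                      &executionResult);
        }
        // ASSERT_* needs a void-returning scope, hence the lambda.
        ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                << ret.getDescription();
    };

    executeAndCheckResults();
    if (testConfig.reusable) {
        SCOPED_TRACE("Second execution");
        executeAndCheckResults();
    }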
@@ -770,6 +829,13 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
     std::vector<bool> measureTimingList;
     std::vector<Executor> executorList;
     std::vector<MemoryType> memoryTypeList;
+    std::vector<bool> reusableList = {false};
+
+    int deviceVersion;
+    ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+    if (deviceVersion >= kMinAidlLevelForFL8) {
+        reusableList.push_back(true);
+    }
 
     switch (testKind) {
         case TestKind::GENERAL: {
@@ -812,8 +878,13 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
     for (const bool measureTiming : measureTimingList) {
         for (const Executor executor : executorList) {
             for (const MemoryType memoryType : memoryTypeList) {
-                const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
-                EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                for (const bool reusable : reusableList) {
+                    if (executor == Executor::BURST && reusable) continue;
+                    const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
+                                                reusable);
+                    SCOPED_TRACE(toString(testConfig));
+                    EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                }
             }
         }
     }
@@ -833,7 +904,7 @@ void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
     for (const bool measureTiming : measureTimingList) {
         for (const Executor executor : executorList) {
             const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
-                                        /*reportSkipping=*/false);
+                                        /*reusable=*/false, /*reportSkipping=*/false);
             bool baseSkipped = false;
             EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
             bool coupledSkipped = false;
@@ -177,6 +177,17 @@ std::string gtestCompliantName(std::string name) {
     return os << toString(errorStatus);
 }
 
+std::string toString(MemoryType type) {
+    switch (type) {
+        case MemoryType::ASHMEM:
+            return "ASHMEM";
+        case MemoryType::BLOB_AHWB:
+            return "BLOB_AHWB";
+        case MemoryType::DEVICE:
+            return "DEVICE";
+    }
+}
+
 Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
     CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
 
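Both new `toString` overloads (this one and the `OutputType` one earlier) use a switch with a `return` in every case and no `default`. With Clang's `-Wswitch` enabled, as it typically is in the Android build, adding a new enumerator then produces a diagnostic at every `toString` that was not updated. A sketch of that failure mode, with a deliberately hypothetical enumerator:

    // Hypothetical: suppose MemoryType later gained a fourth member.
    enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE, POOL_AHWB /*hypothetical*/ };

    std::string toString(MemoryType type) {
        // -Wswitch: enumeration value 'POOL_AHWB' not handled in switch
        switch (type) {
            case MemoryType::ASHMEM: return "ASHMEM";
            case MemoryType::BLOB_AHWB: return "BLOB_AHWB";
            case MemoryType::DEVICE: return "DEVICE";
        }
    }

The trade-off is that falling off the end of the switch is undefined behavior if an out-of-range value ever arrives, so the style relies on callers passing valid enumerators.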
@@ -111,6 +111,8 @@ class TestBlobAHWB : public TestMemoryBase {
 
 enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
 
+std::string toString(MemoryType type);
+
 // Manages the lifetime of memory resources used in an execution.
 class ExecutionContext {
     DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
@@ -36,6 +36,51 @@ using ExecutionMutation = std::function<void(Request*)>;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
+// Test request validation with reusable execution.
+static void validateReusableExecution(const std::shared_ptr<IPreparedModel>& preparedModel,
+                                      const std::string& message, const Request& request,
+                                      bool measure) {
+    // createReusableExecution
+    std::shared_ptr<IExecution> execution;
+    {
+        SCOPED_TRACE(message + " [createReusableExecution]");
+        const auto createStatus = preparedModel->createReusableExecution(
+                request, measure, kOmittedTimeoutDuration, &execution);
+        if (!createStatus.isOk()) {
+            ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+            ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
+                      ErrorStatus::INVALID_ARGUMENT);
+            ASSERT_EQ(nullptr, execution);
+            return;
+        } else {
+            ASSERT_NE(nullptr, execution);
+        }
+    }
+
+    // synchronous
+    {
+        SCOPED_TRACE(message + " [executeSynchronously]");
+        ExecutionResult executionResult;
+        const auto executeStatus = execution->executeSynchronously(kNoDeadline, &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
+    }
+
+    // fenced
+    {
+        SCOPED_TRACE(message + " [executeFenced]");
+        FencedExecutionResult executionResult;
+        const auto executeStatus =
+                execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
+    }
+}
+
 // Primary validation function. This function will take a valid request, apply a
 // mutation to it to invalidate the request, then pass it to interface calls
 // that use the request.
@@ -101,6 +146,14 @@ static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
                   ErrorStatus::INVALID_ARGUMENT);
     }
+
+    int32_t aidlVersion;
+    ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
+
+    // validate reusable execution
+    if (aidlVersion >= kMinAidlLevelForFL8) {
+        validateReusableExecution(preparedModel, message, request, measure);
+    }
 }
 
 std::shared_ptr<IBurst> createBurst(const std::shared_ptr<IPreparedModel>& preparedModel) {
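The validation hook above shows the gating idiom used throughout the change: every stable AIDL interface exposes `getInterfaceVersion`, so reusable-execution coverage is enabled by comparing the reported version against `kMinAidlLevelForFL8` (declared in the next hunk) rather than probing for the method at runtime. A sketch of the same idiom for a test that requires the feature outright and skips instead of silently narrowing coverage; the skip variant is an assumption for illustration, not part of this change:

    // Sketch: gate a feature-level-8 test on the driver's reported AIDL version.
    int32_t aidlVersion;
    ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
    if (aidlVersion < kMinAidlLevelForFL8) {
        GTEST_SKIP() << "Reusable executions need AIDL version " << kMinAidlLevelForFL8
                     << "; driver reports version " << aidlVersion;
    }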
@@ -30,6 +30,8 @@ namespace aidl::android::hardware::neuralnetworks::vts::functional {
 using NamedDevice = Named<std::shared_ptr<IDevice>>;
 using NeuralNetworksAidlTestParam = NamedDevice;
 
+constexpr int kMinAidlLevelForFL8 = 4;
+
 class NeuralNetworksAidlTest : public testing::TestWithParam<NeuralNetworksAidlTestParam> {
   protected:
     void SetUp() override;