Merge "Add BLOB AHWB tests in VTS." into rvc-dev

This commit is contained in:
Xusong Wang
2020-03-21 06:38:36 +00:00
committed by Android (Google) Code Review
11 changed files with 275 additions and 111 deletions

View File

@@ -125,7 +125,9 @@ Model createModel(const TestModel& testModel) {
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
const Model model = createModel(testModel);
const Request request = createRequest(testModel);
ExecutionContext context;
const Request request = context.createRequest(testModel);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@@ -143,7 +145,7 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel) {
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(request);
const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);

View File

@@ -21,10 +21,13 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware_buffer.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <vndk/hardware_buffer.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <vector>
@@ -37,10 +40,64 @@ using V1_0::DataLocation;
using V1_0::Request;
using V1_0::RequestArgument;
constexpr uint32_t kInputPoolIndex = 0;
constexpr uint32_t kOutputPoolIndex = 1;
// Factory for an ashmem-backed test memory of |size| bytes. Returns nullptr
// when construction failed (mIsValid is only set true by initialize() after
// allocation and mapping both succeed).
std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
auto ashmem = std::make_unique<TestAshmem>(size);
return ashmem->mIsValid ? std::move(ashmem) : nullptr;
}
// Allocates a shared-memory (ashmem) pool of |size| bytes, maps it into this
// process, and caches the mapped base pointer in mPtr. Uses gtest ASSERT_*
// macros, which is why this runs in initialize() rather than the constructor:
// on any failure the function returns early and mIsValid stays false, making
// create() return nullptr.
void TestAshmem::initialize(uint32_t size) {
mIsValid = false;
ASSERT_GT(size, 0);
mHidlMemory = nn::allocateSharedMemory(size);
ASSERT_TRUE(mHidlMemory.valid());
// Keep the IMemory mapping alive in mMappedMemory so mPtr stays valid for
// the lifetime of this object.
mMappedMemory = mapMemory(mHidlMemory);
ASSERT_NE(mMappedMemory, nullptr);
mPtr = static_cast<uint8_t*>(static_cast<void*>(mMappedMemory->getPointer()));
ASSERT_NE(mPtr, nullptr);
mIsValid = true;
}
// Factory for a BLOB-format AHardwareBuffer test memory of |size| bytes.
// Returns nullptr when allocation or locking failed (mIsValid is only set
// true by initialize() on full success).
std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
auto ahwb = std::make_unique<TestBlobAHWB>(size);
return ahwb->mIsValid ? std::move(ahwb) : nullptr;
}
// Allocates an AHARDWAREBUFFER_FORMAT_BLOB hardware buffer of |size| bytes,
// locks it for CPU read/write access (mPtr), and wraps its native handle in a
// hidl_memory of name "hardware_buffer_blob" so it can be sent over HIDL as a
// request memory pool. gtest ASSERT_* macros abort this function on failure,
// leaving mIsValid false so create() returns nullptr.
void TestBlobAHWB::initialize(uint32_t size) {
mIsValid = false;
ASSERT_GT(size, 0);
const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
// BLOB buffers are described as a 1-D byte array: width = byte size,
// height/layers = 1, stride = width.
const AHardwareBuffer_Desc desc = {
.width = size,
.height = 1,
.layers = 1,
.format = AHARDWAREBUFFER_FORMAT_BLOB,
.usage = usage,
.stride = size,
};
ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
ASSERT_NE(mAhwb, nullptr);
// Lock the whole buffer (rect == nullptr) with no fence (-1) to obtain a
// CPU-accessible pointer; the buffer stays locked until the destructor.
void* buffer = nullptr;
ASSERT_EQ(AHardwareBuffer_lock(mAhwb, usage, -1, nullptr, &buffer), 0);
ASSERT_NE(buffer, nullptr);
mPtr = static_cast<uint8_t*>(buffer);
// NOTE(review): hidl_memory does not take ownership of |handle|; mAhwb must
// outlive mHidlMemory, which this class guarantees by releasing it only in
// the destructor.
const native_handle_t* handle = AHardwareBuffer_getNativeHandle(mAhwb);
ASSERT_NE(handle, nullptr);
mHidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
mIsValid = true;
}
// Unlocks (ending the CPU access started in initialize()) and releases the
// hardware buffer, if one was successfully allocated.
TestBlobAHWB::~TestBlobAHWB() {
if (mAhwb) {
AHardwareBuffer_unlock(mAhwb, nullptr);
AHardwareBuffer_release(mAhwb);
}
}
Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
Request createRequest(const TestModel& testModel) {
// Model inputs.
hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
size_t inputSize = 0;
@@ -80,16 +137,19 @@ Request createRequest(const TestModel& testModel) {
}
// Allocate memory pools.
hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
CHECK_NE(pools[kInputPoolIndex].size(), 0u);
CHECK_NE(pools[kOutputPoolIndex].size(), 0u);
sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex]);
CHECK(inputMemory.get() != nullptr);
uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
CHECK(inputPtr != nullptr);
if (memoryType == MemoryType::ASHMEM) {
mInputMemory = TestAshmem::create(inputSize);
mOutputMemory = TestAshmem::create(outputSize);
} else {
mInputMemory = TestBlobAHWB::create(inputSize);
mOutputMemory = TestBlobAHWB::create(outputSize);
}
EXPECT_NE(mInputMemory, nullptr);
EXPECT_NE(mOutputMemory, nullptr);
hidl_vec<hidl_memory> pools = {mInputMemory->getHidlMemory(), mOutputMemory->getHidlMemory()};
// Copy input data to the memory pool.
uint8_t* inputPtr = mInputMemory->getPointer();
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
if (op.data.size() > 0) {
@@ -102,18 +162,13 @@ Request createRequest(const TestModel& testModel) {
return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}
std::vector<TestBuffer> getOutputBuffers(const Request& request) {
sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
CHECK(outputMemory.get() != nullptr);
uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
CHECK(outputPtr != nullptr);
// After an execution completes, copies each output region out of the output
// memory pool owned by this context (mOutputMemory) into standalone
// TestBuffers, using the offset/length recorded in each request output's
// DataLocation. Assumes all outputs of |request| live in mOutputMemory.
std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
// Copy out output results.
uint8_t* outputPtr = mOutputMemory->getPointer();
std::vector<TestBuffer> outputBuffers;
for (const auto& output : request.outputs) {
outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
}
return outputBuffers;
}

View File

@@ -129,7 +129,8 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
const Request request = createRequest(kTestModel);
ExecutionContext context;
const Request request = context.createRequest(kTestModel);
ASSERT_FALSE(kTestModel.expectFailure);
validateEverything(kDevice, model, request);
}

View File

@@ -19,6 +19,8 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware_buffer.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <algorithm>
#include <iosfwd>
#include <string>
@@ -28,11 +30,73 @@
namespace android::hardware::neuralnetworks {
// Create HIDL Request from the TestModel struct.
V1_0::Request createRequest(const test_helper::TestModel& testModel);
// Convenience class to manage the lifetime of memory resources.
class TestMemoryBase {
DISALLOW_COPY_AND_ASSIGN(TestMemoryBase);
// After execution, copy out output results from the output memory pool.
std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
public:
TestMemoryBase() = default;
virtual ~TestMemoryBase() = default;
uint8_t* getPointer() const { return mPtr; }
hidl_memory getHidlMemory() const { return mHidlMemory; }
protected:
uint8_t* mPtr = nullptr;
hidl_memory mHidlMemory;
bool mIsValid = false;
};
class TestAshmem : public TestMemoryBase {
public:
static std::unique_ptr<TestAshmem> create(uint32_t size);
// Prefer TestAshmem::create.
// The constructor calls initialize, which constructs the memory resources. This is a workaround
// that gtest macros cannot be used directly in a constructor.
TestAshmem(uint32_t size) { initialize(size); }
private:
void initialize(uint32_t size);
sp<hidl::memory::V1_0::IMemory> mMappedMemory;
};
class TestBlobAHWB : public TestMemoryBase {
public:
static std::unique_ptr<TestBlobAHWB> create(uint32_t size);
// Prefer TestBlobAHWB::create.
// The constructor calls initialize, which constructs the memory resources. This is a
// workaround that gtest macros cannot be used directly in a constructor.
TestBlobAHWB(uint32_t size) { initialize(size); }
~TestBlobAHWB();
private:
void initialize(uint32_t size);
AHardwareBuffer* mAhwb = nullptr;
};
enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
// Manages the lifetime of memory resources used in an execution.
class ExecutionContext {
DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
public:
static constexpr uint32_t kInputPoolIndex = 0;
static constexpr uint32_t kOutputPoolIndex = 1;
ExecutionContext() = default;
// Create HIDL Request from the TestModel struct.
V1_0::Request createRequest(const test_helper::TestModel& testModel,
MemoryType memoryType = MemoryType::ASHMEM);
// After execution, copy out output results from the output memory pool.
std::vector<test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request) const;
private:
std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
};
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and

View File

@@ -133,7 +133,9 @@ Model createModel(const TestModel& testModel) {
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
const Model model = createModel(testModel);
const Request request = createRequest(testModel);
ExecutionContext context;
const Request request = context.createRequest(testModel);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@@ -151,7 +153,7 @@ void Execute(const sp<IDevice>& device, const TestModel& testModel) {
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(request);
const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);

View File

@@ -132,7 +132,8 @@ void validateEverything(const sp<IDevice>& device, const Model& model, const Req
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
const Request request = createRequest(kTestModel);
ExecutionContext context;
const Request request = context.createRequest(kTestModel);
ASSERT_FALSE(kTestModel.expectFailure);
validateEverything(kDevice, model, request);
}

View File

@@ -68,6 +68,7 @@ struct TestConfig {
Executor executor;
MeasureTiming measureTiming;
OutputType outputType;
MemoryType memoryType;
};
} // namespace
@@ -216,7 +217,8 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
return;
}
Request request = createRequest(testModel);
ExecutionContext context;
Request request = context.createRequest(testModel, testConfig.memoryType);
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
@@ -326,7 +328,7 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
}
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(request);
const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
@@ -337,24 +339,30 @@ void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestMo
std::vector<OutputType> outputTypesList;
std::vector<MeasureTiming> measureTimingList;
std::vector<Executor> executorList;
std::vector<MemoryType> memoryTypeList;
if (testDynamicOutputShape) {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
memoryTypeList = {MemoryType::ASHMEM};
} else {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
memoryTypeList = {MemoryType::ASHMEM, MemoryType::BLOB_AHWB};
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig = {.executor = executor,
.measureTiming = measureTiming,
.outputType = outputType};
EvaluatePreparedModel(preparedModel, testModel, testConfig);
for (const MemoryType memoryType : memoryTypeList) {
const TestConfig testConfig = {.executor = executor,
.measureTiming = measureTiming,
.outputType = outputType,
.memoryType = memoryType};
EvaluatePreparedModel(preparedModel, testModel, testConfig);
}
}
}
}

View File

@@ -153,7 +153,8 @@ void validateFailure(const sp<IDevice>& device, const Model& model, const Reques
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
const Request request = createRequest(kTestModel);
ExecutionContext context;
const Request request = context.createRequest(kTestModel);
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {

View File

@@ -74,8 +74,6 @@ namespace {
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };
enum class MemoryType { SHARED, DEVICE };
enum class IOType { INPUT, OUTPUT };
struct TestConfig {
@@ -336,21 +334,39 @@ static void makeOutputDimensionsUnspecified(Model* model) {
}
}
constexpr uint32_t kInputPoolIndex = 0;
constexpr uint32_t kOutputPoolIndex = 1;
constexpr uint32_t kDeviceMemoryBeginIndex = 2;
class ExecutionContextV1_3 {
public:
ExecutionContextV1_3(sp<IDevice> device, sp<IPreparedModel> preparedModel)
: kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}
static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
const TestModel& testModel, bool preferDeviceMemory) {
std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
const Request& request) const;
private:
// Get a TestBuffer with data copied from an IBuffer object.
void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) const;
static constexpr uint32_t kInputPoolIndex = 0;
static constexpr uint32_t kOutputPoolIndex = 1;
static constexpr uint32_t kDeviceMemoryBeginIndex = 2;
const sp<IDevice> kDevice;
const sp<IPreparedModel> kPreparedModel;
std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
std::vector<sp<IBuffer>> mBuffers;
};
std::optional<Request> ExecutionContextV1_3::createRequest(const TestModel& testModel,
MemoryType memoryType) {
// Memory pools are organized as:
// - 0: Input shared memory pool
// - 1: Output shared memory pool
// - [2, 2+i): Input device memories
// - [2+i, 2+i+o): Output device memories
DeviceMemoryAllocator allocator(device, preparedModel, testModel);
std::vector<sp<IBuffer>> buffers;
DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
std::vector<uint32_t> tokens;
mBuffers.clear();
// Model inputs.
hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
@@ -361,13 +377,13 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
// Omitted input.
inputs[i] = {.hasNoValue = true};
continue;
} else if (preferDeviceMemory) {
} else if (memoryType == MemoryType::DEVICE) {
SCOPED_TRACE("Input index = " + std::to_string(i));
auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
if (buffer != nullptr) {
DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
kDeviceMemoryBeginIndex)};
buffers.push_back(std::move(buffer));
mBuffers.push_back(std::move(buffer));
tokens.push_back(token);
inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
continue;
@@ -387,13 +403,13 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
size_t outputSize = 0;
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
if (preferDeviceMemory) {
if (memoryType == MemoryType::DEVICE) {
SCOPED_TRACE("Output index = " + std::to_string(i));
auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
if (buffer != nullptr) {
DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
kDeviceMemoryBeginIndex)};
buffers.push_back(std::move(buffer));
mBuffers.push_back(std::move(buffer));
tokens.push_back(token);
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
continue;
@@ -416,21 +432,29 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
}
if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
return std::nullopt;
}
// Memory pools.
hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + buffers.size());
pools[kInputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(inputSize, 1)));
pools[kOutputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(outputSize, 1)));
CHECK_NE(pools[kInputPoolIndex].hidlMemory().size(), 0u);
CHECK_NE(pools[kOutputPoolIndex].hidlMemory().size(), 0u);
for (uint32_t i = 0; i < buffers.size(); i++) {
hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + mBuffers.size());
if (memoryType == MemoryType::BLOB_AHWB) {
mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
} else {
mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1));
mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1));
}
EXPECT_NE(mInputMemory, nullptr);
EXPECT_NE(mOutputMemory, nullptr);
pools[kInputPoolIndex].hidlMemory(mInputMemory->getHidlMemory());
pools[kOutputPoolIndex].hidlMemory(mOutputMemory->getHidlMemory());
for (uint32_t i = 0; i < mBuffers.size(); i++) {
pools[kDeviceMemoryBeginIndex + i].token(tokens[i]);
}
// Copy input data to the input shared memory pool.
sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex].hidlMemory());
CHECK(inputMemory.get() != nullptr);
uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
CHECK(inputPtr != nullptr);
uint8_t* inputPtr = mInputMemory->getPointer();
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
@@ -439,14 +463,38 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
std::copy(begin, end, inputPtr + inputs[i].location.offset);
}
}
Request request = {
return Request{
.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
return {std::move(request), std::move(buffers)};
}
// Copies execution results into TestBuffers. Outputs located in the shared
// output pool (kOutputPoolIndex) are read directly from mOutputMemory;
// outputs placed in device memories are fetched through getBuffer() from the
// corresponding IBuffer in mBuffers (pool index minus kDeviceMemoryBeginIndex).
// Zero-sized outputs yield an empty (0, nullptr) TestBuffer.
std::vector<TestBuffer> ExecutionContextV1_3::getOutputBuffers(const TestModel& testModel,
const Request& request) const {
// Copy out output results.
uint8_t* outputPtr = mOutputMemory->getPointer();
std::vector<TestBuffer> outputBuffers;
for (uint32_t i = 0; i < request.outputs.size(); i++) {
const auto& outputLoc = request.outputs[i].location;
if (outputLoc.poolIndex == kOutputPoolIndex) {
// Output lives in the shared memory pool owned by this context.
outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
} else {
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
if (op.data.size() == 0) {
outputBuffers.emplace_back(0, nullptr);
} else {
SCOPED_TRACE("Output index = " + std::to_string(i));
// Device-memory output: map pool index back to the IBuffer that
// was allocated for it in createRequest().
const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
TestBuffer buffer;
getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
outputBuffers.push_back(std::move(buffer));
}
}
}
return outputBuffers;
}
// Get a TestBuffer with data copied from an IBuffer object.
static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) {
void ExecutionContextV1_3::getBuffer(const sp<IBuffer>& buffer, size_t size,
TestBuffer* testBuffer) const {
// IBuffer -> Shared memory.
hidl_memory tmp = nn::allocateSharedMemory(size);
const auto ret = buffer->copyTo(tmp);
@@ -462,35 +510,6 @@ static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBu
*testBuffer = TestBuffer(size, outputPtr);
}
static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, const Request& request,
const std::vector<sp<IBuffer>>& buffers) {
sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex].hidlMemory());
CHECK(outputMemory.get() != nullptr);
uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
CHECK(outputPtr != nullptr);
// Copy out output results.
std::vector<TestBuffer> outputBuffers;
for (uint32_t i = 0; i < request.outputs.size(); i++) {
const auto& outputLoc = request.outputs[i].location;
if (outputLoc.poolIndex == kOutputPoolIndex) {
outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
} else {
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
if (op.data.size() == 0) {
outputBuffers.emplace_back();
} else {
SCOPED_TRACE("Output index = " + std::to_string(i));
const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
TestBuffer buffer;
getBuffer(buffers[bufferIndex], op.data.size(), &buffer);
outputBuffers.push_back(std::move(buffer));
}
}
}
return outputBuffers;
}
static bool hasZeroSizedOutput(const TestModel& testModel) {
return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
[&testModel](uint32_t index) {
@@ -541,13 +560,14 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
return;
}
auto [request, buffers] =
createRequest(device, preparedModel, testModel,
/*preferDeviceMemory=*/testConfig.memoryType == MemoryType::DEVICE);
ExecutionContextV1_3 context(device, preparedModel);
auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
// Skip if testing memory domain but no device memory has been allocated.
if (testConfig.memoryType == MemoryType::DEVICE && buffers.empty()) {
if (!maybeRequest.has_value()) {
return;
}
Request request = std::move(maybeRequest.value());
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
@@ -742,7 +762,7 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
}
// Retrieve execution results.
const std::vector<TestBuffer> outputs = getOutputBuffers(testModel, request, buffers);
const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
// We want "close-enough" results.
checkResults(testModel, outputs);
@@ -753,29 +773,32 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
std::vector<OutputType> outputTypesList;
std::vector<MeasureTiming> measureTimingList;
std::vector<Executor> executorList;
MemoryType memoryType = MemoryType::SHARED;
std::vector<MemoryType> memoryTypeList;
switch (testKind) {
case TestKind::GENERAL: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::DYNAMIC_SHAPE: {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::MEMORY_DOMAIN: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
memoryType = MemoryType::DEVICE;
memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
} break;
case TestKind::FENCED_COMPUTE: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::FENCED};
memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::QUANTIZATION_COUPLING: {
LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
@@ -786,14 +809,17 @@ void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>&
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
// Burst does not support V1_3 loop timeout.
executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
memoryTypeList = {MemoryType::ASHMEM};
} break;
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
for (const MemoryType memoryType : memoryTypeList) {
const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
}
}
}
}
@@ -812,7 +838,7 @@ void EvaluatePreparedCoupledModels(const sp<IDevice>& device,
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::SHARED,
const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
/*reportSkipping=*/false);
bool baseSkipped = false;
EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);

View File

@@ -214,7 +214,8 @@ static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel
}
void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
const Request& request, const ExecutionContext& context, bool synchronous,
DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
const auto deadline = makeDeadline(deadlineBound);
@@ -261,7 +262,7 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
// Retrieve execution results.
ASSERT_TRUE(nn::compliantWithV1_0(request));
const V1_0::Request request10 = nn::convertToV1_0(request);
const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
const std::vector<TestBuffer> outputs = context.getOutputBuffers(request10);
// We want "close-enough" results.
if (status == ErrorStatus::NONE) {
@@ -270,10 +271,11 @@ void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel&
}
void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
const Request& request) {
const Request& request, const ExecutionContext& context) {
for (bool synchronous : {false, true}) {
for (auto deadlineBound : deadlineBounds) {
runExecutionTest(preparedModel, testModel, request, synchronous, deadlineBound);
runExecutionTest(preparedModel, testModel, request, context, synchronous,
deadlineBound);
}
}
}
@@ -291,8 +293,9 @@ void runTests(const sp<IDevice>& device, const TestModel& testModel) {
if (preparedModel == nullptr) return;
// run execution tests
const Request request = nn::convertToV1_3(createRequest(testModel));
runExecutionTests(preparedModel, testModel, request);
ExecutionContext context;
const Request request = nn::convertToV1_3(context.createRequest(testModel));
runExecutionTests(preparedModel, testModel, request, context);
}
class DeadlineTest : public GeneratedTestBase {};

View File

@@ -177,7 +177,8 @@ void validateFailure(const sp<IDevice>& device, const Model& model, const Reques
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
const Request request = nn::convertToV1_3(createRequest(kTestModel));
ExecutionContext context;
const Request request = nn::convertToV1_3(context.createRequest(kTestModel));
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {