Refactor NNAPI VTS to remove unreasonable dependence between versions

To make it easier to create the next version of NNAPI, this change
removes the following unreasonable dependencies:
- NNAPI 1.0 VTS depends on NNAPI 1.1 and 1.2
- NNAPI 1.1 VTS depends on NNAPI 1.2

In particular, I made the following changes:
- split GeneratedTestHarness.cpp into three separate implementations,
- created a restricted version of Callbacks.h for 1.0 and 1.1,
- removed the dependency on frameworks/ml/nn/HalInterfaces.h,
- refactored Android.bp files for more autonomy between 1.0, 1.1, and 1.2,
- consolidated some common code into Utils.h,
- created a structure for sharing code between VTS versions (VtsHalNeuralNetworksV1_0_utils); see the include sketch after the commit metadata below.

Bug: 74827824
Bug: 124462414
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_1CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_1TargetTest
Change-Id: I4243d0b5e574255cef1070850f4d0a284f65f54e
Author: Slava Shklyaev
Date: 2019-05-14 14:15:14 +01:00
Parent: f11a1a9bb4
Commit: 1d6b465997
35 changed files with 1761 additions and 788 deletions
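To make the new layering concrete, here is a minimal, hypothetical sketch (not a file in this change) of what a version-specific test's includes look like after the refactor: everything shared comes from the 1.0 utils library, and nothing reaches forward to a later HAL version.

// Hypothetical include block for a 1.1 test after this change; the exact
// contents are an assumption, but the header paths and the utils library
// name (VtsHalNeuralNetworksV1_0_utils) come from this commit.
#include <android/hardware/neuralnetworks/1.1/IDevice.h>

#include "1.0/Callbacks.h"  // restricted callbacks: no notify_1_2 overloads
#include "1.0/Utils.h"      // copy_back, hidl_vec_removeAt, hidl_vec_push_back

using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_1::IDevice;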

View File

@@ -15,21 +15,19 @@
//
cc_library_static {
name: "VtsHalNeuralnetworksTest_utils",
name: "VtsHalNeuralNetworksV1_0_utils",
srcs: [
"Callbacks.cpp",
"GeneratedTestHarness.cpp",
"Utils.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
export_include_dirs: ["."],
export_include_dirs: ["include"],
shared_libs: [
"libfmq",
"libnativewindow",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
@@ -44,12 +42,13 @@ cc_library_static {
}
cc_defaults {
name: "VtsHalNeuralNetworksTargetTestDefaults",
name: "VtsHalNeuralNetworksV1_0TargetTestDefaults",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"ValidateModel.cpp",
"ValidateRequest.cpp",
"VtsHalNeuralnetworks.cpp",
"GeneratedTestHarness.cpp",
],
shared_libs: [
"libfmq",
@@ -57,14 +56,12 @@ cc_defaults {
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_utils",
"VtsHalNeuralnetworksTest_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
@@ -76,19 +73,19 @@ cc_defaults {
cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_0TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
"GeneratedTestsV1_0.cpp",
],
}
cc_test {
name: "PresubmitHalNeuralnetworksV1_0TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_0TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
"GeneratedTestsV1_0.cpp",
],
cflags: [
"-DPRESUBMIT_NOT_VTS",

View File

@@ -14,13 +14,13 @@
* limitations under the License.
*/
#include "Callbacks.h"
#include "1.0/Callbacks.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_2 {
namespace V1_0 {
namespace implementation {
CallbackBase::CallbackBase() : mNotified(false) {}
@@ -111,14 +111,6 @@ Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
return Void();
}
Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
const sp<V1_2::IPreparedModel>& preparedModel) {
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
CallbackBase::notify();
return Void();
}
ErrorStatus PreparedModelCallback::getStatus() {
wait();
return mErrorStatus;
@@ -135,18 +127,6 @@ ExecutionCallback::~ExecutionCallback() {}
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
mErrorStatus = errorStatus;
mOutputShapes = {};
mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
CallbackBase::notify();
return Void();
}
Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
mErrorStatus = errorStatus;
mOutputShapes = outputShapes;
mTiming = timing;
CallbackBase::notify();
return Void();
}
@@ -156,18 +136,8 @@ ErrorStatus ExecutionCallback::getStatus() {
return mErrorStatus;
}
const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
wait();
return mOutputShapes;
}
Timing ExecutionCallback::getTiming() {
wait();
return mTiming;
}
} // namespace implementation
} // namespace V1_2
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View File

@@ -15,129 +15,47 @@
*/
#include "GeneratedTestHarness.h"
#include "Callbacks.h"
#include "ExecutionBurstController.h"
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <iostream>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::test_helper::bool8;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::for_each;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
char* src) {
for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
char* begin = src + ra[index].location.offset;
memcpy(m.data(), begin, ra[index].location.length);
});
}
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
copy_back_(&dst->float32Operands, ra, src);
copy_back_(&dst->int32Operands, ra, src);
copy_back_(&dst->quant8AsymmOperands, ra, src);
copy_back_(&dst->quant16SymmOperands, ra, src);
copy_back_(&dst->float16Operands, ra, src);
copy_back_(&dst->bool8Operands, ra, src);
copy_back_(&dst->quant8ChannelOperands, ra, src);
copy_back_(&dst->quant16AsymmOperands, ra, src);
copy_back_(&dst->quant8SymmOperands, ra, src);
static_assert(9 == MixedTyped::kNumTypes,
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
}
static bool isZeroSized(const MixedTyped& example, uint32_t index) {
for (auto i : example.operandDimensions.at(index)) {
if (i == 0) return true;
}
return false;
}
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
const Request& request, MeasureTiming,
sp<ExecutionCallback>& callback) {
return preparedModel->execute(request, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
return preparedModel->execute_1_2(request, measure, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>&, const Request&,
MeasureTiming, hidl_vec<OutputShape>*, Timing*) {
ADD_FAILURE() << "asking for synchronous execution at V1_0";
return ErrorStatus::GENERAL_FAILURE;
}
static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
hidl_vec<OutputShape>* outputShapes,
Timing* timing) {
ErrorStatus result;
Return<void> ret = preparedModel->executeSynchronously(
request, measure,
[&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
const Timing& time) {
result = error;
*outputShapes = shapes;
*timing = time;
});
if (!ret.isOk()) {
return ErrorStatus::GENERAL_FAILURE;
}
return result;
}
static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
const sp<V1_0::IPreparedModel>&) {
ADD_FAILURE() << "asking for burst execution at V1_0";
return nullptr;
}
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
const sp<V1_2::IPreparedModel>& preparedModel) {
return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
}
enum class Executor { ASYNC, SYNC, BURST };
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
const float kDefaultAtol = 1e-5f;
const float kDefaultRtol = 1e-5f;
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
Executor executor, MeasureTiming measure, OutputType outputType) {
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples, float fpAtol,
float fpRtol) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
@@ -147,14 +65,7 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
const bool hasFloat16Inputs = !inputs.float16Operands.empty();
if (hasRelaxedFloat32Model || hasFloat16Inputs) {
// TODO: Adjust the error limit based on testing.
// If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
fpAtol = 5.0f * 0.0009765625f;
// Set the relative tolerance to be 5ULP of the corresponding FP precision.
fpRtol = 5.0f * 0.0009765625f;
}
CHECK(inputs.float16Operands.empty()) << "float16 is not supported in 1.0";
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
@@ -163,11 +74,13 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -185,31 +98,17 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
bool sizeLargerThanOne = true;
for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
int index, auto, auto s) {
for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
if (index == 0) {
// On OutputType::INSUFFICIENT, set the output operand with index 0 with
// buffer size one byte less than needed.
if (outputType == OutputType::INSUFFICIENT) {
if (s > 1 && !isZeroSized(golden, index)) {
s -= 1;
} else {
sizeLargerThanOne = false;
}
}
}
RequestArgument arg = {
.location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
// If output0 does not have size larger than one byte,
// we cannot provide an insufficient buffer
if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
@@ -248,107 +147,17 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
switch (executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
ExecutePreparedModel(preparedModel, request, measure, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
executionStatus = executionCallback->getStatus();
outputShapes = executionCallback->getOutputShapes();
timing = executionCallback->getTiming();
break;
}
case Executor::SYNC: {
SCOPED_TRACE("synchronous");
// execute
Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
preparedModel, request, measure, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
break;
}
case Executor::BURST: {
SCOPED_TRACE("burst");
// create burst
const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
CreateBurst(preparedModel);
ASSERT_NE(nullptr, controller.get());
// create memory keys
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
// execute burst
std::tie(executionStatus, outputShapes, timing) =
controller->compute(request, measure, keys);
break;
}
}
if (outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"execute model that it does not support."
<< std::endl;
GTEST_SKIP();
}
if (measure == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
}
}
switch (outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be
// either empty, or have the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_TRUE(outputShapes.size() == 0 ||
outputShapes.size() == test.operandDimensions.size());
break;
case OutputType::UNSPECIFIED:
// If the model output operands are not fully specified, outputShapes must have
// the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
break;
case OutputType::INSUFFICIENT:
ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
ASSERT_FALSE(outputShapes[0].isSufficient);
return;
}
// Go through all outputs, overwrite output dimensions with returned output shapes
if (outputShapes.size() > 0) {
for_each<uint32_t>(test.operandDimensions,
[&outputShapes](int idx, std::vector<uint32_t>& dim) {
dim = outputShapes[idx].dimensions;
});
}
// retrieve execution status
executionCallback->wait();
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// validate results
outputMemory->read();
@@ -360,89 +169,22 @@ void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bo
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
if (example.expectedMultinomialDistributionTolerance > 0) {
expectMultinomialDistributionWithinTolerance(test, example);
}
}
}
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
OutputType outputType) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
kDefaultRtol, executor, measure, outputType);
}
void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
if (testDynamicOutputShape) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
} else {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
}
}
static void getPreparedModel(sp<PreparedModelCallback> callback,
sp<V1_0::IPreparedModel>* preparedModel) {
*preparedModel = callback->getPreparedModel();
}
static void getPreparedModel(sp<PreparedModelCallback> callback,
sp<V1_2::IPreparedModel>* preparedModel) {
sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
*preparedModel = V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
}
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
V1_0::Model model = create_model();
Model model = create_model();
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel = std::all_of(supported.begin(), supported.end(),
[](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
@@ -455,8 +197,7 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<V1_0::IPreparedModel> preparedModel;
getPreparedModel(preparedModelCallback, &preparedModel);
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -472,115 +213,10 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
ASSERT_NE(nullptr, preparedModel.get());
float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
EvaluatePreparedModel(preparedModel, is_ignored, examples,
/*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Executor::ASYNC,
MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
}
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
V1_1::Model model = create_model();
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations_1_1(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<V1_0::IPreparedModel> preparedModel;
getPreparedModel(preparedModelCallback, &preparedModel);
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
GTEST_SKIP();
}
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Executor::ASYNC,
MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
}
void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
sp<V1_2::IPreparedModel>* preparedModel) {
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations_1_2(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
getPreparedModel(preparedModelCallback, preparedModel);
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel->get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
return;
}
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel->get());
}
// TODO: Reduce code duplication.
void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
bool testDynamicOutputShape) {
V1_2::Model model = create_model();
sp<V1_2::IPreparedModel> preparedModel = nullptr;
PrepareModel(device, model, &preparedModel);
if (preparedModel == nullptr) {
GTEST_SKIP();
}
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
}
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View File

@@ -14,14 +14,11 @@
* limitations under the License.
*/
#ifndef VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
#define VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
#include "TestHarness.h"
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include "TestHarness.h"
namespace android {
namespace hardware {
@@ -30,28 +27,13 @@ namespace neuralnetworks {
namespace generated_tests {
using ::test_helper::MixedTypedExample;
void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
sp<V1_2::IPreparedModel>* preparedModel);
void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
bool testDynamicOutputShape = false);
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H

View File

@@ -16,17 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -34,8 +33,9 @@ namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <cstring>
#include <map>
#include <vector>
namespace android {
namespace hardware {
namespace neuralnetworks {
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::test_helper::for_each;
using ::test_helper::MixedTyped;
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
char* src) {
for_each<T>(*dst, [&ra, src](int index, std::vector<T>& m) {
ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
char* begin = src + ra[index].location.offset;
memcpy(m.data(), begin, ra[index].location.length);
});
}
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
copy_back_(&dst->float32Operands, ra, src);
copy_back_(&dst->int32Operands, ra, src);
copy_back_(&dst->quant8AsymmOperands, ra, src);
copy_back_(&dst->quant16SymmOperands, ra, src);
copy_back_(&dst->float16Operands, ra, src);
copy_back_(&dst->bool8Operands, ra, src);
copy_back_(&dst->quant8ChannelOperands, ra, src);
copy_back_(&dst->quant16AsymmOperands, ra, src);
copy_back_(&dst->quant8SymmOperands, ra, src);
static_assert(9 == MixedTyped::kNumTypes,
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
}
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
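As a usage note, here is a hedged sketch of how the harness consumes copy_back after an execution; the buffer and descriptor names mirror the ones used in GeneratedTestHarness.cpp.

// Sketch modeled on GeneratedTestHarness.cpp: after execution completes,
// map the output pool and copy each typed operand back into `test`.
MixedTyped test;                             // holds results per operand type
outputMemory->read();                        // begin reading the shared pool
copy_back(&test, outputs_info, outputPtr);   // per-type memcpy from the pool
outputMemory->commit();                      // finish the read transaction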

View File

@@ -18,7 +18,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "1.0/Callbacks.h"
namespace android {
namespace hardware {
@@ -27,8 +27,8 @@ namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////

View File

@@ -16,16 +16,15 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -33,7 +32,7 @@ namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -121,11 +120,13 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -143,8 +144,10 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;

View File

@@ -20,7 +20,7 @@
#include <android-base/logging.h>
#include "Callbacks.h"
#include "1.0/Callbacks.h"
namespace android {
namespace hardware {
@@ -29,7 +29,7 @@ namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
sp<IPreparedModel>* preparedModel) {

View File

@@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
@@ -89,4 +89,4 @@ namespace android::hardware::neuralnetworks::V1_0 {
} // namespace android::hardware::neuralnetworks::V1_0
#endif // VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H

View File

@@ -0,0 +1,326 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
/**
* The CallbackBase class is used internally by the NeuralNetworks runtime to
* synchronize between different threads. An asynchronous task is launched
* paired with a callback object. When a client thread requires the output being
* generated by the asynchronous task, the client thread can wait for the result
* and be blocked until it has completed or a timeout condition has been
* reached. Any wait* may safely be called concurrently, even on the same
* callback object. When the asynchronous task has finished its workload, it
* must immediately call "notify". If the asynchronous task has failed to launch,
* the function that tried to launch the asynchronous task must immediately call
* "notify". This "notify" call awakens any client threads waiting on the
* callback object.
*
* The CallbackBase class implements some of the base synchronization common to
* both PrepareModelCallback and ExecutionCallback. For consistency, any HIDL
* callback class must inherit from CallbackBase as well as the HIDL callback
* interface it implements.
*
* This class exists to enable synchronization across HIDL. When synchronization
* is only required in the same process, consider using std::future, std::mutex,
* std::condition_variable, or std::experimental::latch instead.
*/
class CallbackBase {
public:
CallbackBase();
~CallbackBase();
/**
* CallbackBase::wait blocks until notify has been called on the callback
* object.
*/
void wait();
/**
* CallbackBase::wait_for blocks until notify has been called on the
* callback object or the time duration from the time the wait_for function
* was called has expired, whichever comes first.
*
* @return Status std::cv_status::no_timeout if the callback was notified
* before the time duration expired, std::cv_status::timeout
* otherwise.
*/
template <class Rep, class Period>
std::cv_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration);
/**
* CallbackBase::on_finish binds a function to the callback object. This
* bound function will be executed when CallbackBase::notify is called,
* before any calls to wait* return. (Note that CallbackBase::wait_for can
* return std::cv_status::timeout before CallbackBase::notify is called for
* the first time, and hence before the bound function is executed.)
*
* The bound function must not synchronize with or otherwise access the
* callback object it is bound to, as this could cause a deadlock.
*
* CallbackBase::on_finish can be called at most once on a given callback
* object, and the call to CallbackBase::on_finish must finish before
* CallbackBase::notify is called.
*
* @param post_work Function to be invoked the first time
* CallbackBase::notify is called. Must have a target --
* i.e., must not compare equal to nullptr. post_work
* returns true if it successfully completes, false if it
* fails.
* @return bool True if the function was successfully bound, false if
* unsuccessful.
*
* TODO: Why does the return value of the callback matter?
*/
bool on_finish(std::function<bool(void)> post_work);
/**
* CallbackBase::bind_thread binds a thread to the event for later use by
* CallbackBase::join_thread.
*
* The thread must be passed using std::move.
*
* Once a thread is bound with CallbackBase::bind_thread, the client code
* should ensure that one of the following occurs before the event is
* destroyed:
* - CallbackBase::join_thread has been called.
* - CallbackBase::wait has been called.
* - CallbackBase::wait_for has been called and returned other than
* std::cv_status::no_timeout.
*
* The bound thread shall not call any CallbackBase method with the
* exception of CallbackBase::notify, which it must call when the thread has
* finished its computation.
*
* CallbackBase::bind_thread can be called at most once on a given callback
* object.
*
* @param asyncThread Thread to be bound to the callback object. The thread
* object must represent a thread of execution -- i.e.,
* asyncThread.joinable() must be true.
* @return bool True if successful, false if thread was not properly bound.
*/
bool bind_thread(std::thread&& asyncThread);
/**
* CallbackBase::join_thread ensures that the thread (if any) bound to this
* event with CallbackBase::bind_thread has fully finished and cleaned its
* resources. It is legal to call this function multiple times, concurrently
* or sequentially.
*/
void join_thread();
protected:
/**
* CallbackBase::notify enables all prior and future wait* calls on the
* callback object to proceed. The call to CallbackBase::notify happens
* before any wait* calls on this callback object return (except in the case
* of wait_for timing out). The asynchronous call the callback object is
* paired with must ensure that any update to state that should be visible
* to the caller of wait* happens before the call to CallbackBase::notify.
*
* CallbackBase::notify must be called exactly once on a given callback
* object.
*/
void notify();
private:
// Same as CallbackBase::join_thread but assumes we already hold a lock on
// mMutex.
void join_thread_locked();
bool mNotified;
std::mutex mMutex;
std::condition_variable mCondition;
std::function<bool(void)> mPostWork;
std::thread mThread;
};
/**
* The PreparedModelCallback class is used to receive the error status of
* preparing a model as well as the prepared model from a task executing
* asynchronously with respect to the runtime. If a calling thread calls wait*
* or get* on a PreparedModelCallback object and the corresponding asynchronous
* task has not finished preparing the model, the calling thread will block
* until the asynchronous task has called notify. For more information on the
* synchronization behavior, refer to the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
* CallbackBase, and implements the HIDL notify call from
* IPreparedModelCallback. This callback object is passed as an argument to
* IDevice::prepareModel.
*/
class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback {
public:
PreparedModelCallback();
~PreparedModelCallback() override;
/**
* IPreparedModelCallback::notify marks the callback object with the return
* status of the asynchronous model preparation along with the prepared
* model and calls CallbackBase::notify, enabling all prior and future
* wait* calls on the PreparedModelCallback object to proceed.
* For more information on the synchronization behavior, refer to the
* CallbackBase class.
*
* IPreparedModelCallback::notify must be called exactly once on a given
* PreparedModelCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
/**
* Retrieves the error status returned from the asynchronous task launched
* by IDevice::prepareModel. If IDevice::prepareModel has not finished
* asynchronously preparing the model, this call will block until the
* asynchronous task notifies the object.
*
* @return status Error status returned from asynchronously preparing the
* model; will be:
* - NONE if the asynchronous preparation was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - INVALID_ARGUMENT if the input model is invalid
*/
ErrorStatus getStatus();
/**
* Retrieves the model that has been prepared for execution from the
* asynchronous task launched by IDevice::prepareModel. If
* IDevice::prepareModel has not finished asynchronously preparing the
* model, this call will block until the asynchronous task notifies the
* object.
*
* @return preparedModel Returned model that has been prepared for
* execution, nullptr if the model was unable to be
* prepared.
*/
sp<V1_0::IPreparedModel> getPreparedModel();
private:
ErrorStatus mErrorStatus;
sp<V1_0::IPreparedModel> mPreparedModel;
};
/**
* The ExecutionCallback class is used to receive the error status of the
* execution from a task executing asynchronously with respect to the runtime.
* If a calling thread calls wait* or get* on an ExecutionCallback object and
* the corresponding asynchronous task has not finished the execution, the
* calling thread will block until the asynchronous task has called notify.
* For more information on the synchronization behavior, refer to the
* CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
* CallbackBase, and implements the HIDL notify call from IExecutionCallback.
* This callback object is passed as an argument to IPreparedModel::execute.
*/
class ExecutionCallback : public CallbackBase, public IExecutionCallback {
public:
ExecutionCallback();
~ExecutionCallback() override;
/**
* IExecutionCallback::notify marks the callback object with the return
* status of the asynchronous execution that held this callback and enables
* all prior and future wait* calls on the ExecutionCallback object to
* proceed. For more information on the synchronization behavior, refer to
* the CallbackBase class.
*
* IExecutionCallback::notify must be called exactly once on a given
* ExecutionCallback object.
*
* @param status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
* (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
* not large enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(ErrorStatus status) override;
/**
* Retrieves the error status returned from the asynchronous task launched
* by IPreparedModel::execute. If IPreparedModel::execute has not finished
* asynchronously executing, this call will block until the asynchronous
* task notifies the object.
*
* @return status Error status returned from launching the asynchronous task
* (if the launch fails) or from the asynchronous task itself
* (if the launch succeeds). Must be:
* - NONE if the asynchronous execution was successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if the asynchronous task resulted in an
* unspecified error
* - OUTPUT_INSUFFICIENT_SIZE if at least one output
* operand buffer is not large enough to store the
* corresponding output
* - INVALID_ARGUMENT if one of the input arguments to
* execute is invalid
*/
ErrorStatus getStatus();
private:
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
};
// template function implementation(s) below this point
template <class Rep, class Period>
std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep, Period>& timeout_duration) {
std::unique_lock<std::mutex> lock(mMutex);
// condition_variable::wait_for with a predicate returns bool, so map the
// result onto std::cv_status explicitly.
const bool notified =
        mCondition.wait_for(lock, timeout_duration, [this] { return mNotified; });
const std::cv_status status =
        notified ? std::cv_status::no_timeout : std::cv_status::timeout;
if (status != std::cv_status::timeout) {
join_thread_locked();
}
return status;
}
} // namespace implementation
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
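To make the wait/notify contract above concrete, here is a hedged caller-side sketch modeled on Execute() in GeneratedTestHarness.cpp; `device` and `model` are assumed to be provided by the surrounding test.

// Launch asynchronous model preparation with a PreparedModelCallback.
sp<PreparedModelCallback> callback = new PreparedModelCallback();
Return<ErrorStatus> launchStatus = device->prepareModel(model, callback);
ASSERT_TRUE(launchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(launchStatus));

// Block until the service calls notify(), then read the results.
callback->wait();
EXPECT_EQ(ErrorStatus::NONE, callback->getStatus());
sp<IPreparedModel> preparedModel = callback->getPreparedModel();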

View File

@@ -0,0 +1,56 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <algorithm>
#include <vector>
#include "TestHarness.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
void copy_back(::test_helper::MixedTyped* dst, const std::vector<V1_0::RequestArgument>& ra,
char* src);
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
if (vec) {
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
vec->resize(vec->size() - 1);
}
}
template <typename Type>
inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
// assume vec is valid
const uint32_t index = vec->size();
vec->resize(index + 1);
(*vec)[index] = value;
return index;
}
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
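A hedged worked example of the rotate-and-resize removal described above (the values are illustrative only):

// hidl_vec_removeAt rotates element `index` to the back, then shrinks the
// vector by one; the surviving elements keep their relative order.
hidl_vec<uint32_t> vec = {10, 20, 30};
hidl_vec_removeAt(&vec, 1);                         // vec is now {10, 30}
const uint32_t idx = hidl_vec_push_back(&vec, 40);  // vec is now {10, 30, 40}
// idx == 2: the index where the new element was appended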

View File

@@ -14,10 +14,41 @@
// limitations under the License.
//
cc_defaults {
name: "VtsHalNeuralNetworksV1_1TargetTestDefaults",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"ValidateModel.cpp",
"ValidateRequest.cpp",
"VtsHalNeuralnetworks.cpp",
"GeneratedTestHarness.cpp",
],
shared_libs: [
"libfmq",
"libnativewindow",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
test_suites: ["general-tests"],
}
// Tests for V1_0 models using the V1_1 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_1CompatV1_0TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
srcs: [
"GeneratedTestsV1_0.cpp",
],
@@ -26,19 +57,19 @@ cc_test {
// Tests for V1_1 models.
cc_test {
name: "VtsHalNeuralnetworksV1_1TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
"GeneratedTestsV1_1.cpp",
],
}
cc_test {
name: "PresubmitHalNeuralnetworksV1_1TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
"GeneratedTestsV1_1.cpp",
],
cflags: [
"-DPRESUBMIT_NOT_VTS",

View File

@@ -0,0 +1,232 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GeneratedTestHarness.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <iostream>
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, float fpAtol, float fpRtol) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
int example_no = 1;
for (auto& example : examples) {
SCOPED_TRACE(example_no++);
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
const bool hasFloat16Inputs = !inputs.float16Operands.empty();
if (hasRelaxedFloat32Model || hasFloat16Inputs) {
// TODO: Adjust the error limit based on testing.
// If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
fpAtol = 5.0f * 0.0009765625f;
// Set the relative tolerance to be 5ULP of the corresponding FP precision.
fpRtol = 5.0f * 0.0009765625f;
}
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
// Compute offset for inputs 1 and so on
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
}
}
MixedTyped test; // holding test results
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
        // Compute offsets for output 1 onward (output 0 stays at offset 0)
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
ASSERT_NE(0ull, pools[INPUT].size());
ASSERT_NE(0ull, pools[OUTPUT].size());
// load data
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
ASSERT_NE(nullptr, inputMemory.get());
ASSERT_NE(nullptr, outputMemory.get());
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
ASSERT_NE(nullptr, inputPtr);
ASSERT_NE(nullptr, outputPtr);
inputMemory->update();
outputMemory->update();
// Go through all inputs, copy the values
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
outputMemory->commit();
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// validate results
outputMemory->read();
copy_back(&test, outputs_info, outputPtr);
outputMemory->commit();
// Filter out don't cares
MixedTyped filtered_golden = filter(golden, is_ignored);
MixedTyped filtered_test = filter(test, is_ignored);
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
}
}
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
Model model = create_model();
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations_1_1(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel = std::all_of(supported.begin(), supported.end(),
[](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
GTEST_SKIP();
}
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f);
}
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View File

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <functional>
#include <vector>
#include "TestHarness.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace generated_tests {
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
std::function<bool(int)> is_ignored,
const std::vector<::test_helper::MixedTypedExample>& examples);
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H

View File

@@ -16,17 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -34,8 +33,10 @@ namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

View File

@@ -16,17 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -34,8 +33,10 @@ namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;

View File

@@ -16,25 +16,22 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
using V1_0::IPreparedModel;
using V1_0::Operand;
using V1_0::OperandLifeTime;
using V1_0::OperandType;
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -42,10 +39,10 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
const V1_1::Model& model) {
SCOPED_TRACE(message + " [getSupportedOperations_1_1]");
Return<void> ret =
device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
});
Return<void> ret = device->getSupportedOperations_1_1(
model, [&](ErrorStatus status, const hidl_vec<bool>&) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
});
EXPECT_TRUE(ret.isOk());
}
@@ -56,7 +53,7 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preference, preparedModelCallback);
device->prepareModel_1_1(model, preference, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -87,36 +84,16 @@ static void validate(const sp<IDevice>& device, const std::string& message, V1_1
validatePrepareModel(device, message, model, preference);
}
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
if (vec) {
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
vec->resize(vec->size() - 1);
}
}
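// Example (editor's sketch): for a hidl_vec holding {a, b, c, d},
// hidl_vec_removeAt(&vec, 1) first rotates b to the end, giving {a, c, d, b},
// then resizes to {a, c, d}, emulating erase without a native remove operation.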
template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
// assume vec is valid
const uint32_t index = vec->size();
vec->resize(index + 1);
(*vec)[index] = value;
return index;
}
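// Example (editor's sketch): calling hidl_vec_push_back(&vec, x) on a vec of
// size n grows it to n + 1, stores x in the new last slot, and returns n, the
// index where x now lives.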
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
{
.type = OperandType::INT32,
.dimensions = {},
.numberOfConsumers = 0,
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_INPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
.type = OperandType::INT32,
.dimensions = {},
.numberOfConsumers = 0,
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_INPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
});
}
@@ -130,10 +107,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const int32_t invalidOperandTypes[] = {
static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
};
static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
@@ -226,7 +203,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const std::vector<int32_t> invalidZeroPoints =
getInvalidZeroPoints(model.operands[operand].type);
getInvalidZeroPoints(model.operands[operand].type);
for (int32_t invalidZeroPoint : invalidZeroPoints) {
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
@@ -258,18 +235,18 @@ static void mutateOperand(Operand* operand, OperandType type) {
break;
case OperandType::TENSOR_FLOAT32:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_INT32:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
break;
case OperandType::OEM:
@@ -319,10 +296,10 @@ static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1
///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
static const int32_t invalidOperationTypes[] = {
static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
static_cast<int32_t>(OperationType::TRANSPOSE) + 1, // upper bound fundamental
static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
static_cast<int32_t>(OperationType::TRANSPOSE) + 1, // upper bound fundamental
static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
};
static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
@@ -333,7 +310,7 @@ static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model
std::to_string(invalidOperationType);
validate(device, message, model, [operation, invalidOperationType](Model* model) {
model->operations[operation].type =
static_cast<OperationType>(invalidOperationType);
static_cast<OperationType>(invalidOperationType);
});
}
}
@@ -486,7 +463,7 @@ static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model&
static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
"addOperationOutputTest: operation " + std::to_string(operation);
"addOperationOutputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -498,14 +475,14 @@ static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model&
///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
static const int32_t invalidExecutionPreferences[] = {
static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1::Model& model) {
for (int32_t preference : invalidExecutionPreferences) {
const std::string message =
"mutateExecutionPreferenceTest: preference " + std::to_string(preference);
"mutateExecutionPreferenceTest: preference " + std::to_string(preference);
validate(device, message, model, [](Model*) {},
static_cast<ExecutionPreference>(preference));
}

View File

@@ -16,16 +16,16 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -33,11 +33,15 @@ namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;
using ::test_helper::for_all;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -61,26 +65,6 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
if (vec) {
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
vec->resize(vec->size() - 1);
}
}
template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
// assume vec is valid
const uint32_t index = vec->size();
vec->resize(index + 1);
(*vec)[index] = value;
return index;
}
///////////////////////// REMOVE INPUT ////////////////////////////////////
static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
@@ -121,11 +105,13 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -143,8 +129,10 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;

View File

@@ -20,7 +20,7 @@
#include <android-base/logging.h>
#include "Callbacks.h"
#include "1.0/Callbacks.h"
namespace android {
namespace hardware {
@@ -29,7 +29,7 @@ namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
sp<IPreparedModel>* preparedModel) {

View File

@@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
#define VTS_HAL_NEURALNETWORKS_V1_1_H
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
@@ -98,4 +98,4 @@ namespace android::hardware::neuralnetworks::V1_0 {
} // namespace android::hardware::neuralnetworks::V1_0
#endif // VTS_HAL_NEURALNETWORKS_V1_1_H
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H

View File

@@ -14,10 +14,44 @@
// limitations under the License.
//
cc_defaults {
name: "VtsHalNeuralNetworksV1_2TargetTestDefaults",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"ValidateModel.cpp",
"ValidateRequest.cpp",
"VtsHalNeuralnetworks.cpp",
"Callbacks.cpp",
"GeneratedTestHarness.cpp",
],
local_include_dirs: ["include"],
shared_libs: [
"libfmq",
"libnativewindow",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
"libhidlmemory",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
test_suites: ["general-tests"],
}
// Tests for V1_0 models using the V1_2 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_2CompatV1_0TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"GeneratedTestsV1_0.cpp",
"ValidateBurst.cpp",
@@ -30,7 +64,7 @@ cc_test {
// Tests for V1_1 models using the V1_2 HAL.
cc_test {
name: "VtsHalNeuralnetworksV1_2CompatV1_1TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"GeneratedTestsV1_1.cpp",
"ValidateBurst.cpp",
@@ -43,11 +77,11 @@ cc_test {
// Tests for V1_2 models.
cc_test {
name: "VtsHalNeuralnetworksV1_2TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"CompilationCachingTests.cpp",
"GeneratedTests.cpp",
"GeneratedTestsV1_2.cpp",
"ValidateBurst.cpp",
],
cflags: [
@@ -57,10 +91,10 @@ cc_test {
cc_test {
name: "PresubmitHalNeuralnetworksV1_2TargetTest",
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
"GeneratedTestsV1_2.cpp",
"ValidateBurst.cpp",
],
cflags: [

View File

@@ -0,0 +1,173 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "1.2/Callbacks.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_2 {
namespace implementation {
CallbackBase::CallbackBase() : mNotified(false) {}
CallbackBase::~CallbackBase() {
// Note that we cannot call CallbackBase::join_thread from here:
// CallbackBase is intended to be reference counted, and it is possible that
// the reference count drops to zero in the bound thread, causing the
// bound thread to call this destructor. If a thread tries to join
// itself, it throws an exception, producing a message like the
// following:
//
// terminating with uncaught exception of type std::__1::system_error:
// thread::join failed: Resource deadlock would occur
}
void CallbackBase::wait() {
std::unique_lock<std::mutex> lock(mMutex);
mCondition.wait(lock, [this] { return mNotified; });
join_thread_locked();
}
bool CallbackBase::on_finish(std::function<bool(void)> post_work) {
std::lock_guard<std::mutex> lock(mMutex);
if (mPostWork != nullptr) {
LOG(ERROR) << "CallbackBase::on_finish -- a post-work function has already been bound to "
"this callback object";
return false;
}
if (post_work == nullptr) {
LOG(ERROR) << "CallbackBase::on_finish -- the new post-work function is invalid";
return false;
}
mPostWork = std::move(post_work);
return true;
}
bool CallbackBase::bind_thread(std::thread&& asyncThread) {
std::lock_guard<std::mutex> lock(mMutex);
if (mThread.joinable()) {
LOG(ERROR) << "CallbackBase::bind_thread -- a thread has already been bound to this "
"callback object";
return false;
}
if (!asyncThread.joinable()) {
LOG(ERROR) << "CallbackBase::bind_thread -- the new thread is not joinable";
return false;
}
mThread = std::move(asyncThread);
return true;
}
void CallbackBase::join_thread() {
std::lock_guard<std::mutex> lock(mMutex);
join_thread_locked();
}
void CallbackBase::notify() {
{
std::lock_guard<std::mutex> lock(mMutex);
mNotified = true;
if (mPostWork != nullptr) {
bool success = mPostWork();
if (!success) {
LOG(ERROR) << "CallbackBase::notify -- post work failed";
}
}
}
mCondition.notify_all();
}
void CallbackBase::join_thread_locked() {
if (mThread.joinable()) {
mThread.join();
}
}
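// Usage sketch (editor's illustration; not part of the original change):
//
//     sp<ExecutionCallback> cb = new ExecutionCallback();
//     cb->on_finish([] { return true; });  // optional post-work, runs inside notify()
//     cb->bind_thread(std::thread([cb] { cb->notify(ErrorStatus::NONE); }));
//     cb->wait();  // blocks until notify() fires, then joins the bound thread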
PreparedModelCallback::PreparedModelCallback()
: mErrorStatus(ErrorStatus::GENERAL_FAILURE), mPreparedModel(nullptr) {}
PreparedModelCallback::~PreparedModelCallback() {}
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
const sp<V1_0::IPreparedModel>& preparedModel) {
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
CallbackBase::notify();
return Void();
}
Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
const sp<V1_2::IPreparedModel>& preparedModel) {
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
CallbackBase::notify();
return Void();
}
ErrorStatus PreparedModelCallback::getStatus() {
wait();
return mErrorStatus;
}
sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
wait();
return mPreparedModel;
}
ExecutionCallback::ExecutionCallback() : mErrorStatus(ErrorStatus::GENERAL_FAILURE) {}
ExecutionCallback::~ExecutionCallback() {}
Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
mErrorStatus = errorStatus;
mOutputShapes = {};
mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
CallbackBase::notify();
return Void();
}
Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
mErrorStatus = errorStatus;
mOutputShapes = outputShapes;
mTiming = timing;
CallbackBase::notify();
return Void();
}
ErrorStatus ExecutionCallback::getStatus() {
wait();
return mErrorStatus;
}
const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
wait();
return mOutputShapes;
}
Timing ExecutionCallback::getTiming() {
wait();
return mTiming;
}
} // namespace implementation
} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View File

@@ -27,8 +27,9 @@
#include <cstdlib>
#include <random>
#include "Callbacks.h"
#include "1.2/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

View File

@@ -0,0 +1,452 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GeneratedTestHarness.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <iostream>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
using ::android::hardware::neuralnetworks::V1_2::IDevice;
using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_2::Model;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::for_each;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
static bool isZeroSized(const MixedTyped& example, uint32_t index) {
for (auto i : example.operandDimensions.at(index)) {
if (i == 0) return true;
}
return false;
}
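// Editor's note: isZeroSized guards the OutputType::INSUFFICIENT shrink in
// EvaluatePreparedModel below; an output with any zero dimension holds no data,
// so its buffer cannot be made one byte too small.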
static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
return preparedModel->execute_1_2(request, measure, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
hidl_vec<OutputShape>* outputShapes,
Timing* timing) {
ErrorStatus result;
Return<void> ret = preparedModel->executeSynchronously(
request, measure,
[&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
const Timing& time) {
result = error;
*outputShapes = shapes;
*timing = time;
});
if (!ret.isOk()) {
return ErrorStatus::GENERAL_FAILURE;
}
return result;
}
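// Editor's note: the two ExecutePreparedModel overloads let the test body below
// dispatch on argument shape: passing a callback selects the asynchronous
// execute_1_2 path, while passing outputShapes/timing out-parameters selects the
// synchronous executeSynchronously path.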
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
const sp<IPreparedModel>& preparedModel) {
return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
}
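// Editor's assumption (not stated in the original change): /*blocking=*/true is
// understood to request blocking reads on the burst's fast message queues rather
// than polling.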
enum class Executor { ASYNC, SYNC, BURST };
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
const float kDefaultAtol = 1e-5f;
const float kDefaultRtol = 1e-5f;
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
Executor executor, MeasureTiming measure, OutputType outputType) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
int example_no = 1;
for (auto& example : examples) {
SCOPED_TRACE(example_no++);
const MixedTyped& inputs = example.operands.first;
const MixedTyped& golden = example.operands.second;
const bool hasFloat16Inputs = !inputs.float16Operands.empty();
if (hasRelaxedFloat32Model || hasFloat16Inputs) {
// TODO: Adjust the error limit based on testing.
// If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
fpAtol = 5.0f * 0.0009765625f;
// Set the relative tolerance to be 5ULP of the corresponding FP precision.
fpRtol = 5.0f * 0.0009765625f;
}
std::vector<RequestArgument> inputs_info, outputs_info;
uint32_t inputSize = 0, outputSize = 0;
// This function only partially specifies the metadata (vector of RequestArguments).
// The contents are copied over below.
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
});
        // Compute offsets for input 1 onward (input 0 stays at offset 0)
{
size_t offset = 0;
for (auto& i : inputs_info) {
if (!i.hasNoValue) i.location.offset = offset;
offset += i.location.length;
}
}
MixedTyped test; // holding test results
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
bool sizeLargerThanOne = true;
for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
if (index == 0) {
                // For OutputType::INSUFFICIENT, make the buffer of output
                // operand 0 one byte smaller than it needs to be.
if (outputType == OutputType::INSUFFICIENT) {
if (s > 1 && !isZeroSized(golden, index)) {
s -= 1;
} else {
sizeLargerThanOne = false;
}
}
}
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
});
        // If output 0 is not larger than one byte, we cannot provide an
        // insufficient buffer, so there is nothing to test.
if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
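        // Concrete case (editor's sketch): if output 0 needs 40 bytes, the request
        // offers only 39; a conforming driver must then fail with
        // ErrorStatus::OUTPUT_INSUFFICIENT_SIZE and report isSufficient == false,
        // exactly what the INSUFFICIENT branch of the switch below asserts.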
        // Compute offsets for output 1 onward (output 0 stays at offset 0)
{
size_t offset = 0;
for (auto& i : outputs_info) {
i.location.offset = offset;
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
ASSERT_NE(0ull, pools[INPUT].size());
ASSERT_NE(0ull, pools[OUTPUT].size());
// load data
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
ASSERT_NE(nullptr, inputMemory.get());
ASSERT_NE(nullptr, outputMemory.get());
char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
ASSERT_NE(nullptr, inputPtr);
ASSERT_NE(nullptr, outputPtr);
inputMemory->update();
outputMemory->update();
// Go through all inputs, copy the values
for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
char* begin = (char*)p;
char* end = begin + s;
// TODO: handle more than one input
std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
});
inputMemory->commit();
outputMemory->commit();
const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
switch (executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executionLaunchStatus =
ExecutePreparedModel(preparedModel, request, measure, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
// retrieve execution status
executionCallback->wait();
executionStatus = executionCallback->getStatus();
outputShapes = executionCallback->getOutputShapes();
timing = executionCallback->getTiming();
break;
}
case Executor::SYNC: {
SCOPED_TRACE("synchronous");
// execute
Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
preparedModel, request, measure, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
break;
}
case Executor::BURST: {
SCOPED_TRACE("burst");
// create burst
const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
CreateBurst(preparedModel);
ASSERT_NE(nullptr, controller.get());
// create memory keys
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}
// execute burst
std::tie(executionStatus, outputShapes, timing) =
controller->compute(request, measure, keys);
break;
}
}
if (outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"execute model that it does not support."
<< std::endl;
GTEST_SKIP();
}
if (measure == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
}
}
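        // Example (editor's note): timing of {.timeOnDevice = 100, .timeInDriver = 150},
        // in microseconds, satisfies the check above; UINT64_MAX in a field means the
        // duration was not measured, so the ordering is not enforced for it.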
switch (outputType) {
case OutputType::FULLY_SPECIFIED:
                // If the model output operands are fully specified, outputShapes must
                // be either empty or have the same number of elements as the number
                // of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_TRUE(outputShapes.size() == 0 ||
outputShapes.size() == test.operandDimensions.size());
break;
case OutputType::UNSPECIFIED:
// If the model output operands are not fully specified, outputShapes must have
// the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
break;
case OutputType::INSUFFICIENT:
ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
ASSERT_FALSE(outputShapes[0].isSufficient);
return;
}
// Go through all outputs, overwrite output dimensions with returned output shapes
if (outputShapes.size() > 0) {
for_each<uint32_t>(test.operandDimensions,
[&outputShapes](int idx, std::vector<uint32_t>& dim) {
dim = outputShapes[idx].dimensions;
});
}
// validate results
outputMemory->read();
copy_back(&test, outputs_info, outputPtr);
outputMemory->commit();
// Filter out don't cares
MixedTyped filtered_golden = filter(golden, is_ignored);
MixedTyped filtered_test = filter(test, is_ignored);
// We want "close-enough" results for float
compare(filtered_golden, filtered_test, fpAtol, fpRtol);
if (example.expectedMultinomialDistributionTolerance > 0) {
expectMultinomialDistributionWithinTolerance(test, example);
}
}
}
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
OutputType outputType) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
kDefaultRtol, executor, measure, outputType);
}
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
if (testDynamicOutputShape) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
} else {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
}
}
void PrepareModel(const sp<IDevice>& device, const Model& model,
sp<IPreparedModel>* preparedModel) {
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations_1_2(
model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel = std::all_of(supported.begin(), supported.end(),
[](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
*preparedModel = IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
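    // Editor's note: the callback hands back the prepared model typed as
    // V1_0::IPreparedModel (see getPreparedModel in 1.2/Callbacks.cpp), so castFrom
    // performs a checked HIDL downcast to the 1.2 interface and withDefault(nullptr)
    // substitutes nullptr when the service object does not implement it.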
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel->get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
return;
}
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel->get());
}
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
bool testDynamicOutputShape) {
Model model = create_model();
sp<IPreparedModel> preparedModel = nullptr;
PrepareModel(device, model, &preparedModel);
if (preparedModel == nullptr) {
GTEST_SKIP();
}
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
}
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

View File

@@ -0,0 +1,51 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <functional>
#include <vector>
#include "TestHarness.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace generated_tests {
using ::test_helper::MixedTypedExample;
void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
sp<V1_2::IPreparedModel>* preparedModel);
void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
bool testDynamicOutputShape = false);
} // namespace generated_tests
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H

View File

@@ -16,17 +16,17 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.2/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {

View File

@@ -16,17 +16,17 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.2/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {

View File

@@ -16,17 +16,17 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.2/Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {

View File

@@ -18,7 +18,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "ExecutionBurstServer.h"
#include "TestHarness.h"

View File

@@ -16,10 +16,10 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -41,10 +41,10 @@ static void validateGetSupportedOperations(const sp<IDevice>& device, const std:
const Model& model) {
SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
Return<void> ret =
device->getSupportedOperations_1_2(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
});
Return<void> ret = device->getSupportedOperations_1_2(
model, [&](ErrorStatus status, const hidl_vec<bool>&) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
});
EXPECT_TRUE(ret.isOk());
}
@@ -87,36 +87,16 @@ static void validate(const sp<IDevice>& device, const std::string& message, Mode
validatePrepareModel(device, message, model, preference);
}
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
if (vec) {
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
vec->resize(vec->size() - 1);
}
}
template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
// assume vec is valid
const uint32_t index = vec->size();
vec->resize(index + 1);
(*vec)[index] = value;
return index;
}
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
{
.type = OperandType::INT32,
.dimensions = {},
.numberOfConsumers = 0,
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_INPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
.type = OperandType::INT32,
.dimensions = {},
.numberOfConsumers = 0,
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_INPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
});
}
@@ -243,7 +223,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
case OperandType::TENSOR_QUANT8_ASYMM:
return {-1, 256};
case OperandType::TENSOR_QUANT8_SYMM:
return {-129, -1, 1, 128};
return {-129, -1, 1, 128};
case OperandType::TENSOR_QUANT16_ASYMM:
return {-1, 65536};
case OperandType::TENSOR_QUANT16_SYMM:
@@ -256,7 +236,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const std::vector<int32_t> invalidZeroPoints =
getInvalidZeroPoints(model.operands[operand].type);
getInvalidZeroPoints(model.operands[operand].type);
for (int32_t invalidZeroPoint : invalidZeroPoints) {
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
@@ -292,13 +272,13 @@ static void mutateOperand(Operand* operand, OperandType type) {
case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_INT32:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
@@ -306,19 +286,20 @@ static void mutateOperand(Operand* operand, OperandType type) {
case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
break;
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
SymmPerChannelQuantParams channelQuant;
channelQuant.channelDim = 0;
channelQuant.scales = hidl_vec<float>(
operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0]) : 0);
operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
: 0);
for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
channelQuant.scales[i] = 1.0f;
}
@@ -435,7 +416,7 @@ static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& mode
std::to_string(invalidOperationType);
validate(device, message, model, [operation, invalidOperationType](Model* model) {
model->operations[operation].type =
static_cast<OperationType>(invalidOperationType);
static_cast<OperationType>(invalidOperationType);
});
}
}
@@ -690,7 +671,7 @@ static void addOperationInputTest(const sp<IDevice>& device, const Model& model)
static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
"addOperationOutputTest: operation " + std::to_string(operation);
"addOperationOutputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -702,14 +683,14 @@ static void addOperationOutputTest(const sp<IDevice>& device, const Model& model
///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
static const int32_t invalidExecutionPreferences[] = {
static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
for (int32_t preference : invalidExecutionPreferences) {
const std::string message =
"mutateExecutionPreferenceTest: preference " + std::to_string(preference);
"mutateExecutionPreferenceTest: preference " + std::to_string(preference);
validate(device, message, model, [](Model*) {},
static_cast<ExecutionPreference>(preference));
}

View File

@@ -16,17 +16,18 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
#include "ExecutionBurstController.h"
#include "TestHarness.h"
#include "Utils.h"
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -137,26 +138,6 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
}
}
// Deletes an element from a hidl_vec. hidl_vec has no "remove" operation of its
// own, so this is accomplished efficiently by rotating the element to the end
// and then resizing the hidl_vec to one element fewer.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
if (vec) {
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
vec->resize(vec->size() - 1);
}
}
template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
// assumes vec is non-null
const uint32_t index = vec->size();
vec->resize(index + 1);
(*vec)[index] = value;
return index;
}
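These two helpers are deleted here because they now live in the shared 1.0 utilities. For reference, a hypothetical standalone use showing their semantics:

    hidl_vec<uint32_t> vec = {10, 20, 30, 40};
    hidl_vec_removeAt(&vec, 1);  // rotates 20 to the end, then shrinks: vec == {10, 30, 40}
    const uint32_t i = hidl_vec_push_back(&vec, 50u);  // i == 3, vec == {10, 30, 40, 50}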
///////////////////////// REMOVE INPUT ////////////////////////////////////
static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
@@ -197,11 +178,13 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = INPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
RequestArgument arg_empty = {
.hasNoValue = true,
};
inputs_info[index] = s ? arg : arg_empty;
inputSize += s;
@@ -219,8 +202,10 @@ std::vector<Request> createRequests(const std::vector<MixedTypedExample>& exampl
for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
RequestArgument arg = {
.location = {.poolIndex = OUTPUT,
.offset = 0,
.length = static_cast<uint32_t>(s)},
.dimensions = {},
};
outputs_info[index] = arg;
outputSize += s;
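Each RequestArgument records only an offset and length; the bytes themselves live in two shared-memory pools (pool index INPUT for all inputs, OUTPUT for all outputs). A hedged sketch of how the pools are presumably allocated and filled, assuming the nn::allocateSharedMemory helper and hidlmemory's mapMemory seen in this file's includes:

    // Sketch under stated assumptions; error handling trimmed.
    hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                   nn::allocateSharedMemory(outputSize)};
    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
    // Copy each input's bytes to inputPtr + inputs_info[i].location.offset, then assemble:
    Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};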

View File

@@ -20,7 +20,7 @@
#include <android-base/logging.h>
#include "1.2/Callbacks.h"
namespace android {
namespace hardware {

View File

@@ -14,24 +14,23 @@
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
#include <android-base/macros.h>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include "1.2/Callbacks.h"
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -50,7 +49,7 @@ class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvB
NeuralnetworksHidlEnvironment();
~NeuralnetworksHidlEnvironment() override;
public:
static NeuralnetworksHidlEnvironment* getInstance();
void registerTestServices() override;
};
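NeuralnetworksHidlEnvironment follows the standard VTS singleton pattern; the test binary's main() presumably wires it into gtest with the usual boilerplate:

    int main(int argc, char** argv) {
        ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
        ::testing::InitGoogleTest(&argc, argv);
        NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);  // VTS flag parsing
        return RUN_ALL_TESTS();
    }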
@@ -59,30 +58,30 @@ class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvB
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
public:
NeuralnetworksHidlTest();
~NeuralnetworksHidlTest() override;
void SetUp() override;
void TearDown() override;
protected:
sp<IDevice> device;
};
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
protected:
void validateEverything(const Model& model, const std::vector<Request>& requests);
void validateFailure(const Model& model, const std::vector<Request>& requests);
private:
void validateModel(const Model& model);
void validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
void validateBurst(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests);
};
// Tag for the generated tests
@@ -93,7 +92,7 @@ class DynamicOutputShapeTest : public NeuralnetworksHidlTest {};
// Utility function to get PreparedModel from callback and downcast to V1_2.
sp<IPreparedModel> getPreparedModel_1_2(
const sp<V1_2::implementation::PreparedModelCallback>& callback);
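A plausible implementation of this downcast helper, assuming standard HIDL castFrom semantics (the real definition lives in the accompanying .cpp, which this diff does not show):

    sp<IPreparedModel> getPreparedModel_1_2(
            const sp<V1_2::implementation::PreparedModelCallback>& callback) {
        sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
        return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
    }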
} // namespace functional
} // namespace vts
@@ -110,4 +109,4 @@ namespace android::hardware::neuralnetworks::V1_0 {
} // namespace android::hardware::neuralnetworks::V1_0
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H

View File

@@ -14,14 +14,11 @@
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
@@ -60,7 +57,7 @@ using V1_0::ErrorStatus;
* std::condition_variable, or std::experimental::latch instead.
*/
class CallbackBase {
public:
CallbackBase();
~CallbackBase();
@@ -79,8 +76,8 @@ class CallbackBase {
* before the time duration expired, std::cv_status::timeout
* otherwise.
*/
template <class Rep, class Period>
std::cv_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration);
/**
* CallbackBase::on_finish binds a function to the callback object. This
@@ -144,7 +141,7 @@ class CallbackBase {
*/
void join_thread();
protected:
/**
* CallbackBase::notify enables all prior and future wait* calls on the
* callback object to proceed. The call to CallbackBase::notify happens
@@ -158,16 +155,16 @@ class CallbackBase {
*/
void notify();
private:
// Same as CallbackBase::join_thread but assumes we already hold a lock on
// mMutex.
void join_thread_locked();
bool mNotified;
std::mutex mMutex;
std::condition_variable mCondition;
std::function<bool(void)> mPostWork;
std::thread mThread;
};
/**
@@ -185,7 +182,7 @@ class CallbackBase {
* IDevice::prepareModel.
*/
class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback {
public:
PreparedModelCallback();
~PreparedModelCallback() override;
@@ -241,8 +238,8 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
*/
sp<V1_0::IPreparedModel> getPreparedModel();
private:
ErrorStatus mErrorStatus;
sp<V1_0::IPreparedModel> mPreparedModel;
};
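Typical use of PreparedModelCallback in these tests, sketched under the assumption of the 1.2 prepareModel_1_2 entry point, with HidlToken standing in for the 1.2 cache-token array type and the status accessor name assumed:

    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    Return<ErrorStatus> launchStatus = device->prepareModel_1_2(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
            hidl_vec<hidl_handle>(), HidlToken(), callback);
    ASSERT_TRUE(launchStatus.isOk());
    callback->wait();  // blocks until the service calls notify/notify_1_2
    ASSERT_EQ(ErrorStatus::NONE, callback->getStatus());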
@@ -260,8 +257,8 @@ class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback
* IExecutionCallback. This callback object is passed as an argument to
* IPreparedModel::execute.
*/
class ExecutionCallback : public CallbackBase, public IExecutionCallback {
public:
ExecutionCallback();
~ExecutionCallback() override;
@@ -376,19 +373,19 @@ class ExecutionCallback : public CallbackBase, public IExecutionCallback {
*/
Timing getTiming();
private:
ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
std::vector<OutputShape> mOutputShapes = {};
Timing mTiming = {};
};
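The matching execution-side pattern, assuming the 1.2 asynchronous execute_1_2 path and the accessors documented above:

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    Return<ErrorStatus> executionLaunchStatus =
            preparedModel->execute_1_2(request, MeasureTiming::NO, executionCallback);
    ASSERT_TRUE(executionLaunchStatus.isOk());
    executionCallback->wait();
    const std::vector<OutputShape> outputShapes = executionCallback->getOutputShapes();
    const Timing timing = executionCallback->getTiming();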
// template function implementation(s) below this point
template <class Rep, class Period>
std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep, Period>& timeout_duration) {
std::unique_lock<std::mutex> lock(mMutex);
std::cv_status status =
mCondition.wait_for(lock, timeout_duration, [this] { return mNotified; });
if (status != std::cv_status::timeout) {
join_thread_locked();
}
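Because wait_for surfaces std::cv_status, callers can bound how long they block on a misbehaving service; a small usage sketch:

    sp<ExecutionCallback> callback = new ExecutionCallback();
    // ... launch an asynchronous execution that will notify `callback` ...
    if (callback->wait_for(std::chrono::seconds(10)) == std::cv_status::timeout) {
        FAIL() << "execution callback was not notified within 10 seconds";
    }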
@@ -401,4 +398,4 @@ std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep,Period>& t
} // namespace hardware
} // namespace android
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H