Mirror of https://github.com/Evolution-X/hardware_interfaces (synced 2026-02-01 22:04:26 +00:00)
Merge "Refactor NN API VTS tests and add v1.1 tests"
committed by Android (Google) Code Review
commit e31495a29e
@@ -14,22 +14,49 @@
// limitations under the License.
//

cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
cc_library_static {
name: "VtsHalNeuralnetworksTest_utils",
srcs: [
"Callbacks.cpp",
"GeneratedTestHarness.cpp",
"Models.cpp",
"VtsHalNeuralnetworksV1_0TargetTest.cpp",
"GeneratedTestHarness.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
export_include_dirs: ["."],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libhidlmemory",
"libneuralnetworks_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
}

cc_test {
name: "VtsHalNeuralnetworksV1_0TargetTest",
srcs: [
"VtsHalNeuralnetworksV1_0.cpp",
"VtsHalNeuralnetworksV1_0BasicTest.cpp",
"VtsHalNeuralnetworksV1_0GeneratedTest.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libhidlmemory",
"libneuralnetworks_utils",
"VtsHalNeuralnetworksTest_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],

@@ -16,9 +16,15 @@

#include "Callbacks.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <iostream>
@@ -26,11 +32,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {
// allocator helper
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");

namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
@@ -64,54 +65,10 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr

// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExampleType>& examples) {
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExampleType>& examples) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
Model model = create_model();

// see if service can handle model
ErrorStatus supportedStatus;
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
supportedStatus = status;
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
ASSERT_EQ(ErrorStatus::NONE, supportedStatus);

// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());

// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (fullySupportsModel) {
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
} else {
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
}

// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
return;
}
ASSERT_NE(nullptr, preparedModel.get());

int example_no = 1;
for (auto& example : examples) {
@@ -167,8 +124,8 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
offset += i.location.length;
}
}
std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
allocateSharedMemory(outputSize)};
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
ASSERT_NE(0ull, pools[INPUT].size());
ASSERT_NE(0ull, pools[OUTPUT].size());
@@ -221,11 +178,107 @@ void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
}
}

void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExampleType>& examples) {
V1_0::Model model = create_model();

// see if service can handle model
bool fullySupportsModel = false;
ErrorStatus supportedStatus;
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());

Return<void> supportedCall = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
supportedStatus = status;
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());

// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (fullySupportsModel) {
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
} else {
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
}

// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
return;
}
ASSERT_NE(nullptr, preparedModel.get());

EvaluatePreparedModel(preparedModel, is_ignored, examples);
}

void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExampleType>& examples) {
V1_1::Model model = create_model();

// see if service can handle model
bool fullySupportsModel = false;
ErrorStatus supportedStatus;
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());

Return<void> supportedCall = device->getSupportedOperations_1_1(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
supportedStatus = status;
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());

// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (fullySupportsModel) {
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
} else {
EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
}

// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"prepare model that it does not support."
<< std::endl;
return;
}
ASSERT_NE(nullptr, preparedModel.get());

EvaluatePreparedModel(preparedModel, is_ignored, examples);
}

} // namespace generated_tests

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -17,19 +17,22 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <vector>

using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create a valid model
Model createValidTestModel() {
V1_1::Model createValidTestModel_1_1() {
const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
const uint32_t size = operand2Data.size() * sizeof(float);

@@ -103,39 +106,34 @@ Model createValidTestModel() {
}

// create first invalid model
Model createInvalidTestModel1() {
Model model = createValidTestModel();
V1_1::Model createInvalidTestModel1_1_1() {
Model model = createValidTestModel_1_1();
model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */
return model;
}

// create second invalid model
Model createInvalidTestModel2() {
Model model = createValidTestModel();
V1_1::Model createInvalidTestModel2_1_1() {
Model model = createValidTestModel_1_1();
const uint32_t operand1 = 0;
const uint32_t operand5 = 4; // INVALID OPERAND
model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
return model;
}

// allocator helper
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
hidl_memory memory;
V1_0::Model createValidTestModel_1_0() {
V1_1::Model model = createValidTestModel_1_1();
return nn::convertToV1_0(model);
}

sp<IAllocator> allocator = IAllocator::getService(type);
if (!allocator.get()) {
return {};
}
V1_0::Model createInvalidTestModel1_1_0() {
V1_1::Model model = createInvalidTestModel1_1_1();
return nn::convertToV1_0(model);
}

Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
ASSERT_TRUE(success);
memory = mem;
});
if (!ret.isOk()) {
return {};
}

return memory;
V1_0::Model createInvalidTestModel2_1_0() {
V1_1::Model model = createInvalidTestModel2_1_1();
return nn::convertToV1_0(model);
}

// create a valid request
@@ -154,8 +152,8 @@ Request createValidTestRequest() {
std::vector<RequestArgument> outputs = {{
.location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
}};
std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
allocateSharedMemory(outputSize)};
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
nn::allocateSharedMemory(outputSize)};
if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
return {};
}
@@ -199,9 +197,6 @@ Request createInvalidTestRequest2() {
return request;
}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -16,28 +16,27 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include <android/hardware/neuralnetworks/1.1/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create the model
Model createValidTestModel();
Model createInvalidTestModel1();
Model createInvalidTestModel2();
// create V1_1 model
V1_1::Model createValidTestModel_1_1();
V1_1::Model createInvalidTestModel1_1_1();
V1_1::Model createInvalidTestModel2_1_1();

// create V1_0 model
V1_0::Model createValidTestModel_1_0();
V1_0::Model createInvalidTestModel1_1_0();
V1_0::Model createInvalidTestModel2_1_0();

// create the request
Request createValidTestRequest();
Request createInvalidTestRequest1();
Request createInvalidTestRequest2();
V1_0::Request createValidTestRequest();
V1_0::Request createInvalidTestRequest1();
V1_0::Request createInvalidTestRequest2();

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -0,0 +1,73 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0.h"
#include "Utils.h"

#include <android-base/logging.h>

using ::android::hardware::hidl_memory;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// allocator helper
hidl_memory allocateSharedMemory(int64_t size) {
return nn::allocateSharedMemory(size);
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
// This has to return a "new" object because it is freed inside
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
return instance;
}

void NeuralnetworksHidlEnvironment::registerTestServices() {
registerTestService<V1_0::IDevice>();
}

// The main test class for NEURALNETWORK HIDL HAL.
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}

void NeuralnetworksHidlTest::SetUp() {
device = ::testing::VtsHalHidlTargetTestBase::getService<V1_0::IDevice>(
NeuralnetworksHidlEnvironment::getInstance());
ASSERT_NE(nullptr, device.get());
}

void NeuralnetworksHidlTest::TearDown() {}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2017 The Android Open Source Project
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,23 +29,6 @@
#include <gtest/gtest.h>
#include <string>

using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -53,6 +36,8 @@ namespace V1_0 {
namespace vts {
namespace functional {

hidl_memory allocateSharedMemory(int64_t size);

// A class for test environment setup
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
NeuralnetworksHidlEnvironment();
@@ -74,25 +59,22 @@ class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
void SetUp() override;
void TearDown() override;

sp<IPreparedModel> doPrepareModelShortcut();

sp<IDevice> device;
sp<V1_0::IDevice> device;
};

} // namespace functional
} // namespace vts

// pretty-print values for error messages

template<typename CharT, typename Traits>
template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
ErrorStatus errorStatus) {
V1_0::ErrorStatus errorStatus) {
return os << toString(errorStatus);
}

template<typename CharT, typename Traits>
template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
DeviceStatus deviceStatus) {
V1_0::DeviceStatus deviceStatus) {
return os << toString(deviceStatus);
}
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2017 The Android Open Source Project
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0TargetTest.h"
#include "VtsHalNeuralnetworksV1_0.h"

#include "Callbacks.h"
#include "Models.h"
@@ -26,51 +26,34 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::generated_tests::MixedTypedExampleType;

namespace generated_tests {
extern void Execute(const sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
const std::vector<MixedTypedExampleType>&);
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
// This has to return a "new" object because it is freed inside
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
return instance;
}

void NeuralnetworksHidlEnvironment::registerTestServices() {
registerTestService<IDevice>();
}

// The main test class for NEURALNETWORK HIDL HAL.
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}

void NeuralnetworksHidlTest::SetUp() {
device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
NeuralnetworksHidlEnvironment::getInstance());
ASSERT_NE(nullptr, device.get());
}

void NeuralnetworksHidlTest::TearDown() {}

sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut() {
Model model = createValidTestModel();
inline sp<IPreparedModel> doPrepareModelShortcut(sp<IDevice>& device) {
Model model = createValidTestModel_1_0();

sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
if (preparedModelCallback == nullptr) {
@@ -116,7 +99,7 @@ TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {

// supported operations positive test
TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
Model model = createValidTestModel();
Model model = createValidTestModel_1_0();
Return<void> ret = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::NONE, status);
@@ -127,7 +110,7 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {

// supported operations negative test 1
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
Model model = createInvalidTestModel1();
Model model = createInvalidTestModel1_1_0();
Return<void> ret = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
@@ -138,7 +121,7 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {

// supported operations negative test 2
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
Model model = createInvalidTestModel2();
Model model = createInvalidTestModel2_1_0();
Return<void> ret = device->getSupportedOperations(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
@@ -149,7 +132,7 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {

// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
Model model = createValidTestModel();
Model model = createValidTestModel_1_0();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
@@ -165,7 +148,7 @@ TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {

// prepare simple model negative test 1
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
Model model = createInvalidTestModel1();
Model model = createInvalidTestModel1_1_0();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
@@ -181,7 +164,7 @@ TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {

// prepare simple model negative test 2
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
Model model = createInvalidTestModel2();
Model model = createInvalidTestModel2_1_0();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
@@ -201,7 +184,7 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
const uint32_t OUTPUT = 1;

sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
ASSERT_NE(nullptr, preparedModel.get());
Request request = createValidTestRequest();

@@ -235,7 +218,7 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {

// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
ASSERT_NE(nullptr, preparedModel.get());
Request request = createInvalidTestRequest1();

@@ -252,7 +235,7 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {

// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
sp<IPreparedModel> preparedModel = doPrepareModelShortcut();
sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
ASSERT_NE(nullptr, preparedModel.get());
Request request = createInvalidTestRequest2();

@@ -267,16 +250,6 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// Mixed-typed examples
typedef MixedTypedExampleType MixedTypedExample;

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_vts_tests.cpp"

// TODO: Add tests for execution failure, or wait_for/wait_until timeout.
// Discussion:
// https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222

} // namespace functional
} // namespace vts
} // namespace V1_0
@@ -0,0 +1,72 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0.h"

#include "Callbacks.h"
#include "TestHarness.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::generated_tests::MixedTypedExampleType;
extern void Execute(sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
const std::vector<MixedTypedExampleType>&);
} // namespace generated_tests

namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

// Mixed-typed examples
typedef generated_tests::MixedTypedExampleType MixedTypedExample;

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
neuralnetworks/1.1/vts/OWNERS (new file, 10 lines)
@@ -0,0 +1,10 @@
# Neuralnetworks team
butlermichael@google.com
dgross@google.com
jeanluc@google.com
miaowang@google.com
yangni@google.com

# VTS team
yim@google.com
yuexima@google.com
neuralnetworks/1.1/vts/functional/Android.bp (new file, 39 lines)
@@ -0,0 +1,39 @@
//
// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

cc_test {
name: "VtsHalNeuralnetworksV1_1TargetTest",
srcs: [
"VtsHalNeuralnetworksV1_1.cpp",
"VtsHalNeuralnetworksV1_1BasicTest.cpp",
"VtsHalNeuralnetworksV1_1GeneratedTest.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libhidlmemory",
"libneuralnetworks_utils",
"VtsHalNeuralnetworksTest_utils",
],
header_libs: [
"libneuralnetworks_headers",
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
}
@@ -0,0 +1,74 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_1.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::hidl_memory;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

// allocator helper
hidl_memory allocateSharedMemory(int64_t size) {
return nn::allocateSharedMemory(size);
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}

NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
// This has to return a "new" object because it is freed inside
// ::testing::AddGlobalTestEnvironment when the gtest is being torn down
static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
return instance;
}

void NeuralnetworksHidlEnvironment::registerTestServices() {
registerTestService<V1_1::IDevice>();
}

// The main test class for NEURALNETWORK HIDL HAL.
NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}

void NeuralnetworksHidlTest::SetUp() {
device = ::testing::VtsHalHidlTargetTestBase::getService<V1_1::IDevice>(
NeuralnetworksHidlEnvironment::getInstance());
ASSERT_NE(nullptr, device.get());
}

void NeuralnetworksHidlTest::TearDown() {}

} // namespace functional
} // namespace vts
} // namespace V1_1
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h (new file, 85 lines)
@@ -0,0 +1,85 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
#define VTS_HAL_NEURALNETWORKS_V1_1_H

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>

#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
#include <gtest/gtest.h>
#include <string>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {
hidl_memory allocateSharedMemory(int64_t size);

// A class for test environment setup
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
NeuralnetworksHidlEnvironment();
NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;

public:
~NeuralnetworksHidlEnvironment() override;
static NeuralnetworksHidlEnvironment* getInstance();
void registerTestServices() override;
};

// The main test class for NEURALNETWORKS HIDL HAL.
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
~NeuralnetworksHidlTest() override;
void SetUp() override;
void TearDown() override;

sp<V1_1::IDevice> device;
};
} // namespace functional
} // namespace vts

// pretty-print values for error messages

template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
V1_0::ErrorStatus errorStatus) {
return os << toString(errorStatus);
}

template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
V1_0::DeviceStatus deviceStatus) {
return os << toString(deviceStatus);
}

} // namespace V1_1
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

#endif // VTS_HAL_NEURALNETWORKS_V1_1_H
@@ -0,0 +1,280 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_1.h"

#include "Callbacks.h"
#include "Models.h"
#include "TestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::Operation;
using ::android::hardware::neuralnetworks::V1_1::OperationType;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

inline sp<IPreparedModel> doPrepareModelShortcut(sp<IDevice>& device) {
Model model = createValidTestModel_1_1();

sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
if (preparedModelCallback == nullptr) {
return nullptr;
}
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
return nullptr;
}

preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
return nullptr;
}

return preparedModel;
}

// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
Return<DeviceStatus> status = device->getStatus();
ASSERT_TRUE(status.isOk());
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
Return<void> ret =
device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
EXPECT_EQ(ErrorStatus::NONE, status);
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
});
EXPECT_TRUE(ret.isOk());
}

// supported operations positive test
TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
Model model = createValidTestModel_1_1();
Return<void> ret = device->getSupportedOperations_1_1(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::NONE, status);
EXPECT_EQ(model.operations.size(), supported.size());
});
EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 1
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
Model model = createInvalidTestModel1_1_1();
Return<void> ret = device->getSupportedOperations_1_1(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
(void)supported;
});
EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 2
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
Model model = createInvalidTestModel2_1_1();
Return<void> ret = device->getSupportedOperations_1_1(
model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
(void)supported;
});
EXPECT_TRUE(ret.isOk());
}
// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
Model model = createValidTestModel_1_1();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
EXPECT_NE(nullptr, preparedModel.get());
}

// prepare simple model negative test 1
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
Model model = createInvalidTestModel1_1_1();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
EXPECT_EQ(nullptr, preparedModel.get());
}

// prepare simple model negative test 2
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
Model model = createInvalidTestModel2_1_1();
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
EXPECT_EQ(nullptr, preparedModel.get());
}

// execute simple graph positive test
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
const uint32_t OUTPUT = 1;

sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
ASSERT_NE(nullptr, preparedModel.get());
Request request = createValidTestRequest();

auto postWork = [&] {
sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
if (outputMemory == nullptr) {
return false;
}
float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
if (outputPtr == nullptr) {
return false;
}
outputMemory->read();
std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
outputMemory->commit();
return true;
};

sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
executionCallback->on_finish(postWork);
Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));

executionCallback->wait();
ErrorStatus executionReturnStatus = executionCallback->getStatus();
EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
EXPECT_EQ(expectedData, outputData);
}

// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
ASSERT_NE(nullptr, preparedModel.get());
Request request = createInvalidTestRequest1();

sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

executionCallback->wait();
ErrorStatus executionReturnStatus = executionCallback->getStatus();
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
ASSERT_NE(nullptr, preparedModel.get());
Request request = createInvalidTestRequest2();

sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

executionCallback->wait();
ErrorStatus executionReturnStatus = executionCallback->getStatus();
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

} // namespace functional
} // namespace vts
} // namespace V1_1
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;

int main(int argc, char** argv) {
::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
::testing::InitGoogleTest(&argc, argv);
NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);

int status = RUN_ALL_TESTS();
return status;
}
@@ -0,0 +1,80 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_1.h"

#include "Callbacks.h"
#include "TestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::Operation;
using ::android::hardware::neuralnetworks::V1_1::OperationType;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::generated_tests::MixedTypedExampleType;
extern void Execute(sp<V1_1::IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
const std::vector<MixedTypedExampleType>&);
} // namespace generated_tests

namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

// Mixed-typed examples
typedef generated_tests::MixedTypedExampleType MixedTypedExample;

// in frameworks/ml/nn/runtime/tests/generated/
#include "all_generated_V1_0_vts_tests.cpp"
#include "all_generated_V1_1_vts_tests.cpp"

} // namespace functional
} // namespace vts
} // namespace V1_1
} // namespace neuralnetworks
} // namespace hardware
} // namespace android