Mirror of https://github.com/Evolution-X/hardware_interfaces (synced 2026-02-02 06:22:53 +00:00)

Merge changes from topic "nnapi-aosp-sync"

* changes:
  NNAPI: Add execution preference to prepareModel (HAL)
  Sync NNAPI Operand and Operation documentation fixes
  Fix the spec for TENSOR_QUANT8_ASYMM to match our validation.
  Fix the NNAPI HAL documentation about ADD and MUL
  NN validation tests
  Add validation tests for consistency of model inputs and outputs.
  Disable arm32 asan for VtsHalNeuralnetworksV1_1TargetTest
@@ -238,15 +238,16 @@ a432d6d9200248dc2126827bcd6cdea31dd65eff39b939f64585d27d915a5857 android.hardwar
619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler
0a159f81359cd4f71bbe00972ee8403ea79351fb7c0cd48be72ebb3e424dbaef android.hardware.radio@1.0::types
09342041e17c429fce0034b9096d17849122111436a5f0053e7e59500e1cb89c android.hardware.media.omx@1.0::IOmxStore
246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
93eb3757ceaf21590fa4cd1d4a7dfe3b3794af5396100a6d25630879352abce9 android.hardware.neuralnetworks@1.0::IDevice
f66f9a38541bf92001d3adcce678cd7e3da2262124befb460b1c9aea9492813b android.hardware.neuralnetworks@1.0::IExecutionCallback
953607822954435874f4b81686440a604e2a88cdd2d9164c6293f3d5772510d7 android.hardware.neuralnetworks@1.0::IPreparedModel
73e03573494ba96f0e711ab7f1956c5b2d54c3da690cd7ecf4d6d0f287447730 android.hardware.neuralnetworks@1.0::IPreparedModelCallback
246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal
c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback

# Future changes to HALs
5804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice
088b30a9c9ce27bc955b08a03c38c208f8f65b51133053c7656c875479801b99 android.hardware.neuralnetworks@1.0::types
12e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback
702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types

@@ -28,7 +28,7 @@ interface IExecutionCallback {
     * ErrorStatus resulting from the execution. If the asynchronous task
     * is not launched, notify must be invoked with the appropriate error.
     *
     * @return param Error status returned from launching the asynchronous task
     * @param status Error status returned from launching the asynchronous task
     *               (if the launch fails) or from the asynchronous task itself
     *               (if the launch succeeds). Must be:
     *               - NONE if the asynchronous execution was successful
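The corrected @param contract above means notify must be invoked exactly once, whether or not the asynchronous task launches. A minimal service-side sketch of that contract (SamplePreparedModel, validateRequest, and runInference are hypothetical names; only the notify semantics come from the HAL comment):

    // Sketch only, not part of this change. Assumes <thread> and the usual
    // HIDL types (Return, sp, Request, ErrorStatus) are in scope.
    Return<ErrorStatus> SamplePreparedModel::execute(const Request& request,
                                                     const sp<IExecutionCallback>& callback) {
        if (!validateRequest(request)) {                      // hypothetical helper
            callback->notify(ErrorStatus::INVALID_ARGUMENT);  // launch failed
            return ErrorStatus::INVALID_ARGUMENT;
        }
        std::thread([request, callback] {
            ErrorStatus status = runInference(request);  // hypothetical; NONE on success
            callback->notify(status);                    // launch succeeded; report task result
        }).detach();
        return ErrorStatus::NONE;
    }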
(File diff suppressed because it is too large.)
@@ -18,7 +18,6 @@ cc_library_static {
    name: "VtsHalNeuralnetworksTest_utils",
    srcs: [
        "Callbacks.cpp",
        "Models.cpp",
        "GeneratedTestHarness.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
@@ -41,14 +40,17 @@ cc_library_static {
cc_test {
    name: "VtsHalNeuralnetworksV1_0TargetTest",
    srcs: [
        "VtsHalNeuralnetworksV1_0.cpp",
        "VtsHalNeuralnetworksV1_0BasicTest.cpp",
        "VtsHalNeuralnetworksV1_0GeneratedTest.cpp",
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.0",
        "android.hidl.allocator@1.0",
        "android.hidl.memory@1.0",
        "libhidlmemory",
neuralnetworks/1.0/vts/functional/BasicTests.cpp (new file, 56 lines)
@@ -0,0 +1,56 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    Return<DeviceStatus> status = device->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    Return<void> ret =
        device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
        });
    EXPECT_TRUE(ret.isOk());
}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -17,14 +17,6 @@ namespace neuralnetworks {
namespace V1_0 {
namespace implementation {

using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;

/**
 * The CallbackBase class is used internally by the NeuralNetworks runtime to
 * synchronize between different threads. An asynchronous task is launched
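For context, the CallbackBase described above is essentially a monitor: a mutex and condition variable guarding a "notified" flag. A self-contained sketch of just that core (the real Callbacks.h adds timed waits, bound threads, and post-work hooks; this is not its actual implementation):

    #include <condition_variable>
    #include <mutex>

    class SimpleCallbackBase {
       public:
        // block until notify() has been called at least once
        void wait() {
            std::unique_lock<std::mutex> lock(mMutex);
            mCondition.wait(lock, [this] { return mNotified; });
        }
        // wake every waiter; safe to call from the asynchronous task's thread
        void notify() {
            {
                std::lock_guard<std::mutex> lock(mMutex);
                mNotified = true;
            }
            mCondition.notify_all();
        }

       private:
        std::mutex mMutex;
        std::condition_variable mCondition;
        bool mNotified = false;
    };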
@@ -179,7 +179,7 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
    }
}

void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    V1_0::Model model = create_model();
@@ -223,7 +223,7 @@ void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_
    EvaluatePreparedModel(preparedModel, is_ignored, examples);
}

void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    V1_1::Model model = create_model();
@@ -242,8 +242,8 @@ void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_
    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

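The hunk above is the test-side half of "NNAPI: Add execution preference to prepareModel (HAL)": every prepareModel_1_1 call now carries an ExecutionPreference (the tests pick FAST_SINGLE_ANSWER). On the service side the new argument arrives as below; a hedged sketch, where SampleDriver and its tuning hook are hypothetical:

    // Sketch of a 1.1 driver accepting the new preference argument; the
    // signature shape follows the updated 1.1 HAL, the body is illustrative.
    Return<ErrorStatus> SampleDriver::prepareModel_1_1(
            const V1_1::Model& model, ExecutionPreference preference,
            const sp<IPreparedModelCallback>& callback) {
        if (preference == ExecutionPreference::LOW_POWER) {
            selectLowPowerKernels();  // hypothetical tuning hook
        }
        // validate, compile, then report via callback->notify(status, preparedModel)
        return ErrorStatus::NONE;
    }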
@@ -16,47 +16,33 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0.h"
#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::generated_tests::MixedTypedExampleType;
extern void Execute(sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
                    const std::vector<MixedTypedExampleType>&);
extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>,
                    std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
} // namespace generated_tests

namespace V1_0 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;

// Mixed-typed examples
typedef generated_tests::MixedTypedExampleType MixedTypedExample;
@@ -1,202 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#define LOG_TAG "neuralnetworks_hidl_hal_test"
|
||||
|
||||
#include "Models.h"
|
||||
#include "Utils.h"
|
||||
|
||||
#include <android-base/logging.h>
|
||||
#include <android/hidl/allocator/1.0/IAllocator.h>
|
||||
#include <android/hidl/memory/1.0/IMemory.h>
|
||||
#include <hidlmemory/mapping.h>
|
||||
#include <vector>
|
||||
|
||||
using ::android::sp;
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
|
||||
// create a valid model
|
||||
V1_1::Model createValidTestModel_1_1() {
|
||||
const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
|
||||
const uint32_t size = operand2Data.size() * sizeof(float);
|
||||
|
||||
const uint32_t operand1 = 0;
|
||||
const uint32_t operand2 = 1;
|
||||
const uint32_t operand3 = 2;
|
||||
const uint32_t operand4 = 3;
|
||||
|
||||
const std::vector<Operand> operands = {
|
||||
{
|
||||
.type = OperandType::TENSOR_FLOAT32,
|
||||
.dimensions = {1, 2, 2, 1},
|
||||
.numberOfConsumers = 1,
|
||||
.scale = 0.0f,
|
||||
.zeroPoint = 0,
|
||||
.lifetime = OperandLifeTime::MODEL_INPUT,
|
||||
.location = {.poolIndex = 0, .offset = 0, .length = 0},
|
||||
},
|
||||
{
|
||||
.type = OperandType::TENSOR_FLOAT32,
|
||||
.dimensions = {1, 2, 2, 1},
|
||||
.numberOfConsumers = 1,
|
||||
.scale = 0.0f,
|
||||
.zeroPoint = 0,
|
||||
.lifetime = OperandLifeTime::CONSTANT_COPY,
|
||||
.location = {.poolIndex = 0, .offset = 0, .length = size},
|
||||
},
|
||||
{
|
||||
.type = OperandType::INT32,
|
||||
.dimensions = {},
|
||||
.numberOfConsumers = 1,
|
||||
.scale = 0.0f,
|
||||
.zeroPoint = 0,
|
||||
.lifetime = OperandLifeTime::CONSTANT_COPY,
|
||||
.location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
|
||||
},
|
||||
{
|
||||
.type = OperandType::TENSOR_FLOAT32,
|
||||
.dimensions = {1, 2, 2, 1},
|
||||
.numberOfConsumers = 0,
|
||||
.scale = 0.0f,
|
||||
.zeroPoint = 0,
|
||||
.lifetime = OperandLifeTime::MODEL_OUTPUT,
|
||||
.location = {.poolIndex = 0, .offset = 0, .length = 0},
|
||||
},
|
||||
};
|
||||
|
||||
const std::vector<Operation> operations = {{
|
||||
.type = OperationType::ADD, .inputs = {operand1, operand2, operand3}, .outputs = {operand4},
|
||||
}};
|
||||
|
||||
const std::vector<uint32_t> inputIndexes = {operand1};
|
||||
const std::vector<uint32_t> outputIndexes = {operand4};
|
||||
std::vector<uint8_t> operandValues(
|
||||
reinterpret_cast<const uint8_t*>(operand2Data.data()),
|
||||
reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
|
||||
int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
|
||||
operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
|
||||
reinterpret_cast<const uint8_t*>(&activation[1]));
|
||||
|
||||
const std::vector<hidl_memory> pools = {};
|
||||
|
||||
return {
|
||||
.operands = operands,
|
||||
.operations = operations,
|
||||
.inputIndexes = inputIndexes,
|
||||
.outputIndexes = outputIndexes,
|
||||
.operandValues = operandValues,
|
||||
.pools = pools,
|
||||
};
|
||||
}
|
||||
|
||||
// create first invalid model
|
||||
V1_1::Model createInvalidTestModel1_1_1() {
|
||||
Model model = createValidTestModel_1_1();
|
||||
model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */
|
||||
return model;
|
||||
}
|
||||
|
||||
// create second invalid model
|
||||
V1_1::Model createInvalidTestModel2_1_1() {
|
||||
Model model = createValidTestModel_1_1();
|
||||
const uint32_t operand1 = 0;
|
||||
const uint32_t operand5 = 4; // INVALID OPERAND
|
||||
model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
|
||||
return model;
|
||||
}
|
||||
|
||||
V1_0::Model createValidTestModel_1_0() {
|
||||
V1_1::Model model = createValidTestModel_1_1();
|
||||
return nn::convertToV1_0(model);
|
||||
}
|
||||
|
||||
V1_0::Model createInvalidTestModel1_1_0() {
|
||||
V1_1::Model model = createInvalidTestModel1_1_1();
|
||||
return nn::convertToV1_0(model);
|
||||
}
|
||||
|
||||
V1_0::Model createInvalidTestModel2_1_0() {
|
||||
V1_1::Model model = createInvalidTestModel2_1_1();
|
||||
return nn::convertToV1_0(model);
|
||||
}
|
||||
|
||||
// create a valid request
|
||||
Request createValidTestRequest() {
|
||||
std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
|
||||
std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
|
||||
const uint32_t INPUT = 0;
|
||||
const uint32_t OUTPUT = 1;
|
||||
|
||||
// prepare inputs
|
||||
uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
|
||||
uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
|
||||
std::vector<RequestArgument> inputs = {{
|
||||
.location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
|
||||
}};
|
||||
std::vector<RequestArgument> outputs = {{
|
||||
.location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
|
||||
}};
|
||||
std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
|
||||
nn::allocateSharedMemory(outputSize)};
|
||||
if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
|
||||
return {};
|
||||
}
|
||||
|
||||
// load data
|
||||
sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
|
||||
sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
|
||||
if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
|
||||
return {};
|
||||
}
|
||||
float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
|
||||
float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
|
||||
if (inputPtr == nullptr || outputPtr == nullptr) {
|
||||
return {};
|
||||
}
|
||||
inputMemory->update();
|
||||
outputMemory->update();
|
||||
std::copy(inputData.begin(), inputData.end(), inputPtr);
|
||||
std::copy(outputData.begin(), outputData.end(), outputPtr);
|
||||
inputMemory->commit();
|
||||
outputMemory->commit();
|
||||
|
||||
return {.inputs = inputs, .outputs = outputs, .pools = pools};
|
||||
}
|
||||
|
||||
// create first invalid request
|
||||
Request createInvalidTestRequest1() {
|
||||
Request request = createValidTestRequest();
|
||||
const uint32_t INVALID = 2;
|
||||
std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
|
||||
uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
|
||||
request.inputs[0].location = {
|
||||
.poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
|
||||
return request;
|
||||
}
|
||||
|
||||
// create second invalid request
|
||||
Request createInvalidTestRequest2() {
|
||||
Request request = createValidTestRequest();
|
||||
request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
|
||||
return request;
|
||||
}
|
||||
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2017 The Android Open Source Project
|
||||
* Copyright (C) 2018 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -14,29 +14,187 @@
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include <android/hardware/neuralnetworks/1.1/types.h>
#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// create V1_1 model
V1_1::Model createValidTestModel_1_1();
V1_1::Model createInvalidTestModel1_1_1();
V1_1::Model createInvalidTestModel2_1_1();
using MixedTypedExample = generated_tests::MixedTypedExampleType;

// create V1_0 model
V1_0::Model createValidTestModel_1_0();
V1_0::Model createInvalidTestModel1_1_0();
V1_0::Model createInvalidTestModel2_1_0();
#define FOR_EACH_TEST_MODEL(FN) \
    FN(add_broadcast_quant8) \
    FN(add) \
    FN(add_quant8) \
    FN(avg_pool_float_1) \
    FN(avg_pool_float_2) \
    FN(avg_pool_float_3) \
    FN(avg_pool_float_4) \
    FN(avg_pool_float_5) \
    FN(avg_pool_quant8_1) \
    FN(avg_pool_quant8_2) \
    FN(avg_pool_quant8_3) \
    FN(avg_pool_quant8_4) \
    FN(avg_pool_quant8_5) \
    FN(concat_float_1) \
    FN(concat_float_2) \
    FN(concat_float_3) \
    FN(concat_quant8_1) \
    FN(concat_quant8_2) \
    FN(concat_quant8_3) \
    FN(conv_1_h3_w2_SAME) \
    FN(conv_1_h3_w2_VALID) \
    FN(conv_3_h3_w2_SAME) \
    FN(conv_3_h3_w2_VALID) \
    FN(conv_float_2) \
    FN(conv_float_channels) \
    FN(conv_float_channels_weights_as_inputs) \
    FN(conv_float_large) \
    FN(conv_float_large_weights_as_inputs) \
    FN(conv_float) \
    FN(conv_float_weights_as_inputs) \
    FN(conv_quant8_2) \
    FN(conv_quant8_channels) \
    FN(conv_quant8_channels_weights_as_inputs) \
    FN(conv_quant8_large) \
    FN(conv_quant8_large_weights_as_inputs) \
    FN(conv_quant8) \
    FN(conv_quant8_overflow) \
    FN(conv_quant8_overflow_weights_as_inputs) \
    FN(conv_quant8_weights_as_inputs) \
    FN(depth_to_space_float_1) \
    FN(depth_to_space_float_2) \
    FN(depth_to_space_float_3) \
    FN(depth_to_space_quant8_1) \
    FN(depth_to_space_quant8_2) \
    FN(depthwise_conv2d_float_2) \
    FN(depthwise_conv2d_float_large_2) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large) \
    FN(depthwise_conv2d_float_large_weights_as_inputs) \
    FN(depthwise_conv2d_float) \
    FN(depthwise_conv2d_float_weights_as_inputs) \
    FN(depthwise_conv2d_quant8_2) \
    FN(depthwise_conv2d_quant8_large) \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
    FN(depthwise_conv2d_quant8) \
    FN(depthwise_conv2d_quant8_weights_as_inputs) \
    FN(depthwise_conv) \
    FN(dequantize) \
    FN(embedding_lookup) \
    FN(floor) \
    FN(fully_connected_float_2) \
    FN(fully_connected_float_large) \
    FN(fully_connected_float_large_weights_as_inputs) \
    FN(fully_connected_float) \
    FN(fully_connected_float_weights_as_inputs) \
    FN(fully_connected_quant8_2) \
    FN(fully_connected_quant8_large) \
    FN(fully_connected_quant8_large_weights_as_inputs) \
    FN(fully_connected_quant8) \
    FN(fully_connected_quant8_weights_as_inputs) \
    FN(hashtable_lookup_float) \
    FN(hashtable_lookup_quant8) \
    FN(l2_normalization_2) \
    FN(l2_normalization_large) \
    FN(l2_normalization) \
    FN(l2_pool_float_2) \
    FN(l2_pool_float_large) \
    FN(l2_pool_float) \
    FN(local_response_norm_float_1) \
    FN(local_response_norm_float_2) \
    FN(local_response_norm_float_3) \
    FN(local_response_norm_float_4) \
    FN(logistic_float_1) \
    FN(logistic_float_2) \
    FN(logistic_quant8_1) \
    FN(logistic_quant8_2) \
    FN(lsh_projection_2) \
    FN(lsh_projection) \
    FN(lsh_projection_weights_as_inputs) \
    FN(lstm2) \
    FN(lstm2_state2) \
    FN(lstm2_state) \
    FN(lstm3) \
    FN(lstm3_state2) \
    FN(lstm3_state3) \
    FN(lstm3_state) \
    FN(lstm) \
    FN(lstm_state2) \
    FN(lstm_state) \
    FN(max_pool_float_1) \
    FN(max_pool_float_2) \
    FN(max_pool_float_3) \
    FN(max_pool_float_4) \
    FN(max_pool_quant8_1) \
    FN(max_pool_quant8_2) \
    FN(max_pool_quant8_3) \
    FN(max_pool_quant8_4) \
    FN(mobilenet_224_gender_basic_fixed) \
    FN(mobilenet_quantized) \
    FN(mul_broadcast_quant8) \
    FN(mul) \
    FN(mul_quant8) \
    FN(mul_relu) \
    FN(relu1_float_1) \
    FN(relu1_float_2) \
    FN(relu1_quant8_1) \
    FN(relu1_quant8_2) \
    FN(relu6_float_1) \
    FN(relu6_float_2) \
    FN(relu6_quant8_1) \
    FN(relu6_quant8_2) \
    FN(relu_float_1) \
    FN(relu_float_2) \
    FN(relu_quant8_1) \
    FN(relu_quant8_2) \
    FN(reshape) \
    FN(reshape_quant8) \
    FN(reshape_quant8_weights_as_inputs) \
    FN(reshape_weights_as_inputs) \
    FN(resize_bilinear_2) \
    FN(resize_bilinear) \
    FN(rnn) \
    FN(rnn_state) \
    FN(softmax_float_1) \
    FN(softmax_float_2) \
    FN(softmax_quant8_1) \
    FN(softmax_quant8_2) \
    FN(space_to_depth_float_1) \
    FN(space_to_depth_float_2) \
    FN(space_to_depth_float_3) \
    FN(space_to_depth_quant8_1) \
    FN(space_to_depth_quant8_2) \
    FN(svdf2) \
    FN(svdf) \
    FN(svdf_state) \
    FN(tanh)

// create the request
V1_0::Request createValidTestRequest();
V1_0::Request createInvalidTestRequest1();
V1_0::Request createInvalidTestRequest2();
#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function { \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel(); \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

#endif // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
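Models.h now drives test generation with an X-macro: FOR_EACH_TEST_MODEL applies a caller-supplied macro to every generated model name, and FORWARD_DECLARE_GENERATED_OBJECTS is one such caller. Applied to the FN(add) entry, it expands to roughly:

    namespace add {
    extern std::vector<MixedTypedExample> examples;
    Model createTestModel();
    }  // namespace add

ValidationTests.cpp (further below) reuses the same list with a different per-model macro to stamp out one TEST_F per generated model.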
neuralnetworks/1.0/vts/functional/ValidateModel.cpp (new file, 506 lines)
@@ -0,0 +1,506 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
                                           const V1_0::Model& model) {
    SCOPED_TRACE(message + " [getSupportedOperations]");

    Return<void> ret =
        device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
        });
    EXPECT_TRUE(ret.isOk());
}

static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
                                 const V1_0::Model& model) {
    SCOPED_TRACE(message + " [prepareModel]");

    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    ASSERT_EQ(nullptr, preparedModel.get());
}

// Primary validation function. This function will take a valid model, apply a
// mutation to it to invalidate the model, then pass it to interface calls that
// use the model. Note that the model here is passed by value, and any mutation
// to the model does not leave this function.
static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model,
                     const std::function<void(Model*)>& mutation) {
    mutation(&model);
    validateGetSupportedOperations(device, message, model);
    validatePrepareModel(device, message, model);
}

// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    if (vec) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
}

template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    // assume vec is valid
    const uint32_t index = vec->size();
    vec->resize(index + 1);
    (*vec)[index] = value;
    return index;
}

static uint32_t addOperand(Model* model) {
    return hidl_vec_push_back(&model->operands,
                              {
                                  .type = OperandType::INT32,
                                  .dimensions = {},
                                  .numberOfConsumers = 0,
                                  .scale = 0.0f,
                                  .zeroPoint = 0,
                                  .lifetime = OperandLifeTime::MODEL_INPUT,
                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
                              });
}

static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
    uint32_t index = addOperand(model);
    model->operands[index].numberOfConsumers = 1;
    model->operands[index].lifetime = lifetime;
    return index;
}

///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////

static const int32_t invalidOperandTypes[] = {
    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
};

static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        for (int32_t invalidOperandType : invalidOperandTypes) {
            const std::string message = "mutateOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to value " +
                                        std::to_string(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
            });
        }
    }
}

///////////////////////// VALIDATE OPERAND RANK /////////////////////////

static uint32_t getInvalidRank(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
            return 1;
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_ASYMM:
            return 0;
        default:
            return 0;
    }
}

static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
                                    " has rank of " + std::to_string(invalidRank);
        validate(device, message, model, [operand, invalidRank](Model* model) {
            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
        });
    }
}

///////////////////////// VALIDATE OPERAND SCALE /////////////////////////

static float getInvalidScale(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::TENSOR_FLOAT32:
            return 1.0f;
        case OperandType::TENSOR_INT32:
            return -1.0f;
        case OperandType::TENSOR_QUANT8_ASYMM:
            return 0.0f;
        default:
            return 0.0f;
    }
}

static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const float invalidScale = getInvalidScale(model.operands[operand].type);
        const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
                                    " has scale of " + std::to_string(invalidScale);
        validate(device, message, model, [operand, invalidScale](Model* model) {
            model->operands[operand].scale = invalidScale;
        });
    }
}

///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////

static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
            return {1};
        case OperandType::TENSOR_QUANT8_ASYMM:
            return {-1, 256};
        default:
            return {};
    }
}

static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::vector<int32_t> invalidZeroPoints =
            getInvalidZeroPoints(model.operands[operand].type);
        for (int32_t invalidZeroPoint : invalidZeroPoints) {
            const std::string message = "mutateOperandZeroPointTest: operand " +
                                        std::to_string(operand) + " has zero point of " +
                                        std::to_string(invalidZeroPoint);
            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
                model->operands[operand].zeroPoint = invalidZeroPoint;
            });
        }
    }
}

///////////////////////// VALIDATE EXTRA ??? /////////////////////////

// TODO: Operand::lifetime
// TODO: Operand::location

///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////

static void mutateOperand(Operand* operand, OperandType type) {
    Operand newOperand = *operand;
    newOperand.type = type;
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
            newOperand.dimensions = hidl_vec<uint32_t>();
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_FLOAT32:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_INT32:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
            break;
        case OperandType::OEM:
        case OperandType::TENSOR_OEM_BYTE:
        default:
            break;
    }
    *operand = newOperand;
}

static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) {
    // LSH_PROJECTION's second argument is allowed to have any type. This is the
    // only operation that currently has a type that can be anything independent
    // from any other type. Changing the operand type to any other type will
    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
    // test.
    for (const Operation& operation : model.operations) {
        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
            return true;
        }
    }
    return false;
}

static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        if (mutateOperationOperandTypeSkip(operand, model)) {
            continue;
        }
        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
            // Do not test OEM types
            if (invalidOperandType == model.operands[operand].type ||
                invalidOperandType == OperandType::OEM ||
                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
                continue;
            }
            const std::string message = "mutateOperationOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to type " +
                                        toString(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                mutateOperand(&model->operands[operand], invalidOperandType);
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////

static const int32_t invalidOperationTypes[] = {
    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
    static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
};

static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (int32_t invalidOperationType : invalidOperationTypes) {
            const std::string message = "mutateOperationTypeTest: operation " +
                                        std::to_string(operation) + " set to value " +
                                        std::to_string(invalidOperationType);
            validate(device, message, model, [operation, invalidOperationType](Model* model) {
                model->operations[operation].type =
                    static_cast<OperationType>(invalidOperationType);
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////

static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
                                                 const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
                                        std::to_string(operation) + " input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
                model->operations[operation].inputs[input] = invalidOperand;
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////

static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
                                                  const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
                                        std::to_string(operation) + " output " +
                                        std::to_string(output);
            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
                model->operations[operation].outputs[output] = invalidOperand;
            });
        }
    }
}

///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////

static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
    if (vec) {
        // remove elements matching "value"
        auto last = std::remove(vec->begin(), vec->end(), value);
        vec->resize(std::distance(vec->begin(), last));

        // decrement elements exceeding "value"
        std::transform(vec->begin(), vec->end(), vec->begin(),
                       [value](uint32_t v) { return v > value ? v - 1 : v; });
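        // e.g. removing value 1 from {0, 1, 3} leaves {0, 2}: the 1 is dropped
        // and the 3 shifts down so operand indexes stay dense.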
    }
}

static void removeOperand(Model* model, uint32_t index) {
    hidl_vec_removeAt(&model->operands, index);
    for (Operation& operation : model->operations) {
        removeValueAndDecrementGreaterValues(&operation.inputs, index);
        removeValueAndDecrementGreaterValues(&operation.outputs, index);
    }
    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}

static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
        validate(device, message, model,
                 [operand](Model* model) { removeOperand(model, operand); });
    }
}

///////////////////////// REMOVE OPERATION /////////////////////////

static void removeOperation(Model* model, uint32_t index) {
    for (uint32_t operand : model->operations[index].inputs) {
        model->operands[operand].numberOfConsumers--;
    }
    hidl_vec_removeAt(&model->operations, index);
}

static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
        validate(device, message, model,
                 [operation](Model* model) { removeOperation(model, operation); });
    }
}

///////////////////////// REMOVE OPERATION INPUT /////////////////////////

static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const V1_0::Operation& op = model.operations[operation];
            // CONCATENATION has at least 2 inputs, with the last element being
            // INT32. Skip this test if removing one of CONCATENATION's
            // inputs still produces a valid model.
            if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 &&
                input != op.inputs.size() - 1) {
                continue;
            }
            const std::string message = "removeOperationInputTest: operation " +
                                        std::to_string(operation) + ", input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input](Model* model) {
                uint32_t operand = model->operations[operation].inputs[input];
                model->operands[operand].numberOfConsumers--;
                hidl_vec_removeAt(&model->operations[operation].inputs, input);
            });
        }
    }
}

///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////

static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "removeOperationOutputTest: operation " +
                                        std::to_string(operation) + ", output " +
                                        std::to_string(output);
            validate(device, message, model, [operation, output](Model* model) {
                hidl_vec_removeAt(&model->operations[operation].outputs, output);
            });
        }
    }
}

///////////////////////// MODEL VALIDATION /////////////////////////

// TODO: remove model input
// TODO: remove model output
// TODO: add unused operation

///////////////////////// ADD OPERATION INPUT /////////////////////////

static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
            hidl_vec_push_back(&model->operations[operation].inputs, index);
            hidl_vec_push_back(&model->inputIndexes, index);
        });
    }
}

///////////////////////// ADD OPERATION OUTPUT /////////////////////////

static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message =
            "addOperationOutputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
            hidl_vec_push_back(&model->operations[operation].outputs, index);
            hidl_vec_push_back(&model->outputIndexes, index);
        });
    }
}

////////////////////////// ENTRY POINT //////////////////////////////

void ValidationTest::validateModel(const V1_0::Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
neuralnetworks/1.0/vts/functional/ValidateRequest.cpp (new file, 261 lines)
@@ -0,0 +1,261 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using generated_tests::MixedTyped;
using generated_tests::MixedTypedExampleType;
using generated_tests::for_all;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true iff the entire model is guaranteed.
|
||||
// If a driver has any doubt that it can prepare an operation, it must
|
||||
// return false. So here, if a driver isn't sure if it can support an
|
||||
// operation, but reports that it successfully prepared the model, the test
|
||||
// can continue.
|
||||
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
|
||||
ASSERT_EQ(nullptr, preparedModel->get());
|
||||
LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
|
||||
"prepare model that it does not support.";
|
||||
std::cout << "[ ] Unable to test Request validation because vendor service "
|
||||
"cannot prepare model that it does not support."
|
||||
<< std::endl;
|
||||
return;
|
||||
}
|
||||
ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
|
||||
ASSERT_NE(nullptr, preparedModel->get());
|
||||
}
|
||||
|
||||
// Primary validation function. This function will take a valid request, apply a
|
||||
// mutation to it to invalidate the request, then pass it to interface calls
|
||||
// that use the request. Note that the request here is passed by value, and any
|
||||
// mutation to the request does not leave this function.
|
||||
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
|
||||
Request request, const std::function<void(Request*)>& mutation) {
|
||||
mutation(&request);
|
||||
SCOPED_TRACE(message + " [execute]");
|
||||
|
||||
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
|
||||
ASSERT_NE(nullptr, executionCallback.get());
|
||||
Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
|
||||
ASSERT_TRUE(executeLaunchStatus.isOk());
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
|
||||
|
||||
executionCallback->wait();
|
||||
ErrorStatus executionReturnStatus = executionCallback->getStatus();
|
||||
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
|
||||
}
|
||||
|
||||
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
|
||||
// so this is efficiently accomplished by moving the element to the end and
|
||||
// resizing the hidl_vec to one less.
|
||||
template <typename Type>
|
||||
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
|
||||
if (vec) {
|
||||
std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
|
||||
vec->resize(vec->size() - 1);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Type>
|
||||
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
|
||||
// assume vec is valid
|
||||
const uint32_t index = vec->size();
|
||||
vec->resize(index + 1);
|
||||
(*vec)[index] = value;
|
||||
return index;
|
||||
}
|
||||
|
||||
///////////////////////// REMOVE INPUT ////////////////////////////////////

static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t input = 0; input < request.inputs.size(); ++input) {
        const std::string message = "removeInput: removed input " + std::to_string(input);
        validate(preparedModel, message, request,
                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
    }
}

///////////////////////// REMOVE OUTPUT ////////////////////////////////////

static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t output = 0; output < request.outputs.size(); ++output) {
        const std::string message = "removeOutput: removed output " + std::to_string(output);
        validate(preparedModel, message, request,
                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
    }
}

///////////////////////////// ENTRY POINT //////////////////////////////////

std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}

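To make the offset packing above concrete (hypothetical byte sizes, not taken from the test data): three inputs of 4, 8, and 4 bytes are laid out back to back in the INPUT pool, so their offsets become 0, 4, and 12, and inputSize ends up 16 — the size then requested from nn::allocateSharedMemory.
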
void ValidationTest::validateRequests(const V1_0::Model& model,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

neuralnetworks/1.0/vts/functional/ValidationTests.cpp (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "VtsHalNeuralnetworks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

// forward declarations
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
#define VTS_CURRENT_TEST_CASE(TestName)                                           \
    TEST_F(ValidationTest, TestName) {                                            \
        const Model model = TestName::createTestModel();                          \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model);                                                     \
        validateRequests(model, requests);                                        \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
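For one generated model — say add, assuming it appears in this version's FOR_EACH_TEST_MODEL list — the macro expands to an ordinary gtest case:

TEST_F(ValidationTest, add) {
    const Model model = add::createTestModel();
    const std::vector<Request> requests = createRequests(add::examples);
    validateModel(model);
    validateRequests(model, requests);
}
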
} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

@@ -16,15 +16,7 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0.h"
#include "Utils.h"

#include <android-base/logging.h>

using ::android::hardware::hidl_memory;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;
#include "VtsHalNeuralnetworks.h"

namespace android {
namespace hardware {
@@ -33,11 +25,6 @@ namespace V1_0 {
namespace vts {
namespace functional {

// allocator helper
hidl_memory allocateSharedMemory(int64_t size) {
    return nn::allocateSharedMemory(size);
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

@@ -51,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
}

void NeuralnetworksHidlEnvironment::registerTestServices() {
    registerTestService<V1_0::IDevice>();
    registerTestService<IDevice>();
}

// The main test class for NEURALNETWORKS HIDL HAL.
NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}

NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}

void NeuralnetworksHidlTest::SetUp() {
    device = ::testing::VtsHalHidlTargetTestBase::getService<V1_0::IDevice>(
    ::testing::VtsHalHidlTargetTestBase::SetUp();
    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
        NeuralnetworksHidlEnvironment::getInstance());
    ASSERT_NE(nullptr, device.get());
}

void NeuralnetworksHidlTest::TearDown() {}
void NeuralnetworksHidlTest::TearDown() {
    device = nullptr;
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

} // namespace functional
} // namespace vts

::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
    return os << toString(deviceStatus);
}

} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;

int main(int argc, char** argv) {
    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
    ::testing::InitGoogleTest(&argc, argv);
    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);

    int status = RUN_ALL_TESTS();
    return status;
}

@@ -18,16 +18,15 @@
#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H

#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>

#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>

#include <android-base/macros.h>
#include <gtest/gtest.h>
#include <string>
#include <iostream>
#include <vector>

namespace android {
namespace hardware {
@@ -36,47 +35,47 @@ namespace V1_0 {
namespace vts {
namespace functional {

hidl_memory allocateSharedMemory(int64_t size);

// A class for test environment setup
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
    NeuralnetworksHidlEnvironment();
    NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
    NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
    NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
    NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
    ~NeuralnetworksHidlEnvironment() override;

  public:
    ~NeuralnetworksHidlEnvironment() override;
    static NeuralnetworksHidlEnvironment* getInstance();
    void registerTestServices() override;
};

// The main test class for NEURALNETWORKS HIDL HAL.
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);

  public:
    NeuralnetworksHidlTest();
    ~NeuralnetworksHidlTest() override;
    void SetUp() override;
    void TearDown() override;

    sp<V1_0::IDevice> device;
  protected:
    sp<IDevice> device;
};

// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
  protected:
    void validateModel(const Model& model);
    void validateRequests(const Model& model, const std::vector<Request>& request);
};

// Tag for the generated tests
class GeneratedTest : public NeuralnetworksHidlTest {};

} // namespace functional
} // namespace vts

// pretty-print values for error messages

template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
                                                V1_0::ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
                                                V1_0::DeviceStatus deviceStatus) {
    return os << toString(deviceStatus);
}
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);

} // namespace V1_0
} // namespace neuralnetworks
@@ -1,293 +0,0 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_0.h"

#include "Callbacks.h"
#include "Models.h"
#include "TestHarness.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);
    Model model = createValidTestModel_1_0();

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure whether it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    Return<DeviceStatus> status = device->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    Return<void> ret =
        device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations positive test
TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
    Model model = createValidTestModel_1_0();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_EQ(model.operations.size(), supported.size());
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 1
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
    Model model = createInvalidTestModel1_1_0();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 2
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
    Model model = createInvalidTestModel2_1_0();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}

// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
    sp<IPreparedModel> preparedModel;
    doPrepareModelShortcut(device, &preparedModel);
}

// prepare simple model negative test 1
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
    Model model = createInvalidTestModel1_1_0();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}

// prepare simple model negative test 2
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
    Model model = createInvalidTestModel2_1_0();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}

// execute simple graph positive test
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    const uint32_t OUTPUT = 1;

    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createValidTestRequest();

    auto postWork = [&] {
        sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
        if (outputMemory == nullptr) {
            return false;
        }
        float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
        if (outputPtr == nullptr) {
            return false;
        }
        outputMemory->read();
        std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
        outputMemory->commit();
        return true;
    };

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    executionCallback->on_finish(postWork);
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
    EXPECT_EQ(expectedData, outputData);
}

// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createInvalidTestRequest1();

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createInvalidTestRequest2();

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

} // namespace functional
} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware
} // namespace android

using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;

int main(int argc, char** argv) {
    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
    ::testing::InitGoogleTest(&argc, argv);
    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);

    int status = RUN_ALL_TESTS();
    return status;
}

@@ -102,6 +102,8 @@ interface IDevice extends @1.0::IDevice {
     * Multiple threads can call prepareModel on the same model concurrently.
     *
     * @param model The model to be prepared for execution.
     * @param preference Indicates the intended execution behavior of a prepared
     *                   model.
     * @param callback A callback object used to return the error status of
     *                 preparing the model for execution and the prepared model
     *                 if successful, nullptr otherwise. The callback object's
@@ -115,6 +117,7 @@ interface IDevice extends @1.0::IDevice {
     *                 - INVALID_ARGUMENT if one of the input arguments is
     *                   invalid
     */
    prepareModel_1_1(Model model, IPreparedModelCallback callback)
    prepareModel_1_1(Model model, ExecutionPreference preference,
                     IPreparedModelCallback callback)
        generates (ErrorStatus status);
};
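A minimal caller-side sketch of the new signature (hypothetical client code, reusing the PreparedModelCallback pattern from the tests above):

sp<PreparedModelCallback> callback = new PreparedModelCallback();
Return<ErrorStatus> launchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::SUSTAINED_SPEED, callback);
if (launchStatus.isOk() && static_cast<ErrorStatus>(launchStatus) == ErrorStatus::NONE) {
    callback->wait();  // the prepared model is delivered asynchronously
    sp<IPreparedModel> preparedModel = callback->getPreparedModel();
}
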

@@ -27,25 +27,24 @@ import @1.0::PerformanceInfo;
 */
enum OperationType : @1.0::OperationType {
    /**
     * BatchToSpace for N-D tensors.
     * BatchToSpace for N-dimensional tensors.
     *
     * This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape
     * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape
     * block_shape + [batch], interleaves these blocks back into the grid defined by the
     * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input.
     * The spatial dimensions of this intermediate result are then optionally cropped
     * according to the amount to crop to produce the output.
     *
     * This is the reverse of SpaceToBatch.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 0: An n-D tensor, specifying the tensor to be reshaped
     * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
     *    input tensor. All values must be >= 1.
     * 2: A 1-D Tensor of type TENSOR_INT32, the amount to crop for each spatial dimension of the
     *    input tensor. All values must be >= 0.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
@@ -53,9 +52,9 @@ enum OperationType : @1.0::OperationType {
    BATCH_TO_SPACE_ND = 29,

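As a worked shape example for the semantics above (hypothetical values): with block_shape = [2, 2] and no cropping, a [4, 1, 1, 3] input is rearranged into a [1, 2, 2, 3] output — the batch of 4 folds back into a 2x2 spatial grid.
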
    /**
     * Divides the second tensor from the first tensor, element-wise.
     * Element-wise division of two tensors.
     *
     * Takes two input tensors of identical OperandType and compatible dimensions. The output
     * Takes two input tensors of identical type and compatible dimensions. The output
     * is the result of dividing the first input tensor by the second, optionally
     * modified by an activation function.
     *
@@ -71,7 +70,9 @@ enum OperationType : @1.0::OperationType {
     * input2.dimension = {5, 4, 3, 1}
     * output.dimension = {5, 4, 3, 2}
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
@@ -88,15 +89,17 @@ enum OperationType : @1.0::OperationType {
    /**
     * Computes the mean of elements across dimensions of a tensor.
     *
     * Reduces input tensor along the dimensions given in axis. Unless keep_dims is true,
     * the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is
     * true, the reduced dimensions are retained with length 1.
     * Reduces the input tensor along the given dimensions to reduce. Unless keep_dims
     * is true, the rank of the tensor is reduced by 1 for each entry in axis.
     * If keep_dims is true, the reduced dimensions are retained with length 1.
     *
     * If axis has no entries, all dimensions are reduced, and a tensor with a single
     * element is returned.
     * If dimensions to reduce have no entries, all dimensions are reduced, and a tensor with
     * a single element is returned.
     *
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
@@ -115,14 +118,18 @@ enum OperationType : @1.0::OperationType {
     *
     * This operation pads a tensor according to the specified paddings.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 2-D Tensor of type TENSOR_INT32. The paddings, before and after for each spatial dimension
     *    of the input tensor.
     * 0: An n-D tensor, specifying the tensor to be padded.
     * 1: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
     *    input tensor. The shape of the tensor must be {rank(input0), 2}.
     *    padding[i, 0] specifies the number of elements to be padded in the front of dimension i.
     *    padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
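For instance (hypothetical shapes): padding a {2, 3} input with paddings {{1, 1}, {2, 2}} adds one element before and after dimension 0 and two before and after dimension 1, giving a {4, 7} output.
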
@@ -130,7 +137,7 @@ enum OperationType : @1.0::OperationType {
    PAD = 32,

    /**
     * SpaceToBatch for N-D tensors.
     * SpaceToBatch for N-dimensional tensors.
     *
     * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
     * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
@@ -139,16 +146,20 @@ enum OperationType : @1.0::OperationType {
     * batch position. Prior to division into blocks, the spatial dimensions of the input are
     * optionally zero padded according to paddings.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
     *    input tensor. All values must be >= 1.
     * 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
     *    input tensor. All values must be >= 0.
     *    input tensor. All values must be >= 0. The shape of the tensor must be {rank(input0), 2}.
     *    padding[i, 0] specifies the number of elements to be padded in the front of dimension i.
     *    padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
@@ -160,17 +171,20 @@ enum OperationType : @1.0::OperationType {
     *
     * Given a tensor input, this operation returns a tensor of the same type with all
     * dimensions of size 1 removed. If you don't want to remove all size 1 dimensions,
     * you can remove specific size 1 dimensions by specifying axis.
     * you can remove specific size 1 dimensions by specifying the axes (input1).
     *
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: An 1-D Tensor of type TENSOR_INT32. The dimensions to squeeze. If None (the default),
     *    squeezes all dimensions. If specified, only squeezes the dimensions listed. The dimension
     *    index starts at 0. It is an error to squeeze a dimension that is not 1.
     * 0: An n-D tensor, the tensor to be squeezed.
     * 1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified,
     *    only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
     *    The dimension index starts at 0. An error must be reported if squeezing a dimension that
     *    is not 1.
     *
     * Outputs:
     * 0: A tensor of the same type as input0. Contains the same data as input, but has one or more
@@ -181,23 +195,25 @@ enum OperationType : @1.0::OperationType {
    /**
     * Extracts a strided slice of a tensor.
     *
     * This op extracts a slice of size (end-begin)/stride from the given input tensor.
     * Starting at the location specified by begin the slice continues by adding
     * Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given
     * input tensor. Starting at the location specified by begin the slice continues by adding
     * stride to the index until all dimensions are not less than end. Note that a stride can
     * be negative, which causes a reverse slice.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 0: An n-D tensor, specifying the tensor to be sliced.
     * 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input
     *    tensor to be sliced.
     *    tensor to be sliced. The length must be equal to rank(input0).
     * 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input
     *    tensor to be sliced.
     *    tensor to be sliced. The length must be equal to rank(input0).
     * 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
     *    tensor to be sliced.
     *    tensor to be sliced. The length must be equal to rank(input0).
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
@@ -205,7 +221,7 @@ enum OperationType : @1.0::OperationType {
    STRIDED_SLICE = 35,

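A quick worked example of the slice arithmetic (hypothetical values): for the 1-D input [1, 2, 3, 4, 5, 6] with begin = [1], end = [5], stride = [2], the slice gathers indices 1 and 3 and produces [2, 4], matching the size ceil((5 - 1) / 2) = 2.
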
    /**
     * Subtracts the second tensor from the first tensor, element-wise.
     * Element-wise subtraction of two tensors.
     *
     * Takes two input tensors of identical type and compatible dimensions. The output
     * is the result of subtracting the second input tensor from the first one, optionally
@@ -223,7 +239,9 @@ enum OperationType : @1.0::OperationType {
     * input2.dimension = {5, 4, 3, 1}
     * output.dimension = {5, 4, 3, 2}
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
@@ -240,18 +258,20 @@ enum OperationType : @1.0::OperationType {
    /**
     * Transposes the input tensor, permuting the dimensions according to the perm tensor.
     *
     * The returned tensor's dimension i must correspond to the input dimension perm[i].
     * The returned tensor's dimension i corresponds to the input dimension perm[i].
     * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor.
     * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors.
     *
     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
     * Supported tensor types:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * 0: An n-D tensor, specifying the input.
     * 1: A 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the input
     *    tensor.
     * 0: An n-D tensor, specifying the tensor to be transposed.
     * 1: An optional 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the
     *    input tensor.
     *
     * Outputs:
     * 0: A tensor of the same type as input0.
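For example (hypothetical shapes): transposing a {2, 3} input with perm = {1, 0} yields a {3, 2} output; omitting perm gives the same result for a 2-D tensor, since the default permutation (n-1...0) reverses the dimension order.
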
@@ -362,3 +382,24 @@ struct Model {
     */
    bool relaxComputationFloat32toFloat16;
};

/**
 * Execution preferences.
 */
enum ExecutionPreference : int32_t {
    /**
     * Prefer executing in a way that minimizes battery drain.
     * This is desirable for compilations that will be executed often.
     */
    LOW_POWER = 0,
    /**
     * Prefer returning a single answer as fast as possible, even if this causes
     * more power consumption.
     */
    FAST_SINGLE_ANSWER = 1,
    /**
     * Prefer maximizing the throughput of successive frames, for example when
     * processing successive frames coming from the camera.
     */
    SUSTAINED_SPEED = 2,
};

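In practice a client chooses the preference per compilation: LOW_POWER suits models that will be reused many times on battery, FAST_SINGLE_ANSWER suits one-shot latency-sensitive queries, and SUSTAINED_SPEED suits streaming workloads such as per-frame camera processing. The V1.1 validation code later in this diff is also structured to exercise prepareModel_1_1 with invalid preference values (see the validExecutionPreference guard in ValidateModel.cpp below).
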
@@ -17,9 +17,12 @@
cc_test {
    name: "VtsHalNeuralnetworksV1_1TargetTest",
    srcs: [
        "VtsHalNeuralnetworksV1_1.cpp",
        "VtsHalNeuralnetworksV1_1BasicTest.cpp",
        "VtsHalNeuralnetworksV1_1GeneratedTest.cpp",
        "BasicTests.cpp",
        "GeneratedTests.cpp",
        "ValidateModel.cpp",
        "ValidateRequest.cpp",
        "ValidationTests.cpp",
        "VtsHalNeuralnetworks.cpp",
    ],
    defaults: ["VtsHalTargetTestDefaults"],
    static_libs: [
@@ -36,4 +39,13 @@ cc_test {
        "libneuralnetworks_generated_test_harness_headers",
        "libneuralnetworks_generated_tests",
    ],
    // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
    // error in ld.gold.
    arch: {
        arm: {
            sanitize: {
                never: true,
            },
        },
    },
}

neuralnetworks/1.1/vts/functional/BasicTests.cpp (new file, 58 lines)
@@ -0,0 +1,58 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    Return<DeviceStatus> status = device->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    Return<void> ret =
        device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
        });
    EXPECT_TRUE(ret.isOk());
}

} // namespace functional
} // namespace vts
} // namespace V1_1
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
@@ -16,54 +16,33 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_1.h"
#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::Operation;
using ::android::hardware::neuralnetworks::V1_1::OperationType;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::generated_tests::MixedTypedExampleType;
extern void Execute(sp<V1_1::IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
                    const std::vector<MixedTypedExampleType>&);
extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
                    std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
} // namespace generated_tests

namespace V1_1 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;

// Mixed-typed examples
typedef generated_tests::MixedTypedExampleType MixedTypedExample;
neuralnetworks/1.1/vts/functional/Models.h (new file, 323 lines)
@@ -0,0 +1,323 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

using MixedTypedExample = generated_tests::MixedTypedExampleType;

#define FOR_EACH_TEST_MODEL(FN) \
    FN(add) \
    FN(add_broadcast_quant8) \
    FN(add_quant8) \
    FN(add_relaxed) \
    FN(avg_pool_float_1) \
    FN(avg_pool_float_1_relaxed) \
    FN(avg_pool_float_2) \
    FN(avg_pool_float_2_relaxed) \
    FN(avg_pool_float_3) \
    FN(avg_pool_float_3_relaxed) \
    FN(avg_pool_float_4) \
    FN(avg_pool_float_4_relaxed) \
    FN(avg_pool_float_5) \
    FN(avg_pool_quant8_1) \
    FN(avg_pool_quant8_2) \
    FN(avg_pool_quant8_3) \
    FN(avg_pool_quant8_4) \
    FN(avg_pool_quant8_5) \
    FN(batch_to_space) \
    FN(batch_to_space_float_1) \
    FN(batch_to_space_quant8_1) \
    FN(concat_float_1) \
    FN(concat_float_1_relaxed) \
    FN(concat_float_2) \
    FN(concat_float_2_relaxed) \
    FN(concat_float_3) \
    FN(concat_float_3_relaxed) \
    FN(concat_quant8_1) \
    FN(concat_quant8_2) \
    FN(concat_quant8_3) \
    FN(conv_1_h3_w2_SAME) \
    FN(conv_1_h3_w2_SAME_relaxed) \
    FN(conv_1_h3_w2_VALID) \
    FN(conv_1_h3_w2_VALID_relaxed) \
    FN(conv_3_h3_w2_SAME) \
    FN(conv_3_h3_w2_SAME_relaxed) \
    FN(conv_3_h3_w2_VALID) \
    FN(conv_3_h3_w2_VALID_relaxed) \
    FN(conv_float) \
    FN(conv_float_2) \
    FN(conv_float_channels) \
    FN(conv_float_channels_relaxed) \
    FN(conv_float_channels_weights_as_inputs) \
    FN(conv_float_channels_weights_as_inputs_relaxed) \
    FN(conv_float_large) \
    FN(conv_float_large_relaxed) \
    FN(conv_float_large_weights_as_inputs) \
    FN(conv_float_large_weights_as_inputs_relaxed) \
    FN(conv_float_relaxed) \
    FN(conv_float_weights_as_inputs) \
    FN(conv_float_weights_as_inputs_relaxed) \
    FN(conv_quant8) \
    FN(conv_quant8_2) \
    FN(conv_quant8_channels) \
    FN(conv_quant8_channels_weights_as_inputs) \
    FN(conv_quant8_large) \
    FN(conv_quant8_large_weights_as_inputs) \
    FN(conv_quant8_overflow) \
    FN(conv_quant8_overflow_weights_as_inputs) \
    FN(conv_quant8_weights_as_inputs) \
    FN(depth_to_space_float_1) \
    FN(depth_to_space_float_1_relaxed) \
    FN(depth_to_space_float_2) \
    FN(depth_to_space_float_2_relaxed) \
    FN(depth_to_space_float_3) \
    FN(depth_to_space_float_3_relaxed) \
    FN(depth_to_space_quant8_1) \
    FN(depth_to_space_quant8_2) \
    FN(depthwise_conv) \
    FN(depthwise_conv2d_float) \
    FN(depthwise_conv2d_float_2) \
    FN(depthwise_conv2d_float_large) \
    FN(depthwise_conv2d_float_large_2) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large_relaxed) \
    FN(depthwise_conv2d_float_large_weights_as_inputs) \
    FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
    FN(depthwise_conv2d_float_weights_as_inputs) \
    FN(depthwise_conv2d_quant8) \
    FN(depthwise_conv2d_quant8_2) \
    FN(depthwise_conv2d_quant8_large) \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
    FN(depthwise_conv2d_quant8_weights_as_inputs) \
    FN(depthwise_conv_relaxed) \
    FN(dequantize) \
    FN(div) \
    FN(embedding_lookup) \
    FN(embedding_lookup_relaxed) \
    FN(floor) \
    FN(floor_relaxed) \
    FN(fully_connected_float) \
    FN(fully_connected_float_2) \
    FN(fully_connected_float_large) \
    FN(fully_connected_float_large_weights_as_inputs) \
    FN(fully_connected_float_relaxed) \
    FN(fully_connected_float_weights_as_inputs) \
    FN(fully_connected_float_weights_as_inputs_relaxed) \
    FN(fully_connected_quant8) \
    FN(fully_connected_quant8_2) \
    FN(fully_connected_quant8_large) \
    FN(fully_connected_quant8_large_weights_as_inputs) \
    FN(fully_connected_quant8_weights_as_inputs) \
    FN(hashtable_lookup_float) \
    FN(hashtable_lookup_float_relaxed) \
    FN(hashtable_lookup_quant8) \
    FN(l2_normalization) \
    FN(l2_normalization_2) \
    FN(l2_normalization_large) \
    FN(l2_normalization_large_relaxed) \
    FN(l2_normalization_relaxed) \
    FN(l2_pool_float) \
    FN(l2_pool_float_2) \
    FN(l2_pool_float_large) \
    FN(l2_pool_float_relaxed) \
    FN(local_response_norm_float_1) \
    FN(local_response_norm_float_1_relaxed) \
    FN(local_response_norm_float_2) \
    FN(local_response_norm_float_2_relaxed) \
    FN(local_response_norm_float_3) \
    FN(local_response_norm_float_3_relaxed) \
    FN(local_response_norm_float_4) \
    FN(local_response_norm_float_4_relaxed) \
    FN(logistic_float_1) \
    FN(logistic_float_1_relaxed) \
    FN(logistic_float_2) \
    FN(logistic_float_2_relaxed) \
    FN(logistic_quant8_1) \
    FN(logistic_quant8_2) \
    FN(lsh_projection) \
    FN(lsh_projection_2) \
    FN(lsh_projection_2_relaxed) \
    FN(lsh_projection_relaxed) \
    FN(lsh_projection_weights_as_inputs) \
    FN(lsh_projection_weights_as_inputs_relaxed) \
    FN(lstm) \
    FN(lstm2) \
    FN(lstm2_relaxed) \
    FN(lstm2_state) \
    FN(lstm2_state2) \
    FN(lstm2_state2_relaxed) \
    FN(lstm2_state_relaxed) \
    FN(lstm3) \
    FN(lstm3_relaxed) \
    FN(lstm3_state) \
    FN(lstm3_state2) \
    FN(lstm3_state2_relaxed) \
    FN(lstm3_state3) \
    FN(lstm3_state3_relaxed) \
    FN(lstm3_state_relaxed) \
    FN(lstm_relaxed) \
    FN(lstm_state) \
    FN(lstm_state2) \
    FN(lstm_state2_relaxed) \
    FN(lstm_state_relaxed) \
    FN(max_pool_float_1) \
    FN(max_pool_float_1_relaxed) \
    FN(max_pool_float_2) \
    FN(max_pool_float_2_relaxed) \
    FN(max_pool_float_3) \
    FN(max_pool_float_3_relaxed) \
    FN(max_pool_float_4) \
    FN(max_pool_quant8_1) \
    FN(max_pool_quant8_2) \
    FN(max_pool_quant8_3) \
    FN(max_pool_quant8_4) \
    FN(mean) \
    FN(mean_float_1) \
    FN(mean_float_2) \
    FN(mean_quant8_1) \
    FN(mean_quant8_2) \
    FN(mobilenet_224_gender_basic_fixed) \
    FN(mobilenet_224_gender_basic_fixed_relaxed) \
    FN(mobilenet_quantized) \
    FN(mul) \
    FN(mul_broadcast_quant8) \
    FN(mul_quant8) \
    FN(mul_relaxed) \
    FN(mul_relu) \
    FN(mul_relu_relaxed) \
    FN(pad) \
    FN(pad_float_1) \
    FN(relu1_float_1) \
    FN(relu1_float_1_relaxed) \
    FN(relu1_float_2) \
    FN(relu1_float_2_relaxed) \
    FN(relu1_quant8_1) \
    FN(relu1_quant8_2) \
    FN(relu6_float_1) \
    FN(relu6_float_1_relaxed) \
    FN(relu6_float_2) \
    FN(relu6_float_2_relaxed) \
    FN(relu6_quant8_1) \
    FN(relu6_quant8_2) \
    FN(relu_float_1) \
    FN(relu_float_1_relaxed) \
    FN(relu_float_2) \
    FN(relu_quant8_1) \
    FN(relu_quant8_2) \
    FN(reshape) \
    FN(reshape_quant8) \
    FN(reshape_quant8_weights_as_inputs) \
    FN(reshape_relaxed) \
    FN(reshape_weights_as_inputs) \
    FN(reshape_weights_as_inputs_relaxed) \
    FN(resize_bilinear) \
    FN(resize_bilinear_2) \
    FN(resize_bilinear_relaxed) \
    FN(rnn) \
    FN(rnn_relaxed) \
    FN(rnn_state) \
    FN(rnn_state_relaxed) \
    FN(softmax_float_1) \
    FN(softmax_float_1_relaxed) \
    FN(softmax_float_2) \
    FN(softmax_float_2_relaxed) \
    FN(softmax_quant8_1) \
    FN(softmax_quant8_2) \
    FN(space_to_batch) \
    FN(space_to_batch_float_1) \
    FN(space_to_batch_float_2) \
    FN(space_to_batch_float_3) \
    FN(space_to_batch_quant8_1) \
    FN(space_to_batch_quant8_2) \
    FN(space_to_batch_quant8_3) \
    FN(space_to_depth_float_1) \
    FN(space_to_depth_float_1_relaxed) \
    FN(space_to_depth_float_2) \
    FN(space_to_depth_float_2_relaxed) \
    FN(space_to_depth_float_3) \
    FN(space_to_depth_float_3_relaxed) \
    FN(space_to_depth_quant8_1) \
    FN(space_to_depth_quant8_2) \
    FN(squeeze) \
    FN(squeeze_float_1) \
    FN(squeeze_quant8_1) \
    FN(strided_slice) \
    FN(strided_slice_float_1) \
    FN(strided_slice_float_10) \
    FN(strided_slice_float_2) \
    FN(strided_slice_float_3) \
    FN(strided_slice_float_4) \
    FN(strided_slice_float_5) \
    FN(strided_slice_float_6) \
    FN(strided_slice_float_7) \
    FN(strided_slice_float_8) \
    FN(strided_slice_float_9) \
    FN(strided_slice_qaunt8_10) \
    FN(strided_slice_quant8_1) \
    FN(strided_slice_quant8_2) \
    FN(strided_slice_quant8_3) \
    FN(strided_slice_quant8_4) \
    FN(strided_slice_quant8_5) \
    FN(strided_slice_quant8_6) \
    FN(strided_slice_quant8_7) \
    FN(strided_slice_quant8_8) \
    FN(strided_slice_quant8_9) \
    FN(sub) \
    FN(svdf) \
    FN(svdf2) \
    FN(svdf2_relaxed) \
    FN(svdf_relaxed) \
    FN(svdf_state) \
    FN(svdf_state_relaxed) \
    FN(tanh) \
    FN(tanh_relaxed) \
    FN(transpose) \
    FN(transpose_float_1) \
    FN(transpose_quant8_1)

#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function { \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel(); \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

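Applied to a single model name — say add — the forward-declaration macro above expands to:

namespace add {
extern std::vector<MixedTypedExample> examples;
Model createTestModel();
}
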
} // namespace functional
|
||||
} // namespace vts
|
||||
} // namespace V1_1
|
||||
} // namespace neuralnetworks
|
||||
} // namespace hardware
|
||||
} // namespace android
|
||||
|
||||
#endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
|
||||
539
neuralnetworks/1.1/vts/functional/ValidateModel.cpp
Normal file
539
neuralnetworks/1.1/vts/functional/ValidateModel.cpp
Normal file
@@ -0,0 +1,539 @@
|
||||
/*
|
||||
* Copyright (C) 2018 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#define LOG_TAG "neuralnetworks_hidl_hal_test"
|
||||
|
||||
#include "VtsHalNeuralnetworks.h"
|
||||
|
||||
#include "Callbacks.h"
|
||||
|
||||
namespace android {
|
||||
namespace hardware {
|
||||
namespace neuralnetworks {
|
||||
namespace V1_1 {
|
||||
|
||||
using V1_0::IPreparedModel;
|
||||
using V1_0::Operand;
|
||||
using V1_0::OperandLifeTime;
|
||||
using V1_0::OperandType;
|
||||
|
||||
namespace vts {
|
||||
namespace functional {
|
||||
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
|
||||
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
|
||||
|
||||
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
||||
|
||||
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
                                           const V1_1::Model& model) {
    SCOPED_TRACE(message + " [getSupportedOperations_1_1]");

    Return<void> ret =
        device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
        });
    EXPECT_TRUE(ret.isOk());
}

static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
                                 const V1_1::Model& model, ExecutionPreference preference) {
    SCOPED_TRACE(message + " [prepareModel_1_1]");

    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preference, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    ASSERT_EQ(nullptr, preparedModel.get());
}

static bool validExecutionPreference(ExecutionPreference preference) {
    return preference == ExecutionPreference::LOW_POWER ||
           preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
           preference == ExecutionPreference::SUSTAINED_SPEED;
}

// Primary validation function. This function will take a valid model, apply a
// mutation to it to invalidate the model, then pass it to interface calls that
// use the model. Note that the model here is passed by value, and any mutation
// to the model does not leave this function.
static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model,
                     const std::function<void(Model*)>& mutation,
                     ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
    mutation(&model);
    if (validExecutionPreference(preference)) {
        validateGetSupportedOperations(device, message, model);
    }
    validatePrepareModel(device, message, model, preference);
}
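The pass-by-value parameter is what lets validate() reuse one pristine model across hundreds of mutations: each call corrupts only its private copy. A minimal standalone sketch of the same idiom, using std::vector as a stand-in for Model (illustrative names, not from the file):

#include <cassert>
#include <functional>
#include <vector>

// Pass-by-value + mutation: the callee mutates its private copy, so the
// caller's object is untouched and can be reused for the next mutation.
static void checkMutation(std::vector<int> v,  // copy, not reference
                          const std::function<void(std::vector<int>*)>& mutation) {
    mutation(&v);        // invalidate the copy
    assert(!v.empty());  // ... exercise the API with the broken copy here ...
}

int main() {
    std::vector<int> valid = {1, 2, 3};
    checkMutation(valid, [](std::vector<int>* v) { v->push_back(0); });
    assert(valid.size() == 3);  // original is still valid afterwards
}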
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    if (vec) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
}

template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    // assume vec is valid
    const uint32_t index = vec->size();
    vec->resize(index + 1);
    (*vec)[index] = value;
    return index;
}

static uint32_t addOperand(Model* model) {
    return hidl_vec_push_back(&model->operands,
                              {
                                  .type = OperandType::INT32,
                                  .dimensions = {},
                                  .numberOfConsumers = 0,
                                  .scale = 0.0f,
                                  .zeroPoint = 0,
                                  .lifetime = OperandLifeTime::MODEL_INPUT,
                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
                              });
}

static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
    uint32_t index = addOperand(model);
    model->operands[index].numberOfConsumers = 1;
    model->operands[index].lifetime = lifetime;
    return index;
}
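The rotate-and-resize removal above generalizes to any sequence container that lacks an erase() but supports resize(). A minimal sketch on std::vector showing why it is order-preserving:

#include <algorithm>
#include <cassert>
#include <vector>

// Remove vec[index] by rotating it to the back, then shrinking by one.
// std::rotate shifts [index + 1, end) left by one slot; the removed
// element lands in the last slot, which resize() then drops.
static void removeAt(std::vector<int>* vec, uint32_t index) {
    std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
    vec->resize(vec->size() - 1);
}

int main() {
    std::vector<int> v = {10, 20, 30, 40};
    removeAt(&v, 1);  // drops 20, preserves the order of the rest
    assert((v == std::vector<int>{10, 30, 40}));
}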
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////

static const int32_t invalidOperandTypes[] = {
    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
};

static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        for (int32_t invalidOperandType : invalidOperandTypes) {
            const std::string message = "mutateOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to value " +
                                        std::to_string(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
            });
        }
    }
}

///////////////////////// VALIDATE OPERAND RANK /////////////////////////

static uint32_t getInvalidRank(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
            return 1;
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_ASYMM:
            return 0;
        default:
            return 0;
    }
}

static void mutateOperandRankTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
        const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
                                    " has rank of " + std::to_string(invalidRank);
        validate(device, message, model, [operand, invalidRank](Model* model) {
            model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
        });
    }
}

///////////////////////// VALIDATE OPERAND SCALE /////////////////////////

static float getInvalidScale(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::TENSOR_FLOAT32:
            return 1.0f;
        case OperandType::TENSOR_INT32:
            return -1.0f;
        case OperandType::TENSOR_QUANT8_ASYMM:
            return 0.0f;
        default:
            return 0.0f;
    }
}

static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const float invalidScale = getInvalidScale(model.operands[operand].type);
        const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
                                    " has scale of " + std::to_string(invalidScale);
        validate(device, message, model, [operand, invalidScale](Model* model) {
            model->operands[operand].scale = invalidScale;
        });
    }
}

///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////

static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
            return {1};
        case OperandType::TENSOR_QUANT8_ASYMM:
            return {-1, 256};
        default:
            return {};
    }
}

static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::vector<int32_t> invalidZeroPoints =
            getInvalidZeroPoints(model.operands[operand].type);
        for (int32_t invalidZeroPoint : invalidZeroPoints) {
            const std::string message = "mutateOperandZeroPointTest: operand " +
                                        std::to_string(operand) + " has zero point of " +
                                        std::to_string(invalidZeroPoint);
            validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
                model->operands[operand].zeroPoint = invalidZeroPoint;
            });
        }
    }
}

///////////////////////// VALIDATE EXTRA ??? /////////////////////////

// TODO: Operand::lifetime
// TODO: Operand::location
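The invalidOperandTypes table above probes just outside each contiguous enum range (one below the first value, one above the last); invalidOperationTypes and invalidExecutionPreferences below use the same trick. A tiny standalone sketch of the technique, with an illustrative enum that is not from the HAL:

#include <cstdint>
#include <iostream>

// Probe values one step outside a contiguous enum range, mirroring the
// lower/upper-bound entries in the tables above.
enum class Fruit : int32_t { APPLE = 0, BANANA = 1, CHERRY = 2 };

int main() {
    const int32_t probes[] = {
        static_cast<int32_t>(Fruit::APPLE) - 1,   // one below the lower bound
        static_cast<int32_t>(Fruit::CHERRY) + 1,  // one above the upper bound
    };
    for (int32_t p : probes) {
        // Casting is well-formed for a scoped enum; the *value* is invalid,
        // which is exactly what a validation path must reject.
        Fruit f = static_cast<Fruit>(p);
        std::cout << "probing invalid enum value " << static_cast<int32_t>(f) << '\n';
    }
}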
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////

static void mutateOperand(Operand* operand, OperandType type) {
    Operand newOperand = *operand;
    newOperand.type = type;
    switch (type) {
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
            newOperand.dimensions = hidl_vec<uint32_t>();
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_FLOAT32:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_INT32:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.zeroPoint = 0;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
            break;
        case OperandType::OEM:
        case OperandType::TENSOR_OEM_BYTE:
        default:
            break;
    }
    *operand = newOperand;
}

static bool mutateOperationOperandTypeSkip(size_t operand, const V1_1::Model& model) {
    // LSH_PROJECTION's second argument is allowed to have any type. This is the
    // only operation that currently has a type that can be anything independent
    // from any other type. Changing the operand type to any other type will
    // result in a valid model for LSH_PROJECTION. If this is the case, skip the
    // test.
    for (const Operation& operation : model.operations) {
        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
            return true;
        }
    }
    return false;
}

static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        if (mutateOperationOperandTypeSkip(operand, model)) {
            continue;
        }
        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
            // Do not test OEM types
            if (invalidOperandType == model.operands[operand].type ||
                invalidOperandType == OperandType::OEM ||
                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
                continue;
            }
            const std::string message = "mutateOperationOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to type " +
                                        toString(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                mutateOperand(&model->operands[operand], invalidOperandType);
            });
        }
    }
}
///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////

static const int32_t invalidOperationTypes[] = {
    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
    static_cast<int32_t>(OperationType::TRANSPOSE) + 1,      // upper bound fundamental
    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
};

static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (int32_t invalidOperationType : invalidOperationTypes) {
            const std::string message = "mutateOperationTypeTest: operation " +
                                        std::to_string(operation) + " set to value " +
                                        std::to_string(invalidOperationType);
            validate(device, message, model, [operation, invalidOperationType](Model* model) {
                model->operations[operation].type =
                    static_cast<OperationType>(invalidOperationType);
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////

static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
                                                 const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const std::string message = "mutateOperationInputOperandIndexTest: operation " +
                                        std::to_string(operation) + " input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input, invalidOperand](Model* model) {
                model->operations[operation].inputs[input] = invalidOperand;
            });
        }
    }
}

///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////

static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
                                                  const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const uint32_t invalidOperand = model.operands.size();
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
                                        std::to_string(operation) + " output " +
                                        std::to_string(output);
            validate(device, message, model, [operation, output, invalidOperand](Model* model) {
                model->operations[operation].outputs[output] = invalidOperand;
            });
        }
    }
}
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////

static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
    if (vec) {
        // remove elements matching "value"
        auto last = std::remove(vec->begin(), vec->end(), value);
        vec->resize(std::distance(vec->begin(), last));

        // decrement elements exceeding "value" (note "v - 1", not "v--":
        // post-decrementing the lambda's local copy would return the
        // original value unchanged)
        std::transform(vec->begin(), vec->end(), vec->begin(),
                       [value](uint32_t v) { return v > value ? v - 1 : v; });
    }
}

static void removeOperand(Model* model, uint32_t index) {
    hidl_vec_removeAt(&model->operands, index);
    for (Operation& operation : model->operations) {
        removeValueAndDecrementGreaterValues(&operation.inputs, index);
        removeValueAndDecrementGreaterValues(&operation.outputs, index);
    }
    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}

static void removeOperandTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::string message = "removeOperandTest: operand " + std::to_string(operand);
        validate(device, message, model,
                 [operand](Model* model) { removeOperand(model, operand); });
    }
}
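Deleting operand k invalidates every index greater than k, so all remaining references must shift down by one to stay aligned. A worked example of the helper above on std::vector, with illustrative values:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

// After deleting element "value" from an index space, every index greater
// than "value" must shift down by one so references stay consistent.
static void removeValueAndDecrementGreaterValues(std::vector<uint32_t>* vec, uint32_t value) {
    auto last = std::remove(vec->begin(), vec->end(), value);
    vec->resize(std::distance(vec->begin(), last));
    std::transform(vec->begin(), vec->end(), vec->begin(),
                   [value](uint32_t v) { return v > value ? v - 1 : v; });
}

int main() {
    // An operation referenced operands {0, 2, 3}; operand 2 is removed.
    std::vector<uint32_t> inputs = {0, 2, 3};
    removeValueAndDecrementGreaterValues(&inputs, 2);
    assert((inputs == std::vector<uint32_t>{0, 2}));  // old index 3 became 2
}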
///////////////////////// REMOVE OPERATION /////////////////////////

static void removeOperation(Model* model, uint32_t index) {
    for (uint32_t operand : model->operations[index].inputs) {
        model->operands[operand].numberOfConsumers--;
    }
    hidl_vec_removeAt(&model->operations, index);
}

static void removeOperationTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "removeOperationTest: operation " + std::to_string(operation);
        validate(device, message, model,
                 [operation](Model* model) { removeOperation(model, operation); });
    }
}

///////////////////////// REMOVE OPERATION INPUT /////////////////////////

static void removeOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const V1_1::Operation& op = model.operations[operation];
            // CONCATENATION has at least 2 inputs, with the last element being
            // INT32. Skip this test if removing one of CONCATENATION's
            // inputs still produces a valid model.
            if (op.type == V1_1::OperationType::CONCATENATION && op.inputs.size() > 2 &&
                input != op.inputs.size() - 1) {
                continue;
            }
            const std::string message = "removeOperationInputTest: operation " +
                                        std::to_string(operation) + ", input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input](Model* model) {
                uint32_t operand = model->operations[operation].inputs[input];
                model->operands[operand].numberOfConsumers--;
                hidl_vec_removeAt(&model->operations[operation].inputs, input);
            });
        }
    }
}

///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////

static void removeOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
            const std::string message = "removeOperationOutputTest: operation " +
                                        std::to_string(operation) + ", output " +
                                        std::to_string(output);
            validate(device, message, model, [operation, output](Model* model) {
                hidl_vec_removeAt(&model->operations[operation].outputs, output);
            });
        }
    }
}
///////////////////////// MODEL VALIDATION /////////////////////////

// TODO: remove model input
// TODO: remove model output
// TODO: add unused operation

///////////////////////// ADD OPERATION INPUT /////////////////////////

static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
            hidl_vec_push_back(&model->operations[operation].inputs, index);
            hidl_vec_push_back(&model->inputIndexes, index);
        });
    }
}

///////////////////////// ADD OPERATION OUTPUT /////////////////////////

static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message =
            "addOperationOutputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
            hidl_vec_push_back(&model->operations[operation].outputs, index);
            hidl_vec_push_back(&model->outputIndexes, index);
        });
    }
}

///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////

static const int32_t invalidExecutionPreferences[] = {
    static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
    static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
};

static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (int32_t preference : invalidExecutionPreferences) {
        const std::string message =
            "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
        validate(device, message, model, [](Model*) {},
                 static_cast<ExecutionPreference>(preference));
    }
}

////////////////////////// ENTRY POINT //////////////////////////////

void ValidationTest::validateModel(const V1_1::Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
    mutateExecutionPreferenceTest(device, model);
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_1
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
262 neuralnetworks/1.1/vts/functional/ValidateRequest.cpp Normal file
@@ -0,0 +1,262 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using generated_tests::MixedTyped;
using generated_tests::MixedTypedExampleType;
using generated_tests::for_all;

///////////////////////// UTILITY FUNCTIONS /////////////////////////
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that are
    // guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
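prepareModel_1_1 reports its result twice: a synchronous launch status, and the real outcome delivered later through the callback's wait()/getStatus(). The real implementation lives in Callbacks.h; a minimal sketch of that wait()/getStatus() pattern, using plain standard-library primitives and an int status as a stand-in for ErrorStatus:

#include <condition_variable>
#include <mutex>

// Minimal analogue of the callback objects used above: the service thread
// posts a result once; test threads block until it lands.
class StatusCallback {
   public:
    void notify(int status) {
        std::lock_guard<std::mutex> lock(mMutex);
        mStatus = status;
        mNotified = true;
        mCondition.notify_all();
    }
    void wait() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mNotified; });
    }
    int getStatus() {
        wait();  // block until notify() has been called
        return mStatus;
    }

   private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    bool mNotified = false;
    int mStatus = -1;
};

This is why the tests assert on both values: a driver may accept the launch yet still fail asynchronously, and vice versa a failed launch must also be reported through the callback.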
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
// mutation to the request does not leave this function.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
                     Request request, const std::function<void(Request*)>& mutation) {
    mutation(&request);
    SCOPED_TRACE(message + " [execute]");

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
// resizing the hidl_vec to one less.
template <typename Type>
static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
    if (vec) {
        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
        vec->resize(vec->size() - 1);
    }
}

template <typename Type>
static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
    // assume vec is valid
    const uint32_t index = vec->size();
    vec->resize(index + 1);
    (*vec)[index] = value;
    return index;
}
///////////////////////// REMOVE INPUT ////////////////////////////////////

static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t input = 0; input < request.inputs.size(); ++input) {
        const std::string message = "removeInput: removed input " + std::to_string(input);
        validate(preparedModel, message, request,
                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
    }
}

///////////////////////// REMOVE OUTPUT ////////////////////////////////////

static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
    for (size_t output = 0; output < request.outputs.size(); ++output) {
        const std::string message = "removeOutput: removed Output " + std::to_string(output);
        validate(preparedModel, message, request,
                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
    }
}
///////////////////////////// ENTRY POINT //////////////////////////////////

std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}

void ValidationTest::validateRequests(const V1_1::Model& model,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_1
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
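createRequests packs all inputs back-to-back into one shared memory pool, then fixes up each argument's offset in a second pass; arguments with hasNoValue keep no offset but still advance the cursor is not the case here, only valued ones take an offset while the running length always advances. A worked example of that offset loop with illustrative lengths {4, 0, 8}:

#include <cassert>
#include <cstdint>
#include <vector>

struct Arg {
    bool hasNoValue;
    uint32_t offset;
    uint32_t length;
};

int main() {
    // Three inputs of 4, 0, and 8 bytes; the 0-byte one has no value.
    std::vector<Arg> args = {{false, 0, 4}, {true, 0, 0}, {false, 0, 8}};

    // Same packing as createRequests: each argument starts where the
    // previous one ended; omitted inputs contribute nothing but keep
    // their slot in the argument list.
    uint32_t offset = 0;
    for (auto& a : args) {
        if (!a.hasNoValue) a.offset = offset;
        offset += a.length;
    }

    assert(args[0].offset == 0);  // bytes [0, 4) of the pool
    assert(args[2].offset == 4);  // bytes [4, 12); total pool size is 12
}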
50 neuralnetworks/1.1/vts/functional/ValidationTests.cpp Normal file
@@ -0,0 +1,50 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "Models.h"
#include "VtsHalNeuralnetworks.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {

// forward declarations
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
#define VTS_CURRENT_TEST_CASE(TestName)                                           \
    TEST_F(ValidationTest, TestName) {                                            \
        const Model model = TestName::createTestModel();                          \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model);                                                     \
        validateRequests(model, requests);                                        \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE

}  // namespace functional
}  // namespace vts
}  // namespace V1_1
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
@@ -16,16 +16,7 @@

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_1.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::hidl_memory;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;
#include "VtsHalNeuralnetworks.h"
namespace android {
namespace hardware {
@@ -34,11 +25,6 @@ namespace V1_1 {
namespace vts {
namespace functional {

// allocator helper
hidl_memory allocateSharedMemory(int64_t size) {
    return nn::allocateSharedMemory(size);
}

// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}

@@ -52,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
}

void NeuralnetworksHidlEnvironment::registerTestServices() {
    registerTestService<V1_1::IDevice>();
    registerTestService<IDevice>();
}

// The main test class for NEURALNETWORKS HIDL HAL.
NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}

NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}

void NeuralnetworksHidlTest::SetUp() {
    device = ::testing::VtsHalHidlTargetTestBase::getService<V1_1::IDevice>(
    ::testing::VtsHalHidlTargetTestBase::SetUp();
    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
        NeuralnetworksHidlEnvironment::getInstance());
    ASSERT_NE(nullptr, device.get());
}

void NeuralnetworksHidlTest::TearDown() {}
void NeuralnetworksHidlTest::TearDown() {
    device = nullptr;
    ::testing::VtsHalHidlTargetTestBase::TearDown();
}

}  // namespace functional
}  // namespace vts

::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
    return os << toString(deviceStatus);
}

}  // namespace V1_1
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;

int main(int argc, char** argv) {
    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
    ::testing::InitGoogleTest(&argc, argv);
    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);

    int status = RUN_ALL_TESTS();
    return status;
}
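The new SetUp()/TearDown() bodies in this hunk follow the usual mirror ordering: the base class is set up first and torn down last, and the sp<IDevice> acquired in SetUp() is explicitly released in TearDown(). A small illustrative gtest fixture showing the same discipline (names and the int resource are stand-ins, not from this patch):

#include <gtest/gtest.h>

// Mirror-ordered setup/teardown: base first on the way in, base last on
// the way out; resources acquired in SetUp() are released in TearDown().
class ServiceTest : public ::testing::Test {
   protected:
    void SetUp() override {
        ::testing::Test::SetUp();  // base first
        mResource = new int(42);   // then acquire (stand-in for getService)
        ASSERT_NE(nullptr, mResource);
    }
    void TearDown() override {
        delete mResource;  // release in reverse order of acquisition
        mResource = nullptr;
        ::testing::Test::TearDown();  // base last
    }
    int* mResource = nullptr;
};

TEST_F(ServiceTest, ResourceIsAvailable) { EXPECT_EQ(42, *mResource); }

Dropping the strong pointer in TearDown() matters for HIDL tests in particular, since it lets the remote service object die between test cases instead of being pinned for the whole run.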
@@ -17,65 +17,71 @@
#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
#define VTS_HAL_NEURALNETWORKS_V1_1_H

#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>

#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>

#include <android-base/macros.h>
#include <gtest/gtest.h>
#include <string>
#include <iostream>
#include <vector>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {

using V1_0::Request;
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;

namespace vts {
namespace functional {
hidl_memory allocateSharedMemory(int64_t size);

// A class for test environment setup
class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
    NeuralnetworksHidlEnvironment();
    NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
    NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
    NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
    NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
    ~NeuralnetworksHidlEnvironment() override;

   public:
    ~NeuralnetworksHidlEnvironment() override;
    static NeuralnetworksHidlEnvironment* getInstance();
    void registerTestServices() override;
};

// The main test class for NEURALNETWORKS HIDL HAL.
class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);

   public:
    NeuralnetworksHidlTest();
    ~NeuralnetworksHidlTest() override;
    void SetUp() override;
    void TearDown() override;

    sp<V1_1::IDevice> device;
   protected:
    sp<IDevice> device;
};

// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
   protected:
    void validateModel(const Model& model);
    void validateRequests(const Model& model, const std::vector<Request>& request);
};

// Tag for the generated tests
class GeneratedTest : public NeuralnetworksHidlTest {};

}  // namespace functional
}  // namespace vts

// pretty-print values for error messages

template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
                                                V1_0::ErrorStatus errorStatus) {
    return os << toString(errorStatus);
}

template <typename CharT, typename Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
                                                V1_0::DeviceStatus deviceStatus) {
    return os << toString(deviceStatus);
}
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);

}  // namespace V1_1
}  // namespace neuralnetworks
@@ -1,305 +0,0 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworksV1_1.h"

#include "Callbacks.h"
#include "Models.h"
#include "TestHarness.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>

using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::Request;
using ::android::hardware::neuralnetworks::V1_1::Capabilities;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::Operation;
using ::android::hardware::neuralnetworks::V1_1::OperationType;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hidl::allocator::V1_0::IAllocator;
using ::android::hidl::memory::V1_0::IMemory;
using ::android::sp;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_1 {
namespace vts {
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;

static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);
    Model model = createValidTestModel_1_1();

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}

// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    Return<DeviceStatus> status = device->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    Return<void> ret =
        device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations positive test
TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
    Model model = createValidTestModel_1_1();
    Return<void> ret = device->getSupportedOperations_1_1(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_EQ(model.operations.size(), supported.size());
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 1
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
    Model model = createInvalidTestModel1_1_1();
    Return<void> ret = device->getSupportedOperations_1_1(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 2
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
    Model model = createInvalidTestModel2_1_1();
    Return<void> ret = device->getSupportedOperations_1_1(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}

// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
    sp<IPreparedModel> preparedModel;
    doPrepareModelShortcut(device, &preparedModel);
}

// prepare simple model negative test 1
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
    Model model = createInvalidTestModel1_1_1();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}

// prepare simple model negative test 2
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
    Model model = createInvalidTestModel2_1_1();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}

// execute simple graph positive test
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    const uint32_t OUTPUT = 1;

    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createValidTestRequest();

    auto postWork = [&] {
        sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
        if (outputMemory == nullptr) {
            return false;
        }
        float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
        if (outputPtr == nullptr) {
            return false;
        }
        outputMemory->read();
        std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
        outputMemory->commit();
        return true;
    };

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    executionCallback->on_finish(postWork);
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
    EXPECT_EQ(expectedData, outputData);
}

// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createInvalidTestRequest1();

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createInvalidTestRequest2();

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

}  // namespace functional
}  // namespace vts
}  // namespace V1_1
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;

int main(int argc, char** argv) {
    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
    ::testing::InitGoogleTest(&argc, argv);
    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);

    int status = RUN_ALL_TESTS();
    return status;
}