mirror of
https://github.com/Evolution-X/hardware_interfaces
synced 2026-02-01 11:36:00 +00:00
Certain mutation testing -- mutateOperandLifeTimeTest and mutateOperandInputOutputTest -- can introduce potentially very large CONSTANT_COPY operands, which can in turn create potentially very large Models which must be passed across binder. To avoid overflowing the binder buffer, we estimate the size of the mutated Model, and skip the test if that size is too high. The old logic recognizes that our tests only have a single active binder transaction at a time, and assumes that there are no other clients using the same service at the same time, and so we should have the binder buffer to ourselves; to be conservative, we reject any Model whose estimated size exceeds half the binder buffer size. Unfortunately, sometimes the binder buffer still overflows, because it unexpectedly contains an allocation from some other transaction: It appears that binder buffer memory management is not serialized with respect to transactions from our tests, and therefore depending on scheduler behavior, there may be a sizeable allocation still in the buffer when we attempt to pass the large Model. To fix this problem we become even more conservative, and instead of limiting the Model to half the binder buffer size, we limit it to half IBinder.MAX_IPC_SIZE (the recommended transaction size limit). To confirm that this change does not exclude too many tests, I checked how many times the size filter function exceedsBinderSizeLimit is called, how many times it rejects a model under the new logic (modelsExceedHalfMaxIPCSize), and how many times it rejects a model under the old logic (modelsExceedHalfBufferSize). 
Test: VtsHalNeuralnetworksV1_0TargetTest --gtest_filter=TestGenerated/ValidationTest.Test/*-*dsp* Test: # models = 3592, modelsExceedHalfMaxIPCSize = 212, modelsExceedHalfBufferSize = 18 Test: VtsHalNeuralnetworksV1_1TargetTest --gtest_filter=TestGenerated/ValidationTest.Test/*-*dsp* Test: # models = 7228, modelsExceedHalfMaxIPCSize = 330, modelsExceedHalfBufferSize = 28 Test: VtsHalNeuralnetworksV1_2TargetTest --gtest_filter=TestGenerated/ValidationTest.Test/*-*dsp* Test: # models = 52072, modelsExceedHalfMaxIPCSize = 506, modelsExceedHalfBufferSize = 28 Test: VtsHalNeuralnetworksV1_3TargetTest --gtest_filter=TestGenerated/ValidationTest.Test/*-*dsp* Test: # models = 73342, modelsExceedHalfMaxIPCSize = 568, modelsExceedHalfBufferSize = 28 Test: VtsHalNeuralnetworksTargetTest Bug: 227719657 Bug: 227719752 Bug: 231928847 Bug: 238777741 Bug: 242271308 Change-Id: I3f81d71ca3c0ad4c639096b1dc034a8909bc8971
1248 lines
55 KiB
C++
1248 lines
55 KiB
C++
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#define LOG_TAG "neuralnetworks_hidl_hal_test"
|
|
|
|
#include <android/hardware/neuralnetworks/1.1/types.h>
|
|
#include "1.0/Utils.h"
|
|
#include "1.2/Callbacks.h"
|
|
#include "1.2/Utils.h"
|
|
#include "GeneratedTestHarness.h"
|
|
#include "VtsHalNeuralnetworks.h"
|
|
|
|
#include <optional>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
|
|
|
|
using implementation::PreparedModelCallback;
|
|
using V1_0::DataLocation;
|
|
using V1_0::ErrorStatus;
|
|
using V1_0::OperandLifeTime;
|
|
using V1_1::ExecutionPreference;
|
|
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
|
|
|
|
using PrepareModelMutation = std::function<void(Model*, ExecutionPreference*)>;
|
|
|
|
///////////////////////// UTILITY FUNCTIONS /////////////////////////
|
|
|
|
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
|
|
const Model& model) {
|
|
SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
|
|
|
|
Return<void> ret = device->getSupportedOperations_1_2(
|
|
model, [&](ErrorStatus status, const hidl_vec<bool>&) {
|
|
EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
|
|
});
|
|
EXPECT_TRUE(ret.isOk());
|
|
}
|
|
|
|
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
|
|
const Model& model, ExecutionPreference preference) {
|
|
SCOPED_TRACE(message + " [prepareModel_1_2]");
|
|
|
|
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
|
|
Return<ErrorStatus> prepareLaunchStatus =
|
|
device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
|
|
hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
|
|
ASSERT_TRUE(prepareLaunchStatus.isOk());
|
|
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
|
|
|
|
preparedModelCallback->wait();
|
|
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
|
|
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
|
|
sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
|
|
ASSERT_EQ(nullptr, preparedModel.get());
|
|
}
|
|
|
|
// Returns true iff the preference is one of the three defined
// ExecutionPreference values.
static bool validExecutionPreference(ExecutionPreference preference) {
    switch (preference) {
        case ExecutionPreference::LOW_POWER:
        case ExecutionPreference::FAST_SINGLE_ANSWER:
        case ExecutionPreference::SUSTAINED_SPEED:
            return true;
        default:
            return false;
    }
}
|
|
|
|
// Primary validation function. This function will take a valid model, apply a
|
|
// mutation to invalidate either the model or the execution preference, then
|
|
// pass these to supportedOperations and/or prepareModel if that method is
|
|
// called with an invalid argument.
|
|
static void validate(const sp<IDevice>& device, const std::string& message,
|
|
const Model& originalModel, const PrepareModelMutation& mutate) {
|
|
Model model = originalModel;
|
|
ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER;
|
|
mutate(&model, &preference);
|
|
|
|
if (validExecutionPreference(preference)) {
|
|
validateGetSupportedOperations(device, message, model);
|
|
}
|
|
|
|
validatePrepareModel(device, message, model, preference);
|
|
}
|
|
|
|
// Appends a fresh scalar INT32 MODEL_INPUT operand (no consumers, zeroed
// location) to the model and returns its index.
static uint32_t addOperand(Model* model) {
    Operand newOperand = {
            .type = OperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
    };
    return hidl_vec_push_back(&model->operands, newOperand);
}
|
|
|
|
// Appends a fresh operand with the requested lifetime and exactly one
// consumer, returning its index.
static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
    const uint32_t index = addOperand(model);
    Operand& operand = model->operands[index];
    operand.numberOfConsumers = 1;
    operand.lifetime = lifetime;
    return index;
}
|
|
|
|
// If we introduce a CONSTANT_COPY for an operand of size operandSize,
|
|
// how much will this increase the size of the model? This assumes
|
|
// that we can (re)use all of model.operandValues for the operand
|
|
// value.
|
|
static size_t constantCopyExtraSize(const Model& model, size_t operandSize) {
|
|
const size_t operandValuesSize = model.operandValues.size();
|
|
return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
|
|
}
|
|
|
|
// Highly specialized utility routine for converting an operand to
|
|
// CONSTANT_COPY lifetime.
|
|
//
|
|
// Expects that:
|
|
// - operand has a known size
|
|
// - operand->lifetime has already been set to CONSTANT_COPY
|
|
// - operand->location has been zeroed out
|
|
//
|
|
// Does the following:
|
|
// - initializes operand->location to point to the beginning of model->operandValues
|
|
// - resizes model->operandValues (if necessary) to be large enough for the operand
|
|
// value, padding it with zeroes on the end
|
|
//
|
|
// Potential problem:
|
|
// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the
|
|
// operand with unspecified (but deterministic) data. This means that the model may be invalidated
|
|
// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the
|
|
// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid
|
|
// value). For now, this should be fine because it just means we're not testing what we think we're
|
|
// testing in certain cases; but we can handwave this and assume we're probabilistically likely to
|
|
// exercise the validation code over the span of the entire test set and operand space.
|
|
//
|
|
// Aborts if the specified operand type is an extension type or OEM type.
|
|
// Converts an operand (whose lifetime has already been set to CONSTANT_COPY
// and whose location has been zeroed) into one backed by the start of
// model->operandValues, growing operandValues with zero padding if needed.
// See the preceding comment block for preconditions and caveats.
static void becomeConstantCopy(Model* model, Operand* operand) {
    // sizeOfData will abort if the specified type is an extension type or OEM type.
    const size_t operandSize = sizeOfData(*operand);
    EXPECT_NE(size_t(0), operandSize);

    // Point the operand at the beginning of the operand-value pool.
    operand->location.poolIndex = 0;
    operand->location.offset = 0;
    operand->location.length = operandSize;

    // Grow operandValues (zero-padded at the end) only if it is currently
    // too small to hold the operand's value.
    if (operandSize > model->operandValues.size()) {
        model->operandValues.resize(operandSize);
    }
}
|
|
|
|
// The sizeForBinder() functions estimate the size of the
|
|
// representation of a value when sent to binder. It's probably a bit
|
|
// of an under-estimate, because we don't know the size of the
|
|
// metadata in the binder format (e.g., representation of the size of
|
|
// a vector); but at least it adds up "big" things like vector
|
|
// contents. However, it doesn't treat inter-field or end-of-struct
|
|
// padding in a methodical way -- there's no attempt to be consistent
|
|
// in whether or not padding in the native (C++) representation
|
|
// contributes to the estimated size for the binder representation;
|
|
// and there's no attempt to understand what padding (if any) is
|
|
// needed in the binder representation.
|
|
//
|
|
// This assumes that non-metadata uses a fixed length encoding (e.g.,
|
|
// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than
|
|
// using an encoding whose length is related to the magnitude of the
|
|
// encoded value).
|
|
|
|
// Base case: a trivially copyable value contributes exactly its in-memory
// size to the binder representation estimate. The static_assert prevents
// silently falling through to this overload for types (e.g. hidl_vec,
// hidl_string) that need a specialized estimate.
template <typename Type>
static size_t sizeForBinder(const Type& val) {
    static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
                  "expected a trivially copyable type");
    return sizeof(val);
}
|
|
|
|
template <typename Type>
|
|
static size_t sizeForBinder(const hidl_vec<Type>& vec) {
|
|
return std::accumulate(vec.begin(), vec.end(), 0,
|
|
[](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const SymmPerChannelQuantParams& symmPerChannelQuantParams) {
|
|
size_t size = 0;
|
|
|
|
size += sizeForBinder(symmPerChannelQuantParams.scales);
|
|
size += sizeForBinder(symmPerChannelQuantParams.channelDim);
|
|
|
|
return size;
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const Operand::ExtraParams& extraParams) {
|
|
using Discriminator = Operand::ExtraParams::hidl_discriminator;
|
|
switch (extraParams.getDiscriminator()) {
|
|
case Discriminator::none:
|
|
return 0;
|
|
case Discriminator::channelQuant:
|
|
return sizeForBinder(extraParams.channelQuant());
|
|
case Discriminator::extension:
|
|
return sizeForBinder(extraParams.extension());
|
|
}
|
|
LOG(FATAL) << "Unrecognized extraParams enum: "
|
|
<< static_cast<int>(extraParams.getDiscriminator());
|
|
return 0;
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const Operand& operand) {
|
|
size_t size = 0;
|
|
|
|
size += sizeForBinder(operand.type);
|
|
size += sizeForBinder(operand.dimensions);
|
|
size += sizeForBinder(operand.numberOfConsumers);
|
|
size += sizeForBinder(operand.scale);
|
|
size += sizeForBinder(operand.zeroPoint);
|
|
size += sizeForBinder(operand.lifetime);
|
|
size += sizeForBinder(operand.location);
|
|
size += sizeForBinder(operand.extraParams);
|
|
|
|
return size;
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const Operation& operation) {
|
|
size_t size = 0;
|
|
|
|
size += sizeForBinder(operation.type);
|
|
size += sizeForBinder(operation.inputs);
|
|
size += sizeForBinder(operation.outputs);
|
|
|
|
return size;
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const hidl_string& name) {
|
|
return name.size();
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const hidl_memory& memory) {
|
|
// This is just a guess.
|
|
|
|
size_t size = 0;
|
|
|
|
if (const native_handle_t* handle = memory.handle()) {
|
|
size += sizeof(*handle);
|
|
size += sizeof(handle->data[0] * (handle->numFds + handle->numInts));
|
|
}
|
|
size += sizeForBinder(memory.name());
|
|
|
|
return size;
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
|
|
size_t size = 0;
|
|
|
|
size += sizeForBinder(extensionNameToPrefix.name);
|
|
size += sizeForBinder(extensionNameToPrefix.prefix);
|
|
|
|
return size;
|
|
}
|
|
|
|
template <>
|
|
size_t sizeForBinder(const Model& model) {
|
|
size_t size = 0;
|
|
|
|
size += sizeForBinder(model.operands);
|
|
size += sizeForBinder(model.operations);
|
|
size += sizeForBinder(model.inputIndexes);
|
|
size += sizeForBinder(model.outputIndexes);
|
|
size += sizeForBinder(model.operandValues);
|
|
size += sizeForBinder(model.pools);
|
|
size += sizeForBinder(model.relaxComputationFloat32toFloat16);
|
|
size += sizeForBinder(model.extensionNameToPrefix);
|
|
|
|
return size;
|
|
}
|
|
|
|
// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
|
|
//
|
|
// "The Binder transaction buffer has a limited fixed size,
|
|
// currently 1Mb, which is shared by all transactions in progress
|
|
// for the process."
|
|
//
|
|
// Will our representation fit under this limit? There are three complications:
|
|
// - Our representation size is just approximate (see sizeForBinder()).
|
|
// - This object may not be the only occupant of the Binder transaction buffer
|
|
// (although our VTS test suite should not be putting multiple objects in the
|
|
// buffer at once).
|
|
// - IBinder.MAX_IPC_SIZE recommends limiting a transaction to 64 * 1024 bytes.
|
|
// So we'll be very conservative: We want the representation size to be no
|
|
// larger than half the recommended limit.
|
|
//
|
|
// If our representation grows large enough that it still fits within
|
|
// the transaction buffer but combined with other transactions may
|
|
// exceed the buffer size, then we may see intermittent HAL transport
|
|
// errors.
|
|
// Returns true when the estimated binder representation is larger than half
// the recommended transaction limit (see the rationale comment above).
static bool exceedsBinderSizeLimit(size_t representationSize) {
    // There is no C++ API to retrieve the value of the Java variable
    // IBinder.MAX_IPC_SIZE (64 KiB); hard-code it and allow only half.
    constexpr size_t kMaxIPCSize = 64 * 1024;
    return representationSize > kMaxIPCSize / 2;
}
|
|
|
|
///////////////////////// VALIDATE EXECUTION ORDER ////////////////////////////
|
|
|
|
// Verifies that the service rejects models whose operations are not in
// topological execution order. For each operation that reads an operand
// written by another operation, it is rotated to the front of the sequence
// (so it reads before the writer runs); for each operation whose output is
// consumed by another operation, it is rotated to the back (so it writes
// after the reader runs). Either move violates writer-before-reader order.
static void mutateExecutionOrderTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const Operation& operationObj = model.operations[operation];
        for (uint32_t input : operationObj.inputs) {
            // TEMPORARY_VARIABLE and MODEL_OUTPUT operands are the ones
            // produced by some operation in the graph.
            if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
                model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) {
                // This operation reads an operand written by some
                // other operation.  Move this operation to the
                // beginning of the sequence, ensuring that it reads
                // the operand before that operand is written, thereby
                // violating execution order rules.
                const std::string message = "mutateExecutionOrderTest: operation " +
                                            std::to_string(operation) + " is a reader";
                validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
                    auto& operations = model->operations;
                    // Rotate the reader to index 0; the operations that
                    // preceded it each shift one slot toward the back.
                    std::rotate(operations.begin(), operations.begin() + operation,
                                operations.begin() + operation + 1);
                });
                break;  // only need to do this once per operation
            }
        }
        for (uint32_t output : operationObj.outputs) {
            if (model.operands[output].numberOfConsumers > 0) {
                // This operation writes an operand read by some other
                // operation.  Move this operation to the end of the
                // sequence, ensuring that it writes the operand after
                // that operand is read, thereby violating execution
                // order rules.
                const std::string message = "mutateExecutionOrderTest: operation " +
                                            std::to_string(operation) + " is a writer";
                validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
                    auto& operations = model->operations;
                    // Rotate the writer to the last index; the operations
                    // that followed it each shift one slot toward the front.
                    std::rotate(operations.begin() + operation, operations.begin() + operation + 1,
                                operations.end());
                });
                break;  // only need to do this once per operation
            }
        }
    }
}
|
|
|
|
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
|
|
|
|
// Operand type codes that fall just outside the defined fundamental and OEM
// ranges; setting any operand's type to one of these must cause validation
// to fail.
static const uint32_t invalidOperandTypes[] = {
        static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN) - 1,
        static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1,
        static_cast<uint32_t>(OperandTypeRange::OEM_MIN) - 1,
        static_cast<uint32_t>(OperandTypeRange::OEM_MAX) + 1,
};
|
|
|
|
static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
for (uint32_t invalidOperandType : invalidOperandTypes) {
|
|
const std::string message = "mutateOperandTypeTest: operand " +
|
|
std::to_string(operand) + " set to value " +
|
|
std::to_string(invalidOperandType);
|
|
validate(device, message, model,
|
|
[operand, invalidOperandType](Model* model, ExecutionPreference*) {
|
|
model->operands[operand].type =
|
|
static_cast<OperandType>(invalidOperandType);
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE OPERAND RANK /////////////////////////
|
|
|
|
// Returns a rank that is guaranteed to be invalid for the given operand
// type, or 0 if no such rank exists (tensor types accept any rank, so
// callers treat 0 as "skip this operand").
static uint32_t getInvalidRank(OperandType type) {
    switch (type) {
        // Scalar types must have rank 0, so rank 1 is invalid for them.
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
            return 1;
        // Tensor types (and anything unrecognized) have no universally
        // invalid rank.
        default:
            return 0;
    }
}
|
|
|
|
static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
|
|
if (invalidRank == 0) {
|
|
continue;
|
|
}
|
|
const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
|
|
" has rank of " + std::to_string(invalidRank);
|
|
validate(device, message, model,
|
|
[operand, invalidRank](Model* model, ExecutionPreference*) {
|
|
model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
|
|
});
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
|
|
|
|
// Returns a scale value that is guaranteed to be invalid for the given
// operand type.
static float getInvalidScale(OperandType type) {
    switch (type) {
        // These types require scale == 0, so any nonzero scale is invalid.
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            return 1.0f;
        // TENSOR_INT32 permits scale >= 0, so a negative scale is invalid.
        case OperandType::TENSOR_INT32:
            return -1.0f;
        // Quantized types (and anything unrecognized) require a positive
        // scale, so 0 is invalid.
        default:
            return 0.0f;
    }
}
|
|
|
|
static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
const float invalidScale = getInvalidScale(model.operands[operand].type);
|
|
const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
|
|
" has scale of " + std::to_string(invalidScale);
|
|
validate(device, message, model,
|
|
[operand, invalidScale](Model* model, ExecutionPreference*) {
|
|
model->operands[operand].scale = invalidScale;
|
|
});
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
|
|
|
|
// Returns zero-point values that are guaranteed to be invalid for the given
// operand type (empty when no invalid value is known for the type).
static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
    switch (type) {
        // These types require zeroPoint == 0, so any nonzero value is invalid.
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
        case OperandType::TENSOR_INT32:
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            return {1};
        // TENSOR_QUANT8_ASYMM: valid range is [0, 255].
        case OperandType::TENSOR_QUANT8_ASYMM:
            return {-1, 256};
        // TENSOR_QUANT8_SYMM: zeroPoint must be 0; probe just outside the
        // int8 range as well as the nearest nonzero values.
        case OperandType::TENSOR_QUANT8_SYMM:
            return {-129, -1, 1, 128};
        // TENSOR_QUANT16_ASYMM: valid range is [0, 65535].
        case OperandType::TENSOR_QUANT16_ASYMM:
            return {-1, 65536};
        // TENSOR_QUANT16_SYMM: zeroPoint must be 0; probe just outside the
        // int16 range as well as the nearest nonzero values.
        case OperandType::TENSOR_QUANT16_SYMM:
            return {-32769, -1, 1, 32768};
        default:
            return {};
    }
}
|
|
|
|
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
const std::vector<int32_t> invalidZeroPoints =
|
|
getInvalidZeroPoints(model.operands[operand].type);
|
|
for (int32_t invalidZeroPoint : invalidZeroPoints) {
|
|
const std::string message = "mutateOperandZeroPointTest: operand " +
|
|
std::to_string(operand) + " has zero point of " +
|
|
std::to_string(invalidZeroPoint);
|
|
validate(device, message, model,
|
|
[operand, invalidZeroPoint](Model* model, ExecutionPreference*) {
|
|
model->operands[operand].zeroPoint = invalidZeroPoint;
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE OPERAND LIFETIME /////////////////////////////////////////////
|
|
|
|
// Computes the set of lifetimes that would be invalid for the given operand,
// based on whether the operand's current lifetime implies it has a writer.
// CONSTANT_COPY candidates are dropped when the operand's size is unknown or
// when adding a copied value would push the serialized model over the binder
// size limit (see exceedsBinderSizeLimit / constantCopyExtraSize).
static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize,
                                                        const Operand& operand) {
    // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime
    // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime

    // Ways to get an invalid lifetime:
    // - change whether a lifetime means an operand should have a writer
    std::vector<OperandLifeTime> ret;
    switch (operand.lifetime) {
        // Written operands: lifetimes implying "no writer" are invalid.
        case OperandLifeTime::MODEL_OUTPUT:
        case OperandLifeTime::TEMPORARY_VARIABLE:
            ret = {
                    OperandLifeTime::MODEL_INPUT,
                    OperandLifeTime::CONSTANT_COPY,
            };
            break;
        // Unwritten operands: lifetimes implying "has a writer" are invalid.
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        case OperandLifeTime::MODEL_INPUT:
            ret = {
                    OperandLifeTime::TEMPORARY_VARIABLE,
                    OperandLifeTime::MODEL_OUTPUT,
            };
            break;
        case OperandLifeTime::NO_VALUE:
            // Not enough information to know whether
            // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid --
            // is this operand written (then CONSTANT_COPY would be
            // invalid) or not (then TEMPORARY_VARIABLE would be
            // invalid)?
            break;
        default:
            ADD_FAILURE();
            break;
    }

    const size_t operandSize = sizeOfData(operand);  // will be zero if shape is unknown
    if (!operandSize ||
        exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
        // Unknown size or too-large size: cannot safely make a CONSTANT_COPY,
        // so remove it from the candidate set.
        ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end());
    }

    return ret;
}
|
|
|
|
// For every operand, tries each invalid lifetime from getInvalidLifeTimes and
// expects model validation to fail. The mutation also keeps
// inputIndexes/outputIndexes and the operand's location consistent with the
// new (invalid) lifetime, so that the lifetime mismatch is the only
// inconsistency being tested.
static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const Model& model) {
    const size_t modelSize = sizeForBinder(model);
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        const std::vector<OperandLifeTime> invalidLifeTimes =
                getInvalidLifeTimes(model, modelSize, model.operands[operand]);
        for (OperandLifeTime invalidLifeTime : invalidLifeTimes) {
            const std::string message = "mutateOperandLifetimeTest: operand " +
                                        std::to_string(operand) + " has lifetime " +
                                        toString(invalidLifeTime) + " instead of lifetime " +
                                        toString(model.operands[operand].lifetime);
            validate(device, message, model,
                     [operand, invalidLifeTime](Model* model, ExecutionPreference*) {
                         static const DataLocation kZeroDataLocation = {};
                         Operand& operandObj = model->operands[operand];
                         // Undo the bookkeeping tied to the OLD lifetime:
                         // remove the operand from the matching index list.
                         switch (operandObj.lifetime) {
                             case OperandLifeTime::MODEL_INPUT: {
                                 hidl_vec_remove(&model->inputIndexes, uint32_t(operand));
                                 break;
                             }
                             case OperandLifeTime::MODEL_OUTPUT: {
                                 hidl_vec_remove(&model->outputIndexes, uint32_t(operand));
                                 break;
                             }
                             default:
                                 break;
                         }
                         operandObj.lifetime = invalidLifeTime;
                         operandObj.location = kZeroDataLocation;
                         // Apply the bookkeeping required by the NEW lifetime:
                         // back a CONSTANT_COPY with operandValues, or add the
                         // operand to the appropriate index list.
                         switch (invalidLifeTime) {
                             case OperandLifeTime::CONSTANT_COPY: {
                                 becomeConstantCopy(model, &operandObj);
                                 break;
                             }
                             case OperandLifeTime::MODEL_INPUT:
                                 hidl_vec_push_back(&model->inputIndexes, uint32_t(operand));
                                 break;
                             case OperandLifeTime::MODEL_OUTPUT:
                                 hidl_vec_push_back(&model->outputIndexes, uint32_t(operand));
                                 break;
                             default:
                                 break;
                         }
                     });
        }
    }
}
|
|
|
|
///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT //////////////////////////////////////
|
|
|
|
// Picks, for the given operand, a replacement lifetime that changes whether
// the operand is a model input, a model output, or neither -- while
// preserving whether or not the operand should have a writer. Returns
// std::nullopt when no safe replacement exists (NO_VALUE operands, unknown
// operand size, or a CONSTANT_COPY that would exceed the binder size limit).
static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize,
                                                             const Operand& operand) {
    // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes):
    // - change whether a lifetime means an operand is a model input, a model output, or neither
    // - preserve whether or not a lifetime means an operand should have a writer
    switch (operand.lifetime) {
        // Constants are unwritten non-inputs; MODEL_INPUT is the unwritten
        // lifetime that makes the operand an input.
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
            return OperandLifeTime::MODEL_INPUT;
        case OperandLifeTime::MODEL_INPUT: {
            const size_t operandSize = sizeOfData(operand);  // will be zero if shape is unknown
            if (!operandSize ||
                exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
                // Unknown size or too-large size: cannot turn this input into
                // a CONSTANT_COPY.
                break;
            }
            return OperandLifeTime::CONSTANT_COPY;
        }
        // Written operands swap between "model output" and "not an output".
        case OperandLifeTime::MODEL_OUTPUT:
            return OperandLifeTime::TEMPORARY_VARIABLE;
        case OperandLifeTime::TEMPORARY_VARIABLE:
            return OperandLifeTime::MODEL_OUTPUT;
        case OperandLifeTime::NO_VALUE:
            // Not enough information to know whether
            // TEMPORARY_VARIABLE or CONSTANT_COPY would be an
            // appropriate choice -- is this operand written (then
            // TEMPORARY_VARIABLE would be appropriate) or not (then
            // CONSTANT_COPY would be appropriate)?
            break;
        default:
            ADD_FAILURE();
            break;
    }

    return std::nullopt;
}
|
|
|
|
static void mutateOperandInputOutputTest(const sp<IDevice>& device, const Model& model) {
|
|
const size_t modelSize = sizeForBinder(model);
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
const std::optional<OperandLifeTime> changedLifeTime =
|
|
getInputOutputLifeTime(model, modelSize, model.operands[operand]);
|
|
if (changedLifeTime) {
|
|
const std::string message = "mutateOperandInputOutputTest: operand " +
|
|
std::to_string(operand) + " has lifetime " +
|
|
toString(*changedLifeTime) + " instead of lifetime " +
|
|
toString(model.operands[operand].lifetime);
|
|
validate(device, message, model,
|
|
[operand, changedLifeTime](Model* model, ExecutionPreference*) {
|
|
static const DataLocation kZeroDataLocation = {};
|
|
Operand& operandObj = model->operands[operand];
|
|
operandObj.lifetime = *changedLifeTime;
|
|
operandObj.location = kZeroDataLocation;
|
|
if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) {
|
|
becomeConstantCopy(model, &operandObj);
|
|
}
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS //////////////////////////////////
|
|
|
|
// Returns consumer counts guaranteed to disagree with the actual count: the
// two adjacent values, or just {1} when the count is 0 (a uint32_t count
// cannot go below zero).
static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) {
    std::vector<uint32_t> invalidCounts;
    if (numberOfConsumers > 0) {
        invalidCounts.push_back(numberOfConsumers - 1);
    }
    invalidCounts.push_back(numberOfConsumers + 1);
    return invalidCounts;
}
|
|
|
|
static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
const std::vector<uint32_t> invalidNumberOfConsumersVec =
|
|
getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers);
|
|
for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) {
|
|
const std::string message =
|
|
"mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) +
|
|
" numberOfConsumers = " + std::to_string(invalidNumberOfConsumers);
|
|
validate(device, message, model,
|
|
[operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*) {
|
|
model->operands[operand].numberOfConsumers = invalidNumberOfConsumers;
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS ////////////////////////////////////
|
|
|
|
// For every output of every operation, appends a duplicate of that operation
// so that exactly ONE operand ends up with two writers (all of the clone's
// other outputs are redirected to newly created operands), and expects model
// validation to fail.
static void mutateOperandAddWriterTest(const sp<IDevice>& device, const Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size();
             ++badOutputNum) {
            const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum];
            const std::string message = "mutateOperandAddWriterTest: operation " +
                                        std::to_string(operation) + " writes to " +
                                        std::to_string(outputOperandIndex);
            // We'll insert a copy of the operation, all of whose
            // OTHER output operands are newly-created -- i.e.,
            // there'll only be a duplicate write of ONE of that
            // operation's output operands.
            validate(device, message, model,
                     [operation, badOutputNum](Model* model, ExecutionPreference*) {
                         Operation newOperation = model->operations[operation];
                         // The clone reads all the same inputs, so each input
                         // operand gains one consumer.
                         for (uint32_t input : newOperation.inputs) {
                             ++model->operands[input].numberOfConsumers;
                         }
                         // Redirect every output EXCEPT badOutputNum to a
                         // fresh operand, so only badOutputNum is written twice.
                         for (size_t outputNum = 0; outputNum < newOperation.outputs.size();
                              ++outputNum) {
                             if (outputNum == badOutputNum) continue;

                             Operand operandValue =
                                     model->operands[newOperation.outputs[outputNum]];
                             operandValue.numberOfConsumers = 0;
                             // The fresh operand is unconsumed, so a MODEL_OUTPUT
                             // stays an output-like lifetime only as TEMPORARY_VARIABLE.
                             if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
                                 operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
                             } else {
                                 ASSERT_EQ(operandValue.lifetime,
                                           OperandLifeTime::TEMPORARY_VARIABLE);
                             }
                             newOperation.outputs[outputNum] =
                                     hidl_vec_push_back(&model->operands, operandValue);
                         }
                         // Where do we insert the extra writer (a new
                         // operation)?  It has to be later than all the
                         // writers of its inputs.  The easiest thing to do
                         // is to insert it at the end of the operation
                         // sequence.
                         hidl_vec_push_back(&model->operations, newOperation);
                     });
        }
    }
}
|
|
|
|
///////////////////////// VALIDATE EXTRA ??? /////////////////////////
|
|
|
|
// TODO: Operand::location
|
|
|
|
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
|
|
|
|
// Rewrites *operand in place so that it plausibly becomes an operand of the
// given (different) type: besides changing the type, the dimensions, scale,
// zeroPoint, and extra parameters are adjusted so the operand is otherwise
// well formed for that type and the type mismatch is the only defect.
static void mutateOperand(Operand* operand, OperandType type) {
    Operand mutated = *operand;
    mutated.type = type;
    // Tensor types need at least one dimension; reuse the original shape when
    // present, otherwise fall back to a single-element shape.
    const auto tensorDims = [operand]() -> hidl_vec<uint32_t> {
        return operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
    };
    switch (type) {
        // Scalar types: no dimensions, no quantization parameters.
        case OperandType::FLOAT16:
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
        case OperandType::BOOL:
            mutated.dimensions = hidl_vec<uint32_t>();
            mutated.scale = 0.0f;
            mutated.zeroPoint = 0;
            break;
        // Non-quantized tensor types: keep a shape but clear quantization.
        case OperandType::TENSOR_BOOL8:
        case OperandType::TENSOR_FLOAT16:
        case OperandType::TENSOR_FLOAT32:
            mutated.dimensions = tensorDims();
            mutated.scale = 0.0f;
            mutated.zeroPoint = 0;
            break;
        // TENSOR_INT32: only the zeroPoint is reset; the scale is deliberately
        // left untouched (presumably because TENSOR_INT32 may carry a scale).
        case OperandType::TENSOR_INT32:
            mutated.dimensions = tensorDims();
            mutated.zeroPoint = 0;
            break;
        // Per-tensor quantized types: require a non-zero scale.
        case OperandType::TENSOR_QUANT8_ASYMM:
        case OperandType::TENSOR_QUANT8_SYMM:
        case OperandType::TENSOR_QUANT16_ASYMM:
        case OperandType::TENSOR_QUANT16_SYMM:
            mutated.dimensions = tensorDims();
            mutated.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
            break;
        // Per-channel quantized type: needs channel quantization parameters
        // with one scale per element of the channel dimension (dimension 0).
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
            mutated.dimensions = tensorDims();
            mutated.scale = 0.0f;
            mutated.zeroPoint = 0;

            SymmPerChannelQuantParams channelQuant;
            channelQuant.channelDim = 0;
            channelQuant.scales = hidl_vec<float>(
                    operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
                                                   : 0);
            for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
                channelQuant.scales[i] = 1.0f;
            }
            mutated.extraParams.channelQuant(std::move(channelQuant));
        } break;
        // OEM types are left untouched.
        case OperandType::OEM:
        case OperandType::TENSOR_OEM_BYTE:
        default:
            break;
    }
    *operand = mutated;
}
|
|
|
|
static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
|
|
// Do not test OEM types
|
|
if (type == model.operands[operand].type || type == OperandType::OEM ||
|
|
type == OperandType::TENSOR_OEM_BYTE) {
|
|
return true;
|
|
}
|
|
for (const Operation& operation : model.operations) {
|
|
// Skip mutateOperationOperandTypeTest for the following operations.
|
|
// - LSH_PROJECTION's second argument is allowed to have any type.
|
|
// - ARGMIN and ARGMAX's first argument can be any of
|
|
// TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
|
|
// - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
|
|
// - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
|
|
// - DEQUANTIZE input can be any of
|
|
// TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can
|
|
// be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
|
|
// - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32
|
|
// - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
|
|
// - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
|
|
// - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
|
|
// - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
|
|
switch (operation.type) {
|
|
case OperationType::LSH_PROJECTION: {
|
|
if (operand == operation.inputs[1]) {
|
|
return true;
|
|
}
|
|
} break;
|
|
case OperationType::CAST:
|
|
case OperationType::ARGMAX:
|
|
case OperationType::ARGMIN: {
|
|
if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
|
|
type == OperandType::TENSOR_INT32 || type == OperandType::TENSOR_QUANT8_ASYMM) {
|
|
return true;
|
|
}
|
|
} break;
|
|
case OperationType::QUANTIZE:
|
|
case OperationType::RANDOM_MULTINOMIAL: {
|
|
if (operand == operation.inputs[0] &&
|
|
(type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
|
|
return true;
|
|
}
|
|
} break;
|
|
case OperationType::DEQUANTIZE: {
|
|
if (operand == operation.inputs[0] &&
|
|
(type == OperandType::TENSOR_QUANT8_ASYMM ||
|
|
type == OperandType::TENSOR_QUANT8_SYMM ||
|
|
type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
|
|
return true;
|
|
}
|
|
if (operand == operation.outputs[0] &&
|
|
(type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
|
|
return true;
|
|
}
|
|
} break;
|
|
case OperationType::TRANSPOSE_CONV_2D:
|
|
case OperationType::GROUPED_CONV_2D:
|
|
case OperationType::DEPTHWISE_CONV_2D:
|
|
case OperationType::CONV_2D: {
|
|
if (operand == operation.inputs[1] &&
|
|
(type == OperandType::TENSOR_QUANT8_ASYMM ||
|
|
type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
|
|
return true;
|
|
}
|
|
} break;
|
|
default:
|
|
break;
|
|
}
|
|
}
|
|
return false;
|
|
}
|
|
|
|
static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
|
|
if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
|
|
continue;
|
|
}
|
|
const std::string message = "mutateOperationOperandTypeTest: operand " +
|
|
std::to_string(operand) + " set to type " +
|
|
toString(invalidOperandType);
|
|
validate(device, message, model,
|
|
[operand, invalidOperandType](Model* model, ExecutionPreference*) {
|
|
mutateOperand(&model->operands[operand], invalidOperandType);
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
|
|
|
|
// Operation type values just outside the valid ranges: one above the last
// fundamental operation, one below the OEM range, and one above the OEM
// range. None of these is a valid OperationType, so a model using any of them
// must be rejected.
static const uint32_t invalidOperationTypes[] = {
        static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MAX) + 1,
        static_cast<uint32_t>(OperationTypeRange::OEM_MIN) - 1,
        static_cast<uint32_t>(OperationTypeRange::OEM_MAX) + 1,
};
|
|
|
|
static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
for (uint32_t invalidOperationType : invalidOperationTypes) {
|
|
const std::string message = "mutateOperationTypeTest: operation " +
|
|
std::to_string(operation) + " set to value " +
|
|
std::to_string(invalidOperationType);
|
|
validate(device, message, model,
|
|
[operation, invalidOperationType](Model* model, ExecutionPreference*) {
|
|
model->operations[operation].type =
|
|
static_cast<OperationType>(invalidOperationType);
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
|
|
|
|
static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
const uint32_t invalidOperand = model.operands.size();
|
|
for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
|
|
const std::string message = "mutateOperationInputOperandIndexTest: operation " +
|
|
std::to_string(operation) + " input " +
|
|
std::to_string(input);
|
|
validate(device, message, model,
|
|
[operation, input, invalidOperand](Model* model, ExecutionPreference*) {
|
|
model->operations[operation].inputs[input] = invalidOperand;
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
|
|
|
|
static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
const uint32_t invalidOperand = model.operands.size();
|
|
for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
|
|
const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
|
|
std::to_string(operation) + " output " +
|
|
std::to_string(output);
|
|
validate(device, message, model,
|
|
[operation, output, invalidOperand](Model* model, ExecutionPreference*) {
|
|
model->operations[operation].outputs[output] = invalidOperand;
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE MODEL OPERANDS WRITTEN ///////////////////////////////////////
|
|
|
|
static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size();
|
|
++outputNum) {
|
|
const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum];
|
|
if (model.operands[outputOperandIndex].numberOfConsumers > 0) {
|
|
const std::string message = "mutateOperationRemoveWriteTest: operation " +
|
|
std::to_string(operation) + " writes to " +
|
|
std::to_string(outputOperandIndex);
|
|
validate(device, message, model,
|
|
[operation, outputNum](Model* model, ExecutionPreference*) {
|
|
uint32_t& outputOperandIndex =
|
|
model->operations[operation].outputs[outputNum];
|
|
Operand operandValue = model->operands[outputOperandIndex];
|
|
operandValue.numberOfConsumers = 0;
|
|
if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
|
|
operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
|
|
} else {
|
|
ASSERT_EQ(operandValue.lifetime,
|
|
OperandLifeTime::TEMPORARY_VARIABLE);
|
|
}
|
|
outputOperandIndex =
|
|
hidl_vec_push_back(&model->operands, operandValue);
|
|
});
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
|
|
|
|
// Removes every occurrence of "value" from *vec and renumbers the remaining
// entries so they still refer to the same elements after the element at index
// "value" has been deleted: each entry greater than "value" is decremented by
// one. A null vec is ignored.
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
    if (vec) {
        // remove elements matching "value"
        auto last = std::remove(vec->begin(), vec->end(), value);
        vec->resize(std::distance(vec->begin(), last));

        // Decrement elements exceeding "value".
        // Bug fix: the previous lambda returned "v > value ? v-- : v"; the
        // post-decrement yields the ORIGINAL v (the decrement applied only to
        // the by-value copy), so the renumbering was silently a no-op and
        // indices beyond the removed operand were left dangling.
        std::transform(vec->begin(), vec->end(), vec->begin(),
                       [value](uint32_t v) { return v > value ? v - 1 : v; });
    }
}
|
|
|
|
// Deletes operand "index" from the model: removes the operand itself, strips
// every reference to it from the operations and the model's input/output
// lists, and renumbers all higher operand indices to stay consistent.
static void removeOperand(Model* model, uint32_t index) {
    hidl_vec_removeAt(&model->operands, index);
    for (Operation& op : model->operations) {
        removeValueAndDecrementGreaterValues(&op.inputs, index);
        removeValueAndDecrementGreaterValues(&op.outputs, index);
    }
    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}
|
|
|
|
static bool removeOperandSkip(size_t operand, const Model& model) {
|
|
for (const Operation& operation : model.operations) {
|
|
// Skip removeOperandTest for the following operations.
|
|
// - SPLIT's outputs are not checked during prepareModel.
|
|
if (operation.type == OperationType::SPLIT) {
|
|
for (const size_t outOprand : operation.outputs) {
|
|
if (operand == outOprand) {
|
|
return true;
|
|
}
|
|
}
|
|
}
|
|
// BIDIRECTIONAL_SEQUENCE_LSTM and BIDIRECTIONAL_SEQUENCE_RNN can have either one or two
|
|
// outputs depending on their mergeOutputs parameter.
|
|
if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_LSTM ||
|
|
operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) {
|
|
for (const size_t outOprand : operation.outputs) {
|
|
if (operand == outOprand) {
|
|
return true;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
return false;
|
|
}
|
|
|
|
static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
|
|
if (removeOperandSkip(operand, model)) {
|
|
continue;
|
|
}
|
|
const std::string message = "removeOperandTest: operand " + std::to_string(operand);
|
|
validate(device, message, model,
|
|
[operand](Model* model, ExecutionPreference*) { removeOperand(model, operand); });
|
|
}
|
|
}
|
|
|
|
///////////////////////// REMOVE OPERATION /////////////////////////
|
|
|
|
// Deletes operation "index" from the model, first releasing its claim on each
// of its input operands by decrementing their consumer counts.
static void removeOperation(Model* model, uint32_t index) {
    for (uint32_t inputOperand : model->operations[index].inputs) {
        --model->operands[inputOperand].numberOfConsumers;
    }
    hidl_vec_removeAt(&model->operations, index);
}
|
|
|
|
static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
const std::string message = "removeOperationTest: operation " + std::to_string(operation);
|
|
validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
|
|
removeOperation(model, operation);
|
|
});
|
|
}
|
|
}
|
|
|
|
///////////////////////// REMOVE OPERATION INPUT /////////////////////////
|
|
|
|
// Returns true if removing input "input" from operation "op" might still
// leave a valid model, in which case removeOperationInputTest must skip that
// combination. The conditions below key off the exact input count to detect
// when the trailing optional argument is present.
static bool removeOperationInputSkip(const Operation& op, size_t input) {
    // Skip removeOperationInputTest for the following operations.
    // - CONCATENATION has at least 2 inputs, with the last element being INT32.
    // - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
    //   SPACE_TO_DEPTH, DEPTH_TO_SPACE, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
    //   layout parameter.
    // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
    //   parameter.
    switch (op.type) {
        case OperationType::CONCATENATION: {
            // Any tensor input (i.e. any input except the trailing INT32 axis)
            // may be removable while leaving a well-formed CONCATENATION.
            if (op.inputs.size() > 2 && input != op.inputs.size() - 1) {
                return true;
            }
        } break;
        case OperationType::DEPTHWISE_CONV_2D: {
            // 12 (explicit padding) or 9 (implicit padding) inputs means the
            // optional trailing layout argument is present.
            if ((op.inputs.size() == 12 && input == 11) || (op.inputs.size() == 9 && input == 8)) {
                return true;
            }
        } break;
        case OperationType::CONV_2D:
        case OperationType::AVERAGE_POOL_2D:
        case OperationType::MAX_POOL_2D:
        case OperationType::L2_POOL_2D: {
            // 11 or 8 inputs means the optional trailing layout argument is present.
            if ((op.inputs.size() == 11 && input == 10) || (op.inputs.size() == 8 && input == 7)) {
                return true;
            }
        } break;
        case OperationType::RESIZE_BILINEAR: {
            // A 4th input is the optional layout argument.
            if (op.inputs.size() == 4 && input == 3) {
                return true;
            }
        } break;
        case OperationType::SPACE_TO_DEPTH:
        case OperationType::DEPTH_TO_SPACE:
        case OperationType::BATCH_TO_SPACE_ND: {
            // A 3rd input is the optional layout argument.
            if (op.inputs.size() == 3 && input == 2) {
                return true;
            }
        } break;
        case OperationType::SPACE_TO_BATCH_ND: {
            // A 4th input is the optional layout argument.
            if (op.inputs.size() == 4 && input == 3) {
                return true;
            }
        } break;
        case OperationType::L2_NORMALIZATION: {
            // A 2nd input is the optional axis argument.
            if (op.inputs.size() == 2 && input == 1) {
                return true;
            }
        } break;
        case OperationType::LOCAL_RESPONSE_NORMALIZATION: {
            // A 6th input is the optional axis argument.
            if (op.inputs.size() == 6 && input == 5) {
                return true;
            }
        } break;
        case OperationType::SOFTMAX: {
            // A 3rd input is the optional axis argument.
            if (op.inputs.size() == 3 && input == 2) {
                return true;
            }
        } break;
        default:
            break;
    }
    return false;
}
|
|
|
|
static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
|
|
const Operation& op = model.operations[operation];
|
|
if (removeOperationInputSkip(op, input)) {
|
|
continue;
|
|
}
|
|
const std::string message = "removeOperationInputTest: operation " +
|
|
std::to_string(operation) + ", input " +
|
|
std::to_string(input);
|
|
validate(device, message, model,
|
|
[operation, input](Model* model, ExecutionPreference*) {
|
|
uint32_t operand = model->operations[operation].inputs[input];
|
|
model->operands[operand].numberOfConsumers--;
|
|
hidl_vec_removeAt(&model->operations[operation].inputs, input);
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
|
|
|
|
static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
|
|
const std::string message = "removeOperationOutputTest: operation " +
|
|
std::to_string(operation) + ", output " +
|
|
std::to_string(output);
|
|
validate(device, message, model,
|
|
[operation, output](Model* model, ExecutionPreference*) {
|
|
hidl_vec_removeAt(&model->operations[operation].outputs, output);
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
///////////////////////// MODEL VALIDATION /////////////////////////
|
|
|
|
// TODO: remove model input
|
|
// TODO: remove model output
|
|
// TODO: add unused operation
|
|
|
|
///////////////////////// ADD OPERATION INPUT /////////////////////////
|
|
|
|
static bool addOperationInputSkip(const Operation& op) {
|
|
// Skip addOperationInputTest for the following operations.
|
|
// - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional INT32 axis
|
|
// parameter.
|
|
if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
|
|
(op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
|
|
(op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
if (addOperationInputSkip(model.operations[operation])) {
|
|
continue;
|
|
}
|
|
const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
|
|
validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
|
|
uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
|
|
hidl_vec_push_back(&model->operations[operation].inputs, index);
|
|
hidl_vec_push_back(&model->inputIndexes, index);
|
|
});
|
|
}
|
|
}
|
|
|
|
///////////////////////// ADD OPERATION OUTPUT /////////////////////////
|
|
|
|
static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
|
|
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
|
|
const std::string message =
|
|
"addOperationOutputTest: operation " + std::to_string(operation);
|
|
validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
|
|
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
|
|
hidl_vec_push_back(&model->operations[operation].outputs, index);
|
|
hidl_vec_push_back(&model->outputIndexes, index);
|
|
});
|
|
}
|
|
}
|
|
|
|
///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
|
|
|
|
// Execution preference values just outside the valid enum range
// [LOW_POWER, SUSTAINED_SPEED]; prepareModel must reject both.
static const int32_t invalidExecutionPreferences[] = {
        static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
        static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
};
|
|
|
|
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
|
|
for (int32_t invalidPreference : invalidExecutionPreferences) {
|
|
const std::string message =
|
|
"mutateExecutionPreferenceTest: preference " + std::to_string(invalidPreference);
|
|
validate(device, message, model,
|
|
[invalidPreference](Model*, ExecutionPreference* preference) {
|
|
*preference = static_cast<ExecutionPreference>(invalidPreference);
|
|
});
|
|
}
|
|
}
|
|
|
|
////////////////////////// ENTRY POINT //////////////////////////////
|
|
|
|
// Entry point for model validation: runs every mutation test above against
// the given (valid) model, verifying that the service rejects each invalid
// variant during model preparation.
void validateModel(const sp<IDevice>& device, const Model& model) {
    mutateExecutionOrderTest(device, model);
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperandLifeTimeTest(device, model);
    mutateOperandInputOutputTest(device, model);
    mutateOperandNumberOfConsumersTest(device, model);
    mutateOperandAddWriterTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    mutateOperationRemoveWriteTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
    mutateExecutionPreferenceTest(device, model);
}
|
|
|
|
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
|