Remove operationTuple.

Removed operationTuple from Model in the HAL, as the data type was
redundant information.

Removed supportedOperationTuples from Capabilities, as real drivers need
more complex restrictions than this provided. For the OC-MR1, we'll just
rely on getSupportedNodes.

Also removed the unused cachesCompilation.

Bug: 63905942
Test: Compiled and ran tests.
Change-Id: I15f33d14634f2e1c8d726b1bd01d5b9e123b47ea
This commit is contained in:
Jean-Luc Brouillet
2017-09-23 15:15:58 -07:00
parent e9a134b868
commit 39ac22e908
3 changed files with 4 additions and 39 deletions

View File

@@ -1002,21 +1002,6 @@ enum DeviceStatus : int32_t {
UNKNOWN,
};
/**
* A typed operation.
*/
struct OperationTuple {
/**
* The type of operation.
*/
OperationType operationType;
/**
* The input data type of operation.
*/
OperandType operandType;
};
/**
* Performance information for the reference workload.
*
@@ -1038,20 +1023,6 @@ struct PerformanceInfo {
* The capabilities of a driver.
*/
struct Capabilities {
/**
* A collection of typed operations supported by the driver.
*/
vec<OperationTuple> supportedOperationTuples;
/**
* Indicates whether a driver caches its prepared model for reuse the next
* time the application begins. This is useful because the model may have
* been prepared in a previous run.
*
* True if caching is supported, false otherwise.
*/
bool cachesCompilation;
/**
* Driver performance when operating on float32 data.
*/
@@ -1144,9 +1115,9 @@ struct Operand {
*/
struct Operation {
/**
* The tuple describing the operation type and input type.
* The operation type.
*/
OperationTuple opTuple;
OperationType type;
/**
* Describes the table that contains the indexes of the inputs of the

View File

@@ -78,9 +78,7 @@ Model createValidTestModel() {
};
const std::vector<Operation> operations = {{
.opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
.inputs = {operand1, operand2, operand3},
.outputs = {operand4},
.type = OperationType::ADD, .inputs = {operand1, operand2, operand3}, .outputs = {operand4},
}};
const std::vector<uint32_t> inputIndexes = {operand1};
@@ -107,8 +105,7 @@ Model createValidTestModel() {
// create first invalid model
Model createInvalidTestModel1() {
Model model = createValidTestModel();
model.operations[0].opTuple = {static_cast<OperationType>(0xDEADBEEF) /* INVALID */,
OperandType::TENSOR_FLOAT32};
model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */
return model;
}

View File

@@ -107,9 +107,6 @@ TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
Return<void> ret =
device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
EXPECT_EQ(ErrorStatus::NONE, status);
EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);