diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp index daa10fdb69..d98fef025d 100644 --- a/neuralnetworks/1.0/utils/src/Conversions.cpp +++ b/neuralnetworks/1.0/utils/src/Conversions.cpp @@ -110,8 +110,9 @@ nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_ return NN_ERROR() << "Unable to convert invalid ashmem memory object with " << memory.handle()->numInts << " numInts, but expected 0"; } + auto fd = NN_TRY(nn::dupFd(memory.handle()->data[0])); auto handle = nn::Memory::Ashmem{ - .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])), + .fd = std::move(fd), .size = static_cast(memory.size()), }; return std::make_shared(nn::Memory{.handle = std::move(handle)}); @@ -137,12 +138,13 @@ nn::GeneralResult createSharedMemoryFromHidlMemory(const hidl_ } if (memory.name() != "hardware_buffer_blob") { - auto handle = nn::Memory::Unknown{ - .handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())), + auto handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())); + auto unknown = nn::Memory::Unknown{ + .handle = std::move(handle), .size = static_cast(memory.size()), .name = memory.name(), }; - return std::make_shared(nn::Memory{.handle = std::move(handle)}); + return std::make_shared(nn::Memory{.handle = std::move(unknown)}); } #ifdef __ANDROID__ @@ -245,19 +247,23 @@ GeneralResult unvalidatedConvert(const hal::V1_0::DataLocation& lo } GeneralResult unvalidatedConvert(const hal::V1_0::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), + .type = type, .dimensions = operand.dimensions, .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), + .lifetime = lifetime, + .location = location, }; } GeneralResult unvalidatedConvert(const hal::V1_0::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -298,26 +304,30 @@ GeneralResult unvalidatedConvert(const hal::V1_0::Model& model) { } } + auto operands = NN_TRY(unvalidatedConvert(model.operands)); auto main = Model::Subgraph{ - .operands = NN_TRY(unvalidatedConvert(model.operands)), + .operands = std::move(operands), .operations = std::move(operations), .inputIndexes = model.inputIndexes, .outputIndexes = model.outputIndexes, }; + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); return Model{ .main = std::move(main), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .operandValues = std::move(operandValues), + .pools = std::move(pools), }; } GeneralResult unvalidatedConvert(const hal::V1_0::RequestArgument& argument) { const auto lifetime = argument.hasNoValue ? 
Request::Argument::LifeTime::NO_VALUE : Request::Argument::LifeTime::POOL; + const auto location = NN_TRY(unvalidatedConvert(argument.location)); return Request::Argument{ .lifetime = lifetime, - .location = NN_TRY(unvalidatedConvert(argument.location)), + .location = location, .dimensions = argument.dimensions, }; } @@ -328,9 +338,11 @@ GeneralResult unvalidatedConvert(const hal::V1_0::Request& request) { pools.reserve(memories.size()); std::move(memories.begin(), memories.end(), std::back_inserter(pools)); + auto inputs = NN_TRY(unvalidatedConvert(request.inputs)); + auto outputs = NN_TRY(unvalidatedConvert(request.outputs)); return Request{ - .inputs = NN_TRY(unvalidatedConvert(request.inputs)), - .outputs = NN_TRY(unvalidatedConvert(request.outputs)), + .inputs = std::move(inputs), + .outputs = std::move(outputs), .pools = std::move(pools), }; } @@ -500,11 +512,13 @@ nn::GeneralResult unvalidatedConvert( } nn::GeneralResult unvalidatedConvert(const nn::Capabilities& capabilities) { + const auto float32Performance = NN_TRY(unvalidatedConvert( + capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))); + const auto quantized8Performance = NN_TRY(unvalidatedConvert( + capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))); return Capabilities{ - .float32Performance = NN_TRY(unvalidatedConvert( - capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))), - .quantized8Performance = NN_TRY(unvalidatedConvert( - capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))), + .float32Performance = float32Performance, + .quantized8Performance = quantized8Performance, }; } @@ -517,20 +531,24 @@ nn::GeneralResult unvalidatedConvert(const nn::DataLocation& locat } nn::GeneralResult unvalidatedConvert(const nn::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), + .type = type, .dimensions = operand.dimensions, .numberOfConsumers = 0, .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), + .lifetime = lifetime, + .location = location, }; } nn::GeneralResult unvalidatedConvert(const nn::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -572,13 +590,16 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { operands[i].numberOfConsumers = numberOfConsumers[i]; } + auto operations = NN_TRY(unvalidatedConvert(model.main.operations)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); return Model{ .operands = std::move(operands), - .operations = NN_TRY(unvalidatedConvert(model.main.operations)), + .operations = std::move(operations), .inputIndexes = model.main.inputIndexes, .outputIndexes = model.main.outputIndexes, - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .operandValues = std::move(operandValues), + .pools = std::move(pools), }; } @@ -589,9 +610,10 @@ nn::GeneralResult unvalidatedConvert( << "Request cannot be 
unvalidatedConverted because it contains pointer-based memory"; } const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE; + const auto location = NN_TRY(unvalidatedConvert(requestArgument.location)); return RequestArgument{ .hasNoValue = hasNoValue, - .location = NN_TRY(unvalidatedConvert(requestArgument.location)), + .location = location, .dimensions = requestArgument.dimensions, }; } @@ -606,10 +628,13 @@ nn::GeneralResult unvalidatedConvert(const nn::Request& request) { << "Request cannot be unvalidatedConverted because it contains pointer-based memory"; } + auto inputs = NN_TRY(unvalidatedConvert(request.inputs)); + auto outputs = NN_TRY(unvalidatedConvert(request.outputs)); + auto pools = NN_TRY(unvalidatedConvert(request.pools)); return Request{ - .inputs = NN_TRY(unvalidatedConvert(request.inputs)), - .outputs = NN_TRY(unvalidatedConvert(request.outputs)), - .pools = NN_TRY(unvalidatedConvert(request.pools)), + .inputs = std::move(inputs), + .outputs = std::move(outputs), + .pools = std::move(pools), }; } diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp index 5bdbe314b6..887c8eca05 100644 --- a/neuralnetworks/1.1/utils/src/Conversions.cpp +++ b/neuralnetworks/1.1/utils/src/Conversions.cpp @@ -88,8 +88,9 @@ GeneralResult unvalidatedConvert(const hal::V1_1::Capabilities& ca } GeneralResult unvalidatedConvert(const hal::V1_1::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -110,17 +111,20 @@ GeneralResult unvalidatedConvert(const hal::V1_1::Model& model) { } } + auto operands = NN_TRY(unvalidatedConvert(model.operands)); auto main = Model::Subgraph{ - .operands = NN_TRY(unvalidatedConvert(model.operands)), + .operands = std::move(operands), .operations = std::move(operations), .inputIndexes = model.inputIndexes, .outputIndexes = model.outputIndexes, }; + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); return Model{ .main = std::move(main), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, }; } @@ -195,19 +199,23 @@ nn::GeneralResult unvalidatedConvert(const nn::OperationType& ope } nn::GeneralResult unvalidatedConvert(const nn::Capabilities& capabilities) { + const auto float32Performance = NN_TRY(unvalidatedConvert( + capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))); + const auto quantized8Performance = NN_TRY(unvalidatedConvert( + capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))); + const auto relaxedFloat32toFloat16Performance = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); return Capabilities{ - .float32Performance = NN_TRY(unvalidatedConvert( - capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))), - .quantized8Performance = NN_TRY(unvalidatedConvert( - capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM))), - .relaxedFloat32toFloat16Performance = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)), + .float32Performance =
float32Performance, + .quantized8Performance = quantized8Performance, + .relaxedFloat32toFloat16Performance = relaxedFloat32toFloat16Performance, }; } nn::GeneralResult unvalidatedConvert(const nn::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -229,13 +237,16 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { operands[i].numberOfConsumers = numberOfConsumers[i]; } + auto operations = NN_TRY(unvalidatedConvert(model.main.operations)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); return Model{ .operands = std::move(operands), - .operations = NN_TRY(unvalidatedConvert(model.main.operations)), + .operations = std::move(operations), .inputIndexes = model.main.inputIndexes, .outputIndexes = model.main.outputIndexes, - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, }; } diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index 62ec2ed6c6..78d71cf990 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -131,15 +131,18 @@ GeneralResult unvalidatedConvert(const hal::V1_2::Capabilities& ca GeneralResult unvalidatedConvert( const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) { + const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type)); + const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info)); return Capabilities::OperandPerformance{ - .type = NN_TRY(unvalidatedConvert(operandPerformance.type)), - .info = NN_TRY(unvalidatedConvert(operandPerformance.info)), + .type = type, + .info = info, }; } GeneralResult unvalidatedConvert(const hal::V1_2::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -154,14 +157,18 @@ GeneralResult unvalidatedConvert( } GeneralResult unvalidatedConvert(const hal::V1_2::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); + auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), + .type = type, .dimensions = operand.dimensions, .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), - .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)), + .lifetime = lifetime, + .location = location, + .extraParams = std::move(extraParams), }; } @@ -196,19 +203,23 @@ GeneralResult unvalidatedConvert(const hal::V1_2::Model& model) { } } + auto operands = NN_TRY(unvalidatedConvert(model.operands)); auto main = Model::Subgraph{ - .operands = NN_TRY(unvalidatedConvert(model.operands)), + .operands = std::move(operands), .operations = std::move(operations), .inputIndexes =
model.inputIndexes, .outputIndexes = model.outputIndexes, }; + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); + auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)); return Model{ .main = std::move(main), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)), + .extensionNameToPrefix = std::move(extensionNameToPrefix), }; } @@ -248,9 +259,10 @@ GeneralResult unvalidatedConvert(const hal::V1_2::Timing& timing) { } GeneralResult unvalidatedConvert(const hal::V1_2::Extension& extension) { + auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)); return Extension{ .name = extension.name, - .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)), + .operandTypes = std::move(operandTypes), }; } @@ -406,35 +418,41 @@ nn::GeneralResult unvalidatedConvert(const nn::DeviceType& deviceTyp } nn::GeneralResult unvalidatedConvert(const nn::Capabilities& capabilities) { - std::vector operandPerformance; - operandPerformance.reserve(capabilities.operandPerformance.asVector().size()); + std::vector filteredOperandPerformances; + filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size()); std::copy_if(capabilities.operandPerformance.asVector().begin(), capabilities.operandPerformance.asVector().end(), - std::back_inserter(operandPerformance), + std::back_inserter(filteredOperandPerformances), [](const nn::Capabilities::OperandPerformance& operandPerformance) { return compliantVersion(operandPerformance.type).has_value(); }); + const auto relaxedFloat32toFloat16PerformanceScalar = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + const auto relaxedFloat32toFloat16PerformanceTensor = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances)); return Capabilities{ - .relaxedFloat32toFloat16PerformanceScalar = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)), - .relaxedFloat32toFloat16PerformanceTensor = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)), - .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)), + .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor, + .operandPerformance = std::move(operandPerformance), }; } nn::GeneralResult unvalidatedConvert( const nn::Capabilities::OperandPerformance& operandPerformance) { + const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type)); + const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info)); return Capabilities::OperandPerformance{ - .type = NN_TRY(unvalidatedConvert(operandPerformance.type)), - .info = NN_TRY(unvalidatedConvert(operandPerformance.info)), + .type = type, + .info = info, }; } nn::GeneralResult unvalidatedConvert(const nn::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = 
operation.inputs, .outputs = operation.outputs, }; @@ -449,15 +467,19 @@ nn::GeneralResult unvalidatedConvert( } nn::GeneralResult unvalidatedConvert(const nn::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); + auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), + .type = type, .dimensions = operand.dimensions, .numberOfConsumers = 0, .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), - .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)), + .lifetime = lifetime, + .location = location, + .extraParams = std::move(extraParams), }; } @@ -482,15 +504,19 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { operands[i].numberOfConsumers = numberOfConsumers[i]; } + auto operations = NN_TRY(unvalidatedConvert(model.main.operations)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); + auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)); return Model{ .operands = std::move(operands), - .operations = NN_TRY(unvalidatedConvert(model.main.operations)), + .operations = std::move(operations), .inputIndexes = model.main.inputIndexes, .outputIndexes = model.main.outputIndexes, - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)), + .extensionNameToPrefix = std::move(extensionNameToPrefix), }; } @@ -524,9 +550,10 @@ nn::GeneralResult unvalidatedConvert(const nn::Timing& timing) { } nn::GeneralResult unvalidatedConvert(const nn::Extension& extension) { + auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)); return Extension{ .name = extension.name, - .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)), + .operandTypes = std::move(operandTypes), }; } diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp index 09e9d80d38..4eeb414dc8 100644 --- a/neuralnetworks/1.3/utils/src/Conversions.cpp +++ b/neuralnetworks/1.3/utils/src/Conversions.cpp @@ -133,28 +133,35 @@ GeneralResult unvalidatedConvert(const hal::V1_3::Capabilities& ca auto table = NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance))); + const auto relaxedFloat32toFloat16PerformanceScalar = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + const auto relaxedFloat32toFloat16PerformanceTensor = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)); + const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)); return Capabilities{ - .relaxedFloat32toFloat16PerformanceScalar = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)), - .relaxedFloat32toFloat16PerformanceTensor = NN_TRY( - 
unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)), + .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor, .operandPerformance = std::move(table), - .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)), - .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)), + .ifPerformance = ifPerformance, + .whilePerformance = whilePerformance, }; } GeneralResult unvalidatedConvert( const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) { + const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type)); + const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info)); return Capabilities::OperandPerformance{ - .type = NN_TRY(unvalidatedConvert(operandPerformance.type)), - .info = NN_TRY(unvalidatedConvert(operandPerformance.info)), + .type = type, + .info = info, }; } GeneralResult unvalidatedConvert(const hal::V1_3::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -166,25 +173,34 @@ GeneralResult unvalidatedConvert( } GeneralResult unvalidatedConvert(const hal::V1_3::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); + auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), + .type = type, .dimensions = operand.dimensions, .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), - .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)), + .lifetime = lifetime, + .location = location, + .extraParams = std::move(extraParams), }; } GeneralResult unvalidatedConvert(const hal::V1_3::Model& model) { + auto main = NN_TRY(unvalidatedConvert(model.main)); + auto referenced = NN_TRY(unvalidatedConvert(model.referenced)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); + auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)); return Model{ - .main = NN_TRY(unvalidatedConvert(model.main)), - .referenced = NN_TRY(unvalidatedConvert(model.referenced)), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .main = std::move(main), + .referenced = std::move(referenced), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)), + .extensionNameToPrefix = std::move(extensionNameToPrefix), }; } @@ -204,8 +220,9 @@ GeneralResult unvalidatedConvert(const hal::V1_3::Subgraph& sub } } + auto operands = NN_TRY(unvalidatedConvert(subgraph.operands)); return Model::Subgraph{ - .operands = NN_TRY(unvalidatedConvert(subgraph.operands)), + .operands = std::move(operands), .operations = std::move(operations), .inputIndexes = subgraph.inputIndexes, .outputIndexes = subgraph.outputIndexes, @@ -225,10 +242,13 @@ 
GeneralResult unvalidatedConvert(const hal::V1_3::BufferRole& buffer } GeneralResult unvalidatedConvert(const hal::V1_3::Request& request) { + auto inputs = NN_TRY(unvalidatedConvert(request.inputs)); + auto outputs = NN_TRY(unvalidatedConvert(request.outputs)); + auto pools = NN_TRY(unvalidatedConvert(request.pools)); return Request{ - .inputs = NN_TRY(unvalidatedConvert(request.inputs)), - .outputs = NN_TRY(unvalidatedConvert(request.outputs)), - .pools = NN_TRY(unvalidatedConvert(request.pools)), + .inputs = std::move(inputs), + .outputs = std::move(outputs), + .pools = std::move(pools), }; } @@ -463,37 +483,45 @@ nn::GeneralResult unvalidatedConvert(const nn::Priority& priority) { } nn::GeneralResult unvalidatedConvert(const nn::Capabilities& capabilities) { - std::vector operandPerformance; - operandPerformance.reserve(capabilities.operandPerformance.asVector().size()); + std::vector filteredOperandPerformances; + filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size()); std::copy_if(capabilities.operandPerformance.asVector().begin(), capabilities.operandPerformance.asVector().end(), - std::back_inserter(operandPerformance), + std::back_inserter(filteredOperandPerformances), [](const nn::Capabilities::OperandPerformance& operandPerformance) { return compliantVersion(operandPerformance.type).has_value(); }); + const auto relaxedFloat32toFloat16PerformanceScalar = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + const auto relaxedFloat32toFloat16PerformanceTensor = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances)); + const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)); + const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)); return Capabilities{ - .relaxedFloat32toFloat16PerformanceScalar = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)), - .relaxedFloat32toFloat16PerformanceTensor = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)), - .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)), - .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)), - .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)), + .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor, + .operandPerformance = std::move(operandPerformance), + .ifPerformance = ifPerformance, + .whilePerformance = whilePerformance, }; } nn::GeneralResult unvalidatedConvert( const nn::Capabilities::OperandPerformance& operandPerformance) { + const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type)); + const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info)); return Capabilities::OperandPerformance{ - .type = NN_TRY(unvalidatedConvert(operandPerformance.type)), - .info = NN_TRY(unvalidatedConvert(operandPerformance.info)), + .type = type, + .info = info, }; } nn::GeneralResult unvalidatedConvert(const nn::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), + .type = type, .inputs = operation.inputs, .outputs = operation.outputs, }; @@ -509,15 +537,19 @@ nn::GeneralResult unvalidatedConvert( } nn::GeneralResult 
unvalidatedConvert(const nn::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); + auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), + .type = type, .dimensions = operand.dimensions, .numberOfConsumers = 0, .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), - .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)), + .lifetime = lifetime, + .location = location, + .extraParams = std::move(extraParams), }; } @@ -527,13 +559,18 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { << "Model cannot be unvalidatedConverted because it contains pointer-based memory"; } + auto main = NN_TRY(unvalidatedConvert(model.main)); + auto referenced = NN_TRY(unvalidatedConvert(model.referenced)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); + auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)); return Model{ - .main = NN_TRY(unvalidatedConvert(model.main)), - .referenced = NN_TRY(unvalidatedConvert(model.referenced)), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .main = std::move(main), + .referenced = std::move(referenced), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)), + .extensionNameToPrefix = std::move(extensionNameToPrefix), }; } @@ -548,9 +585,10 @@ nn::GeneralResult unvalidatedConvert(const nn::Model::Subgraph& subgra operands[i].numberOfConsumers = numberOfConsumers[i]; } + auto operations = NN_TRY(unvalidatedConvert(subgraph.operations)); return Subgraph{ .operands = std::move(operands), - .operations = NN_TRY(unvalidatedConvert(subgraph.operations)), + .operations = std::move(operations), .inputIndexes = subgraph.inputIndexes, .outputIndexes = subgraph.outputIndexes, }; @@ -574,10 +612,13 @@ nn::GeneralResult unvalidatedConvert(const nn::Request& request) { << "Request cannot be unvalidatedConverted because it contains pointer-based memory"; } + auto inputs = NN_TRY(unvalidatedConvert(request.inputs)); + auto outputs = NN_TRY(unvalidatedConvert(request.outputs)); + auto pools = NN_TRY(unvalidatedConvert(request.pools)); return Request{ - .inputs = NN_TRY(unvalidatedConvert(request.inputs)), - .outputs = NN_TRY(unvalidatedConvert(request.outputs)), - .pools = NN_TRY(unvalidatedConvert(request.pools)), + .inputs = std::move(inputs), + .outputs = std::move(outputs), + .pools = std::move(pools), }; } diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp index 081e3d7142..47c72b47af 100644 --- a/neuralnetworks/aidl/utils/src/Conversions.cpp +++ b/neuralnetworks/aidl/utils/src/Conversions.cpp @@ -177,22 +177,28 @@ GeneralResult unvalidatedConvert(const aidl_hal::Capabilities& cap auto table = NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance))); + const auto relaxedFloat32toFloat16PerformanceScalar = + 
NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + const auto relaxedFloat32toFloat16PerformanceTensor = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)); + const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)); return Capabilities{ - .relaxedFloat32toFloat16PerformanceScalar = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)), - .relaxedFloat32toFloat16PerformanceTensor = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)), + .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor, .operandPerformance = std::move(table), - .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)), - .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)), + .ifPerformance = ifPerformance, + .whilePerformance = whilePerformance, }; } GeneralResult unvalidatedConvert( const aidl_hal::OperandPerformance& operandPerformance) { + const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type)); + const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info)); return Capabilities::OperandPerformance{ - .type = NN_TRY(unvalidatedConvert(operandPerformance.type)), - .info = NN_TRY(unvalidatedConvert(operandPerformance.info)), + .type = type, + .info = info, }; } @@ -228,10 +234,13 @@ GeneralResult unvalidatedConvert(const aidl_hal::DataLocation& loc } GeneralResult unvalidatedConvert(const aidl_hal::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); + auto inputs = NN_TRY(toUnsigned(operation.inputs)); + auto outputs = NN_TRY(toUnsigned(operation.outputs)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), - .inputs = NN_TRY(toUnsigned(operation.inputs)), - .outputs = NN_TRY(toUnsigned(operation.outputs)), + .type = type, + .inputs = std::move(inputs), + .outputs = std::move(outputs), }; } @@ -241,14 +250,19 @@ GeneralResult unvalidatedConvert( } GeneralResult unvalidatedConvert(const aidl_hal::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + auto dimensions = NN_TRY(toUnsigned(operand.dimensions)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); + auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), - .dimensions = NN_TRY(toUnsigned(operand.dimensions)), + .type = type, + .dimensions = std::move(dimensions), .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), - .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)), + .lifetime = lifetime, + .location = location, + .extraParams = std::move(extraParams), }; } @@ -280,22 +294,31 @@ GeneralResult unvalidatedConvert( } GeneralResult unvalidatedConvert(const aidl_hal::Model& model) { + auto main = NN_TRY(unvalidatedConvert(model.main)); + auto referenced = NN_TRY(unvalidatedConvert(model.referenced)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); + auto extensionNameToPrefix = 
NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)); return Model{ - .main = NN_TRY(unvalidatedConvert(model.main)), - .referenced = NN_TRY(unvalidatedConvert(model.referenced)), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .main = std::move(main), + .referenced = std::move(referenced), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)), + .extensionNameToPrefix = std::move(extensionNameToPrefix), }; } GeneralResult unvalidatedConvert(const aidl_hal::Subgraph& subgraph) { + auto operands = NN_TRY(unvalidatedConvert(subgraph.operands)); + auto operations = NN_TRY(unvalidatedConvert(subgraph.operations)); + auto inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes)); + auto outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes)); return Model::Subgraph{ - .operands = NN_TRY(unvalidatedConvert(subgraph.operands)), - .operations = NN_TRY(unvalidatedConvert(subgraph.operations)), - .inputIndexes = NN_TRY(toUnsigned(subgraph.inputIndexes)), - .outputIndexes = NN_TRY(toUnsigned(subgraph.outputIndexes)), + .operands = std::move(operands), + .operations = std::move(operations), + .inputIndexes = std::move(inputIndexes), + .outputIndexes = std::move(outputIndexes), }; } @@ -308,9 +331,10 @@ GeneralResult unvalidatedConvert( } GeneralResult unvalidatedConvert(const aidl_hal::Extension& extension) { + auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)); return Extension{ .name = extension.name, - .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)), + .operandTypes = std::move(operandTypes), }; } @@ -326,8 +350,9 @@ GeneralResult unvalidatedConvert( } GeneralResult unvalidatedConvert(const aidl_hal::OutputShape& outputShape) { + auto dimensions = NN_TRY(toUnsigned(outputShape.dimensions)); return OutputShape{ - .dimensions = NN_TRY(toUnsigned(outputShape.dimensions)), + .dimensions = std::move(dimensions), .isSufficient = outputShape.isSufficient, }; } @@ -346,8 +371,9 @@ GeneralResult unvalidatedConvert(const aidl_hal::Memory& memory) { return NN_ERROR() << "Memory: size must be <= std::numeric_limits::max()"; } + auto fd = NN_TRY(dupFd(ashmem.fd.get())); auto handle = Memory::Ashmem{ - .fd = NN_TRY(dupFd(ashmem.fd.get())), + .fd = std::move(fd), .size = static_cast(ashmem.size), }; return std::make_shared(Memory{.handle = std::move(handle)}); @@ -426,7 +452,8 @@ GeneralResult unvalidatedConvert(const std::vector unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc) { - return BufferDesc{.dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions))}; + auto dimensions = NN_TRY(toUnsigned(bufferDesc.dimensions)); + return BufferDesc{.dimensions = std::move(dimensions)}; } GeneralResult unvalidatedConvert(const aidl_hal::BufferRole& bufferRole) { @@ -440,20 +467,25 @@ GeneralResult unvalidatedConvert(const aidl_hal::BufferRole& bufferR } GeneralResult unvalidatedConvert(const aidl_hal::Request& request) { + auto inputs = NN_TRY(unvalidatedConvert(request.inputs)); + auto outputs = NN_TRY(unvalidatedConvert(request.outputs)); + auto pools = NN_TRY(unvalidatedConvert(request.pools)); return Request{ - .inputs = NN_TRY(unvalidatedConvert(request.inputs)), - .outputs = NN_TRY(unvalidatedConvert(request.outputs)), - .pools = NN_TRY(unvalidatedConvert(request.pools)), + .inputs = std::move(inputs), + .outputs = 
std::move(outputs), + .pools = std::move(pools), }; } GeneralResult unvalidatedConvert(const aidl_hal::RequestArgument& argument) { const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE : Request::Argument::LifeTime::POOL; + const auto location = NN_TRY(unvalidatedConvert(argument.location)); + auto dimensions = NN_TRY(toUnsigned(argument.dimensions)); return Request::Argument{ .lifetime = lifetime, - .location = NN_TRY(unvalidatedConvert(argument.location)), - .dimensions = NN_TRY(toUnsigned(argument.dimensions)), + .location = location, + .dimensions = std::move(dimensions), }; } @@ -720,8 +752,9 @@ nn::GeneralResult unvalidatedConvert( nn::GeneralResult unvalidatedConvert( const nn::Capabilities::OperandPerformance& operandPerformance) { - return OperandPerformance{.type = NN_TRY(unvalidatedConvert(operandPerformance.type)), - .info = NN_TRY(unvalidatedConvert(operandPerformance.info))}; + const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type)); + const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info)); + return OperandPerformance{.type = type, .info = info}; } nn::GeneralResult> unvalidatedConvert( @@ -788,7 +821,8 @@ nn::GeneralResult> unvalidatedConvert(const nn::CacheToken& } nn::GeneralResult unvalidatedConvert(const nn::BufferDesc& bufferDesc) { - return BufferDesc{.dimensions = NN_TRY(toSigned(bufferDesc.dimensions))}; + auto dimensions = NN_TRY(toSigned(bufferDesc.dimensions)); + return BufferDesc{.dimensions = std::move(dimensions)}; } nn::GeneralResult unvalidatedConvert(const nn::BufferRole& bufferRole) { @@ -847,7 +881,8 @@ nn::GeneralResult unvalidatedConvert(const nn::ErrorStatus& errorSt } nn::GeneralResult unvalidatedConvert(const nn::OutputShape& outputShape) { - return OutputShape{.dimensions = NN_TRY(toSigned(outputShape.dimensions)), + auto dimensions = NN_TRY(toSigned(outputShape.dimensions)); + return OutputShape{.dimensions = std::move(dimensions), .isSufficient = outputShape.isSufficient}; } @@ -915,14 +950,19 @@ nn::GeneralResult> unvalidatedConvert( } nn::GeneralResult unvalidatedConvert(const nn::Operand& operand) { + const auto type = NN_TRY(unvalidatedConvert(operand.type)); + auto dimensions = NN_TRY(toSigned(operand.dimensions)); + const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)); + const auto location = NN_TRY(unvalidatedConvert(operand.location)); + auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)); return Operand{ - .type = NN_TRY(unvalidatedConvert(operand.type)), - .dimensions = NN_TRY(toSigned(operand.dimensions)), + .type = type, + .dimensions = std::move(dimensions), .scale = operand.scale, .zeroPoint = operand.zeroPoint, - .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)), - .location = NN_TRY(unvalidatedConvert(operand.location)), - .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)), + .lifetime = lifetime, + .location = location, + .extraParams = std::move(extraParams), }; } @@ -934,19 +974,26 @@ nn::GeneralResult unvalidatedConvert(const nn::OperationType& ope } nn::GeneralResult unvalidatedConvert(const nn::Operation& operation) { + const auto type = NN_TRY(unvalidatedConvert(operation.type)); + auto inputs = NN_TRY(toSigned(operation.inputs)); + auto outputs = NN_TRY(toSigned(operation.outputs)); return Operation{ - .type = NN_TRY(unvalidatedConvert(operation.type)), - .inputs = NN_TRY(toSigned(operation.inputs)), - .outputs = NN_TRY(toSigned(operation.outputs)), + .type = type, + .inputs = std::move(inputs), + .outputs = 
std::move(outputs), }; } nn::GeneralResult unvalidatedConvert(const nn::Model::Subgraph& subgraph) { + auto operands = NN_TRY(unvalidatedConvert(subgraph.operands)); + auto operations = NN_TRY(unvalidatedConvert(subgraph.operations)); + auto inputIndexes = NN_TRY(toSigned(subgraph.inputIndexes)); + auto outputIndexes = NN_TRY(toSigned(subgraph.outputIndexes)); return Subgraph{ - .operands = NN_TRY(unvalidatedConvert(subgraph.operands)), - .operations = NN_TRY(unvalidatedConvert(subgraph.operations)), - .inputIndexes = NN_TRY(toSigned(subgraph.inputIndexes)), - .outputIndexes = NN_TRY(toSigned(subgraph.outputIndexes)), + .operands = std::move(operands), + .operations = std::move(operations), + .inputIndexes = std::move(inputIndexes), + .outputIndexes = std::move(outputIndexes), }; } @@ -969,13 +1016,18 @@ nn::GeneralResult unvalidatedConvert(const nn::Model& model) { << "Model cannot be unvalidatedConverted because it contains pointer-based memory"; } + auto main = NN_TRY(unvalidatedConvert(model.main)); + auto referenced = NN_TRY(unvalidatedConvert(model.referenced)); + auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues)); + auto pools = NN_TRY(unvalidatedConvert(model.pools)); + auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)); return Model{ - .main = NN_TRY(unvalidatedConvert(model.main)), - .referenced = NN_TRY(unvalidatedConvert(model.referenced)), - .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)), - .pools = NN_TRY(unvalidatedConvert(model.pools)), + .main = std::move(main), + .referenced = std::move(referenced), + .operandValues = std::move(operandValues), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)), + .extensionNameToPrefix = std::move(extensionNameToPrefix), }; } @@ -989,10 +1041,13 @@ nn::GeneralResult unvalidatedConvert(const nn::Request& request) { << "Request cannot be unvalidatedConverted because it contains pointer-based memory"; } + auto inputs = NN_TRY(unvalidatedConvert(request.inputs)); + auto outputs = NN_TRY(unvalidatedConvert(request.outputs)); + auto pools = NN_TRY(unvalidatedConvert(request.pools)); return Request{ - .inputs = NN_TRY(unvalidatedConvert(request.inputs)), - .outputs = NN_TRY(unvalidatedConvert(request.outputs)), - .pools = NN_TRY(unvalidatedConvert(request.pools)), + .inputs = std::move(inputs), + .outputs = std::move(outputs), + .pools = std::move(pools), }; } @@ -1003,10 +1058,12 @@ nn::GeneralResult unvalidatedConvert( << "Request cannot be unvalidatedConverted because it contains pointer-based memory"; } const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE; + const auto location = NN_TRY(unvalidatedConvert(requestArgument.location)); + auto dimensions = NN_TRY(toSigned(requestArgument.dimensions)); return RequestArgument{ .hasNoValue = hasNoValue, - .location = NN_TRY(unvalidatedConvert(requestArgument.location)), - .dimensions = NN_TRY(toSigned(requestArgument.dimensions)), + .location = location, + .dimensions = std::move(dimensions), }; } @@ -1033,9 +1090,11 @@ nn::GeneralResult unvalidatedConvert(const nn::Request::Memor } nn::GeneralResult unvalidatedConvert(const nn::Timing& timing) { + const auto timeOnDeviceNs = NN_TRY(unvalidatedConvert(timing.timeOnDevice)); + const auto timeInDriverNs = NN_TRY(unvalidatedConvert(timing.timeInDriver)); return Timing{ - .timeOnDeviceNs = 
NN_TRY(unvalidatedConvert(timing.timeOnDevice)), - .timeInDriverNs = NN_TRY(unvalidatedConvert(timing.timeInDriver)), + .timeOnDeviceNs = timeOnDeviceNs, + .timeInDriverNs = timeInDriverNs, }; } @@ -1064,20 +1123,25 @@ nn::GeneralResult unvalidatedConvert(const nn::Shared } nn::GeneralResult unvalidatedConvert(const nn::Capabilities& capabilities) { + const auto relaxedFloat32toFloat16PerformanceTensor = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + const auto relaxedFloat32toFloat16PerformanceScalar = + NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)); + const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)); + const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)); return Capabilities{ - .relaxedFloat32toFloat16PerformanceTensor = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)), - .relaxedFloat32toFloat16PerformanceScalar = NN_TRY( - unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)), - .operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)), - .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)), - .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)), + .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor, + .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar, + .operandPerformance = std::move(operandPerformance), + .ifPerformance = ifPerformance, + .whilePerformance = whilePerformance, }; } nn::GeneralResult unvalidatedConvert(const nn::Extension& extension) { - return Extension{.name = extension.name, - .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))}; + auto operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)); + return Extension{.name = extension.name, .operandTypes = std::move(operandTypes)}; } #ifdef NN_AIDL_V4_OR_ABOVE nn::GeneralResult unvalidatedConvert(const nn::TokenValuePair& tokenValuePair) { diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp index 76a0b07d86..f9b4f6edbc 100644 --- a/neuralnetworks/aidl/utils/src/Utils.cpp +++ b/neuralnetworks/aidl/utils/src/Utils.cpp @@ -51,8 +51,9 @@ nn::GeneralResult clone(const ndk::ScopedFileDescript } nn::GeneralResult clone(const common::NativeHandle& handle) { + auto fds = NN_TRY(cloneVec(handle.fds)); return common::NativeHandle{ - .fds = NN_TRY(cloneVec(handle.fds)), + .fds = std::move(fds), .ints = handle.ints, }; } @@ -63,29 +64,32 @@ nn::GeneralResult clone(const Memory& memory) { switch (memory.getTag()) { case Memory::Tag::ashmem: { const auto& ashmem = memory.get(); + auto fd = NN_TRY(clone(ashmem.fd)); auto handle = common::Ashmem{ - .fd = NN_TRY(clone(ashmem.fd)), + .fd = std::move(fd), .size = ashmem.size, }; return Memory::make(std::move(handle)); } case Memory::Tag::mappableFile: { const auto& memFd = memory.get(); + auto fd = NN_TRY(clone(memFd.fd)); auto handle = common::MappableFile{ .length = memFd.length, .prot = memFd.prot, - .fd = NN_TRY(clone(memFd.fd)), + .fd = std::move(fd), .offset = memFd.offset, }; return Memory::make(std::move(handle)); } case Memory::Tag::hardwareBuffer: { const auto& hardwareBuffer = memory.get(); - auto handle = graphics::common::HardwareBuffer{ + auto handle = NN_TRY(clone(hardwareBuffer.handle)); + 
auto ahwbHandle = graphics::common::HardwareBuffer{ .description = hardwareBuffer.description, - .handle = NN_TRY(clone(hardwareBuffer.handle)), + .handle = std::move(handle), }; - return Memory::make(std::move(handle)); + return Memory::make(std::move(ahwbHandle)); } } return (NN_ERROR() << "Unrecognized Memory::Tag: " << underlyingType(memory.getTag())) @@ -109,19 +113,21 @@ nn::GeneralResult clone(const RequestMemoryPool& requestPool) } nn::GeneralResult clone(const Request& request) { + auto pools = NN_TRY(clone(request.pools)); return Request{ .inputs = request.inputs, .outputs = request.outputs, - .pools = NN_TRY(clone(request.pools)), + .pools = std::move(pools), }; } nn::GeneralResult clone(const Model& model) { + auto pools = NN_TRY(clone(model.pools)); return Model{ .main = model.main, .referenced = model.referenced, .operandValues = model.operandValues, - .pools = NN_TRY(clone(model.pools)), + .pools = std::move(pools), .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, .extensionNameToPrefix = model.extensionNameToPrefix, };
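
Every hunk above applies the same mechanical transformation: each fallible NN_TRY(unvalidatedConvert(...)), NN_TRY(toSigned(...)), NN_TRY(dupFd(...)) or NN_TRY(clone(...)) call is hoisted out of the braced designated initializer into its own named local, and the aggregate is then built from those locals, moving the non-trivially-copyable ones. A plausible motivation, offered as an assumption rather than a statement about this change: a TRY-style macro that expands to statements containing a plain `return` can only sit at statement scope, not nested inside another expression such as an initializer list, and hoisting also makes the evaluation order of the conversions explicit. The sketch below shows the shape of the pattern with simplified stand-ins; Result, TRY_ASSIGN, convertType, convertOperation and the toy Operation are invented for this example and are not the real NN_TRY, nn::GeneralResult or NNAPI types (build as C++20 for the designated initializers).

#include <iostream>
#include <string>
#include <utility>
#include <variant>

// Stand-in for nn::GeneralResult<T>: either a value or an error string.
template <typename T>
using Result = std::variant<T, std::string>;

// Stand-in for NN_TRY, assuming "unwrap on success, early-return the error".
// It expands to full statements containing `return`, so it can only be used
// at statement scope -- not inside a braced initializer.
#define TRY_ASSIGN(var, expr)                                         \
    auto var##_result = (expr);                                       \
    if (std::holds_alternative<std::string>(var##_result)) {          \
        return std::get<std::string>(var##_result);                   \
    }                                                                 \
    auto var = std::move(std::get<0>(var##_result))

struct Operation {  // toy counterpart of the canonical Operation
    int type;
    std::string name;
};

Result<int> convertType(int rawType) {
    if (rawType < 0) return std::string("invalid operation type");
    return rawType;
}

Result<Operation> convertOperation(int rawType, std::string name) {
    // Hoisted fallible conversion, mirroring the "+ const auto type = NN_TRY(...)"
    // lines above; the initializer below only touches already-converted values.
    TRY_ASSIGN(type, convertType(rawType));
    return Operation{
            .type = type,
            .name = std::move(name),
    };
}

int main() {
    auto ok = convertOperation(3, "ADD");
    auto bad = convertOperation(-1, "BROKEN");
    std::cout << std::holds_alternative<Operation>(ok) << " "
              << std::get<std::string>(bad) << "\n";
    return 0;
}

With the conversion hoisted, a failure in convertType returns from convertOperation before the Operation aggregate is ever constructed; the `+` lines in the diff give NN_TRY the same statement-level position in each conversion routine.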