diff --git a/current.txt b/current.txt
index adc8a54493..333f7b3440 100644
--- a/current.txt
+++ b/current.txt
@@ -578,7 +578,7 @@ eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardwar
 5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
 fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
 40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-7f7ef383268c95a1b8fe4e55c662bc806bb0ac11a154f6b049a113a44b0f024f android.hardware.neuralnetworks@1.2::types
+6c29d6fdd5445911df5456b3b84b949cdd59fca0c0b5507662f26a5cac0cf5e5 android.hardware.neuralnetworks@1.2::types
 a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
 1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
 fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -625,12 +625,12 @@ bbeee9604128ede83ee755b67e73b5ad29e6e1dbac9ec41fea6ffe2745b0c50a android.hardwar
 adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
 ddcf89cd8ee2df0d32aee55050826446fb64f7aafde0a7cd946c64f61b1a364c android.hardware.keymaster@4.1::types
 65c16331e57f6dd68b3971f06f78fe9e3209afb60630c31705aa355f9a52bf0d android.hardware.neuralnetworks@1.3::IBuffer
-9b41dd49e2dcc2ecb4243d03f8421d72494ada5cf2945bff88f0019eeca56923 android.hardware.neuralnetworks@1.3::IDevice
+9db064ee44268a876be0367ff771e618362d39ec603b6ecab17e1575725fcd87 android.hardware.neuralnetworks@1.3::IDevice
 4167dc3ad35e9cd0d2057d4868c7675ae2c3c9d05bbd614c1f5dccfa5fd68797 android.hardware.neuralnetworks@1.3::IExecutionCallback
 2fa3679ad7c94b5e88724adcd560c561041068a4ca565c63830e68101988746a android.hardware.neuralnetworks@1.3::IFencedExecutionCallback
 237b23b126a66f3432658020fed78cdd06ba6297459436fe6bae0ba753370833 android.hardware.neuralnetworks@1.3::IPreparedModel
 0439a1fbbec7f16e5e4c653d85ac685d51bfafbae15b8f8cca530acdd7d6a8ce android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-3646950b10f7cacdafca13609b0e18496cea942f3bdfe920494661856eff48bb android.hardware.neuralnetworks@1.3::types
+abbc4e1a969881c9f8ab587add5b5e75b08df834c9c969c013ae38cb4bb16f6a android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index e867120906..993a10513f 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -3165,7 +3165,7 @@ enum OperationType : int32_t {
      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
      *      walking through input in the ‘height’ dimension.
      * * 9: An {@link OperandType::INT32} scalar, specifying the number of
-            groups.
+     *      groups.
      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
      *       {@link FusedActivationFunc} values. Specifies the activation to
      *       invoke on the result.
diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal
index 493153950a..79f9c325ac 100644
--- a/neuralnetworks/1.3/IDevice.hal
+++ b/neuralnetworks/1.3/IDevice.hal
@@ -372,5 +372,5 @@ interface IDevice extends @1.2::IDevice {
      */
     allocate(BufferDesc desc, vec<IPreparedModel> preparedModels, vec<BufferRole> inputRoles,
              vec<BufferRole> outputRoles)
-        generates (ErrorStatus status, IBuffer buffer, int32_t token);
+        generates (ErrorStatus status, IBuffer buffer, uint32_t token);
 };
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index ed577e4d9d..c5dc08c338 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -22,9 +22,9 @@ import @1.0::PerformanceInfo;
 import @1.0::RequestArgument;
 import @1.2::Model.ExtensionNameAndPrefix;
 import @1.2::Model.ExtensionTypeEncoding;
+import @1.2::Operand.ExtraParams;
 import @1.2::OperandType;
 import @1.2::OperationType;
-import @1.2::SymmPerChannelQuantParams;
 
 import android.hidl.safe_union@1.0::Monostate;
 
@@ -3253,7 +3253,7 @@ enum OperationType : int32_t {
      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
      *      walking through input in the ‘height’ dimension.
      * * 9: An {@link OperandType::INT32} scalar, specifying the number of
-            groups.
+     *      groups.
      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
      *       {@link FusedActivationFunc} values. Specifies the activation to
      *       invoke on the result.
@@ -5343,27 +5343,7 @@ struct Operand {
     /**
      * Additional parameters specific to a particular operand type.
      */
-    safe_union ExtraParams {
-        /**
-         * No additional parameters.
-         */
-        Monostate none;
-
-        /**
-         * Symmetric per-channel quantization parameters.
-         *
-         * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
-         */
-        SymmPerChannelQuantParams channelQuant;
-
-        /**
-         * Extension operand parameters.
-         *
-         * The framework treats this as an opaque data blob.
-         * The format is up to individual extensions.
-         */
-        vec<uint8_t> extension;
-    } extraParams;
+    @1.2::Operand.ExtraParams extraParams;
 };
 
 /**
@@ -5551,7 +5531,7 @@ struct Request {
          * Specifies a driver-managed buffer. It is the token returned from IDevice::allocate,
          * and is specific to the IDevice object.
          */
-        int32_t token;
+        uint32_t token;
     };
 
     /**
@@ -5573,7 +5553,7 @@ safe_union OptionalTimePoint {
      * Time point of the steady clock (as from std::chrono::steady_clock)
      * measured in nanoseconds.
      */
-    uint64_t nanoseconds;
+    uint64_t nanosecondsSinceEpoch;
 };
 
 /**
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index d4351ec8d7..3d0d02df46 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -24,9 +24,9 @@ import @1.0::PerformanceInfo;
 import @1.0::RequestArgument;
 import @1.2::Model.ExtensionNameAndPrefix;
 import @1.2::Model.ExtensionTypeEncoding;
+import @1.2::Operand.ExtraParams;
 import @1.2::OperandType;
 import @1.2::OperationType;
-import @1.2::SymmPerChannelQuantParams;
 
 import android.hidl.safe_union@1.0::Monostate;
 
@@ -319,27 +319,7 @@ struct Operand {
     /**
      * Additional parameters specific to a particular operand type.
      */
-    safe_union ExtraParams {
-        /**
-         * No additional parameters.
-         */
-        Monostate none;
-
-        /**
-         * Symmetric per-channel quantization parameters.
-         *
-         * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
-         */
-        SymmPerChannelQuantParams channelQuant;
-
-        /**
-         * Extension operand parameters.
-         *
-         * The framework treats this as an opaque data blob.
-         * The format is up to individual extensions.
-         */
-        vec<uint8_t> extension;
-    } extraParams;
+    @1.2::Operand.ExtraParams extraParams;
 };
 
 /**
@@ -527,7 +507,7 @@ struct Request {
          * Specifies a driver-managed buffer. It is the token returned from IDevice::allocate,
          * and is specific to the IDevice object.
         */
-        int32_t token;
+        uint32_t token;
     };
 
     /**
@@ -549,7 +529,7 @@ safe_union OptionalTimePoint {
      * Time point of the steady clock (as from std::chrono::steady_clock)
      * measured in nanoseconds.
      */
-    uint64_t nanoseconds;
+    uint64_t nanosecondsSinceEpoch;
 };
 
 /**
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 8ea0b7eb47..82f34ff779 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -122,15 +122,15 @@ class DeviceMemoryAllocator {
     // Return {IBuffer object, token} if successful.
    // Return {nullptr, 0} if device memory is not supported.
     template
-    std::pair<sp<IBuffer>, int32_t> allocate(uint32_t index) {
-        std::pair<sp<IBuffer>, int32_t> buffer;
+    std::pair<sp<IBuffer>, uint32_t> allocate(uint32_t index) {
+        std::pair<sp<IBuffer>, uint32_t> buffer;
         allocateInternal(index, &buffer);
         return buffer;
     }
 
   private:
     template
-    void allocateInternal(uint32_t index, std::pair<sp<IBuffer>, int32_t>* result) {
+    void allocateInternal(uint32_t index, std::pair<sp<IBuffer>, uint32_t>* result) {
         ASSERT_NE(result, nullptr);
 
         // Prepare arguments.
@@ -145,14 +145,14 @@ class DeviceMemoryAllocator {
         // Allocate device memory.
         ErrorStatus status;
         sp<IBuffer> buffer;
-        int32_t token;
-        const auto ret = kDevice->allocate(
-                {}, {kPreparedModel}, inputRoles, outputRoles,
-                [&status, &buffer, &token](ErrorStatus error, const sp<IBuffer>& buf, int32_t tok) {
-                    status = error;
-                    buffer = buf;
-                    token = tok;
-                });
+        uint32_t token;
+        auto cb = [&status, &buffer, &token](ErrorStatus error, const sp<IBuffer>& buf,
+                                             uint32_t tok) {
+            status = error;
+            buffer = buf;
+            token = tok;
+        };
+        const auto ret = kDevice->allocate({}, {kPreparedModel}, inputRoles, outputRoles, cb);
 
         // Check allocation results.
         ASSERT_TRUE(ret.isOk());
@@ -217,7 +217,7 @@ Model createModel(const TestModel& testModel) {
             constRefSize += op.data.alignedSize();
         }
 
-        Operand::ExtraParams extraParams;
+        V1_2::Operand::ExtraParams extraParams;
         if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
             extraParams.channelQuant(SymmPerChannelQuantParams{
                     .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
@@ -317,7 +317,7 @@ static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
     // - [2+i, 2+i+o): Output device memories
     DeviceMemoryAllocator allocator(device, preparedModel, testModel);
     std::vector<sp<IBuffer>> buffers;
-    std::vector<int32_t> tokens;
+    std::vector<uint32_t> tokens;
 
     // Model inputs.
     hidl_vec<RequestArgument> inputs(testModel.inputIndexes.size());
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index 62ffcda036..2f1e05c5c6 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -64,11 +64,11 @@ static OptionalTimePoint makeOptionalTimePoint(DeadlineBoundType deadlineBoundTy
                     std::chrono::time_point_cast<std::chrono::nanoseconds>(currentTime);
             const uint64_t nanosecondsSinceEpoch =
                     currentTimeInNanoseconds.time_since_epoch().count();
-            deadline.nanoseconds(nanosecondsSinceEpoch);
+            deadline.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
         } break;
         case DeadlineBoundType::UNLIMITED: {
             uint64_t unlimited = std::numeric_limits<uint64_t>::max();
-            deadline.nanoseconds(unlimited);
+            deadline.nanosecondsSinceEpoch(unlimited);
         } break;
     }
     return deadline;
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 1245432307..0a35e2d233 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -50,7 +50,7 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
 
     OptionalTimePoint deadline;
     if (testDeadline) {
-        deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
+        deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
     }
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index a29d158e7d..2a4269f3d4 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -61,7 +61,7 @@ static void validate(const sp<IPreparedModel>& preparedModel, const std::string&
 
     OptionalTimePoint deadline;
     if (testDeadline) {
-        deadline.nanoseconds(std::numeric_limits<uint64_t>::max());
+        deadline.nanosecondsSinceEpoch(std::numeric_limits<uint64_t>::max());
    }
 
     // asynchronous
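---
Reviewer note (not part of the patch): below is a minimal sketch of how a @1.3
client might exercise the revised surface after this change, namely
IDevice::allocate reporting the driver-managed buffer token as uint32_t, a
Request memory pool carrying that token, and OptionalTimePoint using the
renamed nanosecondsSinceEpoch field. It assumes the generated HIDL C++
bindings and that the token-carrying safe_union is Request::MemoryPool (as in
the full 1.3 types.hal, not visible in this excerpt). The service name
"sample-all" and the empty BufferDesc/BufferRole arguments are illustrative
placeholders; a real caller passes a valid descriptor, prepared models, and
roles.

#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/types.h>

#include <chrono>
#include <cstdint>

using ::android::sp;
namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;

int main() {
    // Illustrative service name; drivers register under their own names.
    const sp<V1_3::IDevice> device = V1_3::IDevice::getService("sample-all");
    if (device == nullptr) return 1;

    // Allocate a driver-managed buffer. After this patch the token reported
    // through the callback is uint32_t rather than int32_t.
    V1_3::ErrorStatus allocStatus = V1_3::ErrorStatus::GENERAL_FAILURE;
    sp<V1_3::IBuffer> buffer;
    uint32_t token = 0;
    const auto ret = device->allocate(
            {}, {}, {}, {},  // placeholder desc, preparedModels, inputRoles, outputRoles
            [&](V1_3::ErrorStatus status, const sp<V1_3::IBuffer>& buf, uint32_t tok) {
                allocStatus = status;
                buffer = buf;
                token = tok;
            });
    if (!ret.isOk() || allocStatus != V1_3::ErrorStatus::NONE) return 1;

    // Reference the driver-managed buffer from a Request by its token.
    V1_3::Request::MemoryPool pool;
    pool.token(token);

    // Build an execution deadline with the renamed safe_union field.
    const auto now = std::chrono::steady_clock::now();
    const uint64_t ns = std::chrono::time_point_cast<std::chrono::nanoseconds>(now)
                                .time_since_epoch()
                                .count();
    V1_3::OptionalTimePoint deadline;
    deadline.nanosecondsSinceEpoch(ns);
    return 0;
}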