diff --git a/audio/5.0/IStreamIn.hal b/audio/5.0/IStreamIn.hal index b042960b17..e15b0347f5 100644 --- a/audio/5.0/IStreamIn.hal +++ b/audio/5.0/IStreamIn.hal @@ -169,6 +169,10 @@ interface IStreamIn extends IStream { /** * Specifies the logical microphone (for processing). * + * If the feature is not supported an error should be returned + * If multiple microphones are present, this should be treated as a preference + * for their combined direction. + * * Optional method * * @param Direction constant @@ -180,6 +184,10 @@ interface IStreamIn extends IStream { /** * Specifies the zoom factor for the selected microphone (for processing). * + * If the feature is not supported an error should be returned + * If multiple microphones are present, this should be treated as a preference + * for their combined field dimension. + * * Optional method * * @param the desired field dimension of microphone capture. Range is from -1 (wide angle), diff --git a/audio/common/5.0/types.hal b/audio/common/5.0/types.hal index 0cbf35ee54..e1279ee64a 100644 --- a/audio/common/5.0/types.hal +++ b/audio/common/5.0/types.hal @@ -146,6 +146,7 @@ enum AudioSource : int32_t { */ ECHO_REFERENCE = 1997, FM_TUNER = 1998, + HOTWORD = 1999, }; typedef int32_t AudioSession; diff --git a/current.txt b/current.txt index 0c501400aa..1108bebb2f 100644 --- a/current.txt +++ b/current.txt @@ -416,11 +416,11 @@ dfdb4d04b65dc363e5621c85bfdf3023c277b75c31d821d8e71b3f44f198e214 android.hardwar 0a911297821854985cfcdb17b63d7948af0f0f51ce8c68cc86367c185bbc772e android.hardware.audio@5.0::IDevicesFactory ce2e8c6c8559fd42bd69e0dee27b4d9c93cd9b2eff487b4e6b6395b6a1a993d6 android.hardware.audio@5.0::IPrimaryDevice 4a4e5e5d9357004a1256bde8d36010ee00c51cea811a1c1e0dd969a9fc0bf862 android.hardware.audio@5.0::IStream -e05e48c583de14c1e5a6fa9d48ea50244e3e0924b76b342374e7471dc8007ba9 android.hardware.audio@5.0::IStreamIn +b9d41ff4031266de1ecef394a8a64de7d857634dd08dc6be855fca2fe3075975 android.hardware.audio@5.0::IStreamIn 9471b12b1c255bb530695720bc4174bd74987b75b1f820854af8944bc8c215c9 android.hardware.audio@5.0::IStreamOut 1b0500367ed2b32a841667ac3200edf3d3a164e8004aca445ff1b085ac831e93 android.hardware.audio@5.0::IStreamOutCallback 83e365479cc77d8717c155e1787ee668cd2ae4c557b467cf75b8e7cd53697ad8 android.hardware.audio@5.0::types -a0df6961e65444e1ca40a206d7f31304d313e8b7e5b122855e3272ab02720cd4 android.hardware.audio.common@5.0::types +07d17800b298331e90d4ea5d8ba19a1ae3fe9c1dbff08d9f75fd3ade09496d67 android.hardware.audio.common@5.0::types f269297866765b95ddd1825676cc8a772f0c7c9863286df596fc302781a42ff5 android.hardware.audio.effect@5.0::IAcousticEchoCancelerEffect fa187b602d8939644ef708ed7627f2e3deac97899a4bda1de07f2ff126abe243 android.hardware.audio.effect@5.0::IAutomaticGainControlEffect e1bf864ccb8458c0da1dcc74a2e748b1dca8ac360df590591cf82d98292d7981 android.hardware.audio.effect@5.0::IBassBoostEffect @@ -464,7 +464,9 @@ f27baaa587bc3dd9b740cb6928ab812b9b7d105b5187663938aee578105f3c39 android.hardwar 7f460e795f5d1ed5e378935f98c6db4d39497de988aef1b4c2a4a07a6c400392 android.hardware.gnss@2.0::IAGnss 2e5ad983734069e84a760004b32da0d09e4170c05380abe27e6eb80e4aa70d5a android.hardware.gnss@2.0::IAGnssCallback 1f4ac068a88a72360280d94a7f6fd7c63813c1eea4891a0eb01394d3e7e775f2 android.hardware.gnss@2.0::IAGnssRil -63216fcb23eaf4d6f12ea0e99b8bfdb8e4e57c02f215d433cd30943d850f61a7 android.hardware.gnss@2.0::IGnss +4deafcdcffa2d002119e7f58810b767a84666e76475aae68e757ec2845d9756d android.hardware.gnss@2.0::IGnss 
+db6bdf6dfc5edf6c85d2944976db899227abb51079c893874353c322342c50b6 android.hardware.gnss@2.0::IGnssBatching +1f89392f1ebb693d8fa6f50324b1635fc79fab246d31900e63998e1b0e17511c android.hardware.gnss@2.0::IGnssBatchingCallback b11a5e4a1602d3f408716b6fe2c578a79f060d571aad8e828f9a4426d161fbcf android.hardware.gnss@2.0::IGnssCallback ecc966c68bddbd95c8dae782b84204cf01c75734675e8769963f3b5106ec128b android.hardware.gnss@2.0::IGnssConfiguration b670bae2ab8517336290532e364502b4db9120340d75474ccc8442b1b15d6ab7 android.hardware.gnss@2.0::IGnssDebug @@ -506,11 +508,11 @@ b9422a9aca84df1ff9623dc12c0562abce97716e28d63a965f2bfb88f9ad9607 android.hardwar 4cb139f729c29d8d6f4ecdab149c4feb571dad8a06e56cd57fcb52e70208bab4 android.hardware.media.c2@1.0::types 4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback 19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext -96249c852dabeefa3a9496ecdfc44681a071c665bfbf88527bf775c88bf1ab1b android.hardware.neuralnetworks@1.2::IDevice +b83317b66721241887d2770b5ae95fd5af1e77c5daa7530ecb08fae8892f2b43 android.hardware.neuralnetworks@1.2::IDevice 92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback -83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel +36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback -114056b3b9303e0e858f28e718ba45722de5678d1d54eec0dcd10788604bf2bb android.hardware.neuralnetworks@1.2::types +209a5ee694b94328afb2af2768f1fe6a69148e2cbb85ec3c340a36eed818c697 android.hardware.neuralnetworks@1.2::types cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types 4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats @@ -542,9 +544,10 @@ b47f90302595874dfddb19bd05a054727bf18b3a930bc810ea14957b859ae8bf android.hardwar 61bc302e7c974c59b25898c585c6e9685e8a81021b1bed3eedf5224198f2785a android.hardware.usb@1.2::IUsb 46996cd2a1c66261a75a1f6ecada77eeb5861eb264fa39b996548fe0a7f22dd3 android.hardware.usb@1.2::IUsbCallback 3bbaa8cbc5d6b1da21f5509b2b641e05fc7eeca1354751eb1bb3cf37f89aa32f android.hardware.usb@1.2::types -92c1a726c80970d623b891f7c2f9a989a40a15ee1244092b49f4eb6adcdce4e9 android.hardware.vibrator@1.3::IVibrator +0f7ff73793548d5154014059b7e0fe9ef6355d32218ace157954d02055f5248b android.hardware.vibrator@1.3::IVibrator +2e313dc27a1327a29862ab3e085917f75c9e996f7c8df5a0ce37b9a0ed076b80 android.hardware.vibrator@1.3::types f19832856a3f53ced5ef91d3cc630a57fb7f4d4ce15f364dbed09099b89f6830 android.hardware.wifi@1.3::IWifi -7c6799c19bfdb3dec016b751556fe246cf7d37191ee7bb82a0091ab9fbf6f2fb android.hardware.wifi@1.3::IWifiChip +64be084b6e1ef330b75fa916593dc0b94b0ec7a16d5cfaa5a31e6c9143c8288d android.hardware.wifi@1.3::IWifiChip 3bef30e8b61ab050c0f6fd26572712be5ebb7707d624c9aa6c74bbb9d6a5b4a9 android.hardware.wifi@1.3::IWifiStaIface f3dbd8dd0d6333c005610288a4785d0ef79a72a7bbe6d0a46d46fa89fc886f1e android.hardware.wifi@1.3::types 2fae61e962f68091335f7ff4581fcfe2e28ce7f6132d7a712fa13d7965543e4d android.hardware.wifi.hostapd@1.1::IHostapd diff --git 
a/gnss/2.0/Android.bp b/gnss/2.0/Android.bp index 30dc55de57..6cfd3462f2 100644 --- a/gnss/2.0/Android.bp +++ b/gnss/2.0/Android.bp @@ -12,6 +12,8 @@ hidl_interface { "IAGnssCallback.hal", "IAGnssRil.hal", "IGnss.hal", + "IGnssBatching.hal", + "IGnssBatchingCallback.hal", "IGnssCallback.hal", "IGnssConfiguration.hal", "IGnssDebug.hal", diff --git a/gnss/2.0/IGnss.hal b/gnss/2.0/IGnss.hal index ba757d73d6..f19f8d0566 100644 --- a/gnss/2.0/IGnss.hal +++ b/gnss/2.0/IGnss.hal @@ -27,6 +27,7 @@ import IGnssDebug; import IGnssMeasurement; import IAGnss; import IAGnssRil; +import IGnssBatching; /** * Represents the standard GNSS (Global Navigation Satellite System) interface. @@ -104,6 +105,13 @@ interface IGnss extends @1.1::IGnss { */ getExtensionVisibilityControl() generates (IGnssVisibilityControl visibilityControlIface); + /** + * This method returns the IGnssBatching interface. + * + * @return batchingIface Handle to the IGnssBatching interface. + */ + getExtensionGnssBatching_2_0() generates (IGnssBatching batchingIface); + /** * Injects current location from the best available location provider. * diff --git a/gnss/2.0/IGnssBatching.hal b/gnss/2.0/IGnssBatching.hal new file mode 100644 index 0000000000..961fa69548 --- /dev/null +++ b/gnss/2.0/IGnssBatching.hal @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.hardware.gnss@2.0; + +import @1.0::IGnssBatching; +import IGnssBatchingCallback; + +/** + * Extended interface for GNSS Batching support. + * + * If this interface is supported, this batching request must be able to run in + * parallel with, or without, non-batched location requested by the + * IGnss start() & stop() - i.e. both requests must be handled independently, + * and not interfere with each other. + * + * For example, if a 1Hz continuous output is underway on the IGnssCallback, + * due to an IGnss start() operation, + * and then an IGnssBatching start() is called for a location every 10 + * seconds, the newly added batching request must not disrupt the 1Hz + * continuous location output on the IGnssCallback. + * + * As with GNSS Location outputs, the source of location must be GNSS satellite + * measurements, optionally using inertial and baro sensors to improve + * relative motion filtering. No additional absolute positioning information, + * such as WiFi derived location, may be mixed with the GNSS information. + */ +interface IGnssBatching extends @1.0::IGnssBatching { + /** + * Opens the interface and provides the callback routines + * to the implementation of this interface. + * + * @param callback Callback interface for IGnssBatching. + * + * @return success Returns true on success. 
+ */ + init_2_0(IGnssBatchingCallback callback) generates (bool success); +}; \ No newline at end of file diff --git a/gnss/2.0/IGnssBatchingCallback.hal b/gnss/2.0/IGnssBatchingCallback.hal new file mode 100644 index 0000000000..4f8b4ecbba --- /dev/null +++ b/gnss/2.0/IGnssBatchingCallback.hal @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.hardware.gnss@2.0; + +/** The callback interface to report batched GNSS locations from the HAL. */ +interface IGnssBatchingCallback { + /** + * Called when a batch of locations is output, by various means, including + * a flush request, as well as the buffer becoming full (if the appropriate + * option is set). + * + * All locations returned by this callback must be cleared from the hardware + * buffer, such that sequential calls of this callback do not return any + * redundant locations. (Same lat/lon, at a new time, is acceptable.) + * + * The GnssLocation struct in gnss@2.0 is extended to include elapsed realtime + * information. + * + * @param locations GNSS Location information from HAL. + */ + gnssLocationBatchCb(vec<GnssLocation> locations); +}; diff --git a/gnss/2.0/default/Android.bp b/gnss/2.0/default/Android.bp index 64187e24d9..0fcd76495c 100644 --- a/gnss/2.0/default/Android.bp +++ b/gnss/2.0/default/Android.bp @@ -25,6 +25,7 @@ cc_binary { "AGnss.cpp", "AGnssRil.cpp", "Gnss.cpp", + "GnssBatching.cpp", "GnssMeasurement.cpp", "GnssMeasurementCorrections.cpp", "GnssVisibilityControl.cpp", diff --git a/gnss/2.0/default/Gnss.cpp b/gnss/2.0/default/Gnss.cpp index bb89b8b423..75c2385169 100644 --- a/gnss/2.0/default/Gnss.cpp +++ b/gnss/2.0/default/Gnss.cpp @@ -23,6 +23,7 @@ #include "AGnss.h" #include "AGnssRil.h" +#include "GnssBatching.h" #include "GnssConfiguration.h" #include "GnssMeasurement.h" #include "GnssMeasurementCorrections.h" @@ -265,6 +266,10 @@ Return<sp<visibility_control::V1_0::IGnssVisibilityControl>> Gnss::getExtensionV return new GnssVisibilityControl(); } +Return<sp<V2_0::IGnssBatching>> Gnss::getExtensionGnssBatching_2_0() { + return new GnssBatching(); +} + Return<bool> Gnss::setCallback_2_0(const sp<V2_0::IGnssCallback>& callback) { ALOGD("Gnss::setCallback_2_0"); if (callback == nullptr) { diff --git a/gnss/2.0/default/Gnss.h b/gnss/2.0/default/Gnss.h index a500128670..72f77976e5 100644 --- a/gnss/2.0/default/Gnss.h +++ b/gnss/2.0/default/Gnss.h @@ -92,6 +92,7 @@ struct Gnss : public IGnss { getExtensionMeasurementCorrections() override; Return<sp<visibility_control::V1_0::IGnssVisibilityControl>> getExtensionVisibilityControl() override; + Return<sp<V2_0::IGnssBatching>> getExtensionGnssBatching_2_0() override; Return<bool> injectBestLocation_2_0(const V2_0::GnssLocation& location) override; private: diff --git a/gnss/2.0/default/GnssBatching.cpp b/gnss/2.0/default/GnssBatching.cpp new file mode 100644 index 0000000000..d56cdfb33f --- /dev/null +++ b/gnss/2.0/default/GnssBatching.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "GnssBatching" + +#include "GnssBatching.h" + +namespace android { +namespace hardware { +namespace gnss { +namespace V2_0 { +namespace implementation { + +sp<V2_0::IGnssBatchingCallback> GnssBatching::sCallback = nullptr; + +// Methods from ::android::hardware::gnss::V1_0::IGnssBatching follow. +Return<bool> GnssBatching::init(const sp<V1_0::IGnssBatchingCallback>&) { + // TODO implement + return bool{}; +} + +Return<uint16_t> GnssBatching::getBatchSize() { + // TODO implement + return uint16_t{}; +} + +Return<bool> GnssBatching::start(const V1_0::IGnssBatching::Options&) { + // TODO implement + return bool{}; +} + +Return<void> GnssBatching::flush() { + // TODO implement + return Void(); +} + +Return<bool> GnssBatching::stop() { + // TODO implement + return bool{}; +} + +Return<void> GnssBatching::cleanup() { + // TODO implement + return Void(); +} + +// Methods from V2_0::IGnssBatching follow. +Return<bool> GnssBatching::init_2_0(const sp<V2_0::IGnssBatchingCallback>& callback) { + sCallback = callback; + return true; +} + +} // namespace implementation +} // namespace V2_0 +} // namespace gnss +} // namespace hardware +} // namespace android diff --git a/gnss/2.0/default/GnssBatching.h b/gnss/2.0/default/GnssBatching.h new file mode 100644 index 0000000000..62ac580897 --- /dev/null +++ b/gnss/2.0/default/GnssBatching.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include <android/hardware/gnss/2.0/IGnssBatching.h> +#include <hidl/MQDescriptor.h> +#include <hidl/Status.h> + +namespace android { +namespace hardware { +namespace gnss { +namespace V2_0 { +namespace implementation { + +using ::android::sp; +using ::android::hardware::hidl_array; +using ::android::hardware::hidl_memory; +using ::android::hardware::hidl_string; +using ::android::hardware::hidl_vec; +using ::android::hardware::Return; +using ::android::hardware::Void; + +struct GnssBatching : public IGnssBatching { + // Methods from ::android::hardware::gnss::V1_0::IGnssBatching follow. + Return<bool> init(const sp<V1_0::IGnssBatchingCallback>& callback) override; + Return<uint16_t> getBatchSize() override; + Return<bool> start(const V1_0::IGnssBatching::Options& options) override; + Return<void> flush() override; + Return<bool> stop() override; + Return<void> cleanup() override; + + // Methods from V2_0::IGnssBatching follow. 
+ Return<bool> init_2_0(const sp<V2_0::IGnssBatchingCallback>& callback) override; + + private: + static sp<V2_0::IGnssBatchingCallback> sCallback; +}; + +} // namespace implementation +} // namespace V2_0 +} // namespace gnss +} // namespace hardware +} // namespace android diff --git a/gnss/2.0/default/GnssConfiguration.cpp b/gnss/2.0/default/GnssConfiguration.cpp index 4389dd28f3..6bf1712aff 100644 --- a/gnss/2.0/default/GnssConfiguration.cpp +++ b/gnss/2.0/default/GnssConfiguration.cpp @@ -33,13 +33,11 @@ Return<bool> GnssConfiguration::setSuplEs(bool enable) { } Return<bool> GnssConfiguration::setSuplVersion(uint32_t) { - // TODO implement - return bool{}; + return true; } Return<bool> GnssConfiguration::setSuplMode(hidl_bitfield<SuplMode>) { - // TODO implement - return bool{}; + return true; } Return<bool> GnssConfiguration::setGpsLock(hidl_bitfield<GpsLock> gpsLock) { @@ -49,18 +47,15 @@ Return<bool> GnssConfiguration::setGpsLock(hidl_bitfield<GpsLock> gpsLock) { } Return<bool> GnssConfiguration::setLppProfile(hidl_bitfield<LppProfile>) { - // TODO implement - return bool{}; + return true; } Return<bool> GnssConfiguration::setGlonassPositioningProtocol(hidl_bitfield<GlonassPosProtocol>) { - // TODO implement - return bool{}; + return true; } Return<bool> GnssConfiguration::setEmergencySuplPdn(bool) { - // TODO implement - return bool{}; + return true; } // Methods from ::android::hardware::gnss::V1_1::IGnssConfiguration follow. diff --git a/gnss/2.0/vts/functional/gnss_hal_test.cpp b/gnss/2.0/vts/functional/gnss_hal_test.cpp index b8c343753f..da6092bb4b 100644 --- a/gnss/2.0/vts/functional/gnss_hal_test.cpp +++ b/gnss/2.0/vts/functional/gnss_hal_test.cpp @@ -26,6 +26,7 @@ using ::android::hardware::gnss::common::Utils; GnssHalTest::GnssHalTest() : info_called_count_(0), capabilities_called_count_(0), + measurement_corrections_capabilities_called_count_(0), location_called_count_(0), name_called_count_(0), notify_count_(0) {} @@ -43,6 +44,7 @@ void GnssHalTest::TearDown() { // Reset counters info_called_count_ = 0; capabilities_called_count_ = 0; + measurement_corrections_capabilities_called_count_ = 0; location_called_count_ = 0; name_called_count_ = 0; measurement_called_count_ = 0; diff --git a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp index 230c9799f1..0682f84d5f 100644 --- a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp +++ b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp @@ -32,6 +32,8 @@ using IAGnssRil_2_0 = android::hardware::gnss::V2_0::IAGnssRil; using IAGnss_2_0 = android::hardware::gnss::V2_0::IAGnss; using IAGnss_1_0 = android::hardware::gnss::V1_0::IAGnss; using IAGnssCallback_2_0 = android::hardware::gnss::V2_0::IAGnssCallback; +using IGnssBatching_V1_0 = android::hardware::gnss::V1_0::IGnssBatching; +using IGnssBatching_V2_0 = android::hardware::gnss::V2_0::IGnssBatching; using android::hardware::gnss::common::Utils; using android::hardware::gnss::measurement_corrections::V1_0::IMeasurementCorrections; @@ -326,6 +328,10 @@ TEST_F(GnssHalTest, TestGnssMeasurementCorrections) { return; } + sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback = + new GnssMeasurementCorrectionsCallback(*this); + iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback); + const int kMeasurementCorrectionsCapabilitiesTimeoutSeconds = 5; waitForMeasurementCorrectionsCapabilities(kMeasurementCorrectionsCapabilitiesTimeoutSeconds); ASSERT_TRUE(measurement_corrections_capabilities_called_count_ > 0); @@ -395,3 +401,20 @@ TEST_F(GnssHalTest, TestInjectBestLocation_2_0) { gnss_hal_->injectBestLocation_2_0(last_location_); StopAndClearLocations(); } + +/* + * TestGnssBatchingExtension: + * Gets the 
GnssBatchingExtension and verifies that it supports either the @1.0::IGnssBatching + * or @2.0::IGnssBatching extension. + */ +TEST_F(GnssHalTest, TestGnssBatchingExtension) { + auto gnssBatching_V2_0 = gnss_hal_->getExtensionGnssBatching_2_0(); + ASSERT_TRUE(gnssBatching_V2_0.isOk()); + + auto gnssBatching_V1_0 = gnss_hal_->getExtensionGnssBatching(); + ASSERT_TRUE(gnssBatching_V1_0.isOk()); + + sp<IGnssBatching_V1_0> iGnssBatching_V1_0 = gnssBatching_V1_0; + sp<IGnssBatching_V2_0> iGnssBatching_V2_0 = gnssBatching_V2_0; + ASSERT_TRUE(iGnssBatching_V1_0 != nullptr || iGnssBatching_V2_0 != nullptr); +} diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp index f5cb0d7cf5..106f33279d 100644 --- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp @@ -52,6 +52,7 @@ using ::test_helper::for_each; using ::test_helper::MixedTyped; using ::test_helper::MixedTypedExample; using ::test_helper::resize_accordingly; +using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; template <typename T> void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra, @@ -540,7 +541,8 @@ void PrepareModel(const sp<IDevice>& device, const V1_2::Model& model, sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); ASSERT_NE(nullptr, preparedModelCallback.get()); Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2( - model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); + model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(), + hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback); ASSERT_TRUE(prepareLaunchStatus.isOk()); ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal index b9fa38870e..d83f9e6758 100644 --- a/neuralnetworks/1.2/IDevice.hal +++ b/neuralnetworks/1.2/IDevice.hal @@ -75,6 +75,17 @@ interface IDevice extends @1.1::IDevice { */ getType() generates (ErrorStatus status, DeviceType type); + /** + * Gets the capabilities of a driver. + * + * @return status Error status of the call, must be: + * - NONE if successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * @return capabilities Capabilities of the driver. + */ + getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities); + /** * Gets information about extensions supported by the driver implementation. * @@ -113,44 +124,83 @@ generates (ErrorStatus status, vec<bool> supportedOperations); /** - * Gets whether the driver supports compilation caching. + * Gets the caching requirements of the driver implementation. * - * isCachingSupported indicates whether the driver supports compilation caching. - * Even if so, the driver may still choose not to cache certain compiled models. + * There are two types of cache file descriptors provided to the driver: model cache + * and data cache. * - * If the device reports the caching is not supported, the user may avoid calling - * IDevice::prepareModelFromCache and IPreparedModel::saveToCache. + * The data cache is for caching constant data, possibly including preprocessed + * and transformed tensor buffers. Any modification to the data cache should + * have no worse effect than generating bad output values at execution time. + * + * The model cache is for caching security-sensitive data such as compiled + * executable machine code in the device's native binary format. 
A modification + * to the model cache may affect the driver's execution behavior, and a malicious + * client could make use of this to execute beyond the granted permission. Thus, + * the driver must always check whether the model cache is corrupted before + * preparing the model from cache. + * + * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver + * implementation needs to cache a single prepared model. Returning 0 for both types + * indicates compilation caching is not supported by this driver. The driver may + * still choose not to cache certain compiled models even if it reports that caching + * is supported. + * + * If the device reports that caching is not supported, the user may avoid calling + * IDevice::prepareModelFromCache or providing cache file descriptors to + * IDevice::prepareModel_1_2. * * @return status Error status of the call, must be: * - NONE if successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error - * @return supported A boolean indicating whether the driver supports compilation - * caching. Even on returning true, the driver may still choose - * not to cache certain compiled models. + * @return numModelCache An unsigned integer indicating how many files for model cache + * the driver needs to cache a single prepared model. It must + * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES. + * @return numDataCache An unsigned integer indicating how many files for data cache + * the driver needs to cache a single prepared model. It must + * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES. */ - isCachingSupported() generates (ErrorStatus status, bool supported); + getNumberOfCacheFilesNeeded() + generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache); /** - * Creates a prepared model for execution. + * Asynchronously creates a prepared model for execution and optionally saves it + * into cache files. * - * prepareModel is used to make any necessary transformations or alternative + * prepareModel is used to make any necessary transformations to or alternative * representations to a model for execution, possibly including * transformations on the constant data, optimization on the model's graph, * or compilation into the device's native binary format. The model itself * is not changed. * + * Optionally, caching information may be provided for the driver to save + * the prepared model to cache files for faster model compilation time + * when the same model preparation is requested in the future. There are + * two types of cache file handles provided to the driver: model cache + * and data cache. For more information on the two types of cache handles, + * refer to getNumberOfCacheFilesNeeded. + * + * The file descriptors must be opened with read and write permission. A file may + * have any size, and the corresponding file descriptor may have any offset. The + * driver must truncate a file to zero size before writing to that file. The file + * descriptors may be closed by the client once the asynchronous preparation has + * finished. The driver must dup a file descriptor if it wants to get access to + * the cache file later. + * * The model is prepared asynchronously with respect to the caller. The - * prepareModel function must verify the inputs to the prepareModel function - * are correct. 
If there is an error, prepareModel must immediately invoke + * prepareModel function must verify the inputs to the preparedModel function + * related to preparing the model (as opposed to saving the prepared model to + * cache) are correct. If there is an error, prepareModel must immediately invoke * the callback with the appropriate ErrorStatus value and nullptr for the - * IPreparedModel, then return with the same ErrorStatus. If the inputs to - * the prepareModel function are valid and there is no error, prepareModel - * must launch an asynchronous task to prepare the model in the background, - * and immediately return from prepareModel with ErrorStatus::NONE. If the - * asynchronous task fails to launch, prepareModel must immediately invoke - * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the - * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE. + * IPreparedModel, then return with the same ErrorStatus. If the inputs to the + * prepareModel function that are related to preparing the model are valid and + * there is no error, prepareModel must launch an asynchronous task + * to prepare the model in the background, and immediately return from + * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch, + * prepareModel must immediately invoke the callback with + * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return + * with ErrorStatus::GENERAL_FAILURE. * * When the asynchronous task has finished preparing the model, it must * immediately invoke the callback function provided as an input to @@ -160,6 +210,14 @@ interface IDevice extends @1.1::IDevice { * the callback object must be invoked with the appropriate ErrorStatus * value and nullptr for the IPreparedModel. * + * Optionally, the driver may save the prepared model to cache during the + * asynchronous preparation. Any error that occurs when saving to cache must + * not affect the status of preparing the model. Even if the input arguments + * related to the cache may be invalid, or the driver may fail to save to cache, + * the prepareModel function must finish preparing the model. The driver + * may choose not to save to cache even if the caching information is + * provided and valid. + * * The only information that may be unknown to the model at this stage is * the shape of the tensors, which may only be known at execution time. As * such, some driver services may return partially prepared models, where @@ -173,6 +231,26 @@ interface IDevice extends @1.1::IDevice { * @param model The model to be prepared for execution. * @param preference Indicates the intended execution behavior of a prepared * model. + * @param modelCache A vector of handles with each entry holding exactly one + * cache file descriptor for the security-sensitive cache. The length of + * the vector must either be 0 indicating that caching information is not provided, + * or match the numModelCache returned from getNumberOfCacheFilesNeeded. The cache + * handles will be provided in the same order when retrieving the + * preparedModel from cache files with prepareModelFromCache. + * @param dataCache A vector of handles with each entry holding exactly one + * cache file descriptor for the constants' cache. The length of + * the vector must either be 0 indicating that caching information is not provided, + * or match the numDataCache returned from getNumberOfCacheFilesNeeded. 
The cache + * handles will be provided in the same order when retrieving the + * preparedModel from cache files with prepareModelFromCache. + * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN + * identifying the prepared model. The same token will be provided when retrieving + * the prepared model from the cache files with prepareModelFromCache. + * Tokens should be chosen to have a low rate of collision for a particular + * application. The driver cannot detect a collision; a collision will result + * in a failed execution or in a successful execution that produces incorrect + * output values. If both modelCache and dataCache are empty indicating that + * caching information is not provided, this token must be ignored. * @param callback A callback object used to return the error status of * preparing the model for execution and the prepared model if * successful, nullptr otherwise. The callback object's notify function @@ -182,9 +260,12 @@ interface IDevice extends @1.1::IDevice { * - NONE if preparation task is successfully launched * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if one of the input arguments is invalid + * - INVALID_ARGUMENT if one of the input arguments related to preparing the + * model is invalid */ prepareModel_1_2(Model model, ExecutionPreference preference, + vec modelCache, vec dataCache, + uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token, IPreparedModelCallback callback) generates (ErrorStatus status); @@ -192,22 +273,17 @@ interface IDevice extends @1.1::IDevice { * Creates a prepared model from cache files for execution. * * prepareModelFromCache is used to retrieve a prepared model directly from - * cache files to avoid slow model compilation time. There are exactly two - * cache file descriptors provided to the driver: modelCache and dataCache. + * cache files to avoid slow model compilation time. There are + * two types of cache file handles provided to the driver: model cache + * and data cache. For more information on the two types of cache handles, + * refer to getNumberOfCacheFilesNeeded. * - * The dataCache is for caching constant data, possibly including preprocessed - * and transformed tensor buffers. Any modification to the dataCache should - * have no worse effect than generating bad output values at execution time. - * - * The modelCache is for caching security-sensitive data such as compiled - * executable machine code in the device's native binary format. A modification - * to the modelCache may affect the driver's execution behavior, and a malicious - * client could make use of this to execute beyond the granted permission. Thus, - * the driver must always check whether the modelCache is corrupted before preparing - * the model from cache. - * - * The two file descriptors may be closed by the client once the asynchronous - * preparation has finished. The driver has to copy all the data it needs. + * The file descriptors must be opened with read and write permission. A file may + * have any size, and the corresponding file descriptor may have any offset. The + * driver must truncate a file to zero size before writing to that file. The file + * descriptors may be closed by the client once the asynchronous preparation has + * finished. The driver must dup a file descriptor if it wants to get access to + * the cache file later. * * The model is prepared asynchronously with respect to the caller. 
The * prepareModelFromCache function must verify the inputs to the @@ -241,13 +317,17 @@ * used with different shapes of inputs on different (possibly concurrent) * executions. * - * @param modelCache A handle holding exactly one cache file descriptor for the - * security-sensitive cache. - * @param dataCache A handle holding exactly one cache file descriptor for the - * constants' cache. + * @param modelCache A vector of handles with each entry holding exactly one + * cache file descriptor for the security-sensitive cache. The length of + * the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded. + * The cache handles will be provided in the same order as with prepareModel_1_2. + * @param dataCache A vector of handles with each entry holding exactly one + * cache file descriptor for the constants' cache. The length of the vector + * must match the numDataCache returned from getNumberOfCacheFilesNeeded. + * The cache handles will be provided in the same order as with prepareModel_1_2. * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN * identifying the prepared model. It is the same token provided when saving - * the cache files with IPreparedModel::saveToCache. Tokens should be chosen + * the cache files with prepareModel_1_2. Tokens should be chosen * to have a low rate of collision for a particular application. The driver * cannot detect a collision; a collision will result in a failed execution * or in a successful execution that produces incorrect output values. @@ -263,7 +343,7 @@ * unspecified error * - INVALID_ARGUMENT if one of the input arguments is invalid */ - prepareModelFromCache(handle modelCache, handle dataCache, + prepareModelFromCache(vec<handle> modelCache, vec<handle> dataCache, uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token, IPreparedModelCallback callback) generates (ErrorStatus status); diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal index 757d5f1467..5d2d80ff71 100644 --- a/neuralnetworks/1.2/IPreparedModel.hal +++ b/neuralnetworks/1.2/IPreparedModel.hal @@ -157,62 +157,4 @@ interface IPreparedModel extends @1.0::IPreparedModel { fmq_sync<FmqRequestDatum> requestChannel, fmq_sync<FmqResultDatum> resultChannel) generates (ErrorStatus status, IBurstContext context); - - /* - * Saves the prepared model to cache files. - * - * saveToCache is used to save a prepared model to cache files for faster - * model compilation time when the same model preparation is requested in - * the future. There are exactly two cache file descriptors provided to the - * driver: modelCache and dataCache. - * - * The dataCache is for caching constant data, possibly including preprocessed - * and transformed tensor buffers. Any modification to the dataCache should - * have no worse effect than generating bad output values at execution time. - * - * The modelCache is for caching security-sensitive data such as compiled - * executable machine code in the device's native binary format. A modification - * to the modelCache may affect the driver's execution behavior, and a malicious - * client could make use of this to execute beyond the granted permission. Thus, - * the driver must always check whether the modelCache is corrupted before preparing - * the model from cache. - * - * The two file descriptors must point to two zero-length files with offset - * positioned at the beginning of the file. 
The file descriptors may be closed - * by the client once the method has returned. - * - * If the driver decides not to save the prepared model without looking at the - * input arguments to the saveToCache function, saveToCache must return with - * ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify - * the input arguments to the saveToCache function are valid, and return with - * ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver - * could not save the prepared model, saveToCache must return with the appropriate - * ErrorStatus. Otherwise, it must write the cache files and return - * ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents - * of the cache files are undefined. - * - * @param modelCache A handle holding exactly one cache file descriptor for the - * security-sensitive cache. - * @param dataCache A handle holding exactly one cache file descriptor for the - * constants' cache. - * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN - * identifying the prepared model. The same token will be provided - * when retrieving the prepared model from cache files with - * IDevice::prepareModelFromCache. Tokens should be chosen to have - * a low rate of collision for a particular application. The driver - * cannot detect a collision; a collision will result in a failed - * execution or in a successful execution that produces incorrect - * output values. - * @return status Error status of saveToCache, must be: - * - NONE if saveToCache is performed successfully - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if the driver could not save the - * prepared model or if there is an unspecified error - * - INVALID_ARGUMENT if one of the input arguments is invalid, - * unless the driver decides not to save the prepared model - * without looking at the input arguments - */ - saveToCache(handle modelCache, handle dataCache, - uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token) - generates (ErrorStatus status); }; diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal index f2e02b8ce3..8c577961cc 100644 --- a/neuralnetworks/1.2/types.hal +++ b/neuralnetworks/1.2/types.hal @@ -30,6 +30,11 @@ enum Constant : uint32_t { * The byte size of the cache token. */ BYTE_SIZE_OF_CACHE_TOKEN = 32, + + /** + * The maximum number of files for each type of cache in compilation caching. + */ + MAX_NUMBER_OF_CACHE_FILES = 32, }; enum OperandType : @1.0::OperandType { @@ -182,6 +187,10 @@ enum OperationType : int32_t { * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * * Supported tensor {@link OperandType}: * * {@link OperandType::TENSOR_FLOAT16} (since API level 29) * * {@link OperandType::TENSOR_FLOAT32} @@ -231,7 +240,8 @@ enum OperationType : int32_t { * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. 
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on @@ -257,7 +267,8 @@ enum OperationType : int32_t { * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the implicit * padding scheme, has to be one of the * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. @@ -304,6 +315,7 @@ enum OperationType : int32_t { * Before API level 29, all input tensors of * {@link OperandType::TENSOR_QUANT8_ASYMM} * must have the same scale and zeroPoint as the output tensor. + * Since API level 29, zero-sized tensors are supported. * * n: An {@link OperandType::INT32} scalar, specifying the * concatenation axis. * @@ -361,7 +373,8 @@ enum OperationType : int32_t { * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. + * specifying the input. Since API level 29, zero batches is supported + * for this tensor. * * 1: A 4-D tensor, of shape * [depth_out, filter_height, filter_width, depth_in], specifying the * filter. For tensor of type @@ -408,7 +421,8 @@ enum OperationType : int32_t { * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. + * specifying the input. Since API level 29, zero batches is supported + * for this tensor. * * 1: A 4-D tensor, of shape * [depth_out, filter_height, filter_width, depth_in], specifying the * filter. For tensor of type @@ -450,11 +464,10 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. For output tensor of - * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition - * must be satisfied: output_scale > input_scale * filter_scale (for - * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * this condition must be true for all filter scales). + * [batches, out_height, out_width, depth_out]. Before API level 29, + * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the + * following condition must be satisfied: + * output_scale > input_scale * filter_scale * * Available since API level 27. */ @@ -600,11 +613,10 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. For output tensor of - * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition - * must be satisfied: output_scale > input_scale * filter_scale (for - * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * this condition must be true for all filter scales). + * [batches, out_height, out_width, depth_out]. Before API level 29, + * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the + * following condition must be satisfied: + * output_scale > input_scale * filter_scale * * Available since API level 27. */ @@ -672,7 +684,7 @@ enum OperationType : int32_t { * Supported tensor rank: up to 4 * * Inputs: - * * 0: A tensor. + * * 0: A tensor. Since API level 29, this tensor may be zero-sized. * * Outputs: * * 0: A tensor with the same shape as input0. 
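==== Editor's note (illustrative, not part of the patch) ==== The hunks above relax the quantized convolution constraint: output_scale > input_scale * filter_scale is now only required before API level 29. A minimal C++ sketch of how a driver's validation might gate that legacy check on the HAL version; the HalVersion enum and function name here are hypothetical, not AOSP symbols.

    // Hypothetical validation helper for the relaxed CONV_2D / DEPTHWISE_CONV_2D
    // quantization rule described above.
    enum class HalVersion { V1_0, V1_1, V1_2 };

    bool convOutputScaleAllowed(float inputScale, float filterScale,
                                float outputScale, HalVersion version) {
        // Before HAL 1.2 (API level 29), the output scale had to exceed the
        // product of input and filter scales; since 1.2 any scale is accepted.
        if (version < HalVersion::V1_2) {
            return outputScale > inputScale * filterScale;
        }
        return true;
    }

==== End note ====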
@@ -765,7 +777,8 @@ enum OperationType : int32_t { * [batch_size, input_size], where "input_size" corresponds to the * number of inputs to the layer, matching the second dimension of * weights, and "batch_size" is calculated by dividing the number of - * elements by "input_size". + * elements by "input_size". Since API level 29, zero batch_size is + * supported for this tensor. * * 1: A 2-D tensor, specifying the weights, of shape * [num_units, input_size], where "num_units" corresponds to the number * of output nodes. @@ -780,10 +793,10 @@ enum OperationType : int32_t { * invoke on the result. * * Outputs: - * * 0: The output tensor, of shape [batch_size, num_units]. For output - * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following - * condition must be satisfied: - * output_scale > input_scale * filter_scale. + * * 0: The output tensor, of shape [batch_size, num_units]. Before API + * level 29, For output tensor of {@link + * OperandType::TENSOR_QUANT8_ASYMM}, the following condition must be + * satisfied: output_scale > input_scale * filter_scale. * * Available since API level 27. */ @@ -861,6 +874,7 @@ enum OperationType : int32_t { * Supported tensor {@link OperandType}: * * {@link OperandType::TENSOR_FLOAT16} (since API level 29) * * {@link OperandType::TENSOR_FLOAT32} + * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29) * * Supported tensor rank: up to 4 * Tensors with rank less than 4 are only supported since API level 29. @@ -875,6 +889,8 @@ enum OperationType : int32_t { * * Outputs: * * 0: A tensor of the same {@link OperandType} and same shape as input0. + * For {@link OperandType::TENSOR_QUANT8_ASYMM}, + * the scale must be 1.f / 128 and the zeroPoint must be 128. * * Available since API level 27. */ @@ -905,7 +921,8 @@ enum OperationType : int32_t { * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link OperandType::INT32} scalar, specifying the padding on @@ -931,7 +948,8 @@ enum OperationType : int32_t { * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the implicit * padding scheme, has to be one of the * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. @@ -1021,7 +1039,8 @@ enum OperationType : int32_t { * Supported tensor rank: up to 4. * * Inputs: - * * 0: A tensor, specifying the input. + * * 0: A tensor, specifying the input. Since API level 29, this tensor may + * be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. @@ -1333,7 +1352,8 @@ enum OperationType : int32_t { * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link OperandType::INT32} scalar, specifying the padding on @@ -1359,7 +1379,8 @@ enum OperationType : int32_t { * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. 
Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the implicit * padding scheme, has to be one of the * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. @@ -1406,6 +1427,10 @@ enum OperationType : int32_t { * * {@link OperandType::TENSOR_FLOAT32} * * {@link OperandType::TENSOR_QUANT8_ASYMM} * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * * Supported tensor rank: up to 4 * * Inputs: @@ -1441,7 +1466,8 @@ enum OperationType : int32_t { * Supported tensor rank: up to 4. * * Inputs: - * * 0: A tensor, specifying the input. + * * 0: A tensor, specifying the input. Since API level 29, this tensor may + * be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. @@ -1465,7 +1491,8 @@ enum OperationType : int32_t { * Supported tensor rank: up to 4. * * Inputs: - * * 0: A tensor, specifying the input. + * * 0: A tensor, specifying the input. Since API level 29, this tensor may + * be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. @@ -1489,7 +1516,8 @@ enum OperationType : int32_t { * Supported tensor rank: up to 4. * * Inputs: - * * 0: A tensor, specifying the input. + * * 0: A tensor, specifying the input. Since API level 29, this tensor may + * be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. @@ -1541,9 +1569,12 @@ enum OperationType : int32_t { * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * - * Inputs: + * Both resizing by shape and resizing by scale are supported. + * + * Inputs (resizing by shape): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Since API level 29, zero batches is supported for this + * tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the output * height of the output tensor. * * 2: An {@link OperandType::INT32} scalar, specifying the output @@ -1552,6 +1583,24 @@ enum OperationType : int32_t { * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. * + * Inputs (resizing by scale, since API level 29): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. Zero batches is supported for this tensor. + * * 1: A scalar, specifying height_scale, the scaling factor of the height + * dimension from the input tensor to the output tensor. The output + * height is calculated as new_height = floor(height * height_scale). + * The scalar must be of {@link OperandType::FLOAT16} if input0 is + * of {@link OperandType::TENSOR_FLOAT16} and of + * {@link OperandType::FLOAT32} otherwise. + * * 2: A scalar, specifying width_scale, the scaling factor of the width + * dimension from the input tensor to the output tensor. The output + * width is calculated as new_width = floor(width * width_scale). + * The scalar must be of {@link OperandType::FLOAT16} if input0 is + * of {@link OperandType::TENSOR_FLOAT16} and of + * {@link OperandType::FLOAT32} otherwise. + * * 3: An optional {@link OperandType::BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, new_height, new_width, depth]. 
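==== Editor's note (illustrative, not part of the patch) ==== The new resize-by-scale inputs above compute output extents with floor(). A small self-contained C++ sketch of the arithmetic; the helper name is ours, not an AOSP symbol.

    #include <cmath>
    #include <cstdint>

    // new_height = floor(height * height_scale); the width is computed the same way.
    uint32_t resizedExtent(uint32_t extent, float scale) {
        return static_cast<uint32_t>(std::floor(extent * scale));
    }
    // Example: resizedExtent(10, 1.6f) == 16 and resizedExtent(7, 0.5f) == 3.

==== End note ====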
@@ -1637,7 +1686,8 @@ enum OperationType : int32_t { * Tensors with rank other than 2 or 4 are only supported since API level 29. * * Inputs: - * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. + * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since + * API level 29, this tensor may be zero-sized. * * 1: A scalar, specifying the positive scaling factor for the exponent, * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of @@ -1795,7 +1845,8 @@ enum OperationType : int32_t { * Supported tensor rank: up to 4. * * Inputs: - * * 0: A tensor, specifying the input. + * * 0: A tensor, specifying the input. Since API level 29, this tensor may + * be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0. @@ -1862,6 +1913,10 @@ enum OperationType : int32_t { * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * * Supported tensor {@link OperandType}: * * {@link OperandType::TENSOR_FLOAT16} (since API level 29) * * {@link OperandType::TENSOR_FLOAT32} @@ -2095,6 +2150,10 @@ enum OperationType : int32_t { * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * * Supported tensor {@link OperandType}: * * {@link OperandType::TENSOR_FLOAT16} (since API level 29) * * {@link OperandType::TENSOR_FLOAT32} @@ -2135,6 +2194,7 @@ enum OperationType : int32_t { * * Inputs: * * 0: An n-D tensor, specifying the tensor to be transposed. + * Since API level 29, this tensor may be zero-sized. * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32}, * the permutation of the dimensions of the input tensor. * @@ -2231,7 +2291,8 @@ enum OperationType : int32_t { * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the * bounding box proposals, each line with format [x1, y1, x2, y2]. * For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM}, - * the zeroPoint must be 0 and the scale must be 0.125. + * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois + * is supported for this tensor. * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the * bounding box delta for each region of interest and each class. The * bounding box deltas are organized in the following order @@ -2240,10 +2301,12 @@ enum OperationType : int32_t { * and height, dw and dh is the log-scale relative correction factor * for the width and height. For input0 of type * {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be - * of {@link OperandType::TENSOR_QUANT8_ASYMM}. + * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is + * supported for this tensor. * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. + * the same batch index are grouped together. Zero num_rois is + * supported for this tensor. * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of * each image in the batch, each line with format * [image_height, image_width]. 
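==== Editor's note (illustrative, not part of the patch) ==== Several hunks above add the rule that a zero-sized dimension broadcasts only against 0 or 1 and forces the corresponding output dimension to zero. A self-contained C++ sketch of that shape computation under the usual right-aligned broadcasting convention; the names are ours.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // Returns the broadcast output shape, or std::nullopt if the shapes are
    // incompatible. Dimensions are compared right-aligned; a dimension of 1
    // broadcasts, and a dimension of 0 is compatible only with 0 or 1 and
    // makes the output dimension 0.
    std::optional<std::vector<uint32_t>> broadcastShape(std::vector<uint32_t> a,
                                                        std::vector<uint32_t> b) {
        if (a.size() < b.size()) std::swap(a, b);
        b.insert(b.begin(), a.size() - b.size(), 1);  // left-pad the shorter shape
        std::vector<uint32_t> out(a.size());
        for (std::size_t i = 0; i < a.size(); ++i) {
            if (a[i] != b[i] && a[i] != 1 && b[i] != 1) return std::nullopt;
            out[i] = (a[i] == 0 || b[i] == 0) ? 0 : std::max(a[i], b[i]);
        }
        return out;
    }

==== End note ====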
@@ -2272,113 +2335,113 @@ enum OperationType : int32_t { * Inputs: * * 0: The input. * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, output_size] - * If batch-major: [batch_size, max_time, output_size] + * If time-major: [max_time, batch_size, input_size] + * If batch-major: [batch_size, max_time, input_size] * where "max_time" is the number of timesteps (sequence length), * "batch_size" corresponds to the batching dimension, and * "input_size" is the size of the input. * * 1: The forward input-to-input weights. Optional. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of cell units. + * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units” + * corresponds to the number of forward cell units. * * 2: The forward input-to-forget weights. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 3: The forward input-to-cell weights. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 4: The forward input-to-output weights. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 5: The forward recurrent-to-input weights. Optional. - * A 2-D tensor of shape [num_units, output_size], where “output_size” - * corresponds to either the number of cell units (i.e., “num_units”), - * or the second dimension of the “projection_weights”, if defined. + * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” + * corresponds to either the number of cell units (i.e., fw_num_units), + * or the second dimension of the “fw_projection_weights”, if defined. * * 6: The forward recurrent-to-forget weights. - * A 2-D tensor of shape [num_units, output_size]. + * A 2-D tensor of shape [fw_num_units, fw_output_size]. * * 7: The forward recurrent-to-cell weights. - * A 2-D tensor of shape [num_units, output_size]. + * A 2-D tensor of shape [fw_num_units, fw_output_size]. * * 8: The forward recurrent-to-output weights. - * A 2-D tensor of shape [num_units, output_size]. + * A 2-D tensor of shape [fw_num_units, fw_output_size]. * * 9: The forward cell-to-input weights. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 10: The forward cell-to-forget weights. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 11: The forward cell-to-output weights. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 12: The forward input gate bias. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 13: The forward forget gate bias. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 14: The forward cell gate bias. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 15: The forward output gate bias. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [fw_num_units]. * * 16: The forward projection weights. Optional. - * A 2-D tensor of shape [output_size, num_units]. + * A 2-D tensor of shape [fw_output_size, fw_num_units]. * * 17: The forward projection bias. Optional. - * A 1-D tensor of shape [output_size]. + * A 1-D tensor of shape [fw_output_size]. * * 18: The backward input-to-input weights. Optional. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of cell units. 
+ * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” + * corresponds to the number of backward cell units. * * 19: The backward input-to-forget weights. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 20: The backward input-to-cell weights. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 21: The backward input-to-output weights. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 22: The backward recurrent-to-input weights. Optional. - * A 2-D tensor of shape [num_units, output_size], where “output_size” - * corresponds to either the number of cell units (i.e., “num_units”), - * or the second dimension of the “projection_weights”, if defined. + * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” + * corresponds to either the number of cell units (i.e., “bw_num_units”), + * or the second dimension of the “bw_projection_weights”, if defined. * * 23: The backward recurrent-to-forget weights. - * A 2-D tensor of shape [num_units, output_size]. + * A 2-D tensor of shape [bw_num_units, bw_output_size]. * * 24: The backward recurrent-to-cell weights. - * A 2-D tensor of shape [num_units, output_size]. + * A 2-D tensor of shape [bw_num_units, bw_output_size]. * * 25: The backward recurrent-to-output weights. - * A 2-D tensor of shape [num_units, output_size]. + * A 2-D tensor of shape [bw_num_units, bw_output_size]. * * 26: The backward cell-to-input weights. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 27: The backward cell-to-forget weights. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 28: The backward cell-to-output weights. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 29: The backward input gate bias. Optional. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 30: The backward forget gate bias. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 31: The backward cell gate bias. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 32: The backward output gate bias. - * A 1-D tensor of shape [num_units]. + * A 1-D tensor of shape [bw_num_units]. * * 33: The backward projection weights. Optional. - * A 2-D tensor of shape [output_size, num_units]. + * A 2-D tensor of shape [bw_output_size, bw_num_units]. * * 34: The backward projection bias. Optional. - * A 1-D tensor of shape [output_size]. + * A 1-D tensor of shape [bw_output_size]. * * 35: The forward input activation state. - * A 2-D tensor of shape [batch_size, output_size]. + * A 2-D tensor of shape [batch_size, fw_output_size]. * * 36: The forward input cell state. - * A 2-D tensor of shape [batch_size, num_units]. + * A 2-D tensor of shape [batch_size, fw_num_units]. * * 37: The backward input activation state. - * A 2-D tensor of shape [batch_size, output_size]. + * A 2-D tensor of shape [batch_size, bw_output_size]. * * 38: The backward input cell state. - * A 2-D tensor of shape [batch_size, num_units]. + * A 2-D tensor of shape [batch_size, bw_num_units]. * * 39: The auxiliary input. Optional. * A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size” * corresponds to the batching dimension, and “input_size” is the size * of the input.
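A brief aside on the state operands just listed (the input list resumes with input 40 below): inputs 35-38 are sized by the forward and backward dimensions independently, so the forward states use fw_output_size/fw_num_units and the backward states use bw_output_size/bw_num_units. A hedged cross-check in C++, with hypothetical names (not part of the HAL or the VTS suite):

```cpp
// Expected 2-D shapes of the state inputs (35-38), per the list above.
#include <array>
#include <cstdint>

using Shape2D = std::array<uint32_t, 2>;

struct LstmStateShapes {
    Shape2D fwActivationState;  // input 35: [batch_size, fw_output_size]
    Shape2D fwCellState;        // input 36: [batch_size, fw_num_units]
    Shape2D bwActivationState;  // input 37: [batch_size, bw_output_size]
    Shape2D bwCellState;        // input 38: [batch_size, bw_num_units]
};

LstmStateShapes expectedStateShapes(uint32_t batchSize, uint32_t fwNumUnits,
                                    uint32_t fwOutputSize, uint32_t bwNumUnits,
                                    uint32_t bwOutputSize) {
    // Forward states depend only on the forward sizes, backward states only
    // on the backward sizes; batch_size is shared by all four.
    return {Shape2D{batchSize, fwOutputSize}, Shape2D{batchSize, fwNumUnits},
            Shape2D{batchSize, bwOutputSize}, Shape2D{batchSize, bwNumUnits}};
}
```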
* * 40: The forward auxiliary input-to-input weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 41: The forward auxiliary input-to-forget weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 42: The forward auxiliary input-to-cell weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 43: The forward auxiliary input-to-output weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [fw_num_units, input_size]. * * 44: The backward auxiliary input-to-input weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 45: The backward auxiliary input-to-forget weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 46: The backward auxiliary input-to-cell weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 47: The backward auxiliary input-to-output weights. Optional. - * A 2-D tensor of shape [num_units, input_size]. + * A 2-D tensor of shape [bw_num_units, input_size]. * * 48: The activation function. * A value indicating the activation function: *
@@ -2410,16 +2473,46 @@ enum OperationType : int32_t { * * 52: time_major * An {@link OperandType::BOOL} scalar specifying the shape format * of input and output tensors. + * * 53: The forward input layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at input gate. + * * 54: The forward forget layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at forget gate. + * * 55: The forward cell layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at cell gate. + * * 56: The forward output layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at output gate. + * * 57: The backward input layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at input gate. + * * 58: The backward forget layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at forget gate. + * * 59: The backward cell layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at cell gate. + * * 60: The backward output layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at output gate. * * Outputs: * * 0: The forward output. * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, output_size] - * If batch-major: [batch_size, max_time, output_size] + * If time-major and not merge_outputs: + * [max_time, batch_size, fw_output_size] + * If time-major and merge_outputs: + * [max_time, batch_size, fw_output_size + bw_output_size] + * If batch-major and not merge_outputs: + * [batch_size, max_time, fw_output_size] + * If batch-major and merge_outputs: + * [batch_size, max_time, fw_output_size + bw_output_size] * * 1: The backward output. Unused if merge_outputs is true. * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, output_size] - * If batch-major: [batch_size, max_time, output_size] + * If time-major: [max_time, batch_size, bw_output_size] + * If batch-major: [batch_size, max_time, bw_output_size] * * Available since API level 29. */ @@ -2547,10 +2640,17 @@ enum OperationType : int32_t { /** * Greedily selects a subset of bounding boxes in descending order of score. * - * This op applies hard NMS algorithm to each class. In each loop of - * execution, the box with maximum score gets selected, and any boxes with - * the intersection-over-union (IOU) greater than a threshold are removed - * from the pending set. + * This op applies an NMS algorithm to each class. In each loop of execution, + * the box with the maximum score is selected and removed from the pending set. + * The scores of the remaining boxes are lowered according to their + * intersection-over-union (IOU) overlap with the previously selected + * boxes and a specified NMS kernel method. Any boxes with a score below + * a threshold are removed from the pending set.
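The selection loop just described can be sketched in a few lines; the outline below is illustrative C++ only (hypothetical types, with per-class and per-batch grouping and the maximum-box limit omitted), and the kernel formulas are the three enumerated immediately below:

```cpp
// Sketch of one soft-NMS pass over a single class's pending boxes.
#include <algorithm>
#include <cmath>
#include <vector>

struct Box { float score; /* box coordinates omitted */ };
float iou(const Box& a, const Box& b);  // standard intersection-over-union

void softNms(std::vector<Box> pending, float nmsScoreThreshold,
             float iouThreshold, float sigma,
             int kernel,  // 0: hard, 1: linear, 2: gaussian
             std::vector<Box>* selected) {
    while (!pending.empty()) {
        // The box with the maximum score is selected and removed.
        auto top = std::max_element(pending.begin(), pending.end(),
                [](const Box& a, const Box& b) { return a.score < b.score; });
        Box best = *top;
        pending.erase(top);
        selected->push_back(best);
        // Lower the remaining scores with the chosen kernel, then drop any
        // box whose score falls below the threshold.
        for (Box& b : pending) {
            float u = iou(best, b);
            if (kernel == 0)      b.score *= (u < iouThreshold) ? 1.0f : 0.0f;
            else if (kernel == 1) b.score *= (u < iouThreshold) ? 1.0f : 1.0f - u;
            else                  b.score *= std::exp(-u * u / sigma);
        }
        pending.erase(std::remove_if(pending.begin(), pending.end(),
                              [&](const Box& b) { return b.score < nmsScoreThreshold; }),
                      pending.end());
    }
}
```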
+ * + * Three NMS kernels are supported: + * * Hard: score_new = score_old * (1 if IoU < threshold else 0) + * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU) + * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma) * * Axis-aligned bounding boxes are represented by its upper-left corner * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid @@ -2564,25 +2664,34 @@ enum OperationType : int32_t { * Inputs: * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score * of each bounding box proposal. The boxes are grouped by batches in the - * first dimension. + * first dimension. Zero num_rois is supported for this tensor. * * 1: A 2-D Tensor specifying the bounding boxes of shape * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2]. * The boxes are grouped by batches in the first dimension. The sequential * order of the boxes corresponds with input0. For input0 of type * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of * {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and - * scale of 0.125. + * scale of 0.125. Zero num_rois is supported for this tensor. * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape * [num_rois], specifying the batch index of each box. Boxes with * the same batch index are grouped together. * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes * with scores lower than the threshold are filtered before sending * to the NMS algorithm. - * * 4: An {@link OperandType::FLOAT32} scalar, specifying the IoU - * threshold. - * * 5: An {@link OperandType::INT32} scalar, specifying the maximum + * * 4: An {@link OperandType::INT32} scalar, specifying the maximum * number of selected bounding boxes for each image. Set to a negative * value for unlimited number of output bounding boxes. + * * 5: An {@link OperandType::INT32} scalar, specifying the NMS + * kernel method; options are 0:hard, 1:linear, 2:gaussian. + * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU + * threshold used by the hard and linear NMS kernels. This field is + * ignored if the gaussian kernel is selected. + * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma used + * by the gaussian NMS kernel. This field is ignored if the gaussian + * kernel is not selected. + * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold. + * Boxes with scores lower than the threshold are dropped during the + * score updating phase in soft NMS. * * Outputs: * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape @@ -2600,8 +2709,8 @@ enum OperationType : int32_t { * [num_output_rois], specifying the class of each output box. The * sequential order of the boxes corresponds with output0. * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. + * [num_output_rois], specifying the batch index of each box. Boxes + * with the same batch index are grouped together. * * Available since API level 29. */ @@ -2937,8 +3046,8 @@ enum OperationType : int32_t { * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the * scale must be 0.125 and the zero point must be 0. * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. + * [num_output_rois], specifying the batch index of each box.
Boxes + * with the same batch index are grouped together. * * Available since API level 29. */ @@ -3122,11 +3231,7 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. For output tensor of - * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition - * must be satisfied: output_scale > input_scale * filter_scale (for - * filter tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * this condition must be true for all filter scales). + * [batches, out_height, out_width, depth_out]. * * Available since API level 29. */ @@ -3608,7 +3713,7 @@ enum OperationType : int32_t { * Supported tensor rank: from 1 * * Inputs: - * * 0: A tensor. + * * 0: A tensor, may be zero-sized. * * Outputs: * * 0: The output tensor of same shape as input0, but with @@ -3940,10 +4045,12 @@ enum OperationType : int32_t { * the regions of interest, each line with format [x1, y1, x2, y2]. * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, - * with zeroPoint of 0 and scale of 0.125. + * with zeroPoint of 0 and scale of 0.125. Zero num_rois is + * supported for this tensor. * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. + * the same batch index are grouped together. Zero num_rois is + * supported for this tensor. * * 3: An {@link OperandType::INT32} scalar, specifying the output * height of the output tensor. * * 4: An {@link OperandType::INT32} scalar, specifying the output @@ -4108,7 +4215,7 @@ enum OperationType : int32_t { * Supported tensor rank: from 1 * * Inputs: - * * 0: An n-D tensor to take slice from. + * * 0: An n-D tensor to take slice from, may be zero-sized. * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying * the beginning indices of the slice in each dimension. * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying @@ -4331,11 +4438,7 @@ enum OperationType : int32_t { * * Outputs: * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. For output tensor of - * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition - * must be satisfied: output_scale > input_scale * filter_scale (for - * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * this condition must be true for all filter scales). + * [batches, out_height, out_width, depth_out]. * * Available since API level 29. */ @@ -4367,9 +4470,9 @@ enum OperationType : int32_t { * Inputs: * * 0: The input (\f$x_t\f$). * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, output_size] - * If batch-major: [batch_size, max_time, output_size] - * where “max_size” is the number of timesteps (sequence length), + * If time-major: [max_time, batch_size, input_size] + * If batch-major: [batch_size, max_time, input_size] + * where “max_time” is the number of timesteps (sequence length), * “batch_size” corresponds to the batching dimension, and * “input_size” is the size of the input. * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. @@ -4429,16 +4532,16 @@ enum OperationType : int32_t { * projection layer, such that values are bound within * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. * * 23:Time-major if true, batch-major if false. - * * 24:The input layer normalization weights. 
+ * * 24:The input layer normalization weights. Optional. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at input gate. - * * 25:The forget layer normalization weights. + * * 25:The forget layer normalization weights. Optional. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at forget gate. - * * 26:The cell layer normalization weights. + * * 26:The cell layer normalization weights. Optional. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at cell gate. - * * 27:The output layer normalization weights. + * * 27:The output layer normalization weights. Optional. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at output gate. * @@ -4526,9 +4629,11 @@ enum OperationType : int32_t { * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. * - * Inputs: + * Both resizing by shape and resizing by scale are supported. + * + * Inputs (resizing by shape): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. + * the input. Zero batches is supported for this tensor. * * 1: An {@link OperandType::INT32} scalar, specifying the output * height of the output tensor. * * 2: An {@link OperandType::INT32} scalar, specifying the output @@ -4536,6 +4641,24 @@ enum OperationType : int32_t { * * 3: An {@link OperandType::BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * + * Inputs (resizing by scale): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. Zero batches is supported for this tensor. + * * 1: A scalar, specifying height_scale, the scaling factor of the height + * dimension from the input tensor to the output tensor. The output + * height is calculated as new_height = floor(height * height_scale). + * The scalar must be of {@link OperandType::FLOAT16} if input0 is + * of {@link OperandType::TENSOR_FLOAT16} and of + * {@link OperandType::FLOAT32} otherwise. + * * 2: A scalar, specifying width_scale, the scaling factor of the width + * dimension from the input tensor to the output tensor. The output + * width is calculated as new_width = floor(width * width_scale). + * The scalar must be of {@link OperandType::FLOAT16} if input0 is + * of {@link OperandType::TENSOR_FLOAT16} and of + * {@link OperandType::FLOAT32} otherwise. + * * 3: An {@link OperandType::BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, new_height, new_width, depth]. @@ -4592,6 +4715,39 @@ enum DeviceType : int32_t { ACCELERATOR = 4, }; +/** + * The capabilities of a driver. + * + * Performance of an operation comes from the type of its first operand. + * This represents performance for non extension operand types. + */ +struct Capabilities { + /** + * Driver performance when operating on float32 data but performing + * calculations with range and/or precision as low as that of the IEEE + * 754 16-bit floating-point format. + */ + PerformanceInfo relaxedFloat32toFloat16PerformanceScalar; + PerformanceInfo relaxedFloat32toFloat16PerformanceTensor; + + /** + * Driver performance when operating on a particular data type. + * In the case of float32 data, this is used when the calculations + * are not relaxed. 
+ */ + struct OperandPerformance { + OperandType type; + PerformanceInfo info; + }; + + /** + * Performance by operand type. Must be sorted by OperandType. + * If a particular OperandType is not present in operandPerformance, + * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }. + */ + vec<OperandPerformance> operandPerformance; +}; + /** * Describes one operation of the model's graph. */ diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp index 365a750fdb..5c269df275 100644 --- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp @@ -25,7 +25,7 @@ namespace V1_2 { namespace vts { namespace functional { -using V1_1::Capabilities; +using V1_0::PerformanceInfo; // create device test TEST_F(NeuralnetworksHidlTest, CreateDevice) {} @@ -37,6 +37,31 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) { EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status)); } +// initialization +TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { + using OperandPerformance = Capabilities::OperandPerformance; + Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status, + const Capabilities& capabilities) { + EXPECT_EQ(ErrorStatus::NONE, status); + + auto isPositive = [](const PerformanceInfo& perf) { + return perf.execTime > 0.0f && perf.powerUsage > 0.0f; + }; + + EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar)); + EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor)); + const auto& opPerf = capabilities.operandPerformance; + EXPECT_TRUE(std::all_of( + opPerf.begin(), opPerf.end(), + [isPositive](const OperandPerformance& a) { return isPositive(a.info); })); + EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(), + [](const OperandPerformance& a, const OperandPerformance& b) { + return a.type < b.type; + })); + }); + EXPECT_TRUE(ret.isOk()); +} + // device version test TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) { Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) { @@ -77,10 +102,15 @@ TEST_F(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) { EXPECT_TRUE(ret.isOk()); } -// isCachingSupported test -TEST_F(NeuralnetworksHidlTest, IsCachingSupported) { - Return<void> ret = device->isCachingSupported( - [](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); }); +// getNumberOfCacheFilesNeeded test +TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) { + Return<void> ret = device->getNumberOfCacheFilesNeeded( + [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { + EXPECT_EQ(ErrorStatus::NONE, status); + EXPECT_LE(numModelCache, + static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES)); + EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES)); + }); EXPECT_TRUE(ret.isOk()); } } // namespace functional diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp index 00989e5bdc..167fc096ce 100644 --- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp +++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp @@ -54,29 +54,39 @@ namespace { [[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape; [[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape; -enum class AccessMode { READ_ONLY, WRITE_ONLY }; +enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY }; -void createCacheHandle(const std::vector<std::string>& files, AccessMode mode, - hidl_handle* handle) { - std::vector<int> fds; - for (const auto& file : files) { - int fd; - if (mode == AccessMode::READ_ONLY) { - fd = open(file.c_str(), O_RDONLY); - } else if (mode == AccessMode::WRITE_ONLY) { - fd = open(file.c_str(), O_WRONLY | O_TRUNC | O_CREAT, S_IRUSR | S_IWUSR); - } else { - FAIL(); +// Creates cache handles based on provided file groups. +// The outer vector corresponds to handles and the inner vector is for fds held by each handle. +void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups, + const std::vector<AccessMode>& mode, hidl_vec<hidl_handle>* handles) { + handles->resize(fileGroups.size()); + for (uint32_t i = 0; i < fileGroups.size(); i++) { + std::vector<int> fds; + for (const auto& file : fileGroups[i]) { + int fd; + if (mode[i] == AccessMode::READ_ONLY) { + fd = open(file.c_str(), O_RDONLY); + } else if (mode[i] == AccessMode::WRITE_ONLY) { + fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); + } else if (mode[i] == AccessMode::READ_WRITE) { + fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + } else { + FAIL(); + } + ASSERT_GE(fd, 0); + fds.push_back(fd); } - ASSERT_GE(fd, 0); - fds.push_back(fd); + native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0); + ASSERT_NE(cacheNativeHandle, nullptr); + std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]); + (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true); } - native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0); - ASSERT_NE(cacheNativeHandle, nullptr); - for (uint32_t i = 0; i < fds.size(); i++) { - cacheNativeHandle->data[i] = fds[i]; - } - handle->setTo(cacheNativeHandle, /*shouldOwn=*/true); +} + +void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups, AccessMode mode, + hidl_vec<hidl_handle>* handles) { + createCacheHandles(fileGroups, std::vector<AccessMode>(fileGroups.size(), mode), handles); +} } // namespace @@ -88,38 +98,43 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { NeuralnetworksHidlTest::SetUp(); ASSERT_NE(device.get(), nullptr); - // Create cache directory. The cache directory and cache files are always created to test - // the behavior of prepareModelFromCache, even when caching is not supported. + // Create cache directory. The cache directory and a temporary cache file are always created + // to test the behavior of prepareModelFromCache, even when caching is not supported. char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX"; char* cacheDir = mkdtemp(cacheDirTemp); ASSERT_NE(cacheDir, nullptr); mCacheDir = cacheDir; + mCacheDir.push_back('/'); - // Create empty cache files. - mCache1 = mCacheDir + "/cache1"; - mCache2 = mCacheDir + "/cache2"; - mCache3 = mCacheDir + "/cache3"; - // A dummy handle, use AccessMode::WRITE_ONLY for createCacheHandle to create files. - hidl_handle handle; - createCacheHandle({mCache1, mCache2, mCache3}, AccessMode::WRITE_ONLY, &handle); - - // Check if caching is supported. - bool isCachingSupported; - Return<void> ret = device->isCachingSupported( - [&isCachingSupported](ErrorStatus status, bool supported) { + Return<void> ret = device->getNumberOfCacheFilesNeeded( + [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { EXPECT_EQ(ErrorStatus::NONE, status); - isCachingSupported = supported; + mNumModelCache = numModelCache; + mNumDataCache = numDataCache; }); EXPECT_TRUE(ret.isOk()); - if (isCachingSupported) { - mIsCachingSupported = true; - } else { + mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0; + + // Create empty cache files.
+ mTmpCache = mCacheDir + "tmp"; + for (uint32_t i = 0; i < mNumModelCache; i++) { + mModelCache.push_back({mCacheDir + "model" + std::to_string(i)}); + } + for (uint32_t i = 0; i < mNumDataCache; i++) { + mDataCache.push_back({mCacheDir + "data" + std::to_string(i)}); + } + // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files. + hidl_vec<hidl_handle> modelHandle, dataHandle, tmpHandle; + createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle); + createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle); + createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle); + + if (!mIsCachingSupported) { LOG(INFO) << "NN VTS: Early termination of test because vendor service does not " "support compilation caching."; std::cout << "[ ] Early termination of test because vendor service does not " "support compilation caching." << std::endl; - mIsCachingSupported = false; } } @@ -127,22 +142,49 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { // The tmp directory is only removed when the driver reports caching not supported, // otherwise it is kept for debugging purpose. if (!mIsCachingSupported) { - remove(mCache1.c_str()); - remove(mCache2.c_str()); - remove(mCache3.c_str()); + remove(mTmpCache.c_str()); rmdir(mCacheDir.c_str()); } NeuralnetworksHidlTest::TearDown(); } - void saveModelToCache(sp<IPreparedModel> preparedModel, const hidl_handle& cache1, - const hidl_handle& cache2, ErrorStatus* status) { - // Save IPreparedModel to cache. + void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache, + const hidl_vec<hidl_handle>& dataCache, bool* supported, + sp<V1_2::IPreparedModel>* preparedModel = nullptr) { + if (preparedModel != nullptr) *preparedModel = nullptr; + + // See if service can handle model. + bool fullySupportsModel = false; + Return<void> supportedCall = device->getSupportedOperations_1_2( + model, + [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) { + ASSERT_EQ(ErrorStatus::NONE, status); + ASSERT_EQ(supported.size(), model.operations.size()); + fullySupportsModel = std::all_of(supported.begin(), supported.end(), + [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedCall.isOk()); + *supported = fullySupportsModel; + if (!fullySupportsModel) return; + + // Launch prepare model. + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken); - Return<ErrorStatus> saveToCacheStatus = - preparedModel->saveToCache(cache1, cache2, cacheToken); - ASSERT_TRUE(saveToCacheStatus.isOk()); - *status = static_cast<ErrorStatus>(saveToCacheStatus); + Return<ErrorStatus> prepareLaunchStatus = + device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache, + dataCache, cacheToken, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE); + + // Retrieve prepared model. + preparedModelCallback->wait(); + ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE); + if (preparedModel != nullptr) { + *preparedModel = + V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel()) + .withDefault(nullptr); + } } bool checkEarlyTermination(ErrorStatus status) { @@ -157,14 +199,27 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { return false; } - void prepareModelFromCache(const hidl_handle& cache1, const hidl_handle& cache2, + bool checkEarlyTermination(bool supported) { + if (!supported) { + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "prepare model that it does not support." + << std::endl; + return true; + } + return false; + } + + void prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache, + const hidl_vec<hidl_handle>& dataCache, sp<V1_2::IPreparedModel>* preparedModel, ErrorStatus* status) { // Launch prepare model from cache. sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); ASSERT_NE(nullptr, preparedModelCallback.get()); hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken); - Return<ErrorStatus> prepareLaunchStatus = - device->prepareModelFromCache(cache1, cache2, cacheToken, preparedModelCallback); + Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache( + modelCache, dataCache, cacheToken, preparedModelCallback); ASSERT_TRUE(prepareLaunchStatus.isOk()); if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) { *preparedModel = nullptr; @@ -179,49 +234,54 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { .withDefault(nullptr); } + // Absolute path to the temporary cache directory. std::string mCacheDir; - std::string mCache1; - std::string mCache2; - std::string mCache3; + + // Groups of file paths for model and data cache in the tmp cache directory, initialized with + // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles + // and the inner vector is for fds held by each handle. + std::vector<std::vector<std::string>> mModelCache; + std::vector<std::vector<std::string>> mDataCache; + + // A separate temporary file path in the tmp cache directory. + std::string mTmpCache; + uint8_t mToken[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {}; - bool mIsCachingSupported; + uint32_t mNumModelCache; + uint32_t mNumDataCache; + uint32_t mIsCachingSupported; }; TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) { // Create test HIDL model and compile. Model testModel = createTestModel(); sp<V1_2::IPreparedModel> preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (!mIsCachingSupported) { - EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE); - } else { - if (checkEarlyTermination(status)) return; - ASSERT_EQ(status, ErrorStatus::NONE); - } + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } // Retrieve preparedModel from cache.
{ preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (!mIsCachingSupported) { ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); ASSERT_EQ(preparedModel, nullptr); return; + } else if (checkEarlyTermination(status)) { + ASSERT_EQ(preparedModel, nullptr); + return; } else { ASSERT_EQ(status, ErrorStatus::NONE); ASSERT_NE(preparedModel, nullptr); @@ -238,41 +298,54 @@ TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) { // Create test HIDL model and compile. Model testModel = createTestModel(); sp<V1_2::IPreparedModel> preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (!mIsCachingSupported) { - EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE); - } else { - if (checkEarlyTermination(status)) return; - ASSERT_EQ(status, ErrorStatus::NONE); + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + uint8_t dummyBytes[] = {0, 0}; + // Write a dummy integer to the cache. + // The driver should be able to handle non-empty cache and non-zero fd offset. + for (uint32_t i = 0; i < modelCache.size(); i++) { + ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes, + sizeof(dummyBytes)), + sizeof(dummyBytes)); } + for (uint32_t i = 0; i < dataCache.size(); i++) { + ASSERT_EQ( + write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)), + sizeof(dummyBytes)); + } + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } // Retrieve preparedModel from cache. { preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); uint8_t dummyByte = 0; - // Advance offset by one byte. - ASSERT_GE(read(cache1.getNativeHandle()->data[0], &dummyByte, 1), 0); - ASSERT_GE(read(cache2.getNativeHandle()->data[0], &dummyByte, 1), 0); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + // Advance the offset of each handle by one byte. + // The driver should be able to handle non-zero fd offset. + for (uint32_t i = 0; i < modelCache.size(); i++) { + ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0); + } + for (uint32_t i = 0; i < dataCache.size(); i++) { + ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0); + } + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (!mIsCachingSupported) { ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); ASSERT_EQ(preparedModel, nullptr); return; + } else if (checkEarlyTermination(status)) { + ASSERT_EQ(preparedModel, nullptr); + return; } else { ASSERT_EQ(status, ErrorStatus::NONE); ASSERT_NE(preparedModel, nullptr); @@ -285,234 +358,512 @@ TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) { /*testDynamicOutputShape=*/false); } +TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) { + // Create test HIDL model and compile. + Model testModel = createTestModel(); + + // Test with number of model cache files greater than mNumModelCache. + { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pass an additional cache file for model cache. + mModelCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.pop_back(); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of model cache files smaller than mNumModelCache. + if (mModelCache.size() > 0) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pop out the last cache file. + auto tmp = mModelCache.back(); + mModelCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.push_back(tmp); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files greater than mNumDataCache. + { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pass an additional cache file for data cache.
+ mDataCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.pop_back(); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files smaller than mNumDataCache. + if (mDataCache.size() > 0) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pop out the last cache file. + auto tmp = mDataCache.back(); + mDataCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.push_back(tmp); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } +} + +TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) { + // Create test HIDL model and compile. + Model testModel = createTestModel(); + + // Save the compilation to cache. + { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Test with number of model cache files greater than mNumModelCache. + { + sp<V1_2::IPreparedModel> preparedModel = nullptr; + ErrorStatus status; + hidl_vec<hidl_handle> modelCache, dataCache; + mModelCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of model cache files smaller than mNumModelCache. + if (mModelCache.size() > 0) { + sp<V1_2::IPreparedModel> preparedModel = nullptr; + ErrorStatus status; + hidl_vec<hidl_handle> modelCache, dataCache; + auto tmp = mModelCache.back(); + mModelCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files greater than mNumDataCache. + { + sp<V1_2::IPreparedModel> preparedModel = nullptr; + ErrorStatus status; + hidl_vec<hidl_handle> modelCache, dataCache; + mDataCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files smaller than mNumDataCache. + if (mDataCache.size() > 0) { + sp<V1_2::IPreparedModel> preparedModel = nullptr; + ErrorStatus status; + hidl_vec<hidl_handle> modelCache, dataCache; + auto tmp = mDataCache.back(); + mDataCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } +} + TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp<V1_2::IPreparedModel> preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; - // cache1 with invalid NumFd. - { + // Go through each handle in model cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumModelCache; i++) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pass an invalid number of fds for handle i. + mModelCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].pop_back(); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails.
+ preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1, mCache3}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); } + ASSERT_EQ(preparedModel, nullptr); } - // cache2 with invalid NumFd. - { + // Go through each handle in model cache, test with NumFd equal to 0. + for (uint32_t i = 0; i < mNumModelCache; i++) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pass an invalid number of fds for handle i. + auto tmp = mModelCache[i].back(); + mModelCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].push_back(tmp); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2, mCache3}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumDataCache; i++) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pass an invalid number of fds for handle i. + mDataCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].pop_back(); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd equal to 0. + for (uint32_t i = 0; i < mNumDataCache; i++) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + // Pass an invalid number of fds for handle i. + auto tmp = mDataCache[i].back(); + mDataCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].push_back(tmp); + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); } } TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp<V1_2::IPreparedModel> preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::NONE); - } + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } - // cache1 with invalid NumFd. - { - preparedModel = nullptr; + // Go through each handle in model cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumModelCache; i++) { + sp<V1_2::IPreparedModel> preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1, mCache3}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + hidl_vec<hidl_handle> modelCache, dataCache; + mModelCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (status != ErrorStatus::GENERAL_FAILURE) { ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); - ASSERT_EQ(preparedModel, nullptr); } + ASSERT_EQ(preparedModel, nullptr); } - // cache2 with invalid NumFd. - { - preparedModel = nullptr; + // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) { + sp<V1_2::IPreparedModel> preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2, mCache3}, AccessMode::READ_ONLY, &cache2); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + hidl_vec<hidl_handle> modelCache, dataCache; + auto tmp = mModelCache[i].back(); + mModelCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (status != ErrorStatus::GENERAL_FAILURE) { ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); - ASSERT_EQ(preparedModel, nullptr); } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumDataCache; i++) { + sp<V1_2::IPreparedModel> preparedModel = nullptr; + ErrorStatus status; + hidl_vec<hidl_handle> modelCache, dataCache; + mDataCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd equal to 0. + for (uint32_t i = 0; i < mNumDataCache; i++) { + sp<V1_2::IPreparedModel> preparedModel = nullptr; + ErrorStatus status; + hidl_vec<hidl_handle> modelCache, dataCache; + auto tmp = mDataCache[i].back(); + mDataCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); } } TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp<V1_2::IPreparedModel> preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; + std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE); + std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE); - // cache1 with invalid access mode. - { + // Go through each handle in model cache, test with invalid access mode. + for (uint32_t i = 0; i < mNumModelCache; i++) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + modelCacheMode[i] = AccessMode::READ_ONLY; + createCacheHandles(mModelCache, modelCacheMode, &modelCache); + createCacheHandles(mDataCache, dataCacheMode, &dataCache); + modelCacheMode[i] = AccessMode::READ_WRITE; + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); } - // cache2 with invalid access mode. - { + // Go through each handle in data cache, test with invalid access mode. + for (uint32_t i = 0; i < mNumDataCache; i++) { + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + dataCacheMode[i] = AccessMode::READ_ONLY; + createCacheHandles(mModelCache, modelCacheMode, &modelCache); + createCacheHandles(mDataCache, dataCacheMode, &dataCache); + dataCacheMode[i] = AccessMode::READ_WRITE; + sp<V1_2::IPreparedModel> preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); } } TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp<V1_2::IPreparedModel> preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; + std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE); + std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE); // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::NONE); - } + bool supported; + hidl_vec<hidl_handle> modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } - // cache1 with invalid access mode. - { - preparedModel = nullptr; + // Go through each handle in model cache, test with invalid access mode.
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
         ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        modelCacheMode[i] = AccessMode::WRITE_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        modelCacheMode[i] = AccessMode::READ_WRITE;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
         ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
         ASSERT_EQ(preparedModel, nullptr);
     }
 
-    // cache2 with invalid access mode.
-    {
-        preparedModel = nullptr;
+    // Go through each handle in data cache, test with invalid access mode.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
         ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        dataCacheMode[i] = AccessMode::WRITE_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        dataCacheMode[i] = AccessMode::READ_WRITE;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
         ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
         ASSERT_EQ(preparedModel, nullptr);
     }
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidOffset) {
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
-
-    // cache1 with invalid file descriptor offset.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Advance offset by one byte.
-        ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-
-    // cache2 with invalid file descriptor offset.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Advance offset by one byte.
-        ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-}
-
-TEST_F(CompilationCachingTest, SaveToCacheInvalidFileSize) {
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
-
-    // cache1 with invalid file size.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Write one byte and seek back to the beginning.
-        ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        ASSERT_EQ(lseek(cache1.getNativeHandle()->data[0], 0, SEEK_SET), 0);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-
-    // cache2 with invalid file size.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Write one byte and seek back to the beginning.
-        ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        ASSERT_EQ(lseek(cache2.getNativeHandle()->data[0], 0, SEEK_SET), 0);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-}
-
 class CompilationCachingSecurityTest : public CompilationCachingTest,
                                        public ::testing::WithParamInterface<uint32_t> {
   protected:
@@ -537,44 +888,44 @@ TEST_P(CompilationCachingSecurityTest, CorruptedSecuritySensitiveCache) {
     // Create test HIDL model and compile.
     Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
 
-    // Save the compilation to cache.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        if (checkEarlyTermination(status)) return;
-        ASSERT_EQ(status, ErrorStatus::NONE);
-    }
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        // Save the compilation to cache.
+        {
+            bool supported;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(testModel, modelCache, dataCache, &supported);
+            if (checkEarlyTermination(supported)) return;
+        }
 
-    // Randomly flip one single bit of the cache entry.
-    FILE* pFile = fopen(mCache1.c_str(), "r+");
-    ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
-    long int fileSize = ftell(pFile);
-    ASSERT_GT(fileSize, 0);
-    ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
-    int readByte = fgetc(pFile);
-    ASSERT_NE(readByte, EOF);
-    ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
-    ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
-    fclose(pFile);
+        // Randomly flip one single bit of the cache entry.
+        FILE* pFile = fopen(mModelCache[i][0].c_str(), "r+");
+        ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+        long int fileSize = ftell(pFile);
+        if (fileSize == 0) {
+            fclose(pFile);
+            continue;
+        }
+        ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+        int readByte = fgetc(pFile);
+        ASSERT_NE(readByte, EOF);
+        ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+        ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+        fclose(pFile);
 
-    // Retrieve preparedModel from cache, expect failure.
-    {
-        preparedModel = nullptr;
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-        ASSERT_EQ(preparedModel, nullptr);
+        // Retrieve preparedModel from cache, expect failure.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+            ASSERT_EQ(preparedModel, nullptr);
+        }
     }
 }
 
@@ -583,40 +934,37 @@ TEST_P(CompilationCachingSecurityTest, WrongLengthSecuritySensitiveCache) {
     // Create test HIDL model and compile.
     Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
 
-    // Save the compilation to cache.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        if (checkEarlyTermination(status)) return;
-        ASSERT_EQ(status, ErrorStatus::NONE);
-    }
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        // Save the compilation to cache.
+        {
+            bool supported;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(testModel, modelCache, dataCache, &supported);
+            if (checkEarlyTermination(supported)) return;
+        }
 
-    // Randomly append bytes to the cache entry.
-    FILE* pFile = fopen(mCache1.c_str(), "a");
-    uint32_t appendLength = getRandomInt(1, 256);
-    for (uint32_t i = 0; i < appendLength; i++) {
-        ASSERT_NE(fputc(getRandomInt(0, 255), pFile), EOF);
-    }
-    fclose(pFile);
+        // Randomly append bytes to the cache entry.
+        FILE* pFile = fopen(mModelCache[i][0].c_str(), "a");
+        uint32_t appendLength = getRandomInt(1, 256);
+        for (uint32_t i = 0; i < appendLength; i++) {
+            ASSERT_NE(fputc(getRandomInt(0, 255), pFile), EOF);
+        }
+        fclose(pFile);
 
-    // Retrieve preparedModel from cache, expect failure.
-    {
-        preparedModel = nullptr;
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-        ASSERT_EQ(preparedModel, nullptr);
+        // Retrieve preparedModel from cache, expect failure.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+            ASSERT_EQ(preparedModel, nullptr);
+        }
     }
 }
 
@@ -625,20 +973,15 @@ TEST_P(CompilationCachingSecurityTest, WrongToken) {
     // Create test HIDL model and compile.
     Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
 
     // Save the compilation to cache.
     {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        if (checkEarlyTermination(status)) return;
-        ASSERT_EQ(status, ErrorStatus::NONE);
+        bool supported;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(testModel, modelCache, dataCache, &supported);
+        if (checkEarlyTermination(supported)) return;
     }
 
     // Randomly flip one single bit in mToken.
@@ -647,12 +990,12 @@ TEST_P(CompilationCachingSecurityTest, WrongToken) {
 
     // Retrieve the preparedModel from cache, expect failure.
     {
-        preparedModel = nullptr;
+        sp<IPreparedModel> preparedModel = nullptr;
         ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
         ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
         ASSERT_EQ(preparedModel, nullptr);
     }
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index c2330b581e..2988211e5a 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -33,6 +33,7 @@ namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -54,7 +55,8 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus =
-            device->prepareModel_1_2(model, preference, preparedModelCallback);
+            device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
+                                     hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index d411da4819..b15f657348 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -37,6 +37,7 @@ namespace functional {
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using test_helper::for_all;
 using test_helper::MixedTyped;
 using test_helper::MixedTypedExample;
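Note: the caching tests above lean on a createCacheHandles() helper from the VTS test fixture that this diff does not show. As a rough sketch only (the name, flags, and error handling here are assumptions, not the fixture's actual code), such a helper plausibly opens every file in a group and wraps the resulting fds in one hidl_handle per group, which is what lets the NumFd tests above vary the fd count per handle:

    // Hypothetical sketch of a createCacheHandles-style helper.
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <algorithm>
    #include <string>
    #include <vector>
    #include <cutils/native_handle.h>
    #include <hidl/HidlSupport.h>

    using ::android::hardware::hidl_handle;
    using ::android::hardware::hidl_vec;

    static bool createCacheHandlesSketch(const std::vector<std::vector<std::string>>& fileGroups,
                                         int openFlags,  // e.g. O_RDWR | O_CREAT
                                         hidl_vec<hidl_handle>* handles) {
        handles->resize(fileGroups.size());
        for (size_t i = 0; i < fileGroups.size(); i++) {
            std::vector<int> fds;
            for (const auto& file : fileGroups[i]) {
                int fd = open(file.c_str(), openFlags, S_IRUSR | S_IWUSR);
                if (fd == -1) return false;
                fds.push_back(fd);
            }
            // One native_handle per group; the tests vary the number of fds per
            // handle to probe the driver's validation of cache handles.
            native_handle_t* nh = native_handle_create(static_cast<int>(fds.size()), 0);
            if (nh == nullptr) return false;
            std::copy(fds.begin(), fds.end(), &nh->data[0]);
            // shouldOwn = true: the fds are closed when the hidl_handle is destroyed.
            (*handles)[i].setTo(nh, /*shouldOwn=*/true);
        }
        return true;
    }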
@@ -66,7 +67,8 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/thermal/2.0/default/Thermal.cpp b/thermal/2.0/default/Thermal.cpp
index 0ef4b63324..bbbecb8583 100644
--- a/thermal/2.0/default/Thermal.cpp
+++ b/thermal/2.0/default/Thermal.cpp
@@ -38,46 +38,47 @@ using ::android::hardware::thermal::V1_0::ThermalStatusCode;
 
 std::set<sp<IThermalChangedCallback>> gCallbacks;
 
 static const Temperature_1_0 kTemp_1_0 = {
-    .type = static_cast<::android::hardware::thermal::V1_0::TemperatureType>(TemperatureType::CPU),
-    .name = "test temperature sensor",
-    .currentValue = 98.6,
-    .throttlingThreshold = 58,
-    .shutdownThreshold = 60.0,
-    .vrThrottlingThreshold = 59.0,
+        .type = static_cast<::android::hardware::thermal::V1_0::TemperatureType>(
+                TemperatureType::SKIN),
+        .name = "test temperature sensor",
+        .currentValue = 30.8,
+        .throttlingThreshold = 48.0,
+        .shutdownThreshold = 60.0,
+        .vrThrottlingThreshold = 49.0,
 };
 
 static const Temperature_2_0 kTemp_2_0 = {
-    .type = TemperatureType::SKIN,
-    .name = "test temperature sensor",
-    .value = 98.6,
-    .throttlingStatus = ThrottlingSeverity::CRITICAL,
+        .type = TemperatureType::SKIN,
+        .name = "test temperature sensor",
+        .value = 30.8,
+        .throttlingStatus = ThrottlingSeverity::NONE,
 };
 
 static const TemperatureThreshold kTempThreshold = {
-    .type = TemperatureType::SKIN,
-    .name = "test temperature sensor",
-    .hotThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
-    .coldThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
-    .vrThrottlingThreshold = NAN,
+        .type = TemperatureType::SKIN,
+        .name = "test temperature sensor",
+        .hotThrottlingThresholds = {{NAN, NAN, NAN, 48.0, NAN, NAN, 60.0}},
+        .coldThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
+        .vrThrottlingThreshold = 49.0,
 };
 
 static const CoolingDevice_1_0 kCooling_1_0 = {
-    .type = ::android::hardware::thermal::V1_0::CoolingType::FAN_RPM,
-    .name = "test cooling device",
-    .currentValue = 100.0,
+        .type = ::android::hardware::thermal::V1_0::CoolingType::FAN_RPM,
+        .name = "test cooling device",
+        .currentValue = 100.0,
 };
 
 static const CoolingDevice_2_0 kCooling_2_0 = {
-    .type = CoolingType::CPU,
-    .name = "test cooling device",
-    .value = 1,
+        .type = CoolingType::FAN,
+        .name = "test cooling device",
+        .value = 100,
 };
 
 static const CpuUsage kCpuUsage = {
-    .name = "cpu_name",
-    .active = 0,
-    .total = 0,
-    .isOnline = true,
+        .name = "cpu_name",
+        .active = 0,
+        .total = 0,
+        .isOnline = true,
 };
 
 // Methods from ::android::hardware::thermal::V1_0::IThermal follow.
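For context on why the new thermal constants hang together: the seven hotThrottlingThresholds slots line up with ThrottlingSeverity::NONE through SHUTDOWN, so {NAN, NAN, NAN, 48.0, NAN, NAN, 60.0} configures SEVERE at 48.0 and SHUTDOWN at 60.0, and the mock reading of 30.8 is consistent with a throttlingStatus of NONE. A hedged sketch of that mapping (illustrative only, not the default implementation's code):

    #include <android/hardware/thermal/2.0/types.h>
    #include <cmath>
    #include <cstddef>

    using ::android::hardware::thermal::V2_0::TemperatureThreshold;
    using ::android::hardware::thermal::V2_0::ThrottlingSeverity;

    // Slot i of hotThrottlingThresholds is the threshold for ThrottlingSeverity
    // value i; NAN slots mean "no threshold configured for this severity".
    static ThrottlingSeverity hotSeverityForValue(const TemperatureThreshold& threshold,
                                                  float value) {
        ThrottlingSeverity severity = ThrottlingSeverity::NONE;
        for (size_t i = 0; i < threshold.hotThrottlingThresholds.size(); i++) {
            const float limit = threshold.hotThrottlingThresholds[i];
            if (!std::isnan(limit) && value >= limit) {
                severity = static_cast<ThrottlingSeverity>(i);
            }
        }
        return severity;
    }

With the constants above, hotSeverityForValue(kTempThreshold, 30.8) yields NONE, a reading of 50.0 would map to SEVERE, and 60.0 to SHUTDOWN.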
diff --git a/vibrator/1.3/Android.bp b/vibrator/1.3/Android.bp
index 28370d6e4d..a2ff784a6c 100644
--- a/vibrator/1.3/Android.bp
+++ b/vibrator/1.3/Android.bp
@@ -8,6 +8,7 @@ hidl_interface {
     },
     srcs: [
         "IVibrator.hal",
+        "types.hal",
     ],
     interfaces: [
         "android.hardware.vibrator@1.0",
diff --git a/vibrator/1.3/IVibrator.hal b/vibrator/1.3/IVibrator.hal
index 01c2801720..1c870ee4cd 100644
--- a/vibrator/1.3/IVibrator.hal
+++ b/vibrator/1.3/IVibrator.hal
@@ -16,6 +16,7 @@
 
 package android.hardware.vibrator@1.3;
 
+import @1.0::EffectStrength;
 import @1.0::Status;
 import @1.2::IVibrator;
 
@@ -41,4 +42,18 @@ interface IVibrator extends @1.2::IVibrator {
      * not supported by the device.
      */
     setExternalControl(bool enabled) generates (Status status);
+
+    /**
+     * Fire off a predefined haptic event.
+     *
+     * @param effect The type of haptic event to trigger.
+     * @return status Whether the effect was successfully performed or not. Must
+     *     return Status::UNSUPPORTED_OPERATION if the effect is not supported.
+     * @return lengthMs The length of time the event is expected to take in
+     *     milliseconds. This doesn't need to be perfectly accurate, but should be a
+     *     reasonable approximation. Should be a positive, non-zero value if the
+     *     returned status is Status::OK, and set to 0 otherwise.
+     */
+    perform_1_3(Effect effect, EffectStrength strength)
+        generates (Status status, uint32_t lengthMs);
 };
diff --git a/vibrator/1.3/example/Vibrator.cpp b/vibrator/1.3/example/Vibrator.cpp
index bb9a057697..eb50187fe9 100644
--- a/vibrator/1.3/example/Vibrator.cpp
+++ b/vibrator/1.3/example/Vibrator.cpp
@@ -74,22 +74,9 @@ Return<void> Vibrator::perform_1_1(V1_1::Effect_1_1 effect, EffectStrength stren
 
 // Methods from ::android::hardware::vibrator::V1_2::IVibrator follow.
 
-Return<void> Vibrator::perform_1_2(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
-    uint8_t amplitude;
-    uint32_t ms;
-    Status status;
-
-    ALOGI("Perform: Effect %s\n", effectToName(effect));
-
-    amplitude = strengthToAmplitude(strength);
-    setAmplitude(amplitude);
-
-    ms = effectToMs(effect);
-    status = activate(ms);
-
-    _hidl_cb(status, ms);
-
-    return Void();
+Return<void> Vibrator::perform_1_2(V1_2::Effect effect, EffectStrength strength,
+                                   perform_cb _hidl_cb) {
+    return perform_1_3(static_cast<Effect>(effect), strength, _hidl_cb);
 }
 
 // Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
@@ -110,6 +97,24 @@ Return<Status> Vibrator::setExternalControl(bool enabled) {
     }
 }
 
+Return<void> Vibrator::perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
+    uint8_t amplitude;
+    uint32_t ms;
+    Status status;
+
+    ALOGI("Perform: Effect %s\n", effectToName(effect));
+
+    amplitude = strengthToAmplitude(strength);
+    setAmplitude(amplitude);
+
+    ms = effectToMs(effect);
+    status = activate(ms);
+
+    _hidl_cb(status, ms);
+
+    return Void();
+}
+
 // Private methods follow.
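Since perform_1_2() now forwards to perform_1_3(), a single implementation serves both interface revisions. From the client side, the new method is reached through the generated proxy roughly as follows (a minimal sketch; the "default" service instance name and the error handling are assumptions for illustration):

    #include <android/hardware/vibrator/1.3/IVibrator.h>

    using ::android::sp;
    using ::android::hardware::vibrator::V1_0::EffectStrength;
    using ::android::hardware::vibrator::V1_0::Status;
    using ::android::hardware::vibrator::V1_3::Effect;
    using ::android::hardware::vibrator::V1_3::IVibrator;

    int main() {
        sp<IVibrator> vibrator = IVibrator::getService("default");
        if (vibrator == nullptr) return 1;
        auto ret = vibrator->perform_1_3(
                Effect::TEXTURE_TICK, EffectStrength::LIGHT,
                [](Status status, uint32_t lengthMs) {
                    // Per the .hal contract: OK implies lengthMs > 0, and
                    // UNSUPPORTED_OPERATION implies lengthMs == 0.
                    (void)status;
                    (void)lengthMs;
                });
        // Transport failures are reported on the Return object itself.
        return ret.isOk() ? 0 : 1;
    }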
 
 Status Vibrator::enable(bool enabled) {
@@ -184,6 +189,7 @@ uint32_t Vibrator::effectToMs(Effect effect) {
         case Effect::DOUBLE_CLICK:
             return 15;
         case Effect::TICK:
+        case Effect::TEXTURE_TICK:
             return 5;
         case Effect::THUD:
             return 5;
diff --git a/vibrator/1.3/example/Vibrator.h b/vibrator/1.3/example/Vibrator.h
index a931b63b09..8cf0b1eb05 100644
--- a/vibrator/1.3/example/Vibrator.h
+++ b/vibrator/1.3/example/Vibrator.h
@@ -27,7 +27,6 @@ namespace implementation {
 
 using android::hardware::vibrator::V1_0::EffectStrength;
 using android::hardware::vibrator::V1_0::Status;
-using android::hardware::vibrator::V1_2::Effect;
 
 class Vibrator : public IVibrator {
   public:
@@ -46,11 +45,13 @@ class Vibrator : public IVibrator {
                            perform_cb _hidl_cb) override;
 
     // Methods from ::android::hardware::vibrator::V1_2::IVibrator follow.
-    Return<void> perform_1_2(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
+    Return<void> perform_1_2(V1_2::Effect effect, EffectStrength strength,
+                             perform_cb _hidl_cb) override;
 
     // Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
     Return<bool> supportsExternalControl() override;
     Return<Status> setExternalControl(bool enabled) override;
+    Return<void> perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
 
   private:
     Status enable(bool enabled);
diff --git a/vibrator/1.3/types.hal b/vibrator/1.3/types.hal
new file mode 100644
index 0000000000..ceb62a5f66
--- /dev/null
+++ b/vibrator/1.3/types.hal
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator@1.3;
+
+import @1.2::Effect;
+
+enum Effect : @1.2::Effect {
+    /**
+     * A soft tick effect meant to be played as a texture.
+     *
+     * A soft, short sensation like the tick of a clock. Unlike regular effects,
+     * texture effects are expected to be played multiple times in quick succession,
+     * replicating a specific texture to the user as a form of haptic feedback.
+     */
+    TEXTURE_TICK
+};
diff --git a/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp b/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp
index a67d1dc8c7..818f9c7ab4 100644
--- a/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp
+++ b/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp
@@ -24,9 +24,16 @@
 #include
 
 using ::android::sp;
+using ::android::hardware::hidl_enum_range;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::vibrator::V1_0::EffectStrength;
 using ::android::hardware::vibrator::V1_0::Status;
+using ::android::hardware::vibrator::V1_3::Effect;
 using ::android::hardware::vibrator::V1_3::IVibrator;
 
+#define EXPECT_OK(ret) ASSERT_TRUE((ret).isOk())
+
 // Test environment for Vibrator HIDL HAL.
 class VibratorHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
   public:
@@ -71,6 +78,74 @@ TEST_F(VibratorHidlTest_1_3, SetExternalControlReturnUnsupportedOperationIfNotSu
     }
 }
 
+static void validatePerformEffectUnsupportedOperation(Status status, uint32_t lengthMs) {
+    ASSERT_EQ(Status::UNSUPPORTED_OPERATION, status);
+    ASSERT_EQ(static_cast<uint32_t>(0), lengthMs)
+            << "Effects that return UNSUPPORTED_OPERATION must have a duration of zero";
+}
+
+static void validatePerformEffect(Status status, uint32_t lengthMs) {
+    ASSERT_TRUE(status == Status::OK || status == Status::UNSUPPORTED_OPERATION);
+    if (status == Status::OK) {
+        ASSERT_LT(static_cast<uint32_t>(0), lengthMs)
+                << "Effects that return OK must return a positive duration";
+    } else {
+        validatePerformEffectUnsupportedOperation(status, lengthMs);
+    }
+}
+
+/*
+ * Test to make sure effects within the valid range are either supported and return OK with
+ * a valid duration, or are unsupported and return UNSUPPORTED_OPERATION with a duration of 0.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3) {
+    for (const auto& effect : hidl_enum_range<Effect>()) {
+        for (const auto& strength : hidl_enum_range<EffectStrength>()) {
+            EXPECT_OK(vibrator->perform_1_3(effect, strength, validatePerformEffect));
+        }
+    }
+}
+
+/*
+ * Test to make sure effect values above the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadEffects_AboveValidRange) {
+    Effect effect = *std::prev(hidl_enum_range<Effect>().end());
+    Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) + 1);
+    EXPECT_OK(vibrator->perform_1_3(badEffect, EffectStrength::LIGHT,
+                                    validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure effect values below the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadEffects_BelowValidRange) {
+    Effect effect = *hidl_enum_range<Effect>().begin();
+    Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) - 1);
+    EXPECT_OK(vibrator->perform_1_3(badEffect, EffectStrength::LIGHT,
+                                    validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure strength values above the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadStrength_AboveValidRange) {
+    EffectStrength strength = *std::prev(hidl_enum_range<EffectStrength>().end());
+    EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) + 1);
+    EXPECT_OK(vibrator->perform_1_3(Effect::THUD, badStrength,
+                                    validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure strength values below the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadStrength_BelowValidRange) {
+    EffectStrength strength = *hidl_enum_range<EffectStrength>().begin();
+    EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) - 1);
+    EXPECT_OK(vibrator->perform_1_3(Effect::THUD, badStrength,
+                                    validatePerformEffectUnsupportedOperation));
+}
+
 int main(int argc, char** argv) {
     ::testing::AddGlobalTestEnvironment(VibratorHidlEnvironment::Instance());
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/wifi/1.3/IWifiChip.hal b/wifi/1.3/IWifiChip.hal
index fc6dbac40e..72cee899b0 100644
--- a/wifi/1.3/IWifiChip.hal
+++ b/wifi/1.3/IWifiChip.hal
@@ -65,10 +65,14 @@ interface IWifiChip extends @1.2::IWifiChip {
     /**
      * API to set the wifi latency mode
      *
-     * Latency mode determines whether or not to optimize for reducing wifi
-     * latency as a tradeoff with other wifi functionality such as scanning,
-     * roaming, etc. This optimization is suitable for some applications such
-     * as gaming and virtual reality applications.
+     * The latency mode is a hint to the HAL to enable or disable Wi-Fi latency
+     * optimization. The optimization should be enabled if the mode is set to |LOW|
+     * and should be disabled if the mode is set to |NORMAL|.
+     * Wi-Fi latency optimization may trade off latency against other Wi-Fi
+     * functionality such as scanning, roaming, etc., but it should not result in
+     * completely halting that functionality.
+     *
+     * The low latency mode targets applications such as gaming and virtual reality.
      */
     setLatencyMode(LatencyMode mode) generates (WifiStatus status);
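For illustration, a client would pass the latency hint through the generated IWifiChip proxy roughly like this (a sketch; obtaining the chip object is chip/vendor specific, so the `chip` parameter here is assumed):

    #include <android/hardware/wifi/1.3/IWifiChip.h>

    using ::android::sp;
    using ::android::hardware::wifi::V1_0::WifiStatus;
    using ::android::hardware::wifi::V1_0::WifiStatusCode;
    using ::android::hardware::wifi::V1_3::IWifiChip;

    // Request latency optimization for a latency-sensitive use case (pass false
    // afterwards to restore NORMAL, since LOW is only a hint, not a toggle the
    // HAL resets on its own).
    bool setLowLatency(const sp<IWifiChip>& chip, bool enabled) {
        bool success = false;
        auto ret = chip->setLatencyMode(
                enabled ? IWifiChip::LatencyMode::LOW : IWifiChip::LatencyMode::NORMAL,
                [&success](const WifiStatus& status) {
                    success = (status.code == WifiStatusCode::SUCCESS);
                });
        return ret.isOk() && success;
    }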