Merge master@5406228 into git_qt-dev-plus-aosp.

Change-Id: I4a82f872dd706ac422ff596217e6f67fee8b7f91
Bug: 129345239
audio/5.0/IStreamIn.hal

@@ -169,6 +169,10 @@ interface IStreamIn extends IStream {
    /**
     * Specifies the logical microphone (for processing).
     *
     * If the feature is not supported, an error should be returned.
     * If multiple microphones are present, this should be treated as a preference
     * for their combined direction.
     *
     * Optional method
     *
     * @param Direction constant
@@ -180,6 +184,10 @@ interface IStreamIn extends IStream {
    /**
     * Specifies the zoom factor for the selected microphone (for processing).
     *
     * If the feature is not supported, an error should be returned.
     * If multiple microphones are present, this should be treated as a preference
     * for their combined field dimension.
     *
     * Optional method
     *
     * @param the desired field dimension of microphone capture. Range is from -1 (wide angle),
audio/common/5.0/types.hal

@@ -146,6 +146,7 @@ enum AudioSource : int32_t {
      */
+    ECHO_REFERENCE = 1997,
     FM_TUNER = 1998,
     HOTWORD = 1999,
 };

 typedef int32_t AudioSession;
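As a rough illustration of the two optional IStreamIn methods above, here is a hedged client-side sketch. The proxy header path, the exact home of the MicrophoneDirection enum, and the stream handle are assumptions for illustration, not part of this change:

// Hypothetical client sketch; assumes a 5.0 IStreamIn proxy was already
// opened through IDevice and that MicrophoneDirection is reachable from
// the generated 5.0 headers (assumption).
#include <android/hardware/audio/5.0/IStreamIn.h>

using ::android::sp;
using ::android::hardware::audio::V5_0::IStreamIn;
using ::android::hardware::audio::V5_0::Result;

static void hintMicrophoneProcessing(const sp<IStreamIn>& stream,
                                     const auto& backDirection /* assumed enum value */) {
    // Preference only: with several microphones this steers their combined
    // direction rather than selecting a single element.
    auto dirRet = stream->setMicrophoneDirection(backDirection);
    // Both methods are optional; a NOT_SUPPORTED result is a valid answer.
    if (dirRet.isOk() && static_cast<Result>(dirRet) == Result::OK) {
        // -1.0 is wide angle, 1.0 is maximum zoom toward that direction.
        stream->setMicrophoneFieldDimension(0.5f);
    }
}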
current.txt (19 changed lines)

@@ -416,11 +416,11 @@ dfdb4d04b65dc363e5621c85bfdf3023c277b75c31d821d8e71b3f44f198e214 android.hardwar
 0a911297821854985cfcdb17b63d7948af0f0f51ce8c68cc86367c185bbc772e android.hardware.audio@5.0::IDevicesFactory
 ce2e8c6c8559fd42bd69e0dee27b4d9c93cd9b2eff487b4e6b6395b6a1a993d6 android.hardware.audio@5.0::IPrimaryDevice
 4a4e5e5d9357004a1256bde8d36010ee00c51cea811a1c1e0dd969a9fc0bf862 android.hardware.audio@5.0::IStream
-e05e48c583de14c1e5a6fa9d48ea50244e3e0924b76b342374e7471dc8007ba9 android.hardware.audio@5.0::IStreamIn
+b9d41ff4031266de1ecef394a8a64de7d857634dd08dc6be855fca2fe3075975 android.hardware.audio@5.0::IStreamIn
 9471b12b1c255bb530695720bc4174bd74987b75b1f820854af8944bc8c215c9 android.hardware.audio@5.0::IStreamOut
 1b0500367ed2b32a841667ac3200edf3d3a164e8004aca445ff1b085ac831e93 android.hardware.audio@5.0::IStreamOutCallback
 83e365479cc77d8717c155e1787ee668cd2ae4c557b467cf75b8e7cd53697ad8 android.hardware.audio@5.0::types
-a0df6961e65444e1ca40a206d7f31304d313e8b7e5b122855e3272ab02720cd4 android.hardware.audio.common@5.0::types
+07d17800b298331e90d4ea5d8ba19a1ae3fe9c1dbff08d9f75fd3ade09496d67 android.hardware.audio.common@5.0::types
 f269297866765b95ddd1825676cc8a772f0c7c9863286df596fc302781a42ff5 android.hardware.audio.effect@5.0::IAcousticEchoCancelerEffect
 fa187b602d8939644ef708ed7627f2e3deac97899a4bda1de07f2ff126abe243 android.hardware.audio.effect@5.0::IAutomaticGainControlEffect
 e1bf864ccb8458c0da1dcc74a2e748b1dca8ac360df590591cf82d98292d7981 android.hardware.audio.effect@5.0::IBassBoostEffect
@@ -464,7 +464,9 @@ f27baaa587bc3dd9b740cb6928ab812b9b7d105b5187663938aee578105f3c39 android.hardwar
 7f460e795f5d1ed5e378935f98c6db4d39497de988aef1b4c2a4a07a6c400392 android.hardware.gnss@2.0::IAGnss
 2e5ad983734069e84a760004b32da0d09e4170c05380abe27e6eb80e4aa70d5a android.hardware.gnss@2.0::IAGnssCallback
 1f4ac068a88a72360280d94a7f6fd7c63813c1eea4891a0eb01394d3e7e775f2 android.hardware.gnss@2.0::IAGnssRil
-63216fcb23eaf4d6f12ea0e99b8bfdb8e4e57c02f215d433cd30943d850f61a7 android.hardware.gnss@2.0::IGnss
+4deafcdcffa2d002119e7f58810b767a84666e76475aae68e757ec2845d9756d android.hardware.gnss@2.0::IGnss
+db6bdf6dfc5edf6c85d2944976db899227abb51079c893874353c322342c50b6 android.hardware.gnss@2.0::IGnssBatching
+1f89392f1ebb693d8fa6f50324b1635fc79fab246d31900e63998e1b0e17511c android.hardware.gnss@2.0::IGnssBatchingCallback
 b11a5e4a1602d3f408716b6fe2c578a79f060d571aad8e828f9a4426d161fbcf android.hardware.gnss@2.0::IGnssCallback
 ecc966c68bddbd95c8dae782b84204cf01c75734675e8769963f3b5106ec128b android.hardware.gnss@2.0::IGnssConfiguration
 b670bae2ab8517336290532e364502b4db9120340d75474ccc8442b1b15d6ab7 android.hardware.gnss@2.0::IGnssDebug
@@ -506,11 +508,11 @@ b9422a9aca84df1ff9623dc12c0562abce97716e28d63a965f2bfb88f9ad9607 android.hardwar
 4cb139f729c29d8d6f4ecdab149c4feb571dad8a06e56cd57fcb52e70208bab4 android.hardware.media.c2@1.0::types
 4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback
 19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext
-96249c852dabeefa3a9496ecdfc44681a071c665bfbf88527bf775c88bf1ab1b android.hardware.neuralnetworks@1.2::IDevice
+b83317b66721241887d2770b5ae95fd5af1e77c5daa7530ecb08fae8892f2b43 android.hardware.neuralnetworks@1.2::IDevice
 92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
-83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel
+36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
 e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-114056b3b9303e0e858f28e718ba45722de5678d1d54eec0dcd10788604bf2bb android.hardware.neuralnetworks@1.2::types
+209a5ee694b94328afb2af2768f1fe6a69148e2cbb85ec3c340a36eed818c697 android.hardware.neuralnetworks@1.2::types
 cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
 abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
 4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
@@ -542,9 +544,10 @@ b47f90302595874dfddb19bd05a054727bf18b3a930bc810ea14957b859ae8bf android.hardwar
 61bc302e7c974c59b25898c585c6e9685e8a81021b1bed3eedf5224198f2785a android.hardware.usb@1.2::IUsb
 46996cd2a1c66261a75a1f6ecada77eeb5861eb264fa39b996548fe0a7f22dd3 android.hardware.usb@1.2::IUsbCallback
 3bbaa8cbc5d6b1da21f5509b2b641e05fc7eeca1354751eb1bb3cf37f89aa32f android.hardware.usb@1.2::types
-92c1a726c80970d623b891f7c2f9a989a40a15ee1244092b49f4eb6adcdce4e9 android.hardware.vibrator@1.3::IVibrator
+0f7ff73793548d5154014059b7e0fe9ef6355d32218ace157954d02055f5248b android.hardware.vibrator@1.3::IVibrator
+2e313dc27a1327a29862ab3e085917f75c9e996f7c8df5a0ce37b9a0ed076b80 android.hardware.vibrator@1.3::types
 f19832856a3f53ced5ef91d3cc630a57fb7f4d4ce15f364dbed09099b89f6830 android.hardware.wifi@1.3::IWifi
-7c6799c19bfdb3dec016b751556fe246cf7d37191ee7bb82a0091ab9fbf6f2fb android.hardware.wifi@1.3::IWifiChip
+64be084b6e1ef330b75fa916593dc0b94b0ec7a16d5cfaa5a31e6c9143c8288d android.hardware.wifi@1.3::IWifiChip
 3bef30e8b61ab050c0f6fd26572712be5ebb7707d624c9aa6c74bbb9d6a5b4a9 android.hardware.wifi@1.3::IWifiStaIface
 f3dbd8dd0d6333c005610288a4785d0ef79a72a7bbe6d0a46d46fa89fc886f1e android.hardware.wifi@1.3::types
 2fae61e962f68091335f7ff4581fcfe2e28ce7f6132d7a712fa13d7965543e4d android.hardware.wifi.hostapd@1.1::IHostapd
gnss/2.0/Android.bp

@@ -12,6 +12,8 @@ hidl_interface {
         "IAGnssCallback.hal",
         "IAGnssRil.hal",
         "IGnss.hal",
+        "IGnssBatching.hal",
+        "IGnssBatchingCallback.hal",
         "IGnssCallback.hal",
         "IGnssConfiguration.hal",
         "IGnssDebug.hal",
gnss/2.0/IGnss.hal

@@ -27,6 +27,7 @@ import IGnssDebug;
 import IGnssMeasurement;
 import IAGnss;
 import IAGnssRil;
+import IGnssBatching;

 /**
  * Represents the standard GNSS (Global Navigation Satellite System) interface.
@@ -104,6 +105,13 @@ interface IGnss extends @1.1::IGnss {
      */
     getExtensionVisibilityControl() generates (IGnssVisibilityControl visibilityControlIface);

+    /**
+     * This method returns the IGnssBatching interface.
+     *
+     * @return batchingIface Handle to the IGnssBatching interface.
+     */
+    getExtensionGnssBatching_2_0() generates (IGnssBatching batchingIface);

     /**
      * Injects current location from the best available location provider.
      *
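A hedged sketch of how a location client might pull the new extension off the 2.0 service; the service-discovery call and null handling are illustrative, not prescribed by this change:

#include <android/hardware/gnss/2.0/IGnss.h>
#include <android/hardware/gnss/2.0/IGnssBatching.h>

using ::android::sp;
using ::android::hardware::gnss::V2_0::IGnss;
using ::android::hardware::gnss::V2_0::IGnssBatching;

static sp<IGnssBatching> getBatchingExtension() {
    sp<IGnss> gnss = IGnss::getService();
    if (gnss == nullptr) return nullptr;
    auto ret = gnss->getExtensionGnssBatching_2_0();
    // isOk() only covers the transport; the returned handle may still be
    // null if the HAL does not implement batching.
    return ret.isOk() ? static_cast<sp<IGnssBatching>>(ret) : nullptr;
}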
gnss/2.0/IGnssBatching.hal (new file, 51 lines)
@@ -0,0 +1,51 @@
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.gnss@2.0;

import @1.0::IGnssBatching;
import IGnssBatchingCallback;

/**
 * Extended interface for GNSS Batching support.
 *
 * If this interface is supported, this batching request must be able to run in
 * parallel with, or without, non-batched location requested by the
 * IGnss start() & stop() - i.e. both requests must be handled independently,
 * and not interfere with each other.
 *
 * For example, if a 1Hz continuous output is underway on the IGnssCallback,
 * due to an IGnss start() operation,
 * and then an IGnssBatching start() is called for a location every 10
 * seconds, the newly added batching request must not disrupt the 1Hz
 * continuous location output on the IGnssCallback.
 *
 * As with GNSS Location outputs, source of location must be GNSS satellite
 * measurements, optionally using inertial and baro sensors to improve
 * relative motion filtering. No additional absolute positioning information,
 * such as WiFi derived location, may be mixed with the GNSS information.
 */
interface IGnssBatching extends @1.0::IGnssBatching {
    /**
     * Opens the interface and provides the callback routines
     * to the implementation of this interface.
     *
     * @param callback Callback interface for IGnssBatching.
     *
     * @return success Returns true on success.
     */
    init_2_0(IGnssBatchingCallback callback) generates (bool success);
};
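To make the "1 Hz tracking plus one fix every 10 seconds" example above concrete, here is a hedged client sketch that starts a batched session using the inherited @1.0 Options struct. The Options/Flag names come from the 1.0 HAL; the callback object is assumed to exist:

using V1_0_IGnssBatching = ::android::hardware::gnss::V1_0::IGnssBatching;
using V2_0_IGnssBatching = ::android::hardware::gnss::V2_0::IGnssBatching;
using ::android::hardware::gnss::V2_0::IGnssBatchingCallback;

static bool startBatchedSession(const android::sp<V2_0_IGnssBatching>& batching,
                                const android::sp<IGnssBatchingCallback>& callback) {
    bool ok = batching->init_2_0(callback);
    if (!ok) return false;
    V1_0_IGnssBatching::Options options = {};
    options.periodNanos = 10'000'000'000LL;  // one fix every 10 seconds
    // Wake the AP when the FIFO fills instead of silently dropping fixes.
    options.flags = static_cast<uint8_t>(V1_0_IGnssBatching::Flag::WAKEUP_ON_FIFO_FULL);
    return batching->start(options);
}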
gnss/2.0/IGnssBatchingCallback.hal (new file, 36 lines)
@@ -0,0 +1,36 @@
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.gnss@2.0;

/** The callback interface to report measurements from the HAL. */
interface IGnssBatchingCallback {
    /**
     * Called when a batch of locations is output, by various means, including
     * a flush request, as well as the buffer becoming full (if the appropriate
     * option is set).
     *
     * All locations returned by this callback must be cleared from the hardware
     * buffer, such that sequential calls of this callback do not return any
     * redundant locations. (Same lat/lon, at a new time, is acceptable.)
     *
     * The GnssLocation struct in gnss@2.0 is extended to include elapsed realtime
     * information.
     *
     * @param locations GNSS Location information from HAL.
     */
    gnssLocationBatchCb(vec<GnssLocation> locations);
};
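A hedged sketch of a client-side receiver for the callback declared above; the class name and the hand-off to a consumer are illustrative:

#include <android/hardware/gnss/2.0/IGnssBatchingCallback.h>

using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::gnss::V2_0::GnssLocation;
using ::android::hardware::gnss::V2_0::IGnssBatchingCallback;

struct BatchingCallback : public IGnssBatchingCallback {
    Return<void> gnssLocationBatchCb(const hidl_vec<GnssLocation>& locations) override {
        // Each location arrives exactly once; the HAL must clear its
        // hardware buffer, so no deduplication is needed here.
        for (const auto& location : locations) {
            (void)location;  // hand off to the consumer of batched fixes
        }
        return Void();
    }
};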
gnss/2.0/default/Android.bp

@@ -25,6 +25,7 @@ cc_binary {
         "AGnss.cpp",
         "AGnssRil.cpp",
         "Gnss.cpp",
+        "GnssBatching.cpp",
         "GnssMeasurement.cpp",
         "GnssMeasurementCorrections.cpp",
         "GnssVisibilityControl.cpp",
gnss/2.0/default/Gnss.cpp

@@ -23,6 +23,7 @@

 #include "AGnss.h"
 #include "AGnssRil.h"
+#include "GnssBatching.h"
 #include "GnssConfiguration.h"
 #include "GnssMeasurement.h"
 #include "GnssMeasurementCorrections.h"
@@ -265,6 +266,10 @@ Return<sp<visibility_control::V1_0::IGnssVisibilityControl>> Gnss::getExtensionV
     return new GnssVisibilityControl();
 }

+Return<sp<V2_0::IGnssBatching>> Gnss::getExtensionGnssBatching_2_0() {
+    return new GnssBatching();
+}
+
 Return<bool> Gnss::setCallback_2_0(const sp<V2_0::IGnssCallback>& callback) {
     ALOGD("Gnss::setCallback_2_0");
     if (callback == nullptr) {
gnss/2.0/default/Gnss.h

@@ -92,6 +92,7 @@ struct Gnss : public IGnss {
     getExtensionMeasurementCorrections() override;
     Return<sp<visibility_control::V1_0::IGnssVisibilityControl>> getExtensionVisibilityControl()
             override;
+    Return<sp<V2_0::IGnssBatching>> getExtensionGnssBatching_2_0() override;
     Return<bool> injectBestLocation_2_0(const V2_0::GnssLocation& location) override;

   private:
gnss/2.0/default/GnssBatching.cpp (new file, 70 lines)
@@ -0,0 +1,70 @@
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "GnssBatching"

#include "GnssBatching.h"

namespace android {
namespace hardware {
namespace gnss {
namespace V2_0 {
namespace implementation {

sp<V2_0::IGnssBatchingCallback> GnssBatching::sCallback = nullptr;

// Methods from ::android::hardware::gnss::V1_0::IGnssBatching follow.
Return<bool> GnssBatching::init(const sp<V1_0::IGnssBatchingCallback>&) {
    // TODO implement
    return bool{};
}

Return<uint16_t> GnssBatching::getBatchSize() {
    // TODO implement
    return uint16_t{};
}

Return<bool> GnssBatching::start(const V1_0::IGnssBatching::Options&) {
    // TODO implement
    return bool{};
}

Return<void> GnssBatching::flush() {
    // TODO implement
    return Void();
}

Return<bool> GnssBatching::stop() {
    // TODO implement
    return bool{};
}

Return<void> GnssBatching::cleanup() {
    // TODO implement
    return Void();
}

// Methods from V2_0::IGnssBatching follow.
Return<bool> GnssBatching::init_2_0(const sp<V2_0::IGnssBatchingCallback>& callback) {
    sCallback = callback;
    return true;
}

}  // namespace implementation
}  // namespace V2_0
}  // namespace gnss
}  // namespace hardware
}  // namespace android
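The default implementation above only stores the callback and stubs everything else. As a hedged sketch under that limitation, a real HAL's FIFO-drain path might look roughly like this (reportBatch is a hypothetical member, not part of the file above):

// Hypothetical member of GnssBatching; not present in the default stub.
void GnssBatching::reportBatch(const hidl_vec<V2_0::GnssLocation>& locations) {
    if (sCallback == nullptr) {
        return;  // init_2_0() has not been called yet
    }
    // Deliver and drop the batch; these fixes must not be re-reported on a
    // later callback (per the IGnssBatchingCallback contract above).
    auto ret = sCallback->gnssLocationBatchCb(locations);
    if (!ret.isOk()) {
        ALOGE("gnssLocationBatchCb transport error");
    }
}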
gnss/2.0/default/GnssBatching.h (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <android/hardware/gnss/2.0/IGnssBatching.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>

namespace android {
namespace hardware {
namespace gnss {
namespace V2_0 {
namespace implementation {

using ::android::sp;
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;

struct GnssBatching : public IGnssBatching {
    // Methods from ::android::hardware::gnss::V1_0::IGnssBatching follow.
    Return<bool> init(const sp<V1_0::IGnssBatchingCallback>& callback) override;
    Return<uint16_t> getBatchSize() override;
    Return<bool> start(const V1_0::IGnssBatching::Options& options) override;
    Return<void> flush() override;
    Return<bool> stop() override;
    Return<void> cleanup() override;

    // Methods from V2_0::IGnssBatching follow.
    Return<bool> init_2_0(const sp<V2_0::IGnssBatchingCallback>& callback) override;

  private:
    static sp<IGnssBatchingCallback> sCallback;
};

}  // namespace implementation
}  // namespace V2_0
}  // namespace gnss
}  // namespace hardware
}  // namespace android
gnss/2.0/default/GnssConfiguration.cpp

@@ -33,13 +33,11 @@ Return<bool> GnssConfiguration::setSuplEs(bool enable) {
 }

 Return<bool> GnssConfiguration::setSuplVersion(uint32_t) {
-    // TODO implement
-    return bool{};
+    return true;
 }

 Return<bool> GnssConfiguration::setSuplMode(hidl_bitfield<SuplMode>) {
-    // TODO implement
-    return bool{};
+    return true;
 }

 Return<bool> GnssConfiguration::setGpsLock(hidl_bitfield<GpsLock> gpsLock) {
@@ -49,18 +47,15 @@ Return<bool> GnssConfiguration::setGpsLock(hidl_bitfield<GpsLock> gpsLock) {
 }

 Return<bool> GnssConfiguration::setLppProfile(hidl_bitfield<LppProfile>) {
-    // TODO implement
-    return bool{};
+    return true;
 }

 Return<bool> GnssConfiguration::setGlonassPositioningProtocol(hidl_bitfield<GlonassPosProtocol>) {
-    // TODO implement
-    return bool{};
+    return true;
 }

 Return<bool> GnssConfiguration::setEmergencySuplPdn(bool) {
-    // TODO implement
-    return bool{};
+    return true;
 }

 // Methods from ::android::hardware::gnss::V1_1::IGnssConfiguration follow.
gnss/2.0/vts/functional/gnss_hal_test.cpp

@@ -26,6 +26,7 @@ using ::android::hardware::gnss::common::Utils;
 GnssHalTest::GnssHalTest()
     : info_called_count_(0),
       capabilities_called_count_(0),
+      measurement_corrections_capabilities_called_count_(0),
       location_called_count_(0),
       name_called_count_(0),
       notify_count_(0) {}
@@ -43,6 +44,7 @@ void GnssHalTest::TearDown() {
     // Reset counters
     info_called_count_ = 0;
     capabilities_called_count_ = 0;
+    measurement_corrections_capabilities_called_count_ = 0;
     location_called_count_ = 0;
     name_called_count_ = 0;
     measurement_called_count_ = 0;
gnss/2.0/vts/functional/gnss_hal_test_cases.cpp

@@ -32,6 +32,8 @@ using IAGnssRil_2_0 = android::hardware::gnss::V2_0::IAGnssRil;
 using IAGnss_2_0 = android::hardware::gnss::V2_0::IAGnss;
 using IAGnss_1_0 = android::hardware::gnss::V1_0::IAGnss;
 using IAGnssCallback_2_0 = android::hardware::gnss::V2_0::IAGnssCallback;
+using IGnssBatching_V1_0 = android::hardware::gnss::V1_0::IGnssBatching;
+using IGnssBatching_V2_0 = android::hardware::gnss::V2_0::IGnssBatching;

 using android::hardware::gnss::common::Utils;
 using android::hardware::gnss::measurement_corrections::V1_0::IMeasurementCorrections;
@@ -326,6 +328,10 @@ TEST_F(GnssHalTest, TestGnssMeasurementCorrections) {
         return;
     }

+    sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback =
+            new GnssMeasurementCorrectionsCallback(*this);
+    iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback);
+
     const int kMeasurementCorrectionsCapabilitiesTimeoutSeconds = 5;
     waitForMeasurementCorrectionsCapabilities(kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
     ASSERT_TRUE(measurement_corrections_capabilities_called_count_ > 0);
@@ -395,3 +401,20 @@ TEST_F(GnssHalTest, TestInjectBestLocation_2_0) {
     gnss_hal_->injectBestLocation_2_0(last_location_);
     StopAndClearLocations();
 }
+
+/*
+ * TestGnssBatchingExtension:
+ * Gets the GnssBatchingExtension and verifies that it supports either the @1.0::IGnssBatching
+ * or @2.0::IGnssBatching extension.
+ */
+TEST_F(GnssHalTest, TestGnssBatchingExtension) {
+    auto gnssBatching_V2_0 = gnss_hal_->getExtensionGnssBatching_2_0();
+    ASSERT_TRUE(gnssBatching_V2_0.isOk());
+
+    auto gnssBatching_V1_0 = gnss_hal_->getExtensionGnssBatching();
+    ASSERT_TRUE(gnssBatching_V1_0.isOk());
+
+    sp<IGnssBatching_V1_0> iGnssBatching_V1_0 = gnssBatching_V1_0;
+    sp<IGnssBatching_V2_0> iGnssBatching_V2_0 = gnssBatching_V2_0;
+    ASSERT_TRUE(iGnssBatching_V1_0 != nullptr || iGnssBatching_V2_0 != nullptr);
+}
neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp

@@ -52,6 +52,7 @@ using ::test_helper::for_each;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
 using ::test_helper::resize_accordingly;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

 template <typename T>
 void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
@@ -540,7 +541,8 @@ void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
neuralnetworks/1.2/IDevice.hal

@@ -75,6 +75,17 @@ interface IDevice extends @1.1::IDevice {
      */
     getType() generates (ErrorStatus status, DeviceType type);

+    /**
+     * Gets the capabilities of a driver.
+     *
+     * @return status Error status of the call, must be:
+     *                - NONE if successful
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if there is an unspecified error
+     * @return capabilities Capabilities of the driver.
+     */
+    getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities);
+
     /**
      * Gets information about extensions supported by the driver implementation.
      *
@@ -113,44 +124,83 @@ interface IDevice extends @1.1::IDevice {
         generates (ErrorStatus status, vec<bool> supportedOperations);

     /**
-     * Gets whether the driver supports compilation caching.
+     * Gets the caching requirements of the driver implementation.
      *
-     * isCachingSupported indicates whether the driver supports compilation caching.
-     * Even if so, the driver may still choose not to cache certain compiled models.
+     * There are two types of cache file descriptors provided to the driver: model cache
+     * and data cache.
      *
-     * If the device reports the caching is not supported, the user may avoid calling
-     * IDevice::prepareModelFromCache and IPreparedModel::saveToCache.
+     * The data cache is for caching constant data, possibly including preprocessed
+     * and transformed tensor buffers. Any modification to the data cache should
+     * have no worse effect than generating bad output values at execution time.
+     *
+     * The model cache is for caching security-sensitive data such as compiled
+     * executable machine code in the device's native binary format. A modification
+     * to the model cache may affect the driver's execution behavior, and a malicious
+     * client could make use of this to execute beyond the granted permission. Thus,
+     * the driver must always check whether the model cache is corrupted before
+     * preparing the model from cache.
+     *
+     * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver
+     * implementation needs to cache a single prepared model. Returning 0 for both types
+     * indicates compilation caching is not supported by this driver. The driver may
+     * still choose not to cache certain compiled models even if it reports that caching
+     * is supported.
+     *
+     * If the device reports that caching is not supported, the user may avoid calling
+     * IDevice::prepareModelFromCache or providing cache file descriptors to
+     * IDevice::prepareModel_1_2.
      *
      * @return status Error status of the call, must be:
      *                - NONE if successful
      *                - DEVICE_UNAVAILABLE if driver is offline or busy
      *                - GENERAL_FAILURE if there is an unspecified error
-     * @return supported A boolean indicating whether the driver supports compilation
-     *                   caching. Even on returning true, the driver may still choose
-     *                   not to cache certain compiled models.
+     * @return numModelCache An unsigned integer indicating how many files for model cache
+     *                       the driver needs to cache a single prepared model. It must
+     *                       be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
+     * @return numDataCache An unsigned integer indicating how many files for data cache
+     *                      the driver needs to cache a single prepared model. It must
+     *                      be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
      */
-    isCachingSupported() generates (ErrorStatus status, bool supported);
+    getNumberOfCacheFilesNeeded()
+            generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache);

     /**
-     * Creates a prepared model for execution.
+     * Asynchronously creates a prepared model for execution and optionally saves it
+     * into cache files.
      *
-     * prepareModel is used to make any necessary transformations or alternative
+     * prepareModel is used to make any necessary transformations to or alternative
      * representations to a model for execution, possibly including
      * transformations on the constant data, optimization on the model's graph,
      * or compilation into the device's native binary format. The model itself
      * is not changed.
      *
+     * Optionally, caching information may be provided for the driver to save
+     * the prepared model to cache files for faster model compilation time
+     * when the same model preparation is requested in the future. There are
+     * two types of cache file handles provided to the driver: model cache
+     * and data cache. For more information on the two types of cache handles,
+     * refer to getNumberOfCacheFilesNeeded.
+     *
+     * The file descriptors must be opened with read and write permission. A file may
+     * have any size, and the corresponding file descriptor may have any offset. The
+     * driver must truncate a file to zero size before writing to that file. The file
+     * descriptors may be closed by the client once the asynchronous preparation has
+     * finished. The driver must dup a file descriptor if it wants to get access to
+     * the cache file later.
+     *
      * The model is prepared asynchronously with respect to the caller. The
-     * prepareModel function must verify the inputs to the prepareModel function
-     * are correct. If there is an error, prepareModel must immediately invoke
+     * prepareModel function must verify the inputs to the preparedModel function
+     * related to preparing the model (as opposed to saving the prepared model to
+     * cache) are correct. If there is an error, prepareModel must immediately invoke
      * the callback with the appropriate ErrorStatus value and nullptr for the
-     * IPreparedModel, then return with the same ErrorStatus. If the inputs to
-     * the prepareModel function are valid and there is no error, prepareModel
-     * must launch an asynchronous task to prepare the model in the background,
-     * and immediately return from prepareModel with ErrorStatus::NONE. If the
-     * asynchronous task fails to launch, prepareModel must immediately invoke
-     * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
-     * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
+     * IPreparedModel, then return with the same ErrorStatus. If the inputs to the
+     * prepareModel function that are related to preparing the model are valid and
+     * there is no error, prepareModel must launch an asynchronous task
+     * to prepare the model in the background, and immediately return from
+     * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch,
+     * prepareModel must immediately invoke the callback with
+     * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return
+     * with ErrorStatus::GENERAL_FAILURE.
      *
      * When the asynchronous task has finished preparing the model, it must
      * immediately invoke the callback function provided as an input to
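A hedged driver-side sketch of the new query documented above; SampleDriver is a hypothetical implementation class, but the generated `_cb` callback is the standard HIDL pattern for multi-value returns. A driver that keeps one compiled blob plus one preprocessed-constants file per model would answer (1, 1); answering (0, 0) advertises that compilation caching is unsupported:

Return<void> SampleDriver::getNumberOfCacheFilesNeeded(
        getNumberOfCacheFilesNeeded_cb cb) {
    // One model-cache file (compiled code) and one data-cache file
    // (transformed constants) per prepared model, in this sketch.
    cb(ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1);
    return Void();
}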
@@ -160,6 +210,14 @@ interface IDevice extends @1.1::IDevice {
      * the callback object must be invoked with the appropriate ErrorStatus
      * value and nullptr for the IPreparedModel.
      *
+     * Optionally, the driver may save the prepared model to cache during the
+     * asynchronous preparation. Any error that occurs when saving to cache must
+     * not affect the status of preparing the model. Even if the input arguments
+     * related to the cache may be invalid, or the driver may fail to save to cache,
+     * the prepareModel function must finish preparing the model. The driver
+     * may choose not to save to cache even if the caching information is
+     * provided and valid.
+     *
      * The only information that may be unknown to the model at this stage is
      * the shape of the tensors, which may only be known at execution time. As
      * such, some driver services may return partially prepared models, where
@@ -173,6 +231,26 @@ interface IDevice extends @1.1::IDevice {
      * @param model The model to be prepared for execution.
      * @param preference Indicates the intended execution behavior of a prepared
      *                   model.
+     * @param modelCache A vector of handles with each entry holding exactly one
+     *                   cache file descriptor for the security-sensitive cache. The length of
+     *                   the vector must either be 0 indicating that caching information is not provided,
+     *                   or match the numModelCache returned from getNumberOfCacheFilesNeeded. The cache
+     *                   handles will be provided in the same order when retrieving the
+     *                   preparedModel from cache files with prepareModelFromCache.
+     * @param dataCache A vector of handles with each entry holding exactly one
+     *                  cache file descriptor for the constants' cache. The length of
+     *                  the vector must either be 0 indicating that caching information is not provided,
+     *                  or match the numDataCache returned from getNumberOfCacheFilesNeeded. The cache
+     *                  handles will be provided in the same order when retrieving the
+     *                  preparedModel from cache files with prepareModelFromCache.
+     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
+     *              identifying the prepared model. The same token will be provided when retrieving
+     *              the prepared model from the cache files with prepareModelFromCache.
+     *              Tokens should be chosen to have a low rate of collision for a particular
+     *              application. The driver cannot detect a collision; a collision will result
+     *              in a failed execution or in a successful execution that produces incorrect
+     *              output values. If both modelCache and dataCache are empty indicating that
+     *              caching information is not provided, this token must be ignored.
      * @param callback A callback object used to return the error status of
      *                 preparing the model for execution and the prepared model if
      *                 successful, nullptr otherwise. The callback object's notify function
@@ -182,9 +260,12 @@ interface IDevice extends @1.1::IDevice {
      *                - NONE if preparation task is successfully launched
      *                - DEVICE_UNAVAILABLE if driver is offline or busy
      *                - GENERAL_FAILURE if there is an unspecified error
-     *                - INVALID_ARGUMENT if one of the input arguments is invalid
+     *                - INVALID_ARGUMENT if one of the input arguments related to preparing the
+     *                  model is invalid
      */
     prepareModel_1_2(Model model, ExecutionPreference preference,
+                     vec<handle> modelCache, vec<handle> dataCache,
+                     uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
                      IPreparedModelCallback callback)
         generates (ErrorStatus status);
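For clients that do not want caching, the call shape is the same one the updated VTS PrepareModel helper uses earlier in this change: both cache vectors empty and a zeroed token. A hedged sketch, assuming `device`, `model`, and `preparedModelCallback` already exist:

// HidlToken mirrors the VTS alias: hidl_array<uint8_t, 32> per
// Constant::BYTE_SIZE_OF_CACHE_TOKEN.
using HidlToken =
        hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

Return<ErrorStatus> launch = device->prepareModel_1_2(
        model, ExecutionPreference::FAST_SINGLE_ANSWER,
        hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(),  // no caching info
        HidlToken(),                                       // token must be ignored
        preparedModelCallback);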
@@ -192,22 +273,17 @@ interface IDevice extends @1.1::IDevice {
      * Creates a prepared model from cache files for execution.
      *
      * prepareModelFromCache is used to retrieve a prepared model directly from
-     * cache files to avoid slow model compilation time. There are exactly two
-     * cache file descriptors provided to the driver: modelCache and dataCache.
+     * cache files to avoid slow model compilation time. There are
+     * two types of cache file handles provided to the driver: model cache
+     * and data cache. For more information on the two types of cache handles,
+     * refer to getNumberOfCacheFilesNeeded.
      *
-     * The dataCache is for caching constant data, possibly including preprocessed
-     * and transformed tensor buffers. Any modification to the dataCache should
-     * have no worse effect than generating bad output values at execution time.
-     *
-     * The modelCache is for caching security-sensitive data such as compiled
-     * executable machine code in the device's native binary format. A modification
-     * to the modelCache may affect the driver's execution behavior, and a malicious
-     * client could make use of this to execute beyond the granted permission. Thus,
-     * the driver must always check whether the modelCache is corrupted before preparing
-     * the model from cache.
-     *
-     * The two file descriptors may be closed by the client once the asynchronous
-     * preparation has finished. The driver has to copy all the data it needs.
+     * The file descriptors must be opened with read and write permission. A file may
+     * have any size, and the corresponding file descriptor may have any offset. The
+     * driver must truncate a file to zero size before writing to that file. The file
+     * descriptors may be closed by the client once the asynchronous preparation has
+     * finished. The driver must dup a file descriptor if it wants to get access to
+     * the cache file later.
      *
      * The model is prepared asynchronously with respect to the caller. The
      * prepareModelFromCache function must verify the inputs to the
@@ -241,13 +317,17 @@ interface IDevice extends @1.1::IDevice {
      *      used with different shapes of inputs on different (possibly concurrent)
      *      executions.
      *
-     * @param modelCache A handle holding exactly one cache file descriptor for the
-     *                   security-sensitive cache.
-     * @param dataCache A handle holding exactly one cache file descriptor for the
-     *                  constants' cache.
+     * @param modelCache A vector of handles with each entry holding exactly one
+     *                   cache file descriptor for the security-sensitive cache. The length of
+     *                   the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
+     *                   The cache handles will be provided in the same order as with prepareModel_1_2.
+     * @param dataCache A vector of handles with each entry holding exactly one
+     *                  cache file descriptor for the constants' cache. The length of the vector
+     *                  must match the numDataCache returned from getNumberOfCacheFilesNeeded.
+     *                  The cache handles will be provided in the same order as with prepareModel_1_2.
      * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
      *              identifying the prepared model. It is the same token provided when saving
-     *              the cache files with IPreparedModel::saveToCache. Tokens should be chosen
+     *              the cache files with prepareModel_1_2. Tokens should be chosen
      *              to have a low rate of collision for a particular application. The driver
      *              cannot detect a collision; a collision will result in a failed execution
      *              or in a successful execution that produces incorrect output values.
@@ -263,7 +343,7 @@ interface IDevice extends @1.1::IDevice {
      *                  unspecified error
      *                - INVALID_ARGUMENT if one of the input arguments is invalid
      */
-    prepareModelFromCache(handle modelCache, handle dataCache,
+    prepareModelFromCache(vec<handle> modelCache, vec<handle> dataCache,
                           uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
                           IPreparedModelCallback callback)
         generates (ErrorStatus status);
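A hedged sketch of the cache-hit path: re-preparing from files written during an earlier prepareModel_1_2 call. The handle vectors and token variables are assumed to exist; the vector lengths must equal the driver's numModelCache/numDataCache answers, and the token must be the one used originally:

Return<ErrorStatus> launch = device->prepareModelFromCache(
        modelCacheHandles,  // vec<handle>, length == numModelCache
        dataCacheHandles,   // vec<handle>, length == numDataCache
        token,              // same 32-byte token as the original call
        preparedModelCallback);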
neuralnetworks/1.2/IPreparedModel.hal

@@ -157,62 +157,4 @@ interface IPreparedModel extends @1.0::IPreparedModel {
                       fmq_sync<FmqRequestDatum> requestChannel,
                       fmq_sync<FmqResultDatum> resultChannel)
             generates (ErrorStatus status, IBurstContext context);
-
-    /*
-     * Saves the prepared model to cache files.
-     *
-     * saveToCache is used to save a prepared model to cache files for faster
-     * model compilation time when the same model preparation is requested in
-     * the future. There are exactly two cache file descriptors provided to the
-     * driver: modelCache and dataCache.
-     *
-     * The dataCache is for caching constant data, possibly including preprocessed
-     * and transformed tensor buffers. Any modification to the dataCache should
-     * have no worse effect than generating bad output values at execution time.
-     *
-     * The modelCache is for caching security-sensitive data such as compiled
-     * executable machine code in the device's native binary format. A modification
-     * to the modelCache may affect the driver's execution behavior, and a malicious
-     * client could make use of this to execute beyond the granted permission. Thus,
-     * the driver must always check whether the modelCache is corrupted before preparing
-     * the model from cache.
-     *
-     * The two file descriptors must point to two zero-length files with offset
-     * positioned at the beginning of the file. The file descriptors may be closed
-     * by the client once the method has returned.
-     *
-     * If the driver decides not to save the prepared model without looking at the
-     * input arguments to the saveToCache function, saveToCache must return with
-     * ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify
-     * the input arguments to the saveToCache function are valid, and return with
-     * ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver
-     * could not save the prepared model, saveToCache must return with the appropriate
-     * ErrorStatus. Otherwise, it must write the cache files and return
-     * ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents
-     * of the cache files are undefined.
-     *
-     * @param modelCache A handle holding exactly one cache file descriptor for the
-     *                   security-sensitive cache.
-     * @param dataCache A handle holding exactly one cache file descriptor for the
-     *                  constants' cache.
-     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
-     *              identifying the prepared model. The same token will be provided
-     *              when retrieving the prepared model from cache files with
-     *              IDevice::prepareModelFromCache. Tokens should be chosen to have
-     *              a low rate of collision for a particular application. The driver
-     *              cannot detect a collision; a collision will result in a failed
-     *              execution or in a successful execution that produces incorrect
-     *              output values.
-     * @return status Error status of saveToCache, must be:
-     *                - NONE if saveToCache is performed successfully
-     *                - DEVICE_UNAVAILABLE if driver is offline or busy
-     *                - GENERAL_FAILURE if the driver could not save the
-     *                  prepared model or if there is an unspecified error
-     *                - INVALID_ARGUMENT if one of the input arguments is invalid,
-     *                  unless the driver decides not to save the prepared model
-     *                  without looking at the input arguments
-     */
-    saveToCache(handle modelCache, handle dataCache,
-                uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token)
-        generates (ErrorStatus status);
 };
neuralnetworks/1.2/types.hal

@@ -30,6 +30,11 @@ enum Constant : uint32_t {
      * The byte size of the cache token.
      */
     BYTE_SIZE_OF_CACHE_TOKEN = 32,
+
+    /**
+     * The maximum number of files for each type of cache in compilation caching.
+     */
+    MAX_NUMBER_OF_CACHE_FILES = 32,
 };

 enum OperandType : @1.0::OperandType {
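A hedged validation sketch tying the new constant to the prepareModel_1_2 contract: a driver checking the cache-vector lengths it was handed against its own getNumberOfCacheFilesNeeded answer (the function and parameter names are illustrative):

static bool cacheArgsValid(const hidl_vec<hidl_handle>& modelCache,
                           const hidl_vec<hidl_handle>& dataCache,
                           uint32_t numModelCache, uint32_t numDataCache) {
    // Per the HAL: counts reported by the driver may not exceed
    // Constant::MAX_NUMBER_OF_CACHE_FILES (32).
    // Either both vectors are empty (no caching info) ...
    const bool noCaching = modelCache.size() == 0 && dataCache.size() == 0;
    // ... or both lengths match what the driver reported.
    return noCaching || (modelCache.size() == numModelCache &&
                         dataCache.size() == numDataCache);
}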
@@ -182,6 +187,10 @@ enum OperationType : int32_t {
      *         input2.dimension = {5, 4, 3, 1}
      *         output.dimension = {5, 4, 3, 2}
      *
+     * Since API level 29, generic zero-sized input tensor is supported. Zero
+     * dimension is only compatible with 0 or 1. The size of the output
+     * dimension is zero if either of corresponding input dimension is zero.
+     *
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
      * * {@link OperandType::TENSOR_FLOAT32}
@@ -231,7 +240,8 @@ enum OperationType : int32_t {
      *
      * Inputs (explicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
      *      the left, in the ‘width’ dimension.
      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -257,7 +267,8 @@ enum OperationType : int32_t {
      *
      * Inputs (implicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
      *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -304,6 +315,7 @@ enum OperationType : int32_t {
      *      Before API level 29, all input tensors of
      *      {@link OperandType::TENSOR_QUANT8_ASYMM}
      *      must have the same scale and zeroPoint as the output tensor.
+     *      Since API level 29, zero-sized tensors are supported.
      * * n: An {@link OperandType::INT32} scalar, specifying the
      *      concatenation axis.
      *
@@ -361,7 +373,8 @@ enum OperationType : int32_t {
      *
      * Inputs (explicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
-     *      specifying the input.
+     *      specifying the input. Since API level 29, zero batches is supported
+     *      for this tensor.
      * * 1: A 4-D tensor, of shape
      *      [depth_out, filter_height, filter_width, depth_in], specifying the
      *      filter. For tensor of type
@@ -408,7 +421,8 @@ enum OperationType : int32_t {
      *
      * Inputs (implicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
-     *      specifying the input.
+     *      specifying the input. Since API level 29, zero batches is supported
+     *      for this tensor.
      * * 1: A 4-D tensor, of shape
      *      [depth_out, filter_height, filter_width, depth_in], specifying the
      *      filter. For tensor of type
@@ -450,11 +464,10 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: The output 4-D tensor, of shape
-     *      [batches, out_height, out_width, depth_out]. For output tensor of
-     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
-     *      must be satisfied: output_scale > input_scale * filter_scale (for
-     *      filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
-     *      this condition must be true for all filter scales).
+     *      [batches, out_height, out_width, depth_out]. Before API level 29,
+     *      for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
+     *      following condition must be satisfied:
+     *      output_scale > input_scale * filter_scale
      *
      * Available since API level 27.
      */
@@ -600,11 +613,10 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: The output 4-D tensor, of shape
-     *      [batches, out_height, out_width, depth_out]. For output tensor of
-     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
-     *      must be satisfied: output_scale > input_scale * filter_scale (for
-     *      filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
-     *      this condition must be true for all filter scales).
+     *      [batches, out_height, out_width, depth_out]. Before API level 29,
+     *      for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
+     *      following condition must be satisfied:
+     *      output_scale > input_scale * filter_scale
      *
      * Available since API level 27.
      */
@@ -672,7 +684,7 @@ enum OperationType : int32_t {
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * * 0: A tensor.
+     * * 0: A tensor. Since API level 29, this tensor may be zero-sized.
      *
      * Outputs:
      * * 0: A tensor with the same shape as input0.
@@ -765,7 +777,8 @@ enum OperationType : int32_t {
      *      [batch_size, input_size], where "input_size" corresponds to the
      *      number of inputs to the layer, matching the second dimension of
      *      weights, and "batch_size" is calculated by dividing the number of
-     *      elements by "input_size".
+     *      elements by "input_size". Since API level 29, zero batch_size is
+     *      supported for this tensor.
      * * 1: A 2-D tensor, specifying the weights, of shape
      *      [num_units, input_size], where "num_units" corresponds to the number
      *      of output nodes.
@@ -780,10 +793,10 @@ enum OperationType : int32_t {
      *      invoke on the result.
      *
      * Outputs:
-     * * 0: The output tensor, of shape [batch_size, num_units]. For output
-     *      tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
-     *      condition must be satisfied:
-     *      output_scale > input_scale * filter_scale.
+     * * 0: The output tensor, of shape [batch_size, num_units]. Before API
+     *      level 29, For output tensor of {@link
+     *      OperandType::TENSOR_QUANT8_ASYMM}, the following condition must be
+     *      satisfied: output_scale > input_scale * filter_scale.
      *
      * Available since API level 27.
      */
@@ -861,6 +874,7 @@ enum OperationType : int32_t {
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
      * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
      *
      * Supported tensor rank: up to 4
      * Tensors with rank less than 4 are only supported since API level 29.
@@ -875,6 +889,8 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
+     *      the scale must be 1.f / 128 and the zeroPoint must be 128.
      *
      * Available since API level 27.
      */
@@ -905,7 +921,8 @@ enum OperationType : int32_t {
      *
      * Inputs (explicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
      *      the left, in the ‘width’ dimension.
      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -931,7 +948,8 @@ enum OperationType : int32_t {
      *
      * Inputs (implicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
      *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1021,7 +1039,8 @@ enum OperationType : int32_t {
      * Supported tensor rank: up to 4.
      *
      * Inputs:
-     * * 0: A tensor, specifying the input.
+     * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+     *      be zero-sized.
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
@@ -1333,7 +1352,8 @@ enum OperationType : int32_t {
      *
      * Inputs (explicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
      *      the left, in the ‘width’ dimension.
      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -1359,7 +1379,8 @@ enum OperationType : int32_t {
      *
      * Inputs (implicit padding):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
      *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1406,6 +1427,10 @@ enum OperationType : int32_t {
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
+     * Since API level 29, generic zero-sized input tensor is supported. Zero
+     * dimension is only compatible with 0 or 1. The size of the output
+     * dimension is zero if either of corresponding input dimension is zero.
+     *
      * Supported tensor rank: up to 4
      *
      * Inputs:
@@ -1441,7 +1466,8 @@ enum OperationType : int32_t {
      * Supported tensor rank: up to 4.
      *
      * Inputs:
-     * * 0: A tensor, specifying the input.
+     * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+     *      be zero-sized.
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
@@ -1465,7 +1491,8 @@ enum OperationType : int32_t {
      * Supported tensor rank: up to 4.
      *
      * Inputs:
-     * * 0: A tensor, specifying the input.
+     * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+     *      be zero-sized.
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
@@ -1489,7 +1516,8 @@ enum OperationType : int32_t {
      * Supported tensor rank: up to 4.
      *
      * Inputs:
-     * * 0: A tensor, specifying the input.
+     * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+     *      be zero-sized.
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
@@ -1541,9 +1569,12 @@ enum OperationType : int32_t {
      * [batch, height, width, channels]. Alternatively, the data layout could
      * be NCHW, the data storage order of: [batch, channels, height, width].
      *
-     * Inputs:
+     * Both resizing by shape and resizing by scale are supported.
+     *
+     * Inputs (resizing by shape):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
-     *      the input.
+     *      the input. Since API level 29, zero batches is supported for this
+     *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the output
      *      height of the output tensor.
      * * 2: An {@link OperandType::INT32} scalar, specifying the output
@@ -1552,6 +1583,24 @@ enum OperationType : int32_t {
      *      Set to true to specify NCHW data layout for input0 and output0.
      *      Available since API level 29.
      *
+     * Inputs (resizing by scale, since API level 29):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input. Zero batches is supported for this tensor.
+     * * 1: A scalar, specifying height_scale, the scaling factor of the height
+     *      dimension from the input tensor to the output tensor. The output
+     *      height is calculated as new_height = floor(height * height_scale).
+     *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
+     *      of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} otherwise.
+     * * 2: A scalar, specifying width_scale, the scaling factor of the width
+     *      dimension from the input tensor to the output tensor. The output
+     *      width is calculated as new_width = floor(width * width_scale).
+     *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
+     *      of {@link OperandType::TENSOR_FLOAT16} and of
+     *      {@link OperandType::FLOAT32} otherwise.
+     * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
+     *
      * Outputs:
      * * 0: The output 4-D tensor, of shape
      *      [batches, new_height, new_width, depth].
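A quick worked example of the resizing-by-scale formulas just documented, with illustrative values:

#include <cmath>

int height = 480, width = 640;
float height_scale = 0.5f, width_scale = 0.25f;
// new_height = floor(height * height_scale); new_width = floor(width * width_scale)
int new_height = static_cast<int>(std::floor(height * height_scale));  // 240
int new_width  = static_cast<int>(std::floor(width  * width_scale));   // 160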
@@ -1637,7 +1686,8 @@ enum OperationType : int32_t {
* Tensors with rank other than 2 or 4 are only supported since API level 29.
*
* Inputs:
* * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
* * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since
* API level 29, this tensor may be zero-sized.
* * 1: A scalar, specifying the positive scaling factor for the exponent,
* beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of

@@ -1795,7 +1845,8 @@ enum OperationType : int32_t {
* Supported tensor rank: up to 4.
*
* Inputs:
* * 0: A tensor, specifying the input.
* * 0: A tensor, specifying the input. Since API level 29, this tensor may
* be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.

@@ -1862,6 +1913,10 @@ enum OperationType : int32_t {
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
* Since API level 29, generic zero-sized input tensor is supported. A zero
* dimension is only compatible with 0 or 1. The size of an output
* dimension is zero if either of the corresponding input dimensions is zero.
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
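A short C++ sketch of the per-dimension broadcast rule stated above; the helper is illustrative:

// Per-dimension broadcast rule from the spec above. A zero dimension is
// only compatible with 0 or 1, and the output dimension is zero whenever
// either input dimension is zero.
#include <cstdint>
#include <optional>

std::optional<uint32_t> broadcastDim(uint32_t a, uint32_t b) {
    if (a == b) return a;   // includes the 0-with-0 case
    if (a == 1) return b;
    if (b == 1) return a;   // covers 0 broadcast against 1
    return std::nullopt;    // incompatible dimensions
}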
@@ -2095,6 +2150,10 @@ enum OperationType : int32_t {
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
* Since API level 29, generic zero-sized input tensor is supported. A zero
* dimension is only compatible with 0 or 1. The size of an output
* dimension is zero if either of the corresponding input dimensions is zero.
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}

@@ -2135,6 +2194,7 @@ enum OperationType : int32_t {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be transposed.
* Since API level 29, this tensor may be zero-sized.
* * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
* the permutation of the dimensions of the input tensor.
*

@@ -2231,7 +2291,8 @@ enum OperationType : int32_t {
* * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
* bounding box proposals, each line with format [x1, y1, x2, y2].
* For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
* the zeroPoint must be 0 and the scale must be 0.125.
* the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
* is supported for this tensor.
* * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
* bounding box delta for each region of interest and each class. The
* bounding box deltas are organized in the following order

@@ -2240,10 +2301,12 @@ enum OperationType : int32_t {
* and height, dw and dh are the log-scale relative correction factors
* for the width and height. For input0 of type
* {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
* of {@link OperandType::TENSOR_QUANT8_ASYMM}.
* of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
* supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* the same batch index are grouped together. Zero num_rois is
* supported for this tensor.
* * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
* each image in the batch, each line with format
* [image_height, image_width].
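The [dx, dy, dw, dh] encoding above follows the usual detection-network convention. A hedged C++ sketch of that decoding (the exact equations are an assumption based on the standard Faster R-CNN formulation; the spec text only names the four deltas):

// Applies one [dx, dy, dw, dh] delta to an axis-aligned box: the center
// shifts by a fraction of the box size, and the size scales by exp() of
// the log-scale correction factor.
#include <cmath>

struct Box { float x1, y1, x2, y2; };

Box applyBoxDelta(const Box& b, float dx, float dy, float dw, float dh) {
    const float w = b.x2 - b.x1, h = b.y2 - b.y1;
    const float cx = b.x1 + 0.5f * w + dx * w;
    const float cy = b.y1 + 0.5f * h + dy * h;
    const float nw = w * std::exp(dw);
    const float nh = h * std::exp(dh);
    return {cx - 0.5f * nw, cy - 0.5f * nh, cx + 0.5f * nw, cy + 0.5f * nh};
}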
@@ -2272,113 +2335,113 @@ enum OperationType : int32_t {
* Inputs:
* * 0: The input.
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, output_size]
* If batch-major: [batch_size, max_time, output_size]
* If time-major: [max_time, batch_size, input_size]
* If batch-major: [batch_size, max_time, input_size]
* where "max_time" is the number of timesteps (sequence length),
* "batch_size" corresponds to the batching dimension, and
* "input_size" is the size of the input.
* * 1: The forward input-to-input weights. Optional.
* A 2-D tensor of shape [num_units, input_size], where “num_units”
* corresponds to the number of cell units.
* A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
* corresponds to the number of forward cell units.
* * 2: The forward input-to-forget weights.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 3: The forward input-to-cell weights.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 4: The forward input-to-output weights.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 5: The forward recurrent-to-input weights. Optional.
* A 2-D tensor of shape [num_units, output_size], where “output_size”
* corresponds to either the number of cell units (i.e., “num_units”),
* or the second dimension of the “projection_weights”, if defined.
* A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
* corresponds to either the number of cell units (i.e., fw_num_units),
* or the second dimension of the “fw_projection_weights”, if defined.
* * 6: The forward recurrent-to-forget weights.
* A 2-D tensor of shape [num_units, output_size].
* A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 7: The forward recurrent-to-cell weights.
* A 2-D tensor of shape [num_units, output_size].
* A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 8: The forward recurrent-to-output weights.
* A 2-D tensor of shape [num_units, output_size].
* A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 9: The forward cell-to-input weights. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 10: The forward cell-to-forget weights. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 11: The forward cell-to-output weights. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 12: The forward input gate bias. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 13: The forward forget gate bias.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 14: The forward cell gate bias.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 15: The forward output gate bias.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [fw_num_units].
* * 16: The forward projection weights. Optional.
* A 2-D tensor of shape [output_size, num_units].
* A 2-D tensor of shape [fw_output_size, fw_num_units].
* * 17: The forward projection bias. Optional.
* A 1-D tensor of shape [output_size].
* A 1-D tensor of shape [fw_output_size].
* * 18: The backward input-to-input weights. Optional.
* A 2-D tensor of shape [num_units, input_size], where “num_units”
* corresponds to the number of cell units.
* A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
* corresponds to the number of backward cell units.
* * 19: The backward input-to-forget weights.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 20: The backward input-to-cell weights.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 21: The backward input-to-output weights.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 22: The backward recurrent-to-input weights. Optional.
* A 2-D tensor of shape [num_units, output_size], where “output_size”
* corresponds to either the number of cell units (i.e., “num_units”),
* or the second dimension of the “projection_weights”, if defined.
* A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
* corresponds to either the number of cell units (i.e., “bw_num_units”),
* or the second dimension of the “bw_projection_weights”, if defined.
* * 23: The backward recurrent-to-forget weights.
* A 2-D tensor of shape [num_units, output_size].
* A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 24: The backward recurrent-to-cell weights.
* A 2-D tensor of shape [num_units, output_size].
* A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 25: The backward recurrent-to-output weights.
* A 2-D tensor of shape [num_units, output_size].
* A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 26: The backward cell-to-input weights. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 27: The backward cell-to-forget weights. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 28: The backward cell-to-output weights. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 29: The backward input gate bias. Optional.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 30: The backward forget gate bias.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 31: The backward cell gate bias.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 32: The backward output gate bias.
* A 1-D tensor of shape [num_units].
* A 1-D tensor of shape [bw_num_units].
* * 33: The backward projection weights. Optional.
* A 2-D tensor of shape [output_size, num_units].
* A 2-D tensor of shape [bw_output_size, bw_num_units].
* * 34: The backward projection bias. Optional.
* A 1-D tensor of shape [output_size].
* A 1-D tensor of shape [bw_output_size].
* * 35: The forward input activation state.
* A 2-D tensor of shape [batch_size, output_size].
* A 2-D tensor of shape [batch_size, fw_output_size].
* * 36: The forward input cell state.
* A 2-D tensor of shape [batch_size, num_units].
* A 2-D tensor of shape [batch_size, fw_num_units].
* * 37: The backward input activation state.
* A 2-D tensor of shape [batch_size, output_size].
* A 2-D tensor of shape [batch_size, bw_output_size].
* * 38: The backward input cell state.
* A 2-D tensor of shape [batch_size, num_units].
* A 2-D tensor of shape [batch_size, bw_num_units].
* * 39: The auxiliary input. Optional.
* A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
* corresponds to the batching dimension, and “input_size” is the size
* of the input.
* * 40: The forward auxiliary input-to-input weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 41: The forward auxiliary input-to-forget weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 42: The forward auxiliary input-to-cell weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 43: The forward auxiliary input-to-output weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [fw_num_units, input_size].
* * 44: The backward auxiliary input-to-input weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 45: The backward auxiliary input-to-forget weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 46: The backward auxiliary input-to-cell weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 47: The backward auxiliary input-to-output weights. Optional.
* A 2-D tensor of shape [num_units, input_size].
* A 2-D tensor of shape [bw_num_units, input_size].
* * 48: The activation function.
* A value indicating the activation function:
* <ul>
@@ -2410,16 +2473,46 @@ enum OperationType : int32_t {
* * 52: time_major
* An {@link OperandType::BOOL} scalar specifying the shape format
* of input and output tensors.
* * 53: The forward input layer normalization weights. Optional.
* A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
* to activation at input gate.
* * 54: The forward forget layer normalization weights. Optional.
* A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
* to activation at forget gate.
* * 55: The forward cell layer normalization weights. Optional.
* A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
* to activation at cell gate.
* * 56: The forward output layer normalization weights. Optional.
* A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
* to activation at output gate.
* * 57: The backward input layer normalization weights. Optional.
* A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
* to activation at input gate.
* * 58: The backward forget layer normalization weights. Optional.
* A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
* to activation at forget gate.
* * 59: The backward cell layer normalization weights. Optional.
* A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
* to activation at cell gate.
* * 60: The backward output layer normalization weights. Optional.
* A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
* to activation at output gate.
*
* Outputs:
* * 0: The forward output.
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, output_size]
* If batch-major: [batch_size, max_time, output_size]
* If time-major and not merge_outputs:
* [max_time, batch_size, fw_output_size]
* If time-major and merge_outputs:
* [max_time, batch_size, fw_output_size + bw_output_size]
* If batch-major and not merge_outputs:
* [batch_size, max_time, fw_output_size]
* If batch-major and merge_outputs:
* [batch_size, max_time, fw_output_size + bw_output_size]
* * 1: The backward output. Unused if merge_outputs is true.
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, output_size]
* If batch-major: [batch_size, max_time, output_size]
* If time-major: [max_time, batch_size, bw_output_size]
* If batch-major: [batch_size, max_time, bw_output_size]
*
* Available since API level 29.
*/
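A small C++ sketch of the forward-output shape logic enumerated above (the helper name is illustrative):

// Forward-output shape of the bidirectional sequence LSTM as a function
// of time_major and merge_outputs, per the spec above.
#include <array>
#include <cstdint>

std::array<uint32_t, 3> fwOutputShape(uint32_t maxTime, uint32_t batchSize,
                                      uint32_t fwOutputSize, uint32_t bwOutputSize,
                                      bool timeMajor, bool mergeOutputs) {
    const uint32_t last = mergeOutputs ? fwOutputSize + bwOutputSize : fwOutputSize;
    if (timeMajor) return {maxTime, batchSize, last};
    return {batchSize, maxTime, last};
}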
@@ -2547,10 +2640,17 @@ enum OperationType : int32_t {
/**
* Greedily selects a subset of bounding boxes in descending order of score.
*
* This op applies hard NMS algorithm to each class. In each loop of
* execution, the box with maximum score gets selected, and any boxes with
* the intersection-over-union (IOU) greater than a threshold are removed
* from the pending set.
* This op applies NMS algorithm to each class. In each loop of execution,
* the box with maximum score gets selected and removed from the pending set.
* The scores of the remaining boxes are lowered according to the
* intersection-over-union (IOU) overlap with the previously selected
* boxes and a specified NMS kernel method. Any boxes with score less
* than a threshold are removed from the pending set.
*
* Three NMS kernels are supported:
* * Hard: score_new = score_old * (1 if IoU < threshold else 0)
* * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
* * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
*
* Axis-aligned bounding boxes are represented by their upper-left corner
* coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid

@@ -2564,25 +2664,34 @@ enum OperationType : int32_t {
* Inputs:
* * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
* of each bounding box proposal. The boxes are grouped by batches in the
* first dimension.
* first dimension. Zero num_rois is supported for this tensor.
* * 1: A 2-D Tensor specifying the bounding boxes of shape
* [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
* The boxes are grouped by batches in the first dimension. The sequential
* order of the boxes corresponds with input0. For input0 of type
* {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
* {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
* scale of 0.125.
* scale of 0.125. Zero num_rois is supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
* with scores lower than the threshold are filtered before sending
* to the NMS algorithm.
* * 4: An {@link OperandType::FLOAT32} scalar, specifying the IoU
* threshold.
* * 5: An {@link OperandType::INT32} scalar, specifying the maximum
* * 4: An {@link OperandType::INT32} scalar, specifying the maximum
* number of selected bounding boxes for each image. Set to a negative
* value for unlimited number of output bounding boxes.
* * 5: An {@link OperandType::INT32} scalar, specifying the NMS
* kernel method, options are 0:hard, 1:linear, 2:gaussian.
* * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
* threshold in hard and linear NMS kernel. This field is ignored if
* gaussian kernel is selected.
* * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
* gaussian NMS kernel. This field is ignored if gaussian kernel is
* not selected.
* * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
* Boxes with scores lower than the threshold are dropped during the
* score updating phase in soft NMS.
*
* Outputs:
* * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape

@@ -2600,8 +2709,8 @@ enum OperationType : int32_t {
* [num_output_rois], specifying the class of each output box. The
* sequential order of the boxes corresponds with output0.
* * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* [num_output_rois], specifying the batch index of each box. Boxes
* with the same batch index are grouped together.
*
* Available since API level 29.
*/
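A minimal C++ sketch of the three score-update kernels listed above (the enum wrapper is illustrative; the 0/1/2 values follow input 5):

// Applies one NMS kernel to a candidate's score, given its IoU with a
// previously selected box.
#include <cmath>

enum class NmsKernel { kHard = 0, kLinear = 1, kGaussian = 2 };

float updateScore(float score, float iou, NmsKernel kernel,
                  float iouThreshold, float sigma) {
    switch (kernel) {
        case NmsKernel::kHard:
            return iou < iouThreshold ? score : 0.0f;
        case NmsKernel::kLinear:
            return iou < iouThreshold ? score : score * (1.0f - iou);
        case NmsKernel::kGaussian:
            return score * std::exp(-(iou * iou) / sigma);
    }
    return score;  // unreachable
}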
@@ -2937,8 +3046,8 @@ enum OperationType : int32_t {
* For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
* scale must be 0.125 and the zero point must be 0.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* [num_output_rois], specifying the batch index of each box. Boxes
* with the same batch index are grouped together.
*
* Available since API level 29.
*/

@@ -3122,11 +3231,7 @@ enum OperationType : int32_t {
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out]. For output tensor of
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
* must be satisfied: output_scale > input_scale * filter_scale (for
* filter tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
* this condition must be true for all filter scales).
* [batches, out_height, out_width, depth_out].
*
* Available since API level 29.
*/

@@ -3608,7 +3713,7 @@ enum OperationType : int32_t {
* Supported tensor rank: from 1
*
* Inputs:
* * 0: A tensor.
* * 0: A tensor, may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0, but with

@@ -3940,10 +4045,12 @@ enum OperationType : int32_t {
* the regions of interest, each line with format [x1, y1, x2, y2].
* For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
* this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
* with zeroPoint of 0 and scale of 0.125.
* with zeroPoint of 0 and scale of 0.125. Zero num_rois is
* supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* the same batch index are grouped together. Zero num_rois is
* supported for this tensor.
* * 3: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 4: An {@link OperandType::INT32} scalar, specifying the output

@@ -4108,7 +4215,7 @@ enum OperationType : int32_t {
* Supported tensor rank: from 1
*
* Inputs:
* * 0: An n-D tensor to take slice from.
* * 0: An n-D tensor to take slice from, may be zero-sized.
* * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
* the beginning indices of the slice in each dimension.
* * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying

@@ -4331,11 +4438,7 @@ enum OperationType : int32_t {
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out]. For output tensor of
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
* must be satisfied: output_scale > input_scale * filter_scale (for
* filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
* this condition must be true for all filter scales).
* [batches, out_height, out_width, depth_out].
*
* Available since API level 29.
*/
@@ -4367,9 +4470,9 @@ enum OperationType : int32_t {
* Inputs:
* * 0: The input (\f$x_t\f$).
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, output_size]
* If batch-major: [batch_size, max_time, output_size]
* where “max_size” is the number of timesteps (sequence length),
* If time-major: [max_time, batch_size, input_size]
* If batch-major: [batch_size, max_time, input_size]
* where “max_time” is the number of timesteps (sequence length),
* “batch_size” corresponds to the batching dimension, and
* “input_size” is the size of the input.
* * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.

@@ -4429,16 +4532,16 @@ enum OperationType : int32_t {
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* * 23:Time-major if true, batch-major if false.
* * 24:The input layer normalization weights.
* * 24:The input layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at input gate.
* * 25:The forget layer normalization weights.
* * 25:The forget layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at forget gate.
* * 26:The cell layer normalization weights.
* * 26:The cell layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at cell gate.
* * 27:The output layer normalization weights.
* * 27:The output layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at output gate.
*

@@ -4526,9 +4629,11 @@ enum OperationType : int32_t {
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
*
* Inputs:
* Both resizing by shape and resizing by scale are supported.
*
* Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
* the input.
* the input. Zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 2: An {@link OperandType::INT32} scalar, specifying the output

@@ -4536,6 +4641,24 @@ enum OperationType : int32_t {
* * 3: An {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
*
* Inputs (resizing by scale):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
* the input. Zero batches is supported for this tensor.
* * 1: A scalar, specifying height_scale, the scaling factor of the height
* dimension from the input tensor to the output tensor. The output
* height is calculated as new_height = floor(height * height_scale).
* The scalar must be of {@link OperandType::FLOAT16} if input0 is
* of {@link OperandType::TENSOR_FLOAT16} and of
* {@link OperandType::FLOAT32} otherwise.
* * 2: A scalar, specifying width_scale, the scaling factor of the width
* dimension from the input tensor to the output tensor. The output
* width is calculated as new_width = floor(width * width_scale).
* The scalar must be of {@link OperandType::FLOAT16} if input0 is
* of {@link OperandType::TENSOR_FLOAT16} and of
* {@link OperandType::FLOAT32} otherwise.
* * 3: An {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
@@ -4592,6 +4715,39 @@ enum DeviceType : int32_t {
    ACCELERATOR = 4,
};

/**
 * The capabilities of a driver.
 *
 * Performance of an operation comes from the type of its first operand.
 * This represents performance for non-extension operand types.
 */
struct Capabilities {
    /**
     * Driver performance when operating on float32 data but performing
     * calculations with range and/or precision as low as that of the IEEE
     * 754 16-bit floating-point format.
     */
    PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
    PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;

    /**
     * Driver performance when operating on a particular data type.
     * In the case of float32 data, this is used when the calculations
     * are not relaxed.
     */
    struct OperandPerformance {
        OperandType type;
        PerformanceInfo info;
    };

    /**
     * Performance by operand type. Must be sorted by OperandType.
     * If a particular OperandType is not present in operandPerformance,
     * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
     */
    vec<OperandPerformance> operandPerformance;
};
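A hedged C++ sketch of the lookup this sorted layout enables; the field names mirror the HAL, but the helper itself is hypothetical:

// Binary-searches the sorted operandPerformance vector; a type that is
// absent is treated as worst-case { FLT_MAX, FLT_MAX }, per the comment
// above.
#include <algorithm>
#include <cfloat>
#include <cstdint>
#include <vector>

struct PerformanceInfo { float execTime; float powerUsage; };
enum class OperandType : int32_t {};
struct OperandPerformance { OperandType type; PerformanceInfo info; };

PerformanceInfo lookupPerformance(const std::vector<OperandPerformance>& perf,
                                  OperandType type) {
    auto it = std::lower_bound(
            perf.begin(), perf.end(), type,
            [](const OperandPerformance& a, OperandType t) { return a.type < t; });
    if (it != perf.end() && it->type == type) return it->info;
    return {FLT_MAX, FLT_MAX};
}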
/**
 * Describes one operation of the model's graph.
 */

@@ -25,7 +25,7 @@ namespace V1_2 {
namespace vts {
namespace functional {

using V1_1::Capabilities;
using V1_0::PerformanceInfo;

// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

@@ -37,6 +37,31 @@ TEST_F(NeuralnetworksHidlTest, StatusTest) {
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    using OperandPerformance = Capabilities::OperandPerformance;
    Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status,
                                                      const Capabilities& capabilities) {
        EXPECT_EQ(ErrorStatus::NONE, status);

        auto isPositive = [](const PerformanceInfo& perf) {
            return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
        };

        EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
        EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
        const auto& opPerf = capabilities.operandPerformance;
        EXPECT_TRUE(std::all_of(
                opPerf.begin(), opPerf.end(),
                [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
        EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
                                   [](const OperandPerformance& a, const OperandPerformance& b) {
                                       return a.type < b.type;
                                   }));
    });
    EXPECT_TRUE(ret.isOk());
}

// device version test
TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
    Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) {

@@ -77,10 +102,15 @@ TEST_F(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
    EXPECT_TRUE(ret.isOk());
}

// isCachingSupported test
TEST_F(NeuralnetworksHidlTest, IsCachingSupported) {
    Return<void> ret = device->isCachingSupported(
        [](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); });
// getNumberOfCacheFilesNeeded test
TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
    Return<void> ret = device->getNumberOfCacheFilesNeeded(
        [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_LE(numModelCache,
                      static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
            EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
        });
    EXPECT_TRUE(ret.isOk());
}
} // namespace functional
(File diff suppressed because it is too large.)
@@ -33,6 +33,7 @@ namespace functional {

using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;

///////////////////////// UTILITY FUNCTIONS /////////////////////////

@@ -54,7 +55,8 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_2(model, preference, preparedModelCallback);
        device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
                                 hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

@@ -37,6 +37,7 @@ namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;

@@ -66,7 +67,8 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
        model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
        hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -38,46 +38,47 @@ using ::android::hardware::thermal::V1_0::ThermalStatusCode;
std::set<sp<IThermalChangedCallback>> gCallbacks;

static const Temperature_1_0 kTemp_1_0 = {
    .type = static_cast<::android::hardware::thermal::V1_0::TemperatureType>(TemperatureType::CPU),
    .name = "test temperature sensor",
    .currentValue = 98.6,
    .throttlingThreshold = 58,
    .shutdownThreshold = 60.0,
    .vrThrottlingThreshold = 59.0,
    .type = static_cast<::android::hardware::thermal::V1_0::TemperatureType>(
            TemperatureType::SKIN),
    .name = "test temperature sensor",
    .currentValue = 30.8,
    .throttlingThreshold = 48.0,
    .shutdownThreshold = 60.0,
    .vrThrottlingThreshold = 49.0,
};

static const Temperature_2_0 kTemp_2_0 = {
    .type = TemperatureType::SKIN,
    .name = "test temperature sensor",
    .value = 98.6,
    .throttlingStatus = ThrottlingSeverity::CRITICAL,
    .type = TemperatureType::SKIN,
    .name = "test temperature sensor",
    .value = 30.8,
    .throttlingStatus = ThrottlingSeverity::NONE,
};

static const TemperatureThreshold kTempThreshold = {
    .type = TemperatureType::SKIN,
    .name = "test temperature sensor",
    .hotThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
    .coldThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
    .vrThrottlingThreshold = NAN,
    .type = TemperatureType::SKIN,
    .name = "test temperature sensor",
    .hotThrottlingThresholds = {{NAN, NAN, NAN, 48.0, NAN, NAN, 60.0}},
    .coldThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
    .vrThrottlingThreshold = 49.0,
};

static const CoolingDevice_1_0 kCooling_1_0 = {
    .type = ::android::hardware::thermal::V1_0::CoolingType::FAN_RPM,
    .name = "test cooling device",
    .currentValue = 100.0,
    .type = ::android::hardware::thermal::V1_0::CoolingType::FAN_RPM,
    .name = "test cooling device",
    .currentValue = 100.0,
};

static const CoolingDevice_2_0 kCooling_2_0 = {
    .type = CoolingType::CPU,
    .name = "test cooling device",
    .value = 1,
    .type = CoolingType::FAN,
    .name = "test cooling device",
    .value = 100,
};

static const CpuUsage kCpuUsage = {
    .name = "cpu_name",
    .active = 0,
    .total = 0,
    .isOnline = true,
    .name = "cpu_name",
    .active = 0,
    .total = 0,
    .isOnline = true,
};

// Methods from ::android::hardware::thermal::V1_0::IThermal follow.
@@ -8,6 +8,7 @@ hidl_interface {
    },
    srcs: [
        "IVibrator.hal",
        "types.hal",
    ],
    interfaces: [
        "android.hardware.vibrator@1.0",

@@ -16,6 +16,7 @@

package android.hardware.vibrator@1.3;

import @1.0::EffectStrength;
import @1.0::Status;
import @1.2::IVibrator;

@@ -41,4 +42,18 @@ interface IVibrator extends @1.2::IVibrator {
* not supported by the device.
*/
setExternalControl(bool enabled) generates (Status status);

/**
* Fire off a predefined haptic event.
*
* @param effect The type of haptic event to trigger.
* @return status Whether the effect was successfully performed or not. Must
* return Status::UNSUPPORTED_OPERATION if the effect is not supported.
* @return lengthMs The length of time the event is expected to take in
* milliseconds. This doesn't need to be perfectly accurate, but should be a reasonable
* approximation. Should be a positive, non-zero value if the returned status is Status::OK,
* and set to 0 otherwise.
*/
perform_1_3(Effect effect, EffectStrength strength)
    generates (Status status, uint32_t lengthMs);
};

@@ -74,22 +74,9 @@ Return<void> Vibrator::perform_1_1(V1_1::Effect_1_1 effect, EffectStrength stren

// Methods from ::android::hardware::vibrator::V1_2::IVibrator follow.

Return<void> Vibrator::perform_1_2(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
    uint8_t amplitude;
    uint32_t ms;
    Status status;

    ALOGI("Perform: Effect %s\n", effectToName(effect));

    amplitude = strengthToAmplitude(strength);
    setAmplitude(amplitude);

    ms = effectToMs(effect);
    status = activate(ms);

    _hidl_cb(status, ms);

    return Void();
Return<void> Vibrator::perform_1_2(V1_2::Effect effect, EffectStrength strength,
                                   perform_cb _hidl_cb) {
    return perform_1_3(static_cast<V1_3::Effect>(effect), strength, _hidl_cb);
}

// Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.

@@ -110,6 +97,24 @@ Return<Status> Vibrator::setExternalControl(bool enabled) {
    }
}

Return<void> Vibrator::perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
    uint8_t amplitude;
    uint32_t ms;
    Status status;

    ALOGI("Perform: Effect %s\n", effectToName(effect));

    amplitude = strengthToAmplitude(strength);
    setAmplitude(amplitude);

    ms = effectToMs(effect);
    status = activate(ms);

    _hidl_cb(status, ms);

    return Void();
}

// Private methods follow.

Status Vibrator::enable(bool enabled) {

@@ -184,6 +189,7 @@ uint32_t Vibrator::effectToMs(Effect effect) {
    case Effect::DOUBLE_CLICK:
        return 15;
    case Effect::TICK:
    case Effect::TEXTURE_TICK:
        return 5;
    case Effect::THUD:
        return 5;

@@ -27,7 +27,6 @@ namespace implementation {

using android::hardware::vibrator::V1_0::EffectStrength;
using android::hardware::vibrator::V1_0::Status;
using android::hardware::vibrator::V1_2::Effect;

class Vibrator : public IVibrator {
  public:

@@ -46,11 +45,13 @@ class Vibrator : public IVibrator {
                             perform_cb _hidl_cb) override;

    // Methods from ::android::hardware::vibrator::V1_2::IVibrator follow.
    Return<void> perform_1_2(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
    Return<void> perform_1_2(V1_2::Effect effect, EffectStrength strength,
                             perform_cb _hidl_cb) override;

    // Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
    Return<bool> supportsExternalControl() override;
    Return<Status> setExternalControl(bool enabled) override;
    Return<void> perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;

  private:
    Status enable(bool enabled);
vibrator/1.3/types.hal (new file)
@@ -0,0 +1,30 @@
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.vibrator@1.3;

import @1.2::Effect;

enum Effect : @1.2::Effect {
    /**
     * A soft tick effect meant to be played as a texture.
     *
     * A soft, short sensation like the tick of a clock. Unlike regular effects, texture effects
     * are expected to be played multiple times in quick succession, replicating a specific
     * texture to the user as a form of haptic feedback.
     */
    TEXTURE_TICK
};
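A hedged C++ sketch of how a client might replicate a texture with this effect, per the comment above; the vibrator handle, cadence, and helper are illustrative:

// Plays TEXTURE_TICK repeatedly in quick succession to render a texture.
#include <unistd.h>

using ::android::sp;
using ::android::hardware::vibrator::V1_0::EffectStrength;
using ::android::hardware::vibrator::V1_0::Status;
using ::android::hardware::vibrator::V1_3::Effect;
using ::android::hardware::vibrator::V1_3::IVibrator;

void playTexture(const sp<IVibrator>& vibrator, int ticks) {
    for (int i = 0; i < ticks; ++i) {
        vibrator->perform_1_3(Effect::TEXTURE_TICK, EffectStrength::LIGHT,
                              [](Status /*status*/, uint32_t /*lengthMs*/) {});
        usleep(40 * 1000);  // illustrative 40 ms gap between ticks
    }
}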
@@ -24,9 +24,16 @@
#include <unistd.h>

using ::android::sp;
using ::android::hardware::hidl_enum_range;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::vibrator::V1_0::EffectStrength;
using ::android::hardware::vibrator::V1_0::Status;
using ::android::hardware::vibrator::V1_3::Effect;
using ::android::hardware::vibrator::V1_3::IVibrator;

#define EXPECT_OK(ret) ASSERT_TRUE((ret).isOk())

// Test environment for Vibrator HIDL HAL.
class VibratorHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
  public:

@@ -71,6 +78,74 @@ TEST_F(VibratorHidlTest_1_3, SetExternalControlReturnUnsupportedOperationIfNotSu
    }
}

static void validatePerformEffectUnsupportedOperation(Status status, uint32_t lengthMs) {
    ASSERT_EQ(Status::UNSUPPORTED_OPERATION, status);
    ASSERT_EQ(static_cast<uint32_t>(0), lengthMs)
        << "Effects that return UNSUPPORTED_OPERATION must have a duration of zero";
}

static void validatePerformEffect(Status status, uint32_t lengthMs) {
    ASSERT_TRUE(status == Status::OK || status == Status::UNSUPPORTED_OPERATION);
    if (status == Status::OK) {
        ASSERT_LT(static_cast<uint32_t>(0), lengthMs)
            << "Effects that return OK must return a positive duration";
    } else {
        validatePerformEffectUnsupportedOperation(status, lengthMs);
    }
}

/*
 * Test to make sure effects within the valid range are either supported and return OK with
 * a valid duration, or are unsupported and return UNSUPPORTED_OPERATION with a duration of 0.
 */
TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3) {
    for (const auto& effect : hidl_enum_range<Effect>()) {
        for (const auto& strength : hidl_enum_range<EffectStrength>()) {
            EXPECT_OK(vibrator->perform_1_3(effect, strength, validatePerformEffect));
        }
    }
}

/*
 * Test to make sure effect values above the valid range are rejected.
 */
TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadEffects_AboveValidRange) {
    Effect effect = *std::prev(hidl_enum_range<Effect>().end());
    Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) + 1);
    EXPECT_OK(vibrator->perform_1_3(badEffect, EffectStrength::LIGHT,
                                    validatePerformEffectUnsupportedOperation));
}

/*
 * Test to make sure effect values below the valid range are rejected.
 */
TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadEffects_BelowValidRange) {
    Effect effect = *hidl_enum_range<Effect>().begin();
    Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) - 1);
    EXPECT_OK(vibrator->perform_1_3(badEffect, EffectStrength::LIGHT,
                                    validatePerformEffectUnsupportedOperation));
}

/*
 * Test to make sure strength values above the valid range are rejected.
 */
TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadStrength_AboveValidRange) {
    EffectStrength strength = *std::prev(hidl_enum_range<EffectStrength>().end());
    EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) + 1);
    EXPECT_OK(vibrator->perform_1_3(Effect::THUD, badStrength,
                                    validatePerformEffectUnsupportedOperation));
}

/*
 * Test to make sure strength values below the valid range are rejected.
 */
TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadStrength_BelowValidRange) {
    EffectStrength strength = *hidl_enum_range<EffectStrength>().begin();
    EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) - 1);
    EXPECT_OK(vibrator->perform_1_3(Effect::THUD, badStrength,
                                    validatePerformEffectUnsupportedOperation));
}

int main(int argc, char** argv) {
    ::testing::AddGlobalTestEnvironment(VibratorHidlEnvironment::Instance());
    ::testing::InitGoogleTest(&argc, argv);
@@ -65,10 +65,14 @@ interface IWifiChip extends @1.2::IWifiChip {
/**
* API to set the wifi latency mode
*
* Latency mode determines whether or not to optimize for reducing wifi
* latency as a tradeoff with other wifi functionality such as scanning,
* roaming, etc. This optimization is suitable for some applications such
* as gaming and virtual reality applications.
* The latency mode is a hint to the HAL to enable or disable Wi-Fi latency
* optimization. The optimization should be enabled if the mode is set to |LOW|
* and should be disabled if the mode is set to |NORMAL|.
* Wi-Fi latency optimization may trade off latency against other Wi-Fi
* functionality such as scanning, roaming, etc. but it should not result in
* completely halting this functionality.
*
* The low latency mode targets applications such as gaming and virtual reality.
*/
setLatencyMode(LatencyMode mode) generates (WifiStatus status);
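A hedged C++ sketch of a client toggling this hint around a latency-sensitive session; the connected IWifiChip proxy and the helper are assumptions for illustration:

// Enables the Wi-Fi latency optimization for a gaming/VR session and
// restores the default afterwards.
using ::android::sp;
using ::android::hardware::wifi::V1_0::WifiStatus;
using ::android::hardware::wifi::V1_3::IWifiChip;

void setLowLatency(const sp<IWifiChip>& chip, bool enabled) {
    chip->setLatencyMode(enabled ? IWifiChip::LatencyMode::LOW
                                 : IWifiChip::LatencyMode::NORMAL,
                         [](const WifiStatus& /*status*/) { /* check status.code */ });
}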