Fix quantized LSTM doc

The CL changes the explicit type prefixes in types.spec to a macro, so
that the prefixes of tensor types are correct in both the NDK and HAL
docs.

Bug: 144841609
Test: mma
Change-Id: I6d904bd3a858f555beed9270f141f080f96e429a
Lev Proleev
2020-01-10 16:47:23 +00:00
parent fde81ba688
commit da5079c5cd
3 changed files with 4 additions and 2 deletions
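For context, the macro mechanism the commit message refers to can be sketched roughly as follows. This is an illustrative fragment only: the %kind / %define / %{...} directives follow the NNAPI spec generator's conventions, but the kind names and the OperationTypeLinkPfx macro shown here are assumptions, not lines taken from this change.

    %kind hal*
    %define OperationTypeLinkPfx OperationType::
    %/kind
    %kind ndk
    %define OperationTypeLinkPfx ANEURALNETWORKS_
    %/kind

A doc comment in types.spec would then reference the macro instead of hard-coding a prefix, for example:

    * Quantized version of {@link %{OperationTypeLinkPfx}LSTM}.

With the prefix defined once per output kind, the same spec line can expand to {@link OperationType::LSTM} in the generated HAL docs and to {@link ANEURALNETWORKS_LSTM} in the NDK header, which is what the commit message describes.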


@@ -648,7 +648,7 @@ a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardwar
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
258825966435b3ed08832055bb736d81516013e405f161d9ccde9a90cfcdde83 android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-618a628f8c94d6f6e4cb401b69fa50ccb8b82191ea434e3a071252289b4f312c android.hardware.neuralnetworks@1.3::types
+f3c1e7298da628a755b452cd3325e8d0fe867a2debb873069baab6a27434a72d android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
514dc8b810658c45d7b0d34132b708cee2658ecedd9c7efc57d0d666ef182484 android.hardware.wifi.hostapd@1.2::IHostapd
11f6448d15336361180391c8ebcdfd2d7cf77b3782d577e594d583aadc9c2877 android.hardware.wifi.hostapd@1.2::types


@@ -4747,7 +4747,7 @@ enum OperationType : int32_t {
RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,
/**
-* Quantized version of {@link OperationType:LSTM}.
+* Quantized version of {@link OperationType::LSTM}.
*
* The input and the output use asymmetric quantized types, while the rest
* use symmetric ones.


@@ -57,6 +57,8 @@ enum OperationType : int32_t {
%insert Operation_1.2
%insert Operation_1.3
/**
* DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
* OEM operation and data types.