mirror of
https://github.com/Evolution-X/hardware_interfaces
synced 2026-02-01 11:36:00 +00:00
Add TENSOR_QUANT8_ASYMM_SIGNED support for activations
Ops updated: RELU, RELU1, RELU6, TANH, LOGISTIC

Change-Id: Id5e7a8c6b30463708bd93dbf6a3f30d05c2bcf40
Fix: 143933951
Fix: 143934720
Fix: 143933831
Fix: 143934770
Fix: 143934743
Test: quantization coupling tests in CTS and VTS
This commit is contained in:
@@ -602,7 +602,7 @@ a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardwar
 9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
 4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
 94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-6256b2b1df586fc01e80ecf001770d941385602682ec2055ba7b3979a02c8ebf android.hardware.neuralnetworks@1.3::types
+2d16429145dc1158bf3e45c7de86a39e461dec3ec00512c11a7e5249535a2e96 android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
@@ -956,6 +956,7 @@ enum OperationType : int32_t {
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
      *
      * Supported tensor rank: up to 4.
      *
@@ -967,6 +968,8 @@ enum OperationType : int32_t {
      * * 0: The output tensor of same shape as input0.
      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+     *      the scale must be 1.f / 256 and the zeroPoint must be -128.
      */
     LOGISTIC = @1.2::OperationType:LOGISTIC,
@@ -1384,6 +1387,7 @@ enum OperationType : int32_t {
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
      *
      * Supported tensor rank: up to 4.
      *
@@ -1393,7 +1397,8 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
-     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
      *      the scale and zeroPoint must be the same as input0.
      */
     RELU = @1.2::OperationType:RELU,
@@ -1409,6 +1414,7 @@ enum OperationType : int32_t {
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
      *
      * Supported tensor rank: up to 4.
      *
@@ -1418,7 +1424,8 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: The output tensor of the same shape as input0.
-     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
      *      the scale and zeroPoint must be the same as input0.
      */
     RELU1 = @1.2::OperationType:RELU1,
@@ -1434,6 +1441,7 @@ enum OperationType : int32_t {
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
      *
      * Supported tensor rank: up to 4.
      *
@@ -1443,7 +1451,8 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
-     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
      *      the scale and zeroPoint must be the same as input0.
      */
     RELU6 = @1.2::OperationType:RELU6,
@@ -1764,6 +1773,7 @@ enum OperationType : int32_t {
      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
      *
      * Supported tensor rank: up to 4.
      *
@@ -1775,6 +1785,8 @@ enum OperationType : int32_t {
      * * 0: The output tensor of same shape as input0.
      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+     *      the scale must be 1.f / 128 and the zeroPoint must be 0.
      */
     TANH = @1.2::OperationType:TANH,
Reference in New Issue
Block a user