Mirror of https://github.com/Evolution-X/hardware_interfaces, synced 2026-02-01 16:50:18 +00:00
Update neuralnetworks/*/types.hal to match impl
Updates hardware/interfaces/neuralnetworks/1.(0|1)/types.hal to match the
NeuralNetworks.h header in framework/ml/nn. Only comments have changed.
Updated using framework/ml/nn/tools/sync_enums_to_hal.py.

Change-Id: I0754868ad8acf6e2e0c5b83661d04682febec9b0
Bug: 77604249
Test: checked changes with git diff
Test: mm in $ANDROID_BUILD_TOP
@@ -260,7 +260,7 @@ fb92e2b40f8e9d494e8fd3b4ac18499a3216342e7cff160714c3bbf3660b6e79 android.hardwar
 4e7169919d24fbe5573e5bcd683d0bd7abf553a4e6c34c41f9dfc1e12050db07 android.hardware.gnss@1.0::IGnssNavigationMessageCallback
 5804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice
 12e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback
-702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types
+934b9a0627080bca5dee83126d23ace31bdf1ed36fe192a2a7694f81b4f0c2af android.hardware.neuralnetworks@1.0::types
 d4840db8efabdf1e4b344fc981cd36e5fe81a39aff6e199f6d06c1c8da413efd android.hardware.radio@1.0::types
 b280c4704dfcc548a9bf127b59b7c3578f460c50cce70a06b66fe0df8b27cff0 android.hardware.wifi@1.0::types
@@ -339,7 +339,7 @@ b8c7ed58aa8740361e63d0ce9e7c94227572a629f356958840b34809d2393a7c android.hardwar
 4a2c0dc82780e6c90731725a103feab8ab6ecf85a64e049b9cbd2b2c61620fe1 android.hardware.media.bufferpool@1.0::IConnection
 6aef1218e5949f867b0104752ac536c1b707222a403341720de90141df129e3e android.hardware.media.bufferpool@1.0::types
 7698dc2382a2eeb43541840e3ee624f34108efdfb976b2bfa7c13ef15fb8c4c4 android.hardware.neuralnetworks@1.1::IDevice
-5604001029a255648a9e955de0a822a48d9ba7cc259b106fb8be0cd43dc8eece android.hardware.neuralnetworks@1.1::types
+ce5dab4b2dd828bcff09acfb93fcd4846f847868b9e914d214095532c28dc0cf android.hardware.neuralnetworks@1.1::types
 8d3d86da0bfa4bf070970d8303c659f67f35d670c287d45a3f542e4fedadd578 android.hardware.nfc@1.1::INfc
 e85f566698d2a2c28100e264fcf2c691a066756ddf8dd341d009ff50cfe10614 android.hardware.nfc@1.1::INfcClientCallback
 5e278fcaa3287d397d8eebe1c22aaa28150f5caae1cf9381cd6dc32cb37899c5 android.hardware.nfc@1.1::types
@@ -444,10 +444,11 @@ enum OperationType : int32_t {
 * Supported tensor rank: up to 4.
 *
 * Inputs:
-* * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
-*      a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
-*      [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
-*      and “input_size” is the size of the input.
+* * 0: A tensor of at least rank 2, specifying the input. If rank is greater than 2,
+*      then it gets flattened to a 2-D Tensor. The (flattened) 2-D Tensor is reshaped
+*      (if necessary) to [batch_size, input_size], where "input_size" corresponds to
+*      the number of inputs to the layer, matching the second dimension of weights, and
+*      "batch_size" is calculated by dividing the number of elements by "input_size".
 * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
 *      "num_units" corresponds to the number of output nodes.
 * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
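The reworded FULLY_CONNECTED description above pins down the implicit 2-D shape: "input_size" is taken from the second dimension of the weights, and "batch_size" is the total number of input elements divided by "input_size". A minimal sketch of that shape arithmetic, with invented helper and type names (this is not code from the commit):

```cpp
// Hypothetical sketch of the FULLY_CONNECTED flattening rule described above.
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

struct Flattened2D {
    uint32_t batchSize;
    uint32_t inputSize;
};

Flattened2D flattenForFullyConnected(const std::vector<uint32_t>& inputDims,
                                     const std::vector<uint32_t>& weightsDims) {
    // weightsDims is assumed to be [num_units, input_size] as in input 1 above.
    const uint32_t inputSize = weightsDims[1];
    // Total number of elements in the (possibly higher-rank) input tensor.
    const uint32_t numElements =
        std::accumulate(inputDims.begin(), inputDims.end(), 1u,
                        std::multiplies<uint32_t>());
    // batch_size = number of elements / input_size.
    return Flattened2D{numElements / inputSize, inputSize};
}

// Example: inputDims {2, 3, 4} with weightsDims {8, 12} yields
// batchSize = 24 / 12 = 2 and inputSize = 12.
```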
@@ -728,9 +729,11 @@ enum OperationType : int32_t {
 * \f{eqnarray*}{
 * i_t = 1 - f_t
 * \f}
-* * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights (\f$W_{cf}\f$), and cell-to-output
-*   weights (\f$W_{co}\f$) either all have values or none of them have values.
-*   If they have values, the peephole optimization is used.
+* * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output
+*   weights (\f$W_{co}\f$) either both have values or neither of them have values.
+*   If they have values, the peephole optimization is used. Additionally,
+*   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
+*   required to have values for peephole optimization.
 * * The projection weights (\f$W_{proj}\f$) is required only for the recurrent projection
 *   layer, and should otherwise have no value.
 * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a value if the
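The updated LSTM wording above separates the peephole requirement from CIFG: \f$W_{cf}\f$ and \f$W_{co}\f$ must be provided or omitted together, and when CIFG is not used \f$W_{ci}\f$ is additionally required for peephole. An illustrative check of that constraint (an assumption, not the reference validation code):

```cpp
// Illustrative validation of the peephole-weight rule reworded above.
#include <stdexcept>

void checkPeepholeWeights(bool hasCellToInput,   // W_ci
                          bool hasCellToForget,  // W_cf
                          bool hasCellToOutput,  // W_co
                          bool usesCifg) {
    // W_cf and W_co must both have values or both be omitted.
    if (hasCellToForget != hasCellToOutput) {
        throw std::invalid_argument(
            "cell-to-forget and cell-to-output weights must both be set or both omitted");
    }
    const bool usesPeephole = hasCellToForget;  // equal to hasCellToOutput here
    // With peephole enabled and CIFG disabled, W_ci is also required.
    if (usesPeephole && !usesCifg && !hasCellToInput) {
        throw std::invalid_argument(
            "cell-to-input weights are required for peephole when CIFG is not used");
    }
}
```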
@@ -1008,7 +1011,8 @@ enum OperationType : int32_t {
 * Resizes images to given size using the bilinear interpretation.
 *
 * Resized images must be distorted if their output aspect ratio is not the same as
-* input aspect ratio.
+* input aspect ratio. The corner pixels of output may not be the same as
+* corner pixels of input.
 *
 * Supported tensor types:
 * * {@link OperandType::TENSOR_FLOAT32}
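The added sentence about corner pixels follows from how bilinear resize maps coordinates. A small sketch under an assumed convention (scale = in_size / out_size, no corner alignment; this is a common mapping, not necessarily the exact one used by every driver):

```cpp
// Shows why the last output pixel need not sample the last input pixel
// when output coordinates are scaled by in_size / out_size.
#include <cstdio>

int main() {
    const int inWidth = 5;
    const int outWidth = 3;
    const float scale = static_cast<float>(inWidth) / outWidth;
    for (int x = 0; x < outWidth; ++x) {
        // The last output column samples input x = 2 * (5/3) ~= 3.33,
        // not the last input column (index 4).
        std::printf("out x=%d samples input x=%.2f\n", x, x * scale);
    }
    return 0;
}
```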
@@ -214,6 +214,13 @@ enum OperationType : @1.0::OperationType {
 *      tensor to be sliced. The length must be of rank(input0).
 * 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
 *      tensor to be sliced. The length must be of rank(input0).
+* 4: An INT32 value, begin_mask. If the ith bit of begin_mask is set, begin[i] is ignored
+*      and the fullest possible range in that dimension is used instead.
+* 5: An INT32 value, end_mask. If the ith bit of end_mask is set, end[i] is ignored and
+*      the fullest possible range in that dimension is used instead.
+* 6: An INT32 value, shrink_axis_mask. An int32 mask. If the ith bit of shrink_axis_mask is
+*      set, it implies that the ith specification shrinks the dimensionality by 1. A slice of
+*      size 1 starting from begin[i] in the dimension must be preserved.
 *
 * Outputs:
 * 0: A tensor of the same type as input0.
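The newly documented STRIDED_SLICE inputs add three bit masks. A hedged sketch (not the NN HAL reference implementation) of how begin_mask, end_mask, and shrink_axis_mask could be resolved for a single dimension, following the description above:

```cpp
// Resolves one dimension of a strided slice according to the three masks.
#include <cstdint>

struct ResolvedDim {
    int32_t begin;
    int32_t end;
    bool shrinkAxis;
};

ResolvedDim resolveStridedSliceDim(int i, int32_t begin, int32_t end,
                                   int32_t stride, int32_t dimSize,
                                   int32_t beginMask, int32_t endMask,
                                   int32_t shrinkAxisMask) {
    ResolvedDim out;
    // If the ith bit of begin_mask is set, begin[i] is ignored and the
    // fullest possible range in this dimension is used instead.
    out.begin = (beginMask & (1 << i)) ? (stride > 0 ? 0 : dimSize - 1) : begin;
    // Likewise, end_mask makes end[i] default to the fullest range.
    out.end = (endMask & (1 << i)) ? (stride > 0 ? dimSize : -1) : end;
    // shrink_axis_mask keeps a slice of size 1 starting at begin[i] and
    // removes this dimension from the output shape.
    out.shrinkAxis = (shrinkAxisMask & (1 << i)) != 0;
    if (out.shrinkAxis) {
        out.end = out.begin + 1;
    }
    return out;
}
```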