Merge changes from topic "libneuralnetworks_common-cleanup"
* changes:
  Move Aidl utility code to aidl/utils -- hal 2/2
  Move Aidl utility code to aidl/utils -- hal 1/2
@@ -55,7 +55,7 @@ cc_test {
    static_libs: [
        "android.hardware.neuralnetworks@1.0",
        "libgmock",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
        "neuralnetworks_utils_hal_1_0",

@@ -50,7 +50,7 @@ cc_library_static {
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
    ],
    header_libs: [
        "libneuralnetworks_headers",

@@ -81,7 +81,7 @@ cc_test {
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
    ],
    whole_static_libs: [
        "neuralnetworks_generated_V1_0_example",

@@ -52,7 +52,7 @@ cc_test {
        "android.hardware.neuralnetworks@1.0",
        "android.hardware.neuralnetworks@1.1",
        "libgmock",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
        "neuralnetworks_utils_hal_1_0",

@@ -48,7 +48,7 @@ cc_test {
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
    ],
    whole_static_libs: [
        "neuralnetworks_generated_V1_0_example",

@@ -71,7 +71,7 @@ cc_test {
        "android.hardware.neuralnetworks@1.1",
        "android.hardware.neuralnetworks@1.2",
        "libgmock",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
        "neuralnetworks_utils_hal_1_0",

@@ -71,7 +71,7 @@ cc_test {
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
    ],
    whole_static_libs: [
        "neuralnetworks_generated_V1_0_example",

@@ -69,7 +69,7 @@ cc_test {
        "android.hardware.neuralnetworks@1.2",
        "android.hardware.neuralnetworks@1.3",
        "libgmock",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
        "neuralnetworks_utils_hal_1_0",

@@ -75,7 +75,7 @@ cc_test {
        "libgmock",
        "libhidlmemory",
        "libneuralnetworks_generated_test_harness",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
        "libsync",
    ],
    whole_static_libs: [
17 neuralnetworks/aidl/utils/include/AidlBufferTracker.h Normal file
@@ -0,0 +1,17 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnapi/hal/aidl/BufferTracker.h"

17 neuralnetworks/aidl/utils/include/AidlHalInterfaces.h Normal file
@@ -0,0 +1,17 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnapi/hal/aidl/HalInterfaces.h"

17 neuralnetworks/aidl/utils/include/AidlHalUtils.h Normal file
@@ -0,0 +1,17 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnapi/hal/aidl/HalUtils.h"

17 neuralnetworks/aidl/utils/include/AidlValidateHal.h Normal file
@@ -0,0 +1,17 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnapi/hal/aidl/ValidateHal.h"
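The four headers above are thin compatibility shims: each one only forwards to the relocated header under nnapi/hal/aidl/, so call sites that still use the old include paths keep building. A minimal, hypothetical consumer sketch (not part of this change):

// Hypothetical consumer translation unit; the old include name still works because
// the shim forwards to nnapi/hal/aidl/BufferTracker.h.
#include "AidlBufferTracker.h"

std::shared_ptr<android::nn::AidlBufferTracker> makeTracker() {
    return android::nn::AidlBufferTracker::create();
}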
119 neuralnetworks/aidl/utils/include/nnapi/hal/aidl/BufferTracker.h Normal file
@@ -0,0 +1,119 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BUFFER_TRACKER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BUFFER_TRACKER_H

#include <android-base/macros.h>
#include <android-base/thread_annotations.h>

#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <stack>
#include <utility>
#include <vector>

#include "nnapi/hal/aidl/HalInterfaces.h"
#include "nnapi/hal/aidl/ValidateHal.h"

namespace android::nn {

// This class manages a CPU buffer allocated on heap and provides validation methods.
class AidlManagedBuffer {
  public:
    static std::shared_ptr<AidlManagedBuffer> create(uint32_t size,
                                                     std::set<AidlHalPreparedModelRole> roles,
                                                     const Operand& operand);

    // Prefer AidlManagedBuffer::create.
    AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
                      std::set<AidlHalPreparedModelRole> roles, const Operand& operand);

    uint8_t* getPointer() const { return kBuffer.get(); }
    uint32_t getSize() const { return kSize; }

    // "poolIndex" is the index of this buffer in the request.pools.
    ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
                                const aidl_hal::IPreparedModel* preparedModel) const;

    // "size" is the byte size of the Memory provided to the copyFrom or copyTo method.
    ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
    ErrorStatus validateCopyTo(uint32_t size) const;

    bool updateDimensions(const std::vector<uint32_t>& dimensions);
    void setInitialized(bool initialized);

  private:
    mutable std::mutex mMutex;
    const std::unique_ptr<uint8_t[]> kBuffer;
    const uint32_t kSize;
    const std::set<AidlHalPreparedModelRole> kRoles;
    const OperandType kOperandType;
    const std::vector<uint32_t> kInitialDimensions;
    std::vector<uint32_t> mUpdatedDimensions GUARDED_BY(mMutex);
    bool mInitialized GUARDED_BY(mMutex) = false;
};

// Keep track of all AidlManagedBuffers and assign each with a unique token.
class AidlBufferTracker : public std::enable_shared_from_this<AidlBufferTracker> {
    DISALLOW_COPY_AND_ASSIGN(AidlBufferTracker);

  public:
    // A RAII class to help manage the lifetime of the token.
    // It is only supposed to be constructed in AidlBufferTracker::add.
    class Token {
        DISALLOW_COPY_AND_ASSIGN(Token);

      public:
        Token(uint32_t token, std::shared_ptr<AidlBufferTracker> tracker)
            : kToken(token), kBufferTracker(std::move(tracker)) {}
        ~Token() { kBufferTracker->free(kToken); }
        uint32_t get() const { return kToken; }

      private:
        const uint32_t kToken;
        const std::shared_ptr<AidlBufferTracker> kBufferTracker;
    };

    // The factory of AidlBufferTracker. This ensures that the AidlBufferTracker is always managed
    // by a shared_ptr.
    static std::shared_ptr<AidlBufferTracker> create() {
        return std::make_shared<AidlBufferTracker>();
    }

    // Prefer AidlBufferTracker::create.
    AidlBufferTracker() : mTokenToBuffers(1) {}

    std::unique_ptr<Token> add(std::shared_ptr<AidlManagedBuffer> buffer);
    std::shared_ptr<AidlManagedBuffer> get(uint32_t token) const;

  private:
    void free(uint32_t token);

    mutable std::mutex mMutex;
    std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens GUARDED_BY(mMutex);

    // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping.
    // The index of the vector is the token. When the token gets freed, the corresponding entry is
    // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token.
    std::vector<std::shared_ptr<AidlManagedBuffer>> mTokenToBuffers GUARDED_BY(mMutex);
};

} // namespace android::nn

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BUFFER_TRACKER_H
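For orientation, a minimal, hypothetical driver-side sketch of the lifecycle declared above; the call flow and names are illustrative assumptions, not part of this change:

#include <memory>
#include <set>

#include <android-base/logging.h>

#include "nnapi/hal/aidl/BufferTracker.h"

namespace android::nn {

void exampleBufferLifecycle(const Operand& operand, std::set<AidlHalPreparedModelRole> roles) {
    const std::shared_ptr<AidlBufferTracker> tracker = AidlBufferTracker::create();

    // create() returns nullptr for extension operands or when the heap allocation fails.
    std::shared_ptr<AidlManagedBuffer> buffer =
            AidlManagedBuffer::create(/*size=*/1024, std::move(roles), operand);
    if (buffer == nullptr) return;

    // add() hands back an RAII Token; a driver would keep it alive inside its IBuffer
    // implementation and report token->get() back to the runtime.
    std::unique_ptr<AidlBufferTracker::Token> token = tracker->add(buffer);
    const uint32_t value = token->get();

    // Later lookups by token value go through the tracker.
    CHECK(tracker->get(value) == buffer);

    // When `token` is destroyed, the slot is recycled via AidlBufferTracker::free().
}

}  // namespace android::nn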
72 neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h Normal file
@@ -0,0 +1,72 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_INTERFACES_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_INTERFACES_H

#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
#include <aidl/android/hardware/neuralnetworks/DataLocation.h>
#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
#include <aidl/android/hardware/neuralnetworks/Extension.h>
#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h>
#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h>
#include <aidl/android/hardware/neuralnetworks/FusedActivationFunc.h>
#include <aidl/android/hardware/neuralnetworks/IBuffer.h>
#include <aidl/android/hardware/neuralnetworks/IDevice.h>
#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
#include <aidl/android/hardware/neuralnetworks/Memory.h>
#include <aidl/android/hardware/neuralnetworks/Model.h>
#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
#include <aidl/android/hardware/neuralnetworks/Operand.h>
#include <aidl/android/hardware/neuralnetworks/OperandExtraParams.h>
#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h>
#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h>
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
#include <aidl/android/hardware/neuralnetworks/Operation.h>
#include <aidl/android/hardware/neuralnetworks/OperationType.h>
#include <aidl/android/hardware/neuralnetworks/OutputShape.h>
#include <aidl/android/hardware/neuralnetworks/PerformanceInfo.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <aidl/android/hardware/neuralnetworks/RequestArgument.h>
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <aidl/android/hardware/neuralnetworks/Subgraph.h>
#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
#include <aidl/android/hardware/neuralnetworks/Timing.h>

namespace android::nn {

namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;

inline constexpr aidl_hal::Priority kDefaultPriorityAidl = aidl_hal::Priority::MEDIUM;

} // namespace android::nn

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_INTERFACES_H
52 neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalUtils.h Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_UTILS_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_UTILS_H

#include <vector>

#include "nnapi/hal/aidl/HalInterfaces.h"

namespace android {
namespace nn {

// Return a vector with one entry for each non-extension OperandType except
// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be
// sorted by OperandType.
//
// Control flow (OperandType::SUBGRAPH) operation performance is specified
// separately using Capabilities::ifPerformance and
// Capabilities::whilePerformance.
std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
        aidl_hal::PerformanceInfo perf);

// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value. The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
            aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf);

// Returns true if an operand type is an extension type.
bool isExtensionOperandType(aidl_hal::OperandType type);

// Returns true if an operand type is a scalar type.
bool isNonExtensionScalar(aidl_hal::OperandType type);

} // namespace nn
} // namespace android

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_UTILS_H
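A hypothetical sketch of how a driver could use these helpers when reporting capabilities; the PerformanceInfo field names (execTime, powerUsage) are assumptions taken from the AIDL NN HAL parcelable, not something this change introduces:

#include "nnapi/hal/aidl/HalUtils.h"

namespace android::nn {

// Builds an operandPerformance table where every non-extension, non-SUBGRAPH operand
// type gets a baseline performance, then advertises a faster TENSOR_FLOAT32 path.
std::vector<aidl_hal::OperandPerformance> exampleOperandPerformance() {
    aidl_hal::PerformanceInfo base;
    base.execTime = 1.0f;   // field names assumed from the AIDL PerformanceInfo parcelable
    base.powerUsage = 1.0f;

    std::vector<aidl_hal::OperandPerformance> table = nonExtensionOperandPerformance(base);

    aidl_hal::PerformanceInfo fast;
    fast.execTime = 0.5f;
    fast.powerUsage = 0.5f;
    // The entry for TENSOR_FLOAT32 must already exist and the vector must stay sorted.
    update(&table, aidl_hal::OperandType::TENSOR_FLOAT32, fast);

    return table;
}

}  // namespace android::nn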
47 neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ValidateHal.h Normal file
@@ -0,0 +1,47 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_VALIDATE_HAL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_VALIDATE_HAL_H

#include "nnapi/hal/aidl/HalInterfaces.h"

#include <memory>
#include <set>
#include <tuple>
#include <vector>

#include <nnapi/TypeUtils.h>
#include <nnapi/Validation.h>

namespace android {
namespace nn {

using AidlHalPreparedModelRole = std::tuple<const aidl_hal::IPreparedModel*, IOType, uint32_t>;

bool validateMemoryDesc(
        const aidl_hal::BufferDesc& desc,
        const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
        const std::vector<aidl_hal::BufferRole>& inputRoles,
        const std::vector<aidl_hal::BufferRole>& outputRoles,
        std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
                getModel,
        std::set<AidlHalPreparedModelRole>* preparedModelRoles, aidl_hal::Operand* combinedOperand);

} // namespace nn
} // namespace android

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_VALIDATE_HAL_H
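For context, a hypothetical IDevice::allocate-style call site for validateMemoryDesc; the wrapper function and its parameters are illustrative assumptions, only the validateMemoryDesc signature comes from the header above:

#include <functional>
#include <memory>
#include <set>
#include <vector>

#include "nnapi/hal/aidl/ValidateHal.h"

namespace android::nn {

bool exampleValidateAllocation(
        const aidl_hal::BufferDesc& desc,
        const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
        const std::vector<aidl_hal::BufferRole>& inputRoles,
        const std::vector<aidl_hal::BufferRole>& outputRoles,
        std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
                getModel) {
    std::set<AidlHalPreparedModelRole> roles;
    aidl_hal::Operand combinedOperand;
    if (!validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles, std::move(getModel),
                            &roles, &combinedOperand)) {
        return false;  // the output parameters are only meaningful on success
    }
    // On success, `roles` lists every (preparedModel, IOType, index) the buffer may serve, and
    // `combinedOperand` carries the merged dimensions to use for the allocation.
    return true;
}

}  // namespace android::nn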
227 neuralnetworks/aidl/utils/src/AidlBufferTracker.cpp Normal file
@@ -0,0 +1,227 @@
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "AidlBufferTracker.h"

#include <android-base/macros.h>

#include <memory>
#include <mutex>
#include <set>
#include <stack>
#include <utility>
#include <vector>

#include "AidlHalInterfaces.h"
#include "nnapi/TypeUtils.h"

namespace android::nn {

std::shared_ptr<AidlManagedBuffer> AidlManagedBuffer::create(
        uint32_t size, std::set<AidlHalPreparedModelRole> roles, const Operand& operand) {
    std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]);
    if (buffer == nullptr) {
        return nullptr;
    }
    if (isExtension(operand.type)) {
        LOG(ERROR) << "AidlManagedBuffer cannot handle extension operands.";
        return nullptr;
    }
    return std::make_shared<AidlManagedBuffer>(std::move(buffer), size, std::move(roles), operand);
}

AidlManagedBuffer::AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
                                     std::set<AidlHalPreparedModelRole> roles,
                                     const Operand& operand)
    : kBuffer(std::move(buffer)),
      kSize(size),
      kRoles(std::move(roles)),
      kOperandType(operand.type),
      kInitialDimensions(operand.dimensions),
      mUpdatedDimensions(operand.dimensions) {
    CHECK(!isExtension(kOperandType));
}

ErrorStatus AidlManagedBuffer::validateRequest(
        uint32_t poolIndex, const Request& request,
        const aidl_hal::IPreparedModel* preparedModel) const {
    CHECK_LT(poolIndex, request.pools.size());
    CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex]));
    std::lock_guard<std::mutex> guard(mMutex);

    bool usedAsInput = false, usedAsOutput = false;
    for (uint32_t i = 0; i < request.inputs.size(); i++) {
        if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
        if (request.inputs[i].location.poolIndex != poolIndex) continue;
        // Validate if the input role is specified during allocation.
        if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        if (!mInitialized) {
            LOG(ERROR)
                    << "AidlManagedBuffer::validateRequest -- using uninitialized buffer as input "
                       "request.";
            return ErrorStatus::GENERAL_FAILURE;
        }
        auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions);
        if (!combined.has_value()) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
                       << toString(mUpdatedDimensions) << " vs "
                       << toString(request.inputs[i].dimensions) << ")";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        usedAsInput = true;
    }
    for (uint32_t i = 0; i < request.outputs.size(); i++) {
        if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
        if (request.outputs[i].location.poolIndex != poolIndex) continue;
        if (usedAsInput || usedAsOutput) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- using the same device memory for "
                          "input/output or multiple outputs";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // Validate if the output role is specified during allocation.
        if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions);
        if (!combined.has_value()) {
            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
                       << toString(kInitialDimensions) << " vs "
                       << toString(request.outputs[i].dimensions) << ")";
            return ErrorStatus::INVALID_ARGUMENT;
        }
        usedAsOutput = true;
    }
    return ErrorStatus::NONE;
}

ErrorStatus AidlManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions,
                                                uint32_t size) const {
    if (size != kSize) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize
                   << " vs " << size;
        return ErrorStatus::INVALID_ARGUMENT;
    }

    if (isNonExtensionScalar(kOperandType)) {
        if (!dimensions.empty()) {
            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid dimensions for scalar "
                          "operand: "
                       << toString(dimensions);
            return ErrorStatus::INVALID_ARGUMENT;
        }
        return ErrorStatus::NONE;
    }

    if (dimensions.empty()) {
        if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) {
            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the initial dimensions are not "
                          "fully "
                          "specified and no dimension update is provided: "
                       << toString(kInitialDimensions);
            return ErrorStatus::INVALID_ARGUMENT;
        }
    } else {
        if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) {
            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the updated dimensions are not "
                          "fully "
                          "specified: "
                       << toString(dimensions);
            return ErrorStatus::INVALID_ARGUMENT;
        }
    }

    const auto combined = combineDimensions(kInitialDimensions, dimensions);
    if (!combined.has_value()) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- incompatible dimensions ("
                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
        return ErrorStatus::INVALID_ARGUMENT;
    }
    return ErrorStatus::NONE;
}

ErrorStatus AidlManagedBuffer::validateCopyTo(uint32_t size) const {
    if (size != kSize) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- invalid memory size: " << kSize
                   << " vs " << size;
        return ErrorStatus::INVALID_ARGUMENT;
    }
    std::lock_guard<std::mutex> guard(mMutex);
    if (!mInitialized) {
        LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- using uninitialized buffer as source.";
        return ErrorStatus::GENERAL_FAILURE;
    }
    return ErrorStatus::NONE;
}

bool AidlManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) {
    auto combined = combineDimensions(kInitialDimensions, dimensions);
    if (!combined.has_value()) {
        LOG(ERROR) << "AidlManagedBuffer::updateDimensions -- incompatible dimensions ("
                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
        return false;
    }
    std::lock_guard<std::mutex> guard(mMutex);
    mUpdatedDimensions = std::move(combined).value();
    return true;
}

void AidlManagedBuffer::setInitialized(bool initialized) {
    std::lock_guard<std::mutex> guard(mMutex);
    mInitialized = initialized;
}

std::unique_ptr<AidlBufferTracker::Token> AidlBufferTracker::add(
        std::shared_ptr<AidlManagedBuffer> buffer) {
    if (buffer == nullptr) {
        return nullptr;
    }
    std::lock_guard<std::mutex> guard(mMutex);
    uint32_t token = 0;
    if (mFreeTokens.empty()) {
        token = mTokenToBuffers.size();
        mTokenToBuffers.push_back(std::move(buffer));
    } else {
        token = mFreeTokens.top();
        mFreeTokens.pop();
        mTokenToBuffers[token] = std::move(buffer);
    }
    VLOG(MEMORY) << "AidlBufferTracker::add -- new token = " << token;
    return std::make_unique<Token>(token, shared_from_this());
}

std::shared_ptr<AidlManagedBuffer> AidlBufferTracker::get(uint32_t token) const {
    std::lock_guard<std::mutex> guard(mMutex);
    if (mTokenToBuffers.size() <= token || mTokenToBuffers[token] == nullptr) {
        LOG(ERROR) << "AidlBufferTracker::get -- unknown token " << token;
        return nullptr;
    }
    return mTokenToBuffers[token];
}

void AidlBufferTracker::free(uint32_t token) {
    std::lock_guard<std::mutex> guard(mMutex);
    CHECK_LT(token, mTokenToBuffers.size());
    CHECK(mTokenToBuffers[token] != nullptr);
    VLOG(MEMORY) << "AidlBufferTracker::free -- release token = " << token;
    mTokenToBuffers[token] = nullptr;
    mFreeTokens.push(token);
}

} // namespace android::nn
72 neuralnetworks/aidl/utils/src/AidlHalUtils.cpp Normal file
@@ -0,0 +1,72 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file contains pre-canonical-types utility code and includes HAL
// utilities. LegacyUtils.h is the subset of these utilities that do not touch
// HAL.

#include "AidlHalUtils.h"

#include <android-base/logging.h>
#include <nnapi/hal/aidl/Conversions.h>

#include <algorithm>
#include <iterator>
#include <type_traits>
#include <vector>

#include "AidlHalInterfaces.h"
#include "nnapi/TypeUtils.h"

namespace android::nn {

std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
        aidl_hal::PerformanceInfo perf) {
    static constexpr ndk::enum_range<aidl_hal::OperandType> kOperandTypeRange;
    std::vector<aidl_hal::OperandPerformance> ret;
    ret.reserve(std::distance(kOperandTypeRange.begin(), kOperandTypeRange.end()));
    for (aidl_hal::OperandType type : kOperandTypeRange) {
        if (type != aidl_hal::OperandType::SUBGRAPH) {
            ret.push_back(aidl_hal::OperandPerformance{type, perf});
        }
    }
    std::sort(ret.begin(), ret.end(),
              [](const aidl_hal::OperandPerformance& a, const aidl_hal::OperandPerformance& b) {
                  return a.type < b.type;
              });

    return ret;
}

void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
            aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf) {
    CHECK(operandPerformance != nullptr);
    const auto it = std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
                                     [](const aidl_hal::OperandPerformance& perf,
                                        aidl_hal::OperandType type) { return perf.type < type; });
    CHECK(it != operandPerformance->end())
            << toString(type) << " not in operand performance vector";
    it->info = perf;
}

bool isExtensionOperandType(aidl_hal::OperandType type) {
    return isExtension(convert(type).value());
}

bool isNonExtensionScalar(aidl_hal::OperandType type) {
    return isNonExtensionScalar(convert(type).value());
}

} // namespace android::nn
136 neuralnetworks/aidl/utils/src/AidlValidateHal.cpp Normal file
@@ -0,0 +1,136 @@
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "ValidateHal"

#include "AidlValidateHal.h"

#include <android-base/logging.h>
#include <nnapi/hal/aidl/Conversions.h>

#include <algorithm>
#include <memory>
#include <set>
#include <utility>
#include <vector>

#include "AidlHalUtils.h"
#include "nnapi/TypeUtils.h"

namespace android {
namespace nn {

bool validateMemoryDesc(
        const aidl_hal::BufferDesc& desc,
        const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
        const std::vector<aidl_hal::BufferRole>& inputRoles,
        const std::vector<aidl_hal::BufferRole>& outputRoles,
        std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
                getModel,
        std::set<AidlHalPreparedModelRole>* preparedModelRoles,
        aidl_hal::Operand* combinedOperand) {
    NN_RET_CHECK(!preparedModels.empty());
    NN_RET_CHECK(!inputRoles.empty() || !outputRoles.empty());

    std::set<AidlHalPreparedModelRole> roles;
    std::vector<aidl_hal::Operand> operands;
    operands.reserve(inputRoles.size() + outputRoles.size());
    for (const auto& role : inputRoles) {
        NN_RET_CHECK_GE(role.modelIndex, 0);
        NN_RET_CHECK_LT(static_cast<size_t>(role.modelIndex), preparedModels.size());
        const auto& preparedModel = preparedModels[role.modelIndex];
        NN_RET_CHECK(preparedModel != nullptr);
        const auto* model = getModel(preparedModel);
        NN_RET_CHECK(model != nullptr);
        const auto& inputIndexes = model->main.inputIndexes;
        NN_RET_CHECK_GE(role.ioIndex, 0);
        NN_RET_CHECK_LT(static_cast<size_t>(role.ioIndex), inputIndexes.size());
        NN_RET_CHECK_GT(role.probability, 0.0f);
        NN_RET_CHECK_LE(role.probability, 1.0f);
        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);
        NN_RET_CHECK(success);
        operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]);
    }
    for (const auto& role : outputRoles) {
        NN_RET_CHECK_GE(role.modelIndex, 0);
        NN_RET_CHECK_LT(static_cast<size_t>(role.modelIndex), preparedModels.size());
        const auto& preparedModel = preparedModels[role.modelIndex];
        NN_RET_CHECK(preparedModel != nullptr);
        const auto* model = getModel(preparedModel);
        NN_RET_CHECK(model != nullptr);
        const auto& outputIndexes = model->main.outputIndexes;
        NN_RET_CHECK_GE(role.ioIndex, 0);
        NN_RET_CHECK_LT(static_cast<size_t>(role.ioIndex), outputIndexes.size());
        NN_RET_CHECK_GT(role.probability, 0.0f);
        NN_RET_CHECK_LE(role.probability, 1.0f);
        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);
        NN_RET_CHECK(success);
        operands.push_back(model->main.operands[outputIndexes[role.ioIndex]]);
    }

    CHECK(!operands.empty());
    const auto opType = operands[0].type;
    const auto canonicalOperandType = convert(opType);
    NN_RET_CHECK(canonicalOperandType.has_value()) << canonicalOperandType.error().message;
    const bool isExtensionOperand = isExtension(canonicalOperandType.value());

    auto maybeDimensions = toUnsigned(desc.dimensions);
    NN_RET_CHECK(maybeDimensions.has_value()) << maybeDimensions.error().message;
    std::vector<uint32_t> dimensions = std::move(maybeDimensions).value();

    for (const auto& operand : operands) {
        NN_RET_CHECK(operand.type == operands[0].type)
                << toString(operand.type) << " vs " << toString(operands[0].type);
        NN_RET_CHECK_EQ(operand.scale, operands[0].scale);
        NN_RET_CHECK_EQ(operand.zeroPoint, operands[0].zeroPoint);
        // NOTE: validateMemoryDesc cannot validate extra parameters for extension operand type.
        if (!isExtensionOperand) {
            const auto& lhsExtraParams = operand.extraParams;
            const auto& rhsExtraParams = operands[0].extraParams;
            NN_RET_CHECK(lhsExtraParams == rhsExtraParams)
                    << (lhsExtraParams.has_value() ? lhsExtraParams.value().toString()
                                                   : "std::nullopt")
                    << " vs "
                    << (rhsExtraParams.has_value() ? rhsExtraParams.value().toString()
                                                   : "std::nullopt");
        }
        const auto maybeRhsDimensions = toUnsigned(operand.dimensions);
        NN_RET_CHECK(maybeRhsDimensions.has_value()) << maybeRhsDimensions.error().message;
        const auto combined = combineDimensions(dimensions, maybeRhsDimensions.value());
        NN_RET_CHECK(combined.has_value());
        dimensions = combined.value();
    }

    // NOTE: validateMemoryDesc cannot validate scalar dimensions with extension operand type.
    if (!isExtensionOperand) {
        NN_RET_CHECK(!isNonExtensionScalar(opType) || dimensions.empty())
                << "invalid dimensions with scalar operand type.";
    }

    if (preparedModelRoles != nullptr) {
        *preparedModelRoles = std::move(roles);
    }
    if (combinedOperand != nullptr) {
        *combinedOperand = operands[0];
        // No need to check that values fit int32_t here, since the original values are obtained
        // from int32_t.
        combinedOperand->dimensions = aidl_hal::utils::toSigned(dimensions).value();
    }
    return true;
}

} // namespace nn
} // namespace android
@@ -39,7 +39,7 @@ cc_test {
    srcs: ["test/*.cpp"],
    static_libs: [
        "libgmock",
        "libneuralnetworks_common_hidl",
        "libneuralnetworks_common",
        "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
    ],