Merge changes from topic "nnapi-canonical-ahwb" am: 8548f574ee am: ece0b71cc1

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1593092

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: I003fdfdfd480a17f1d3549d25631dea8a91cd55c
Michael Butler authored 2021-02-18 22:00:12 +00:00; committed by Automerger Merge Worker
9 changed files with 299 additions and 53 deletions

View File

@@ -154,7 +154,7 @@ GeneralResult<Model::OperandValues> unvalidatedConvert(const hidl_vec<uint8_t>&
 }
 
 GeneralResult<SharedMemory> unvalidatedConvert(const hidl_memory& memory) {
-    return createSharedMemoryFromHidlMemory(memory);
+    return hal::utils::createSharedMemoryFromHidlMemory(memory);
 }
 
 GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model) {
@@ -347,9 +347,7 @@ nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
 }
 
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
-    CHECK(memory != nullptr);
-    return hidl_memory(memory->name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory->handle)),
-                       memory->size);
+    return hal::utils::createHidlMemoryFromSharedMemory(memory);
 }
 
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
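
Aside: every conversion above returns GeneralResult and unwraps nested conversions with NN_TRY. A rough stand-alone sketch of the idiom, using a simplified value-or-error stand-in for the real nn::GeneralResult (an expected-like type from nnapi/Result.h):

// result_sketch.cpp - simplified stand-in for the GeneralResult idiom; the
// real type and the NN_TRY macro live in <nnapi/Result.h>.
#include <iostream>
#include <string>
#include <variant>

template <typename T>
using GeneralResult = std::variant<T, std::string>;  // value or error message

template <typename T>
bool ok(const GeneralResult<T>& result) {
    return std::holds_alternative<T>(result);
}

GeneralResult<unsigned> convertSize(long long size) {
    if (size < 0) return std::string("Memory size must not be negative");
    return static_cast<unsigned>(size);
}

int main() {
    std::cout << ok(convertSize(16)) << ' ' << ok(convertSize(-1)) << '\n';  // prints "1 0"
}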

View File

@@ -304,7 +304,11 @@ GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
 }
 
 GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& hidlHandle) {
-    return hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle());
+    if (hidlHandle.getNativeHandle() == nullptr) {
+        return nullptr;
+    }
+    auto handle = NN_TRY(hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle()));
+    return std::make_shared<const Handle>(std::move(handle));
 }
 
 GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
@@ -588,7 +592,10 @@ nn::GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
 }
 
 nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
-    return hal::utils::hidlHandleFromSharedHandle(handle);
+    if (handle == nullptr) {
+        return {};
+    }
+    return hal::utils::hidlHandleFromSharedHandle(*handle);
 }
 
 nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
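
Both hunks encode the same null-handle convention: a null SharedHandle crosses the HIDL boundary as an empty hidl_handle, and an empty handle converts back to nullptr rather than an error. A minimal round-trip sketch of that convention, assuming simplified stand-ins for the canonical Handle/SharedHandle and for hidl_handle:

// nullable_handle_sketch.cpp - the null-handle round trip, with stand-in types.
#include <cassert>
#include <memory>
#include <vector>

struct Handle {
    std::vector<int> fds;   // file descriptors (dup'd in the real conversions)
    std::vector<int> ints;  // opaque integer payload
};
using SharedHandle = std::shared_ptr<const Handle>;

struct WireHandle {         // stand-in for hidl_handle's default (null) state
    bool empty = true;
    Handle payload;
};

WireHandle toWire(const SharedHandle& handle) {
    if (handle == nullptr) return {};  // null -> empty wire handle
    return {.empty = false, .payload = *handle};
}

SharedHandle fromWire(const WireHandle& wire) {
    if (wire.empty) return nullptr;    // empty -> null SharedHandle
    return std::make_shared<const Handle>(wire.payload);
}

int main() {
    assert(fromWire(toWire(nullptr)) == nullptr);  // null survives the round trip
    const auto handle = std::make_shared<const Handle>(Handle{.ints = {42}});
    assert(fromWire(toWire(handle))->ints == handle->ints);
}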

View File

@@ -261,7 +261,7 @@ GeneralResult<Request::MemoryPool> unvalidatedConvert(
     using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
     switch (memoryPool.getDiscriminator()) {
         case Discriminator::hidlMemory:
-            return createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
+            return hal::utils::createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
         case Discriminator::token:
             return static_cast<Request::MemoryDomainToken>(memoryPool.token());
     }

View File

@@ -21,12 +21,14 @@ cc_library_static {
     local_include_dirs: ["include/nnapi/hal/aidl/"],
     export_include_dirs: ["include"],
     static_libs: [
+        "libarect",
         "neuralnetworks_types",
        "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
-        "libhidlbase",
         "android.hardware.neuralnetworks-V1-ndk_platform",
         "libbinder_ndk",
+        "libhidlbase",
+        "libnativewindow",
     ],
 }

View File

@@ -18,6 +18,8 @@
 
 #include <aidl/android/hardware/common/NativeHandle.h>
 #include <android-base/logging.h>
+#include <android/hardware_buffer.h>
+#include <cutils/native_handle.h>
 #include <nnapi/OperandTypes.h>
 #include <nnapi/OperationTypes.h>
 #include <nnapi/Result.h>
@@ -27,6 +29,7 @@
 #include <nnapi/Validation.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
+#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <chrono>
@@ -127,6 +130,61 @@ GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
     return canonical;
 }
 
+GeneralResult<Handle> unvalidatedConvertHelper(const NativeHandle& aidlNativeHandle) {
+    std::vector<base::unique_fd> fds;
+    fds.reserve(aidlNativeHandle.fds.size());
+    for (const auto& fd : aidlNativeHandle.fds) {
+        const int dupFd = dup(fd.get());
+        if (dupFd == -1) {
+            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
+            // here?
+            return NN_ERROR() << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    return Handle{.fds = std::move(fds), .ints = aidlNativeHandle.ints};
+}
+
+struct NativeHandleDeleter {
+    void operator()(native_handle_t* handle) const {
+        if (handle) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    }
+};
+
+using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;
+
+static nn::GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(
+        const NativeHandle& handle) {
+    std::vector<base::unique_fd> fds;
+    fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd.get());
+        if (dupFd == -1) {
+            return NN_ERROR() << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(handle.fds.size(), kIntMax);
+    CHECK_LE(handle.ints.size(), kIntMax);
+    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
+                                                         static_cast<int>(handle.ints.size()));
+    if (nativeHandle == nullptr) {
+        return NN_ERROR() << "Failed to create native_handle";
+    }
+    for (size_t i = 0; i < fds.size(); ++i) {
+        nativeHandle->data[i] = fds[i].release();
+    }
+    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+    return UniqueNativeHandle(nativeHandle);
+}
+
 }  // anonymous namespace
 
 GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
@@ -318,10 +376,64 @@ GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming) {
     return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
 }
 
+static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
+    return (value + multiple - 1) / multiple * multiple;
+}
+
 GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
     VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
+    if (memory.size > std::numeric_limits<uint32_t>::max()) {
+        return NN_ERROR() << "Memory: size must be <= std::numeric_limits<uint32_t>::max()";
+    }
+
+    if (memory.name != "hardware_buffer_blob") {
+        return std::make_shared<const Memory>(Memory{
+                .handle = NN_TRY(unvalidatedConvertHelper(memory.handle)),
+                .size = static_cast<uint32_t>(memory.size),
+                .name = memory.name,
+        });
+    }
+
+    const auto size = static_cast<uint32_t>(memory.size);
+    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const uint32_t width = size;
+    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
+    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+
+    const UniqueNativeHandle handle = NN_TRY(nativeHandleFromAidlHandle(memory.handle));
+    const native_handle_t* nativeHandle = handle.get();
+
+    // AHardwareBuffer_createFromHandle() might fail because an allocator
+    // expects a specific stride value. In that case, we try to guess it by
+    // aligning the width to small powers of 2.
+    // TODO(b/174120849): Avoid stride assumptions.
+    AHardwareBuffer* hardwareBuffer = nullptr;
+    status_t status = UNKNOWN_ERROR;
+    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+        const uint32_t stride = roundUpToMultiple(width, alignment);
+        AHardwareBuffer_Desc desc{
+                .width = width,
+                .height = height,
+                .layers = layers,
+                .format = format,
+                .usage = usage,
+                .stride = stride,
+        };
+        status = AHardwareBuffer_createFromHandle(&desc, nativeHandle,
+                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+                                                  &hardwareBuffer);
+        if (status == NO_ERROR) {
+            break;
+        }
+    }
+    if (status != NO_ERROR) {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+               << "Can't create AHardwareBuffer from handle. Error: " << status;
+    }
+
     return std::make_shared<const Memory>(Memory{
-            .handle = NN_TRY(unvalidatedConvert(memory.handle)),
+            .handle = HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
             .size = static_cast<uint32_t>(memory.size),
             .name = memory.name,
     });
@@ -400,22 +512,7 @@ GeneralResult<ExecutionPreference> unvalidatedConvert(
 }
 
 GeneralResult<SharedHandle> unvalidatedConvert(const NativeHandle& aidlNativeHandle) {
-    std::vector<base::unique_fd> fds;
-    fds.reserve(aidlNativeHandle.fds.size());
-    for (const auto& fd : aidlNativeHandle.fds) {
-        int dupFd = dup(fd.get());
-        if (dupFd == -1) {
-            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
-            // here?
-            return NN_ERROR() << "Failed to dup the fd";
-        }
-        fds.emplace_back(dupFd);
-    }
-
-    return std::make_shared<const Handle>(Handle{
-        .fds = std::move(fds),
-        .ints = aidlNativeHandle.ints,
-    });
+    return std::make_shared<const Handle>(NN_TRY(unvalidatedConvertHelper(aidlNativeHandle)));
 }
 
 GeneralResult<ExecutionPreference> convert(
@@ -508,13 +605,11 @@ nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
     return halObject;
 }
 
-}  // namespace
-
-nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
+nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::Handle& handle) {
     common::NativeHandle aidlNativeHandle;
-    aidlNativeHandle.fds.reserve(sharedHandle->fds.size());
-    for (const auto& fd : sharedHandle->fds) {
-        int dupFd = dup(fd.get());
+    aidlNativeHandle.fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd.get());
         if (dupFd == -1) {
             // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
             // here?
@@ -522,17 +617,69 @@ nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandl
         }
         aidlNativeHandle.fds.emplace_back(dupFd);
     }
-    aidlNativeHandle.ints = sharedHandle->ints;
+    aidlNativeHandle.ints = handle.ints;
     return aidlNativeHandle;
 }
 
+static nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
+        const native_handle_t& handle) {
+    common::NativeHandle aidlNativeHandle;
+
+    aidlNativeHandle.fds.reserve(handle.numFds);
+    for (int i = 0; i < handle.numFds; ++i) {
+        const int dupFd = dup(handle.data[i]);
+        if (dupFd == -1) {
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+        }
+        aidlNativeHandle.fds.emplace_back(dupFd);
+    }
+
+    aidlNativeHandle.ints = std::vector<int>(&handle.data[handle.numFds],
+                                             &handle.data[handle.numFds + handle.numInts]);
+
+    return aidlNativeHandle;
+}
+
+}  // namespace
+
+nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
+    CHECK(sharedHandle != nullptr);
+    return unvalidatedConvert(*sharedHandle);
+}
+
 nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory) {
     CHECK(memory != nullptr);
+    if (memory->size > std::numeric_limits<int64_t>::max()) {
+        return NN_ERROR() << "Memory size doesn't fit into int64_t.";
+    }
+
+    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
+        return Memory{
+                .handle = NN_TRY(unvalidatedConvert(*handle)),
+                .size = static_cast<int64_t>(memory->size),
+                .name = memory->name,
+        };
+    }
+
+    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
+        CHECK_EQ(memory->size, bufferDesc.width);
+        CHECK_EQ(memory->name, "hardware_buffer_blob");
+    } else {
+        CHECK_EQ(memory->size, 0u);
+        CHECK_EQ(memory->name, "hardware_buffer");
+    }
+
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    if (nativeHandle == nullptr) {
+        return NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
+                             "returned nullptr";
+    }
+
     return Memory{
-            .handle = NN_TRY(unvalidatedConvert(memory->handle)),
+            .handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle)),
             .size = static_cast<int64_t>(memory->size),
             .name = memory->name,
     };
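
The stride-guessing loop above hinges on roundUpToMultiple: each candidate alignment rounds the blob width up to a plausible allocator stride, and the first stride AHardwareBuffer_createFromHandle() accepts wins. A stand-alone illustration of that arithmetic (the sizes here are hypothetical examples, not values from the commit):

// stride_sketch.cpp - the stride-guessing arithmetic used by the alignment loop.
#include <cassert>
#include <cstdint>

static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
    return (value + multiple - 1) / multiple * multiple;
}

int main() {
    // A 60-byte blob tried against the loop's alignments {1, 4, 32, 64, 128, 2, 8, 16}
    // yields candidate strides 60, 60, 64, 64, 128, 60, 64, 64.
    assert(roundUpToMultiple(60, 1) == 60);
    assert(roundUpToMultiple(60, 4) == 60);
    assert(roundUpToMultiple(60, 32) == 64);
    assert(roundUpToMultiple(60, 128) == 128);
    // Widths that are already aligned are unchanged.
    assert(roundUpToMultiple(64, 64) == 64);
}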

View File

@@ -135,7 +135,8 @@ void TestBlobAHWB::initialize(uint32_t size) {
     ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
     ASSERT_NE(mAhwb, nullptr);
 
-    const auto sharedMemory = nn::createSharedMemoryFromAHWB(*mAhwb).value();
+    const auto sharedMemory =
+            nn::createSharedMemoryFromAHWB(mAhwb, /*takeOwnership=*/false).value();
     mMapping = nn::map(sharedMemory).value();
     mPtr = static_cast<uint8_t*>(std::get<void*>(mMapping.pointer));
     CHECK_NE(mPtr, nullptr);
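
The updated call makes buffer ownership explicit: with /*takeOwnership=*/false the shared memory wraps mAhwb without releasing it, so the test fixture stays responsible for cleanup. A minimal sketch of that pattern with a stand-in wrapper type (the real nn::createSharedMemoryFromAHWB and nn::Memory differ):

// ownership_sketch.cpp - the takeOwnership pattern, with stand-in types.
#include <memory>

struct AHardwareBufferStub {};  // stand-in for AHardwareBuffer
void releaseStub(AHardwareBufferStub* buffer) { delete buffer; }

// takeOwnership=true: the wrapper releases the buffer when destroyed.
// takeOwnership=false: the caller remains responsible for releasing it.
std::shared_ptr<AHardwareBufferStub> wrapBuffer(AHardwareBufferStub* buffer,
                                                bool takeOwnership) {
    if (takeOwnership) {
        return std::shared_ptr<AHardwareBufferStub>(buffer, releaseStub);
    }
    return std::shared_ptr<AHardwareBufferStub>(buffer, [](AHardwareBufferStub*) {});
}

int main() {
    auto* buffer = new AHardwareBufferStub();
    {
        // Mirrors the test fixture: wrap without taking ownership...
        auto memory = wrapBuffer(buffer, /*takeOwnership=*/false);
    }  // ...so the wrapper's destruction does not release the buffer.
    releaseStub(buffer);  // the caller stays responsible for cleanup.
}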

View File

@@ -22,10 +22,12 @@ cc_library_static {
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: [
+        "libarect",
         "neuralnetworks_types",
     ],
     shared_libs: [
         "libhidlbase",
+        "libnativewindow",
     ],
 }

View File

@@ -74,10 +74,12 @@ nn::GeneralResult<void> unflushDataFromSharedToPointer(
 std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
                                              const std::vector<nn::Operation>& operations);
 
+nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory);
+nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
+
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle);
-nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle);
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle);
+nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle);
 
 nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
         const std::vector<nn::SyncFence>& fences);
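
Taken together, the new declarations form a round trip between canonical and HIDL memory. A hypothetical caller might chain them as below; the enclosing namespace and includes are assumptions, and error propagation uses the same NN_TRY seen in the hunks above:

// round_trip_sketch.cpp - hypothetical use of the helper pair declared above.
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/hal/CommonUtils.h>

namespace android::hardware::neuralnetworks::utils {

nn::GeneralResult<nn::SharedMemory> roundTrip(const nn::SharedMemory& memory) {
    // Canonical -> HIDL: fails if `memory` is null.
    const hidl_memory hidlMemory = NN_TRY(createHidlMemoryFromSharedMemory(memory));
    // HIDL -> canonical: "hardware_buffer_blob" memory is re-wrapped as an AHardwareBuffer.
    return createSharedMemoryFromHidlMemory(hidlMemory);
}

}  // namespace android::hardware::neuralnetworks::utils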

View File

@@ -20,11 +20,14 @@
 
 #include <android-base/logging.h>
 #include <android-base/unique_fd.h>
+#include <android/hardware_buffer.h>
 #include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
+#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <any>
@@ -248,44 +251,128 @@ std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
     return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
-    if (handle == nullptr) {
-        return {};
+nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {
+    if (memory == nullptr) {
+        return NN_ERROR() << "Memory must be non-empty";
     }
+    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
+        return hidl_memory(memory->name, NN_TRY(hidlHandleFromSharedHandle(*handle)), memory->size);
+    }
+
+    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
+        CHECK_EQ(memory->size, bufferDesc.width);
+        CHECK_EQ(memory->name, "hardware_buffer_blob");
+    } else {
+        CHECK_EQ(memory->size, 0u);
+        CHECK_EQ(memory->name, "hardware_buffer");
+    }
+
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    const hidl_handle hidlHandle(nativeHandle);
+    hidl_handle handle(hidlHandle);
+
+    return hidl_memory(memory->name, std::move(handle), memory->size);
+}
+
+static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
+    return (value + multiple - 1) / multiple * multiple;
+}
+
+nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
+    CHECK_LE(memory.size(), std::numeric_limits<uint32_t>::max());
+
+    if (memory.name() != "hardware_buffer_blob") {
+        return std::make_shared<const nn::Memory>(nn::Memory{
+                .handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())),
+                .size = static_cast<uint32_t>(memory.size()),
+                .name = memory.name(),
+        });
+    }
+
+    const auto size = memory.size();
+    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const uint32_t width = size;
+    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
+    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+
+    // AHardwareBuffer_createFromHandle() might fail because an allocator
+    // expects a specific stride value. In that case, we try to guess it by
+    // aligning the width to small powers of 2.
+    // TODO(b/174120849): Avoid stride assumptions.
+    AHardwareBuffer* hardwareBuffer = nullptr;
+    status_t status = UNKNOWN_ERROR;
+    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+        const uint32_t stride = roundUpToMultiple(width, alignment);
+        AHardwareBuffer_Desc desc{
+                .width = width,
+                .height = height,
+                .layers = layers,
+                .format = format,
+                .usage = usage,
+                .stride = stride,
+        };
+        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
+                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+                                                  &hardwareBuffer);
+        if (status == NO_ERROR) {
+            break;
+        }
+    }
+    if (status != NO_ERROR) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "Can't create AHardwareBuffer from handle. Error: " << status;
+    }
+
+    return std::make_shared<const nn::Memory>(nn::Memory{
+            .handle = nn::HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
+            .size = static_cast<uint32_t>(memory.size()),
+            .name = memory.name(),
+    });
+}
+
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle) {
     std::vector<base::unique_fd> fds;
-    fds.reserve(handle->fds.size());
-    for (const auto& fd : handle->fds) {
-        int dupFd = dup(fd);
+    fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd);
         if (dupFd == -1) {
             return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
         }
         fds.emplace_back(dupFd);
     }
 
-    native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(handle.fds.size(), kIntMax);
+    CHECK_LE(handle.ints.size(), kIntMax);
+    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
+                                                         static_cast<int>(handle.ints.size()));
     if (nativeHandle == nullptr) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
     }
     for (size_t i = 0; i < fds.size(); ++i) {
         nativeHandle->data[i] = fds[i].release();
     }
-    std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
 
     hidl_handle hidlHandle;
     hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
     return hidlHandle;
 }
 
-nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
     if (handle == nullptr) {
-        return nullptr;
+        return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle is nullptr";
     }
 
     std::vector<base::unique_fd> fds;
     fds.reserve(handle->numFds);
     for (int i = 0; i < handle->numFds; ++i) {
-        int dupFd = dup(handle->data[i]);
+        const int dupFd = dup(handle->data[i]);
         if (dupFd == -1) {
             return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
         }
@@ -295,18 +382,18 @@ nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_ha
     std::vector<int> ints(&handle->data[handle->numFds],
                           &handle->data[handle->numFds + handle->numInts]);
 
-    return std::make_shared<const nn::Handle>(nn::Handle{
-        .fds = std::move(fds),
-        .ints = std::move(ints),
-    });
+    return nn::Handle{.fds = std::move(fds), .ints = std::move(ints)};
 }
 
 nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
         const std::vector<nn::SyncFence>& syncFences) {
     hidl_vec<hidl_handle> handles(syncFences.size());
     for (size_t i = 0; i < syncFences.size(); ++i) {
-        handles[i] =
-                NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+        const auto& handle = syncFences[i].getSharedHandle();
+        if (handle == nullptr) {
+            return NN_ERROR() << "convertSyncFences failed because sync fence is empty";
+        }
+        handles[i] = NN_TRY(hidlHandleFromSharedHandle(*handle));
     }
     return handles;
 }
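
The handle helpers above all share one fd-handling discipline: dup each descriptor into RAII ownership first, create the native_handle_t, and only then release the dup'd fds into it, so a mid-loop failure cleans up automatically. A condensed stand-alone sketch of that pattern (the function name is illustrative; builds against libbase and libcutils):

// handle_sketch.cpp - the dup-then-transfer pattern the helpers above share.
#include <unistd.h>

#include <algorithm>
#include <vector>

#include <android-base/unique_fd.h>
#include <cutils/native_handle.h>

// Returns a handle owning duplicates of `fds`, or nullptr on failure.
native_handle_t* duplicateIntoNativeHandle(const std::vector<int>& fds,
                                           const std::vector<int>& ints) {
    std::vector<android::base::unique_fd> duped;
    duped.reserve(fds.size());
    for (int fd : fds) {
        const int dupFd = dup(fd);
        if (dupFd == -1) return nullptr;  // unique_fds close anything dup'd so far
        duped.emplace_back(dupFd);
    }

    native_handle_t* handle = native_handle_create(static_cast<int>(fds.size()),
                                                   static_cast<int>(ints.size()));
    if (handle == nullptr) return nullptr;

    // Only now release ownership of the dup'd fds into the handle.
    for (size_t i = 0; i < duped.size(); ++i) {
        handle->data[i] = duped[i].release();
    }
    std::copy(ints.begin(), ints.end(), &handle->data[handle->numFds]);
    return handle;  // caller frees via native_handle_close + native_handle_delete
}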