Merge "Invalidate NN interface objects on cache mismatch" am: 4cdcadf70c

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1508322

Change-Id: I77446ec7fb4d8b6c2f7c3581b58b015032b75ae4
This commit is contained in:
Michael Butler
2020-11-24 19:59:29 +00:00
committed by Automerger Merge Worker
8 changed files with 403 additions and 8 deletions

View File

@@ -0,0 +1,42 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <utility>
#include <vector>
namespace android::hardware::neuralnetworks::utils {

// An IBuffer implementation whose data-movement operations all fail.
//
// NOTE(review): based on the commit context, this stands in for a real driver
// buffer after the underlying service has been invalidated (e.g., a cache
// mismatch on recovery) — confirm against ResilientDevice usage.
class InvalidBuffer final : public nn::IBuffer {
  public:
    // Returns a default-constructed (zero) MemoryDomainToken; see
    // InvalidBuffer.cpp.
    nn::Request::MemoryDomainToken getToken() const override;

    // Both copy directions unconditionally return a general error
    // ("InvalidBuffer"); see InvalidBuffer.cpp.
    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
                                     const nn::Dimensions& dimensions) const override;
};

}  // namespace android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H

View File

@@ -0,0 +1,80 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <string>
#include <vector>
namespace android::hardware::neuralnetworks::utils {

// An IDevice implementation that reports fixed, caller-supplied metadata but
// fails every operation.
//
// The constructor captures a snapshot of a device's identity (name, version,
// feature level, type, extensions, capabilities, cache-file counts); the
// const accessors return those cached values verbatim, while wait(),
// getSupportedOperations(), prepareModel(), prepareModelFromCache(), and
// allocate() all return a general error (see InvalidDevice.cpp).
class InvalidDevice final : public nn::IDevice {
  public:
    InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
                  nn::DeviceType type, std::vector<nn::Extension> extensions,
                  nn::Capabilities capabilities,
                  std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded);

    // Metadata accessors: return the values captured at construction.
    const std::string& getName() const override;
    const std::string& getVersionString() const override;
    nn::Version getFeatureLevel() const override;
    nn::DeviceType getType() const override;
    const std::vector<nn::Extension>& getSupportedExtensions() const override;
    const nn::Capabilities& getCapabilities() const override;
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;

    // Operations: all of the following unconditionally fail with a general
    // error ("InvalidDevice").
    nn::GeneralResult<void> wait() const override;
    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
            const nn::Model& model) const override;
    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
            const std::vector<nn::SharedHandle>& dataCache,
            const nn::CacheToken& token) const override;
    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
            const std::vector<nn::SharedHandle>& dataCache,
            const nn::CacheToken& token) const override;
    nn::GeneralResult<nn::SharedBuffer> allocate(
            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
            const std::vector<nn::BufferRole>& inputRoles,
            const std::vector<nn::BufferRole>& outputRoles) const override;

  private:
    // Immutable metadata snapshot captured at construction.
    const std::string kName;
    const std::string kVersionString;
    const nn::Version kFeatureLevel;
    const nn::DeviceType kType;
    const std::vector<nn::Extension> kExtensions;
    const nn::Capabilities kCapabilities;
    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
};

}  // namespace android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H

View File

@@ -0,0 +1,48 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <utility>
#include <vector>
namespace android::hardware::neuralnetworks::utils {

// An IPreparedModel implementation whose execution operations all fail.
//
// NOTE(review): based on the commit context, ResilientDevice hands these out
// when the underlying device has been marked invalid — confirm against
// ResilientDevice::prepareModelInternal.
class InvalidPreparedModel final : public nn::IPreparedModel {
  public:
    // Both execution paths unconditionally return a general error
    // ("InvalidPreparedModel"); see InvalidPreparedModel.cpp.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;

    // Returns an empty std::any — there is no underlying resource.
    std::any getUnderlyingResource() const override;
};

}  // namespace android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H

View File

@@ -45,8 +45,9 @@ class ResilientDevice final : public nn::IDevice,
std::string versionString, std::vector<nn::Extension> extensions,
nn::Capabilities capabilities, nn::SharedDevice device);
nn::SharedDevice getDevice() const;
nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const;
nn::SharedDevice getDevice() const EXCLUDES(mMutex);
nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const
EXCLUDES(mMutex);
const std::string& getName() const override;
const std::string& getVersionString() const override;
@@ -78,6 +79,7 @@ class ResilientDevice final : public nn::IDevice,
const std::vector<nn::BufferRole>& outputRoles) const override;
private:
bool isValidInternal() const EXCLUDES(mMutex);
nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
nn::Priority priority, nn::OptionalTimePoint deadline,
@@ -100,6 +102,7 @@ class ResilientDevice final : public nn::IDevice,
const nn::Capabilities kCapabilities;
mutable std::mutex mMutex;
mutable nn::SharedDevice mDevice GUARDED_BY(mMutex);
mutable bool mIsValid GUARDED_BY(mMutex) = true;
};
} // namespace android::hardware::neuralnetworks::utils

View File

@@ -0,0 +1,42 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "InvalidBuffer.h"
#include <nnapi/IBuffer.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <utility>
#include <vector>
namespace android::hardware::neuralnetworks::utils {

// Returns a default-constructed (zero-valued) memory domain token; the
// invalid buffer owns no real memory-domain allocation to identify.
nn::Request::MemoryDomainToken InvalidBuffer::getToken() const {
    return nn::Request::MemoryDomainToken{};
}

// Unconditionally fails: an InvalidBuffer has no underlying driver buffer to
// copy out of.
nn::GeneralResult<void> InvalidBuffer::copyTo(const nn::Memory& /*dst*/) const {
    return NN_ERROR() << "InvalidBuffer";
}

// Unconditionally fails: an InvalidBuffer has no underlying driver buffer to
// copy into.
nn::GeneralResult<void> InvalidBuffer::copyFrom(const nn::Memory& /*src*/,
                                                const nn::Dimensions& /*dimensions*/) const {
    return NN_ERROR() << "InvalidBuffer";
}

}  // namespace android::hardware::neuralnetworks::utils

View File

@@ -0,0 +1,105 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "InvalidDevice.h"
#include "InvalidBuffer.h"
#include "InvalidPreparedModel.h"
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <string>
#include <vector>
namespace android::hardware::neuralnetworks::utils {

// Captures a snapshot of the metadata of the device this object stands in
// for; the accessors below return these values verbatim so callers can still
// query identity/capability information after the device is invalidated.
InvalidDevice::InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
                             nn::DeviceType type, std::vector<nn::Extension> extensions,
                             nn::Capabilities capabilities,
                             std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded)
    : kName(std::move(name)),
      kVersionString(std::move(versionString)),
      kFeatureLevel(featureLevel),
      kType(type),
      kExtensions(std::move(extensions)),
      kCapabilities(std::move(capabilities)),
      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded) {}

// Metadata accessors: return the construction-time snapshot unchanged.

const std::string& InvalidDevice::getName() const {
    return kName;
}

const std::string& InvalidDevice::getVersionString() const {
    return kVersionString;
}

nn::Version InvalidDevice::getFeatureLevel() const {
    return kFeatureLevel;
}

nn::DeviceType InvalidDevice::getType() const {
    return kType;
}

const std::vector<nn::Extension>& InvalidDevice::getSupportedExtensions() const {
    return kExtensions;
}

const nn::Capabilities& InvalidDevice::getCapabilities() const {
    return kCapabilities;
}

std::pair<uint32_t, uint32_t> InvalidDevice::getNumberOfCacheFilesNeeded() const {
    return kNumberOfCacheFilesNeeded;
}

// Operations: everything below unconditionally fails with a general error,
// because this object represents a device that can no longer be used.

nn::GeneralResult<void> InvalidDevice::wait() const {
    return NN_ERROR() << "InvalidDevice";
}

nn::GeneralResult<std::vector<bool>> InvalidDevice::getSupportedOperations(
        const nn::Model& /*model*/) const {
    return NN_ERROR() << "InvalidDevice";
}

nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModel(
        const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/,
        nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/,
        const std::vector<nn::SharedHandle>& /*modelCache*/,
        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
    return NN_ERROR() << "InvalidDevice";
}

nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModelFromCache(
        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
    return NN_ERROR() << "InvalidDevice";
}

nn::GeneralResult<nn::SharedBuffer> InvalidDevice::allocate(
        const nn::BufferDesc& /*desc*/,
        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
        const std::vector<nn::BufferRole>& /*inputRoles*/,
        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
    return NN_ERROR() << "InvalidDevice";
}

}  // namespace android::hardware::neuralnetworks::utils

View File

@@ -0,0 +1,49 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "InvalidPreparedModel.h"
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <memory>
#include <utility>
#include <vector>
namespace android::hardware::neuralnetworks::utils {

// Unconditionally fails: there is no compiled model behind this object to
// execute.
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
                              const nn::OptionalTimePoint& /*deadline*/,
                              const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
    return NN_ERROR() << "InvalidPreparedModel";
}

// Unconditionally fails, mirroring execute() for the fenced execution path.
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
InvalidPreparedModel::executeFenced(
        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
    return NN_ERROR() << "InvalidPreparedModel";
}

// Returns an empty std::any: no underlying driver resource exists.
std::any InvalidPreparedModel::getUnderlyingResource() const {
    return {};
}

}  // namespace android::hardware::neuralnetworks::utils

View File

@@ -16,6 +16,9 @@
#include "ResilientDevice.h"
#include "InvalidBuffer.h"
#include "InvalidDevice.h"
#include "InvalidPreparedModel.h"
#include "ResilientBuffer.h"
#include "ResilientPreparedModel.h"
@@ -107,12 +110,21 @@ nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool
}
auto device = std::move(maybeDevice).value();
// TODO(b/173081926): Instead of CHECKing to ensure the cache has not been changed, return an
// invalid/"null" IDevice object that always fails.
CHECK_EQ(kName, device->getName());
CHECK_EQ(kVersionString, device->getVersionString());
CHECK(kExtensions == device->getSupportedExtensions());
CHECK_EQ(kCapabilities, device->getCapabilities());
// If recovered device has different metadata than what is cached (i.e., because it was
// updated), mark the device as invalid and preserve the cached data.
auto compare = [this, &device](auto fn) REQUIRES(mMutex) {
return std::invoke(fn, mDevice) != std::invoke(fn, device);
};
if (compare(&IDevice::getName) || compare(&IDevice::getVersionString) ||
compare(&IDevice::getFeatureLevel) || compare(&IDevice::getType) ||
compare(&IDevice::getSupportedExtensions) || compare(&IDevice::getCapabilities)) {
LOG(ERROR) << "Recovered device has different metadata than what is cached. Marking "
"IDevice object as invalid.";
device = std::make_shared<const InvalidDevice>(
kName, kVersionString, mDevice->getFeatureLevel(), mDevice->getType(), kExtensions,
kCapabilities, mDevice->getNumberOfCacheFilesNeeded());
mIsValid = false;
}
mDevice = std::move(device);
return mDevice;
@@ -199,11 +211,19 @@ nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocate(
return ResilientBuffer::create(std::move(makeBuffer));
}
bool ResilientDevice::isValidInternal() const {
std::lock_guard hold(mMutex);
return mIsValid;
}
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
nn::Priority priority, nn::OptionalTimePoint deadline,
const std::vector<nn::SharedHandle>& modelCache,
const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
if (!isValidInternal()) {
return std::make_shared<const InvalidPreparedModel>();
}
const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
token](const nn::IDevice& device) {
return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
@@ -216,6 +236,9 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCach
bool blocking, nn::OptionalTimePoint deadline,
const std::vector<nn::SharedHandle>& modelCache,
const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
if (!isValidInternal()) {
return std::make_shared<const InvalidPreparedModel>();
}
const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
};
@@ -227,6 +250,9 @@ nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocateInternal(
const std::vector<nn::SharedPreparedModel>& preparedModels,
const std::vector<nn::BufferRole>& inputRoles,
const std::vector<nn::BufferRole>& outputRoles) const {
if (!isValidInternal()) {
return std::make_shared<const InvalidBuffer>();
}
const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) {
return device.allocate(desc, preparedModels, inputRoles, outputRoles);
};