From 376005883c5981f1255e52e9a4ed417ab81fce58 Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Thu, 19 Nov 2020 20:46:58 -0800
Subject: [PATCH] Invalidate NN interface objects on cache mismatch

Currently, if an IDevice object is a DEAD_OBJECT, the runtime attempts
to re-retrieve the handle to the rebooted IDevice service. If an update
occurs after the IDevice was originally created, the rebooted IDevice
object may have different metadata and behavior. This is problematic
because the original metadata is cached in the runtime. Further, an
application might have made decisions based on that metadata and
behavior. (Note that even a driver service that is functionally the
same but has a different underlying implementation, such as more
optimized code, will have different `getVersionString` metadata.)

Instead, this CL invalidates the IDevice object on cache mismatch; the
invalidated object always returns an error when used.

Bug: 173081926
Test: mma
Change-Id: I805987361c627c32d45e1b7c7aed230376fc66ad
Merged-In: I805987361c627c32d45e1b7c7aed230376fc66ad
(cherry picked from commit 5a74c0fb0f23474a89471c49111e5ab526735392)
---
 .../common/include/nnapi/hal/InvalidBuffer.h  |  42 +++++++
 .../common/include/nnapi/hal/InvalidDevice.h  |  80 +++++++++++++
 .../include/nnapi/hal/InvalidPreparedModel.h  |  48 ++++++++
 .../include/nnapi/hal/ResilientDevice.h       |   7 +-
 .../utils/common/src/InvalidBuffer.cpp        |  42 +++++++
 .../utils/common/src/InvalidDevice.cpp        | 105 ++++++++++++++++++
 .../utils/common/src/InvalidPreparedModel.cpp |  49 ++++++++
 .../utils/common/src/ResilientDevice.cpp      |  38 ++++++-
 8 files changed, 403 insertions(+), 8 deletions(-)
 create mode 100644 neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
 create mode 100644 neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
 create mode 100644 neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
 create mode 100644 neuralnetworks/utils/common/src/InvalidBuffer.cpp
 create mode 100644 neuralnetworks/utils/common/src/InvalidDevice.cpp
 create mode 100644 neuralnetworks/utils/common/src/InvalidPreparedModel.cpp

diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
new file mode 100644
index 0000000000..8c04b8887b
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidBuffer final : public nn::IBuffer {
+  public:
+    nn::Request::MemoryDomainToken getToken() const override;
+
+    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
+
+    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+                                     const nn::Dimensions& dimensions) const override;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
new file mode 100644
index 0000000000..5e62b9ae0b
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidDevice final : public nn::IDevice {
+  public:
+    InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
+                  nn::DeviceType type, std::vector<nn::Extension> extensions,
+                  nn::Capabilities capabilities,
+                  std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded);
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    const std::string kName;
+    const std::string kVersionString;
+    const nn::Version kFeatureLevel;
+    const nn::DeviceType kType;
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
new file mode 100644
index 0000000000..4b32b4e3af
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
+
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <any>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidPreparedModel final : public nn::IPreparedModel {
+  public:
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
+            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+
+    std::any getUnderlyingResource() const override;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4a84e4dacc..4bfed6cd51 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -45,8 +45,9 @@ class ResilientDevice final : public nn::IDevice,
                     std::string versionString, std::vector<nn::Extension> extensions,
                     nn::Capabilities capabilities, nn::SharedDevice device);
 
-    nn::SharedDevice getDevice() const;
-    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const;
+    nn::SharedDevice getDevice() const EXCLUDES(mMutex);
+    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const
+            EXCLUDES(mMutex);
 
     const std::string& getName() const override;
     const std::string& getVersionString() const override;
@@ -78,6 +79,7 @@ class ResilientDevice final : public nn::IDevice,
             const std::vector<nn::BufferRole>& outputRoles) const override;
 
   private:
+    bool isValidInternal() const EXCLUDES(mMutex);
    nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
             bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
             nn::Priority priority, nn::OptionalTimePoint deadline,
@@ -100,6 +102,7 @@ class ResilientDevice final : public nn::IDevice,
     const nn::Capabilities kCapabilities;
     mutable std::mutex mMutex;
     mutable nn::SharedDevice mDevice GUARDED_BY(mMutex);
+    mutable bool mIsValid GUARDED_BY(mMutex) = true;
 };
 
 }  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidBuffer.cpp b/neuralnetworks/utils/common/src/InvalidBuffer.cpp
new file mode 100644
index 0000000000..c6f75d7137
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidBuffer.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidBuffer.h"
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::Request::MemoryDomainToken InvalidBuffer::getToken() const {
+    return nn::Request::MemoryDomainToken{};
+}
+
+nn::GeneralResult<void> InvalidBuffer::copyTo(const nn::Memory& /*dst*/) const {
+    return NN_ERROR() << "InvalidBuffer";
+}
+
+nn::GeneralResult<void> InvalidBuffer::copyFrom(const nn::Memory& /*src*/,
+                                                const nn::Dimensions& /*dimensions*/) const {
+    return NN_ERROR() << "InvalidBuffer";
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp
new file mode 100644
index 0000000000..535ccb41c7
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidDevice.h"
+
+#include "InvalidBuffer.h"
+#include "InvalidPreparedModel.h"
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+InvalidDevice::InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
+                             nn::DeviceType type, std::vector<nn::Extension> extensions,
+                             nn::Capabilities capabilities,
+                             std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded)
+    : kName(std::move(name)),
+      kVersionString(std::move(versionString)),
+      kFeatureLevel(featureLevel),
+      kType(type),
+      kExtensions(std::move(extensions)),
+      kCapabilities(std::move(capabilities)),
+      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded) {}
+
+const std::string& InvalidDevice::getName() const {
+    return kName;
+}
+
+const std::string& InvalidDevice::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version InvalidDevice::getFeatureLevel() const {
+    return kFeatureLevel;
+}
+
+nn::DeviceType InvalidDevice::getType() const {
+    return kType;
+}
+
+const std::vector<nn::Extension>& InvalidDevice::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& InvalidDevice::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> InvalidDevice::getNumberOfCacheFilesNeeded() const {
+    return kNumberOfCacheFilesNeeded;
+}
+
+nn::GeneralResult<void> InvalidDevice::wait() const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<std::vector<bool>> InvalidDevice::getSupportedOperations(
+        const nn::Model& /*model*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModel(
+        const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/,
+        nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/,
+        const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModelFromCache(
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<nn::SharedBuffer> InvalidDevice::allocate(
+        const nn::BufferDesc& /*desc*/,
+        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
+        const std::vector<nn::BufferRole>& /*inputRoles*/,
+        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
new file mode 100644
index 0000000000..9ae7a63949
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidPreparedModel.h"
+
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <any>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+                              const nn::OptionalTimePoint& /*deadline*/,
+                              const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+    return NN_ERROR() << "InvalidPreparedModel";
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+InvalidPreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR() << "InvalidPreparedModel";
+}
+
+std::any InvalidPreparedModel::getUnderlyingResource() const {
+    return {};
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 26025a5026..2f83c5c5bd 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -16,6 +16,9 @@
 
 #include "ResilientDevice.h"
 
+#include "InvalidBuffer.h"
+#include "InvalidDevice.h"
+#include "InvalidPreparedModel.h"
 #include "ResilientBuffer.h"
 #include "ResilientPreparedModel.h"
 
@@ -107,12 +110,21 @@ nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool
     }
     auto device = std::move(maybeDevice).value();
 
-    // TODO(b/173081926): Instead of CHECKing to ensure the cache has not been changed, return an
-    // invalid/"null" IDevice object that always fails.
-    CHECK_EQ(kName, device->getName());
-    CHECK_EQ(kVersionString, device->getVersionString());
-    CHECK(kExtensions == device->getSupportedExtensions());
-    CHECK_EQ(kCapabilities, device->getCapabilities());
+    // If the recovered device has different metadata than what is cached (i.e., because it was
+    // updated), mark the device as invalid and preserve the cached data.
+    auto compare = [this, &device](auto fn) REQUIRES(mMutex) {
+        return std::invoke(fn, mDevice) != std::invoke(fn, device);
+    };
+    if (compare(&IDevice::getName) || compare(&IDevice::getVersionString) ||
+        compare(&IDevice::getFeatureLevel) || compare(&IDevice::getType) ||
+        compare(&IDevice::getSupportedExtensions) || compare(&IDevice::getCapabilities)) {
+        LOG(ERROR) << "Recovered device has different metadata than what is cached. Marking "
Marking " + "IDevice object as invalid."; + device = std::make_shared( + kName, kVersionString, mDevice->getFeatureLevel(), mDevice->getType(), kExtensions, + kCapabilities, mDevice->getNumberOfCacheFilesNeeded()); + mIsValid = false; + } mDevice = std::move(device); return mDevice; @@ -199,11 +211,19 @@ nn::GeneralResult ResilientDevice::allocate( return ResilientBuffer::create(std::move(makeBuffer)); } +bool ResilientDevice::isValidInternal() const { + std::lock_guard hold(mMutex); + return mIsValid; +} + nn::GeneralResult ResilientDevice::prepareModelInternal( bool blocking, const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { + if (!isValidInternal()) { + return std::make_shared(); + } const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache, token](const nn::IDevice& device) { return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache, @@ -216,6 +236,9 @@ nn::GeneralResult ResilientDevice::prepareModelFromCach bool blocking, nn::OptionalTimePoint deadline, const std::vector& modelCache, const std::vector& dataCache, const nn::CacheToken& token) const { + if (!isValidInternal()) { + return std::make_shared(); + } const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) { return device.prepareModelFromCache(deadline, modelCache, dataCache, token); }; @@ -227,6 +250,9 @@ nn::GeneralResult ResilientDevice::allocateInternal( const std::vector& preparedModels, const std::vector& inputRoles, const std::vector& outputRoles) const { + if (!isValidInternal()) { + return std::make_shared(); + } const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) { return device.allocate(desc, preparedModels, inputRoles, outputRoles); };