diff --git a/current.txt b/current.txt
index 88d2e1132a..23b019e8a5 100644
--- a/current.txt
+++ b/current.txt
@@ -508,11 +508,11 @@ b9422a9aca84df1ff9623dc12c0562abce97716e28d63a965f2bfb88f9ad9607 android.hardwar
 4cb139f729c29d8d6f4ecdab149c4feb571dad8a06e56cd57fcb52e70208bab4 android.hardware.media.c2@1.0::types
 4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback
 19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext
-96249c852dabeefa3a9496ecdfc44681a071c665bfbf88527bf775c88bf1ab1b android.hardware.neuralnetworks@1.2::IDevice
+363821d1b71147b896a08e2a570946db9b9d46f90d9f91b085bd8d3013a2b4d5 android.hardware.neuralnetworks@1.2::IDevice
 92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
-83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel
+36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
 e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-2ef1bab554ea484523b396e48033117dbbefc2f90269f9e7e3eb5a58ba50bfb9 android.hardware.neuralnetworks@1.2::types
+39a6d7cf9bc7290bd90739e971ccad5f35f5cc0faea4a417b59f22c9ca9f1f2a android.hardware.neuralnetworks@1.2::types
 cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
 abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
 4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index f5cb0d7cf5..106f33279d 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -52,6 +52,7 @@ using ::test_helper::for_each;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
 using ::test_helper::resize_accordingly;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 template <typename T>
 void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
@@ -540,7 +541,8 @@ void PrepareModel(const sp<IDevice>& device, const V1_2::Model& model,
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index b9fa38870e..da9a966ba1 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -113,44 +113,83 @@ interface IDevice extends @1.1::IDevice {
         generates (ErrorStatus status, vec<bool> supportedOperations);
 
     /**
-     * Gets whether the driver supports compilation caching.
+     * Gets the caching requirements of the driver implementation.
      *
-     * isCachingSupported indicates whether the driver supports compilation caching.
-     * Even if so, the driver may still choose not to cache certain compiled models.
+ * There are two types of cache file descriptors provided to the driver: model cache + * and data cache. * - * If the device reports the caching is not supported, the user may avoid calling - * IDevice::prepareModelFromCache and IPreparedModel::saveToCache. + * The data cache is for caching constant data, possibly including preprocessed + * and transformed tensor buffers. Any modification to the data cache should + * have no worse effect than generating bad output values at execution time. + * + * The model cache is for caching security-sensitive data such as compiled + * executable machine code in the device's native binary format. A modification + * to the model cache may affect the driver's execution behavior, and a malicious + * client could make use of this to execute beyond the granted permission. Thus, + * the driver must always check whether the model cache is corrupted before + * preparing the model from cache. + * + * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver + * implementation needs to cache a single prepared model. Returning 0 for both types + * indicates compilation caching is not supported by this driver. The driver may + * still choose not to cache certain compiled models even if it reports that caching + * is supported. + * + * If the device reports that caching is not supported, the user may avoid calling + * IDevice::prepareModelFromCache or providing cache file descriptors to + * IDevice::prepareModel_1_2. * * @return status Error status of the call, must be: * - NONE if successful * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error - * @return supported A boolean indicating whether the driver supports compilation - * caching. Even on returning true, the driver may still choose - * not to cache certain compiled models. + * @return numModelCache An unsigned integer indicating how many files for model cache + * the driver needs to cache a single prepared model. It must + * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES. + * @return numDataCache An unsigned integer indicating how many files for data cache + * the driver needs to cache a single prepared model. It must + * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES. */ - isCachingSupported() generates (ErrorStatus status, bool supported); + getNumberOfCacheFilesNeeded() + generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache); /** - * Creates a prepared model for execution. + * Asynchronously creates a prepared model for execution and optionally saves it + * into cache files. * - * prepareModel is used to make any necessary transformations or alternative + * prepareModel is used to make any necessary transformations to or alternative * representations to a model for execution, possibly including * transformations on the constant data, optimization on the model's graph, * or compilation into the device's native binary format. The model itself * is not changed. * + * Optionally, caching information may be provided for the driver to save + * the prepared model to cache files for faster model compilation time + * when the same model preparation is requested in the future. There are + * two types of cache file handles provided to the driver: model cache + * and data cache. For more information on the two types of cache handles, + * refer to getNumberOfCacheFilesNeeded. + * + * The file descriptors must be opened with read and write permission. 
A file may + * have any size, and the corresponding file descriptor may have any offset. The + * driver must truncate a file to zero size before writing to that file. The file + * descriptors may be closed by the client once the asynchronous preparation has + * finished. The driver must dup a file descriptor if it wants to get access to + * the cache file later. + * * The model is prepared asynchronously with respect to the caller. The - * prepareModel function must verify the inputs to the prepareModel function - * are correct. If there is an error, prepareModel must immediately invoke + * prepareModel function must verify the inputs to the preparedModel function + * related to preparing the model (as opposed to saving the prepared model to + * cache) are correct. If there is an error, prepareModel must immediately invoke * the callback with the appropriate ErrorStatus value and nullptr for the - * IPreparedModel, then return with the same ErrorStatus. If the inputs to - * the prepareModel function are valid and there is no error, prepareModel - * must launch an asynchronous task to prepare the model in the background, - * and immediately return from prepareModel with ErrorStatus::NONE. If the - * asynchronous task fails to launch, prepareModel must immediately invoke - * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the - * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE. + * IPreparedModel, then return with the same ErrorStatus. If the inputs to the + * prepareModel function that are related to preparing the model are valid and + * there is no error, prepareModel must launch an asynchronous task + * to prepare the model in the background, and immediately return from + * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch, + * prepareModel must immediately invoke the callback with + * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return + * with ErrorStatus::GENERAL_FAILURE. * * When the asynchronous task has finished preparing the model, it must * immediately invoke the callback function provided as an input to @@ -160,6 +199,14 @@ interface IDevice extends @1.1::IDevice { * the callback object must be invoked with the appropriate ErrorStatus * value and nullptr for the IPreparedModel. * + * Optionally, the driver may save the prepared model to cache during the + * asynchronous preparation. Any error that occurs when saving to cache must + * not affect the status of preparing the model. Even if the input arguments + * related to the cache may be invalid, or the driver may fail to save to cache, + * the prepareModel function must finish preparing the model. The driver + * may choose not to save to cache even if the caching information is + * provided and valid. + * * The only information that may be unknown to the model at this stage is * the shape of the tensors, which may only be known at execution time. As * such, some driver services may return partially prepared models, where @@ -173,6 +220,26 @@ interface IDevice extends @1.1::IDevice { * @param model The model to be prepared for execution. * @param preference Indicates the intended execution behavior of a prepared * model. + * @param modelCache A vector of handles with each entry holding exactly one + * cache file descriptor for the security-sensitive cache. The length of + * the vector must either be 0 indicating that caching information is not provided, + * or match the numModelCache returned from getNumberOfCacheFilesNeeded. 
The cache + * handles will be provided in the same order when retrieving the + * preparedModel from cache files with prepareModelFromCache. + * @param dataCache A vector of handles with each entry holding exactly one + * cache file descriptor for the constants' cache. The length of + * the vector must either be 0 indicating that caching information is not provided, + * or match the numDataCache returned from getNumberOfCacheFilesNeeded. The cache + * handles will be provided in the same order when retrieving the + * preparedModel from cache files with prepareModelFromCache. + * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN + * identifying the prepared model. The same token will be provided when retrieving + * the prepared model from the cache files with prepareModelFromCache. + * Tokens should be chosen to have a low rate of collision for a particular + * application. The driver cannot detect a collision; a collision will result + * in a failed execution or in a successful execution that produces incorrect + * output values. If both modelCache and dataCache are empty indicating that + * caching information is not provided, this token must be ignored. * @param callback A callback object used to return the error status of * preparing the model for execution and the prepared model if * successful, nullptr otherwise. The callback object's notify function @@ -182,9 +249,12 @@ interface IDevice extends @1.1::IDevice { * - NONE if preparation task is successfully launched * - DEVICE_UNAVAILABLE if driver is offline or busy * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if one of the input arguments is invalid + * - INVALID_ARGUMENT if one of the input arguments related to preparing the + * model is invalid */ prepareModel_1_2(Model model, ExecutionPreference preference, + vec modelCache, vec dataCache, + uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token, IPreparedModelCallback callback) generates (ErrorStatus status); @@ -192,22 +262,17 @@ interface IDevice extends @1.1::IDevice { * Creates a prepared model from cache files for execution. * * prepareModelFromCache is used to retrieve a prepared model directly from - * cache files to avoid slow model compilation time. There are exactly two - * cache file descriptors provided to the driver: modelCache and dataCache. + * cache files to avoid slow model compilation time. There are + * two types of cache file handles provided to the driver: model cache + * and data cache. For more information on the two types of cache handles, + * refer to getNumberOfCacheFilesNeeded. * - * The dataCache is for caching constant data, possibly including preprocessed - * and transformed tensor buffers. Any modification to the dataCache should - * have no worse effect than generating bad output values at execution time. - * - * The modelCache is for caching security-sensitive data such as compiled - * executable machine code in the device's native binary format. A modification - * to the modelCache may affect the driver's execution behavior, and a malicious - * client could make use of this to execute beyond the granted permission. Thus, - * the driver must always check whether the modelCache is corrupted before preparing - * the model from cache. - * - * The two file descriptors may be closed by the client once the asynchronous - * preparation has finished. The driver has to copy all the data it needs. + * The file descriptors must be opened with read and write permission. 
A file may + * have any size, and the corresponding file descriptor may have any offset. The + * driver must truncate a file to zero size before writing to that file. The file + * descriptors may be closed by the client once the asynchronous preparation has + * finished. The driver must dup a file descriptor if it wants to get access to + * the cache file later. * * The model is prepared asynchronously with respect to the caller. The * prepareModelFromCache function must verify the inputs to the @@ -241,13 +306,17 @@ interface IDevice extends @1.1::IDevice { * used with different shapes of inputs on different (possibly concurrent) * executions. * - * @param modelCache A handle holding exactly one cache file descriptor for the - * security-sensitive cache. - * @param dataCache A handle holding exactly one cache file descriptor for the - * constants' cache. + * @param modelCache A vector of handles with each entry holding exactly one + * cache file descriptor for the security-sensitive cache. The length of + * the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded. + * The cache handles will be provided in the same order as with prepareModel_1_2. + * @param dataCache A vector of handles with each entry holding exactly one + * cache file descriptor for the constants' cache. The length of the vector + * must match the numDataCache returned from getNumberOfCacheFilesNeeded. + * The cache handles will be provided in the same order as with prepareModel_1_2. * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN * identifying the prepared model. It is the same token provided when saving - * the cache files with IPreparedModel::saveToCache. Tokens should be chosen + * the cache files with prepareModel_1_2. Tokens should be chosen * to have a low rate of collision for a particular application. The driver * cannot detect a collision; a collision will result in a failed execution * or in a successful execution that produces incorrect output values. @@ -263,7 +332,7 @@ interface IDevice extends @1.1::IDevice { * unspecified error * - INVALID_ARGUMENT if one of the input arguments is invalid */ - prepareModelFromCache(handle modelCache, handle dataCache, + prepareModelFromCache(vec modelCache, vec dataCache, uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token, IPreparedModelCallback callback) generates (ErrorStatus status); diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal index 757d5f1467..5d2d80ff71 100644 --- a/neuralnetworks/1.2/IPreparedModel.hal +++ b/neuralnetworks/1.2/IPreparedModel.hal @@ -157,62 +157,4 @@ interface IPreparedModel extends @1.0::IPreparedModel { fmq_sync requestChannel, fmq_sync resultChannel) generates (ErrorStatus status, IBurstContext context); - - /* - * Saves the prepared model to cache files. - * - * saveToCache is used to save a prepared model to cache files for faster - * model compilation time when the same model preparation is requested in - * the future. There are exactly two cache file descriptors provided to the - * driver: modelCache and dataCache. - * - * The dataCache is for caching constant data, possibly including preprocessed - * and transformed tensor buffers. Any modification to the dataCache should - * have no worse effect than generating bad output values at execution time. - * - * The modelCache is for caching security-sensitive data such as compiled - * executable machine code in the device's native binary format. 
A modification - * to the modelCache may affect the driver's execution behavior, and a malicious - * client could make use of this to execute beyond the granted permission. Thus, - * the driver must always check whether the modelCache is corrupted before preparing - * the model from cache. - * - * The two file descriptors must point to two zero-length files with offset - * positioned at the beginning of the file. The file descriptors may be closed - * by the client once the method has returned. - * - * If the driver decides not to save the prepared model without looking at the - * input arguments to the saveToCache function, saveToCache must return with - * ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify - * the input arguments to the saveToCache function are valid, and return with - * ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver - * could not save the prepared model, saveToCache must return with the appropriate - * ErrorStatus. Otherwise, it must write the cache files and return - * ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents - * of the cache files are undefined. - * - * @param modelCache A handle holding exactly one cache file descriptor for the - * security-sensitive cache. - * @param dataCache A handle holding exactly one cache file descriptor for the - * constants' cache. - * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN - * identifying the prepared model. The same token will be provided - * when retrieving the prepared model from cache files with - * IDevice::prepareModelFromCache. Tokens should be chosen to have - * a low rate of collision for a particular application. The driver - * cannot detect a collision; a collision will result in a failed - * execution or in a successful execution that produces incorrect - * output values. - * @return status Error status of saveToCache, must be: - * - NONE if saveToCache is performed successfully - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if the driver could not save the - * prepared model or if there is an unspecified error - * - INVALID_ARGUMENT if one of the input arguments is invalid, - * unless the driver decides not to save the prepared model - * without looking at the input arguments - */ - saveToCache(handle modelCache, handle dataCache, - uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token) - generates (ErrorStatus status); }; diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal index b27dc86088..28d01192da 100644 --- a/neuralnetworks/1.2/types.hal +++ b/neuralnetworks/1.2/types.hal @@ -30,6 +30,11 @@ enum Constant : uint32_t { * The byte size of the cache token. */ BYTE_SIZE_OF_CACHE_TOKEN = 32, + + /** + * The maximum number of files for each type of cache in compilation caching. 
+     */
+    MAX_NUMBER_OF_CACHE_FILES = 32,
 };
 
 enum OperandType : @1.0::OperandType {
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 365a750fdb..6fb16c2033 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -77,10 +77,15 @@ TEST_F(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
     EXPECT_TRUE(ret.isOk());
 }
 
-// isCachingSupported test
-TEST_F(NeuralnetworksHidlTest, IsCachingSupported) {
-    Return<void> ret = device->isCachingSupported(
-            [](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); });
+// getNumberOfCacheFilesNeeded test
+TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
+    Return<void> ret = device->getNumberOfCacheFilesNeeded(
+            [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+                EXPECT_EQ(ErrorStatus::NONE, status);
+                EXPECT_LE(numModelCache,
+                          static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+                EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+            });
     EXPECT_TRUE(ret.isOk());
 }
 }  // namespace functional
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 00989e5bdc..167fc096ce 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -54,29 +54,39 @@ namespace {
 [[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
 [[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
 
-enum class AccessMode { READ_ONLY, WRITE_ONLY };
+enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
 
-void createCacheHandle(const std::vector<std::string>& files, AccessMode mode,
-                       hidl_handle* handle) {
-    std::vector<int> fds;
-    for (const auto& file : files) {
-        int fd;
-        if (mode == AccessMode::READ_ONLY) {
-            fd = open(file.c_str(), O_RDONLY);
-        } else if (mode == AccessMode::WRITE_ONLY) {
-            fd = open(file.c_str(), O_WRONLY | O_TRUNC | O_CREAT, S_IRUSR | S_IWUSR);
-        } else {
-            FAIL();
+// Creates cache handles based on provided file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+void createCacheHandles(const std::vector>& fileGroups, + const std::vector& mode, hidl_vec* handles) { + handles->resize(fileGroups.size()); + for (uint32_t i = 0; i < fileGroups.size(); i++) { + std::vector fds; + for (const auto& file : fileGroups[i]) { + int fd; + if (mode[i] == AccessMode::READ_ONLY) { + fd = open(file.c_str(), O_RDONLY); + } else if (mode[i] == AccessMode::WRITE_ONLY) { + fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); + } else if (mode[i] == AccessMode::READ_WRITE) { + fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + } else { + FAIL(); + } + ASSERT_GE(fd, 0); + fds.push_back(fd); } - ASSERT_GE(fd, 0); - fds.push_back(fd); + native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0); + ASSERT_NE(cacheNativeHandle, nullptr); + std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]); + (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true); } - native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0); - ASSERT_NE(cacheNativeHandle, nullptr); - for (uint32_t i = 0; i < fds.size(); i++) { - cacheNativeHandle->data[i] = fds[i]; - } - handle->setTo(cacheNativeHandle, /*shouldOwn=*/true); +} + +void createCacheHandles(const std::vector>& fileGroups, AccessMode mode, + hidl_vec* handles) { + createCacheHandles(fileGroups, std::vector(fileGroups.size(), mode), handles); } } // namespace @@ -88,38 +98,43 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { NeuralnetworksHidlTest::SetUp(); ASSERT_NE(device.get(), nullptr); - // Create cache directory. The cache directory and cache files are always created to test - // the behavior of prepareModelFromCache, even when caching is not supported. + // Create cache directory. The cache directory and a temporary cache file is always created + // to test the behavior of prepareModelFromCache, even when caching is not supported. char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX"; char* cacheDir = mkdtemp(cacheDirTemp); ASSERT_NE(cacheDir, nullptr); mCacheDir = cacheDir; + mCacheDir.push_back('/'); - // Create empty cache files. - mCache1 = mCacheDir + "/cache1"; - mCache2 = mCacheDir + "/cache2"; - mCache3 = mCacheDir + "/cache3"; - // A dummy handle, use AccessMode::WRITE_ONLY for createCacheHandle to create files. - hidl_handle handle; - createCacheHandle({mCache1, mCache2, mCache3}, AccessMode::WRITE_ONLY, &handle); - - // Check if caching is supported. - bool isCachingSupported; - Return ret = device->isCachingSupported( - [&isCachingSupported](ErrorStatus status, bool supported) { + Return ret = device->getNumberOfCacheFilesNeeded( + [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) { EXPECT_EQ(ErrorStatus::NONE, status); - isCachingSupported = supported; + mNumModelCache = numModelCache; + mNumDataCache = numDataCache; }); EXPECT_TRUE(ret.isOk()); - if (isCachingSupported) { - mIsCachingSupported = true; - } else { + mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0; + + // Create empty cache files. + mTmpCache = mCacheDir + "tmp"; + for (uint32_t i = 0; i < mNumModelCache; i++) { + mModelCache.push_back({mCacheDir + "model" + std::to_string(i)}); + } + for (uint32_t i = 0; i < mNumDataCache; i++) { + mDataCache.push_back({mCacheDir + "data" + std::to_string(i)}); + } + // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files. 
+ hidl_vec modelHandle, dataHandle, tmpHandle; + createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle); + createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle); + createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle); + + if (!mIsCachingSupported) { LOG(INFO) << "NN VTS: Early termination of test because vendor service does not " "support compilation caching."; std::cout << "[ ] Early termination of test because vendor service does not " "support compilation caching." << std::endl; - mIsCachingSupported = false; } } @@ -127,22 +142,49 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { // The tmp directory is only removed when the driver reports caching not supported, // otherwise it is kept for debugging purpose. if (!mIsCachingSupported) { - remove(mCache1.c_str()); - remove(mCache2.c_str()); - remove(mCache3.c_str()); + remove(mTmpCache.c_str()); rmdir(mCacheDir.c_str()); } NeuralnetworksHidlTest::TearDown(); } - void saveModelToCache(sp preparedModel, const hidl_handle& cache1, - const hidl_handle& cache2, ErrorStatus* status) { - // Save IPreparedModel to cache. + void saveModelToCache(const V1_2::Model& model, const hidl_vec& modelCache, + const hidl_vec& dataCache, bool* supported, + sp* preparedModel = nullptr) { + if (preparedModel != nullptr) *preparedModel = nullptr; + + // See if service can handle model. + bool fullySupportsModel = false; + Return supportedCall = device->getSupportedOperations_1_2( + model, + [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec& supported) { + ASSERT_EQ(ErrorStatus::NONE, status); + ASSERT_EQ(supported.size(), model.operations.size()); + fullySupportsModel = std::all_of(supported.begin(), supported.end(), + [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedCall.isOk()); + *supported = fullySupportsModel; + if (!fullySupportsModel) return; + + // Launch prepare model. + sp preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); hidl_array cacheToken(mToken); - Return saveToCacheStatus = - preparedModel->saveToCache(cache1, cache2, cacheToken); - ASSERT_TRUE(saveToCacheStatus.isOk()); - *status = static_cast(saveToCacheStatus); + Return prepareLaunchStatus = + device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache, + dataCache, cacheToken, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(static_cast(prepareLaunchStatus), ErrorStatus::NONE); + + // Retrieve prepared model. + preparedModelCallback->wait(); + ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE); + if (preparedModel != nullptr) { + *preparedModel = + V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel()) + .withDefault(nullptr); + } } bool checkEarlyTermination(ErrorStatus status) { @@ -157,14 +199,27 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { return false; } - void prepareModelFromCache(const hidl_handle& cache1, const hidl_handle& cache2, + bool checkEarlyTermination(bool supported) { + if (!supported) { + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "prepare model that it does not support." 
+ << std::endl; + return true; + } + return false; + } + + void prepareModelFromCache(const hidl_vec& modelCache, + const hidl_vec& dataCache, sp* preparedModel, ErrorStatus* status) { // Launch prepare model from cache. sp preparedModelCallback = new PreparedModelCallback(); ASSERT_NE(nullptr, preparedModelCallback.get()); hidl_array cacheToken(mToken); - Return prepareLaunchStatus = - device->prepareModelFromCache(cache1, cache2, cacheToken, preparedModelCallback); + Return prepareLaunchStatus = device->prepareModelFromCache( + modelCache, dataCache, cacheToken, preparedModelCallback); ASSERT_TRUE(prepareLaunchStatus.isOk()); if (static_cast(prepareLaunchStatus) != ErrorStatus::NONE) { *preparedModel = nullptr; @@ -179,49 +234,54 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { .withDefault(nullptr); } + // Absolute path to the temporary cache directory. std::string mCacheDir; - std::string mCache1; - std::string mCache2; - std::string mCache3; + + // Groups of file paths for model and data cache in the tmp cache directory, initialized with + // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles + // and the inner vector is for fds held by each handle. + std::vector> mModelCache; + std::vector> mDataCache; + + // A separate temporary file path in the tmp cache directory. + std::string mTmpCache; + uint8_t mToken[static_cast(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {}; - bool mIsCachingSupported; + uint32_t mNumModelCache; + uint32_t mNumDataCache; + uint32_t mIsCachingSupported; }; TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) { // Create test HIDL model and compile. Model testModel = createTestModel(); sp preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (!mIsCachingSupported) { - EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE); - } else { - if (checkEarlyTermination(status)) return; - ASSERT_EQ(status, ErrorStatus::NONE); - } + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } // Retrieve preparedModel from cache. 
{ preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (!mIsCachingSupported) { ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); ASSERT_EQ(preparedModel, nullptr); return; + } else if (checkEarlyTermination(status)) { + ASSERT_EQ(preparedModel, nullptr); + return; } else { ASSERT_EQ(status, ErrorStatus::NONE); ASSERT_NE(preparedModel, nullptr); @@ -238,41 +298,54 @@ TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) { // Create test HIDL model and compile. Model testModel = createTestModel(); sp preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (!mIsCachingSupported) { - EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE); - } else { - if (checkEarlyTermination(status)) return; - ASSERT_EQ(status, ErrorStatus::NONE); + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + uint8_t dummyBytes[] = {0, 0}; + // Write a dummy integer to the cache. + // The driver should be able to handle non-empty cache and non-zero fd offset. + for (uint32_t i = 0; i < modelCache.size(); i++) { + ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes, + sizeof(dummyBytes)), + sizeof(dummyBytes)); } + for (uint32_t i = 0; i < dataCache.size(); i++) { + ASSERT_EQ( + write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)), + sizeof(dummyBytes)); + } + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } // Retrieve preparedModel from cache. { preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); uint8_t dummyByte = 0; - // Advance offset by one byte. - ASSERT_GE(read(cache1.getNativeHandle()->data[0], &dummyByte, 1), 0); - ASSERT_GE(read(cache2.getNativeHandle()->data[0], &dummyByte, 1), 0); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + // Advance the offset of each handle by one byte. + // The driver should be able to handle non-zero fd offset. 
+ for (uint32_t i = 0; i < modelCache.size(); i++) { + ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0); + } + for (uint32_t i = 0; i < dataCache.size(); i++) { + ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0); + } + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (!mIsCachingSupported) { ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); ASSERT_EQ(preparedModel, nullptr); return; + } else if (checkEarlyTermination(status)) { + ASSERT_EQ(preparedModel, nullptr); + return; } else { ASSERT_EQ(status, ErrorStatus::NONE); ASSERT_NE(preparedModel, nullptr); @@ -285,234 +358,512 @@ TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) { /*testDynamicOutputShape=*/false); } +TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) { + // Create test HIDL model and compile. + Model testModel = createTestModel(); + + // Test with number of model cache files greater than mNumModelCache. + { + bool supported; + hidl_vec modelCache, dataCache; + // Pass an additional cache file for model cache. + mModelCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.pop_back(); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of model cache files smaller than mNumModelCache. + if (mModelCache.size() > 0) { + bool supported; + hidl_vec modelCache, dataCache; + // Pop out the last cache file. + auto tmp = mModelCache.back(); + mModelCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.push_back(tmp); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files greater than mNumDataCache. + { + bool supported; + hidl_vec modelCache, dataCache; + // Pass an additional cache file for data cache. 
+ mDataCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.pop_back(); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files smaller than mNumDataCache. + if (mDataCache.size() > 0) { + bool supported; + hidl_vec modelCache, dataCache; + // Pop out the last cache file. + auto tmp = mDataCache.back(); + mDataCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.push_back(tmp); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } +} + +TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) { + // Create test HIDL model and compile. + Model testModel = createTestModel(); + + // Save the compilation to cache. + { + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Test with number of model cache files greater than mNumModelCache. + { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + mModelCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of model cache files smaller than mNumModelCache. 
+ if (mModelCache.size() > 0) { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + auto tmp = mModelCache.back(); + mModelCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache.push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files greater than mNumDataCache. + { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + mDataCache.push_back({mTmpCache}); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Test with number of data cache files smaller than mNumDataCache. + if (mDataCache.size() > 0) { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + auto tmp = mDataCache.back(); + mDataCache.pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache.push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } +} + TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; - // cache1 with invalid NumFd. - { + // Go through each handle in model cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumModelCache; i++) { + bool supported; + hidl_vec modelCache, dataCache; + // Pass an invalid number of fds for handle i. + mModelCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].pop_back(); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. 
+ preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1, mCache3}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); } + ASSERT_EQ(preparedModel, nullptr); } - // cache2 with invalid NumFd. - { + // Go through each handle in model cache, test with NumFd equal to 0. + for (uint32_t i = 0; i < mNumModelCache; i++) { + bool supported; + hidl_vec modelCache, dataCache; + // Pass an invalid number of fds for handle i. + auto tmp = mModelCache[i].back(); + mModelCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].push_back(tmp); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2, mCache3}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumDataCache; i++) { + bool supported; + hidl_vec modelCache, dataCache; + // Pass an invalid number of fds for handle i. + mDataCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].pop_back(); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd equal to 0. + for (uint32_t i = 0; i < mNumDataCache; i++) { + bool supported; + hidl_vec modelCache, dataCache; + // Pass an invalid number of fds for handle i. 
+ auto tmp = mDataCache[i].back(); + mDataCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].push_back(tmp); + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; + ErrorStatus status; + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); } } TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::NONE); - } + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } - // cache1 with invalid NumFd. - { - preparedModel = nullptr; + // Go through each handle in model cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumModelCache; i++) { + sp preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1, mCache3}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + hidl_vec modelCache, dataCache; + mModelCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (status != ErrorStatus::GENERAL_FAILURE) { ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); - ASSERT_EQ(preparedModel, nullptr); } + ASSERT_EQ(preparedModel, nullptr); } - // cache2 with invalid NumFd. - { - preparedModel = nullptr; + // Go through each handle in model cache, test with NumFd equal to 0. 
+ for (uint32_t i = 0; i < mNumModelCache; i++) { + sp preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2, mCache3}, AccessMode::READ_ONLY, &cache2); - prepareModelFromCache(cache1, cache2, &preparedModel, &status); + hidl_vec modelCache, dataCache; + auto tmp = mModelCache[i].back(); + mModelCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mModelCache[i].push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); if (status != ErrorStatus::GENERAL_FAILURE) { ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); - ASSERT_EQ(preparedModel, nullptr); } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd greater than 1. + for (uint32_t i = 0; i < mNumDataCache; i++) { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + mDataCache[i].push_back(mTmpCache); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].pop_back(); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); + } + + // Go through each handle in data cache, test with NumFd equal to 0. + for (uint32_t i = 0; i < mNumDataCache; i++) { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + auto tmp = mDataCache[i].back(); + mDataCache[i].pop_back(); + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + mDataCache[i].push_back(tmp); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::GENERAL_FAILURE) { + ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT); + } + ASSERT_EQ(preparedModel, nullptr); } } TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; + std::vector modelCacheMode(mNumModelCache, AccessMode::READ_WRITE); + std::vector dataCacheMode(mNumDataCache, AccessMode::READ_WRITE); - // cache1 with invalid access mode. - { + // Go through each handle in model cache, test with invalid access mode. + for (uint32_t i = 0; i < mNumModelCache; i++) { + bool supported; + hidl_vec modelCache, dataCache; + modelCacheMode[i] = AccessMode::READ_ONLY; + createCacheHandles(mModelCache, modelCacheMode, &modelCache); + createCacheHandles(mDataCache, dataCacheMode, &dataCache); + modelCacheMode[i] = AccessMode::READ_WRITE; + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. 
+ preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); } - // cache2 with invalid access mode. - { + // Go through each handle in data cache, test with invalid access mode. + for (uint32_t i = 0; i < mNumDataCache; i++) { + bool supported; + hidl_vec modelCache, dataCache; + dataCacheMode[i] = AccessMode::READ_ONLY; + createCacheHandles(mModelCache, modelCacheMode, &modelCache); + createCacheHandles(mDataCache, dataCacheMode, &dataCache); + dataCacheMode[i] = AccessMode::READ_WRITE; + sp preparedModel = nullptr; + saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel); + if (checkEarlyTermination(supported)) return; + ASSERT_NE(preparedModel, nullptr); + // Execute and verify results. + generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, + get_examples(), + testModel.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + // Check if prepareModelFromCache fails. + preparedModel = nullptr; ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + if (status != ErrorStatus::INVALID_ARGUMENT) { + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + } + ASSERT_EQ(preparedModel, nullptr); } } TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) { // Create test HIDL model and compile. Model testModel = createTestModel(); - sp preparedModel = nullptr; - generated_tests::PrepareModel(device, testModel, &preparedModel); - // Terminate early if the driver cannot prepare the model. - if (preparedModel == nullptr) return; + std::vector modelCacheMode(mNumModelCache, AccessMode::READ_WRITE); + std::vector dataCacheMode(mNumDataCache, AccessMode::READ_WRITE); // Save the compilation to cache. { - ErrorStatus status; - hidl_handle cache1, cache2; - createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1); - createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2); - saveModelToCache(preparedModel, cache1, cache2, &status); - if (status != ErrorStatus::GENERAL_FAILURE) { - ASSERT_EQ(status, ErrorStatus::NONE); - } + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModel, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; } - // cache1 with invalid access mode. - { - preparedModel = nullptr; + // Go through each handle in model cache, test with invalid access mode. 
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
         ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        modelCacheMode[i] = AccessMode::WRITE_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        modelCacheMode[i] = AccessMode::READ_WRITE;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
         ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
         ASSERT_EQ(preparedModel, nullptr);
     }
 
-    // cache2 with invalid access mode.
-    {
-        preparedModel = nullptr;
+    // Go through each handle in data cache, test with invalid access mode.
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        sp<IPreparedModel> preparedModel = nullptr;
         ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        dataCacheMode[i] = AccessMode::WRITE_ONLY;
+        createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+        createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+        dataCacheMode[i] = AccessMode::READ_WRITE;
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
         ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
         ASSERT_EQ(preparedModel, nullptr);
     }
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidOffset) {
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
-
-    // cache1 with invalid file descriptor offset.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Advance offset by one byte.
-        ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-
-    // cache2 with invalid file descriptor offset.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Advance offset by one byte.
-        ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-}
-
-TEST_F(CompilationCachingTest, SaveToCacheInvalidFileSize) {
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
-
-    // cache1 with invalid file size.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Write one byte and seek back to the beginning.
-        ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        ASSERT_EQ(lseek(cache1.getNativeHandle()->data[0], 0, SEEK_SET), 0);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-
-    // cache2 with invalid file size.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        uint8_t dummyByte = 0;
-        // Write one byte and seek back to the beginning.
-        ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
-        ASSERT_EQ(lseek(cache2.getNativeHandle()->data[0], 0, SEEK_SET), 0);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-    }
-}
-
 class CompilationCachingSecurityTest : public CompilationCachingTest,
                                        public ::testing::WithParamInterface<uint32_t> {
   protected:
@@ -537,44 +888,44 @@ TEST_P(CompilationCachingSecurityTest, CorruptedSecuritySensitiveCache) {
     // Create test HIDL model and compile.
     Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
 
-    // Save the compilation to cache.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        if (checkEarlyTermination(status)) return;
-        ASSERT_EQ(status, ErrorStatus::NONE);
-    }
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        // Save the compilation to cache.
+        {
+            bool supported;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(testModel, modelCache, dataCache, &supported);
+            if (checkEarlyTermination(supported)) return;
+        }
 
-    // Randomly flip one single bit of the cache entry.
-    FILE* pFile = fopen(mCache1.c_str(), "r+");
-    ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
-    long int fileSize = ftell(pFile);
-    ASSERT_GT(fileSize, 0);
-    ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
-    int readByte = fgetc(pFile);
-    ASSERT_NE(readByte, EOF);
-    ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
-    ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
-    fclose(pFile);
+        // Randomly flip one single bit of the cache entry.
+        FILE* pFile = fopen(mModelCache[i][0].c_str(), "r+");
+        ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+        long int fileSize = ftell(pFile);
+        if (fileSize == 0) {
+            fclose(pFile);
+            continue;
+        }
+        ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+        int readByte = fgetc(pFile);
+        ASSERT_NE(readByte, EOF);
+        ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+        ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+        fclose(pFile);
 
-    // Retrieve preparedModel from cache, expect failure.
-    {
-        preparedModel = nullptr;
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-        ASSERT_EQ(preparedModel, nullptr);
+        // Retrieve preparedModel from cache, expect failure.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+            ASSERT_EQ(preparedModel, nullptr);
+        }
     }
 }
 
@@ -583,40 +934,37 @@ TEST_P(CompilationCachingSecurityTest, WrongLengthSecuritySensitiveCache) {
    // Create test HIDL model and compile.
     Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
 
-    // Save the compilation to cache.
-    {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        if (checkEarlyTermination(status)) return;
-        ASSERT_EQ(status, ErrorStatus::NONE);
-    }
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        // Save the compilation to cache.
+        {
+            bool supported;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(testModel, modelCache, dataCache, &supported);
+            if (checkEarlyTermination(supported)) return;
+        }
 
-    // Randomly append bytes to the cache entry.
-    FILE* pFile = fopen(mCache1.c_str(), "a");
-    uint32_t appendLength = getRandomInt(1, 256);
-    for (uint32_t i = 0; i < appendLength; i++) {
-        ASSERT_NE(fputc(getRandomInt(0, 255), pFile), EOF);
-    }
-    fclose(pFile);
+        // Randomly append bytes to the cache entry.
+        FILE* pFile = fopen(mModelCache[i][0].c_str(), "a");
+        uint32_t appendLength = getRandomInt(1, 256);
+        for (uint32_t i = 0; i < appendLength; i++) {
+            ASSERT_NE(fputc(getRandomInt(0, 255), pFile), EOF);
+        }
+        fclose(pFile);
 
-    // Retrieve preparedModel from cache, expect failure.
-    {
-        preparedModel = nullptr;
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
-        ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-        ASSERT_EQ(preparedModel, nullptr);
+        // Retrieve preparedModel from cache, expect failure.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+            ASSERT_EQ(preparedModel, nullptr);
+        }
     }
 }
 
@@ -625,20 +973,15 @@ TEST_P(CompilationCachingSecurityTest, WrongToken) {
     // Create test HIDL model and compile.
     Model testModel = createTestModel();
-    sp<IPreparedModel> preparedModel = nullptr;
-    generated_tests::PrepareModel(device, testModel, &preparedModel);
-    // Terminate early if the driver cannot prepare the model.
-    if (preparedModel == nullptr) return;
 
     // Save the compilation to cache.
     {
-        ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
-        saveModelToCache(preparedModel, cache1, cache2, &status);
-        if (checkEarlyTermination(status)) return;
-        ASSERT_EQ(status, ErrorStatus::NONE);
+        bool supported;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(testModel, modelCache, dataCache, &supported);
+        if (checkEarlyTermination(supported)) return;
     }
 
     // Randomly flip one single bit in mToken.
@@ -647,12 +990,12 @@ TEST_P(CompilationCachingSecurityTest, WrongToken) {
 
     // Retrieve the preparedModel from cache, expect failure.
     {
-        preparedModel = nullptr;
+        sp<IPreparedModel> preparedModel = nullptr;
         ErrorStatus status;
-        hidl_handle cache1, cache2;
-        createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
-        createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
-        prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
         ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
         ASSERT_EQ(preparedModel, nullptr);
     }
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index c2330b581e..2988211e5a 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -33,6 +33,7 @@ namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -54,7 +55,8 @@ static void validatePrepareModel(const sp<IDevice>& device, const std::string& m
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus =
-            device->prepareModel_1_2(model, preference, preparedModelCallback);
+            device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
+                                     hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
 
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index d411da4819..b15f657348 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -37,6 +37,7 @@ namespace functional {
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using test_helper::for_all;
 using test_helper::MixedTyped;
 using test_helper::MixedTypedExample;
@@ -66,7 +67,8 @@ static void createPreparedModel(const sp<IDevice>& device, const Model& model,
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));