From 7cc0cccf5db241498f07a0926b02d6ace75dcc9a Mon Sep 17 00:00:00 2001 From: Xusong Wang Date: Tue, 23 Apr 2019 14:28:17 -0700 Subject: [PATCH] Test TOCTOU in VTS CompilationCachingTests. Two tests are added into VTS: 1. The TOCTOU test, for a fixed number of iterations. We try to attack the driver by replacing the model cache with another cache entry from a similar model while the driver is saving to or preparing from cache, and see if the driver crashes or falsely prepares an unexpected model (by checking the execution result). Either of the following results is acceptable * Fails -> the driver detects this corruption. * Succeeds and produces correct execution result -> the corruption happens before saving to cache or after preparing from cache. Due to the racy nature, this test is probabilistic and we run it several times. 2. Similar to the TOCTOU test but replace the content between compile-to-cache and compile-from-cache (once only). Additionally, remove tmp cache directory if test succeeds. 
Bug: 123433989 Test: VtsHalNeuralnetworksV1_xTargetTest with 1.2 sample driver Test: VtsHalNeuralnetworksV1_xTargetTest with a test driver that can read and write cache entries Test: Check /data/local/tmp and confirm that * dirs are preserved on failure * dirs are removed on success Change-Id: Ie04fa905f465c3775979f0ca74359d185dcacea9 Merged-In: Ie04fa905f465c3775979f0ca74359d185dcacea9 (cherry picked from commit a44e130a92d1e897a6ac075affa2564021edd22d) --- .../functional/CompilationCachingTests.cpp | 353 +++++++++++++++++- 1 file changed, 337 insertions(+), 16 deletions(-) diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp index 167fc096ce..df95ac61b4 100644 --- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp +++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp @@ -16,21 +16,22 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" -#include "VtsHalNeuralnetworks.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include #include "Callbacks.h" #include "GeneratedTestHarness.h" #include "TestHarness.h" #include "Utils.h" - -#include -#include -#include -#include -#include -#include - -#include +#include "VtsHalNeuralnetworks.h" namespace android { namespace hardware { @@ -46,7 +47,7 @@ using ::test_helper::MixedTypedExample; namespace { -// In frameworks/ml/nn/runtime/tests/generated/, creates a hidl model of mobilenet. +// In frameworks/ml/nn/runtime/test/generated/, creates a hidl model of mobilenet. #include "examples/mobilenet_224_gender_basic_fixed.example.cpp" #include "vts_models/mobilenet_224_gender_basic_fixed.model.cpp" @@ -89,6 +90,118 @@ void createCacheHandles(const std::vector>& fileGroups, createCacheHandles(fileGroups, std::vector(fileGroups.size(), mode), handles); } +// Create a chain of broadcast operations. The second operand is always constant tensor [1]. 
+// For simplicity, activation scalar is shared. The second operand is not shared +// in the model to let driver maintain a non-trivial size of constant data and the corresponding +// data locations in cache. +// +// --------- activation -------- +// ↓ ↓ ↓ ↓ +// E.g. input -> ADD -> ADD -> ADD -> ... -> ADD -> output +// ↑ ↑ ↑ ↑ +// [1] [1] [1] [1] +// +Model createLargeTestModel(OperationType op, uint32_t len) { + // Model operations and operands. + std::vector operations(len); + std::vector operands(len * 2 + 2); + + // The constant buffer pool. This contains the activation scalar, followed by the + // per-operation constant operands. + std::vector operandValues(sizeof(int32_t) + len * sizeof(float)); + + // The activation scalar, value = 0. + operands[0] = { + .type = OperandType::INT32, + .dimensions = {}, + .numberOfConsumers = len, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::CONSTANT_COPY, + .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)}, + }; + memset(operandValues.data(), 0, sizeof(int32_t)); + + const float floatBufferValue = 1.0f; + for (uint32_t i = 0; i < len; i++) { + const uint32_t firstInputIndex = i * 2 + 1; + const uint32_t secondInputIndex = firstInputIndex + 1; + const uint32_t outputIndex = secondInputIndex + 1; + + // The first operation input. + operands[firstInputIndex] = { + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT + : OperandLifeTime::TEMPORARY_VARIABLE), + .location = {}, + }; + + // The second operation input, value = 1. 
+ operands[secondInputIndex] = { + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::CONSTANT_COPY, + .location = {.poolIndex = 0, + .offset = static_cast(i * sizeof(float) + sizeof(int32_t)), + .length = sizeof(float)}, + }; + memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(float), &floatBufferValue, + sizeof(float)); + + // The operation. All operations share the same activation scalar. + // The output operand is created as an input in the next iteration of the loop, in the case + // of all but the last member of the chain; and after the loop as a model output, in the + // case of the last member of the chain. + operations[i] = { + .type = op, + .inputs = {firstInputIndex, secondInputIndex, /*activation scalar*/ 0}, + .outputs = {outputIndex}, + }; + } + + // The model output. + operands.back() = { + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 0, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_OUTPUT, + .location = {}, + }; + + const std::vector inputIndexes = {1}; + const std::vector outputIndexes = {len * 2 + 1}; + const std::vector pools = {}; + + return { + .operands = operands, + .operations = operations, + .inputIndexes = inputIndexes, + .outputIndexes = outputIndexes, + .operandValues = operandValues, + .pools = pools, + }; +} + +// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h. +// This function assumes the operation is always ADD. +std::vector getLargeModelExamples(uint32_t len) { + float outputValue = 1.0f + static_cast(len); + return {{.operands = { + // Input + {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}}, + // Output + {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}}; +}; + } // namespace // Tag for the compilation caching tests. 
@@ -139,11 +252,13 @@ class CompilationCachingTest : public NeuralnetworksHidlTest { } void TearDown() override { - // The tmp directory is only removed when the driver reports caching not supported, - // otherwise it is kept for debugging purpose. - if (!mIsCachingSupported) { - remove(mTmpCache.c_str()); - rmdir(mCacheDir.c_str()); + // If the test passes, remove the tmp directory. Otherwise, keep it for debugging purposes. + if (!::testing::Test::HasFailure()) { + // Recursively remove the cache directory specified by mCacheDir. + auto callback = [](const char* entry, const struct stat*, int, struct FTW*) { + return remove(entry); + }; + nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS); } NeuralnetworksHidlTest::TearDown(); } @@ -864,6 +979,212 @@ TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) { } } +// Copy file contents between file groups. +// The outer vector corresponds to handles and the inner vector is for fds held by each handle. +// The outer vector sizes must match and the inner vectors must have size = 1. +static void copyCacheFiles(const std::vector>& from, + const std::vector>& to) { + constexpr size_t kBufferSize = 1000000; + uint8_t buffer[kBufferSize]; + + ASSERT_EQ(from.size(), to.size()); + for (uint32_t i = 0; i < from.size(); i++) { + ASSERT_EQ(from[i].size(), 1u); + ASSERT_EQ(to[i].size(), 1u); + int fromFd = open(from[i][0].c_str(), O_RDONLY); + int toFd = open(to[i][0].c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); + ASSERT_GE(fromFd, 0); + ASSERT_GE(toFd, 0); + + ssize_t readBytes; + while ((readBytes = read(fromFd, &buffer, kBufferSize)) > 0) { + ASSERT_EQ(write(toFd, &buffer, readBytes), readBytes); + } + ASSERT_GE(readBytes, 0); + + close(fromFd); + close(toFd); + } +} + +// Number of operations in the large test model. 
+constexpr uint32_t kLargeModelSize = 100; +constexpr uint32_t kNumIterationsTOCTOU = 100; + +TEST_F(CompilationCachingTest, SaveToCache_TOCTOU) { + if (!mIsCachingSupported) return; + + // Save the testModelMul compilation to cache. + Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); + auto modelCacheMul = mModelCache; + for (auto& cache : modelCacheMul) { + cache[0].append("_mul"); + } + { + hidl_vec modelCache, dataCache; + createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + bool supported; + saveModelToCache(testModelMul, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Use a different token for testModelAdd. + mToken[0]++; + + // This test is probabilistic, so we run it multiple times. + Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); + for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) { + // Save the testModelAdd compilation to cache. + { + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + + // Spawn a thread to copy the cache content concurrently while saving to cache. + std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache)); + saveModelToCache(testModelAdd, modelCache, dataCache, &supported); + thread.join(); + if (checkEarlyTermination(supported)) return; + } + + // Retrieve preparedModel from cache. + { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + + // The preparation may fail or succeed, but must not crash. 
If the preparation succeeds, + // the prepared model must be executed with the correct result and not crash. + if (status != ErrorStatus::NONE) { + ASSERT_EQ(preparedModel, nullptr); + } else { + ASSERT_NE(preparedModel, nullptr); + generated_tests::EvaluatePreparedModel( + preparedModel, [](int) { return false; }, + getLargeModelExamples(kLargeModelSize), + testModelAdd.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + } + } + } +} + +TEST_F(CompilationCachingTest, PrepareFromCache_TOCTOU) { + if (!mIsCachingSupported) return; + + // Save the testModelMul compilation to cache. + Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); + auto modelCacheMul = mModelCache; + for (auto& cache : modelCacheMul) { + cache[0].append("_mul"); + } + { + hidl_vec modelCache, dataCache; + createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + bool supported; + saveModelToCache(testModelMul, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Use a different token for testModelAdd. + mToken[0]++; + + // This test is probabilistic, so we run it multiple times. + Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); + for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) { + // Save the testModelAdd compilation to cache. + { + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModelAdd, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Retrieve preparedModel from cache. 
+ { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + + // Spawn a thread to copy the cache content concurrently while preparing from cache. + std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache)); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + thread.join(); + + // The preparation may fail or succeed, but must not crash. If the preparation succeeds, + // the prepared model must be executed with the correct result and not crash. + if (status != ErrorStatus::NONE) { + ASSERT_EQ(preparedModel, nullptr); + } else { + ASSERT_NE(preparedModel, nullptr); + generated_tests::EvaluatePreparedModel( + preparedModel, [](int) { return false; }, + getLargeModelExamples(kLargeModelSize), + testModelAdd.relaxComputationFloat32toFloat16, + /*testDynamicOutputShape=*/false); + } + } + } +} + +TEST_F(CompilationCachingTest, ReplaceSecuritySensitiveCache) { + if (!mIsCachingSupported) return; + + // Save the testModelMul compilation to cache. + Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize); + auto modelCacheMul = mModelCache; + for (auto& cache : modelCacheMul) { + cache[0].append("_mul"); + } + { + hidl_vec modelCache, dataCache; + createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + bool supported; + saveModelToCache(testModelMul, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Use a different token for testModelAdd. + mToken[0]++; + + // Save the testModelAdd compilation to cache. 
+ Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize); + { + bool supported; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + saveModelToCache(testModelAdd, modelCache, dataCache, &supported); + if (checkEarlyTermination(supported)) return; + } + + // Replace the model cache of testModelAdd with testModelMul. + copyCacheFiles(modelCacheMul, mModelCache); + + // Retrieve the preparedModel from cache, expect failure. + { + sp preparedModel = nullptr; + ErrorStatus status; + hidl_vec modelCache, dataCache; + createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache); + createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache); + prepareModelFromCache(modelCache, dataCache, &preparedModel, &status); + ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE); + ASSERT_EQ(preparedModel, nullptr); + } +} + class CompilationCachingSecurityTest : public CompilationCachingTest, public ::testing::WithParamInterface { protected: