diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 1f66c43bf9..802d01875c 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -45,6 +45,7 @@ using ::test_helper::for_each;
 using ::test_helper::Int32Operands;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
+using ::test_helper::MixedTypedIndex;
 using ::test_helper::Quant8Operands;
 using ::test_helper::resize_accordingly;
 
@@ -63,14 +64,16 @@ void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* sr
     copy_back_<float>(dst, ra, src);
     copy_back_<int32_t>(dst, ra, src);
     copy_back_<uint8_t>(dst, ra, src);
-    static_assert(4 == std::tuple_size<MixedTyped>::value,
+    copy_back_<_Float16>(dst, ra, src);
+    static_assert(5 == std::tuple_size<MixedTyped>::value,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
 void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples, float fpAtol = 1e-5f,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                            float fpRtol = 1e-5f) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
@@ -78,13 +81,20 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<boo
         const MixedTyped& inputs = example.operands.first;
         const MixedTyped& golden = example.operands.second;
 
+        const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
+        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
+            // TODO: Adjust the error limit based on testing.
+            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
+            fpAtol = 5.0f * 0.0009765625f;
+            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
+            fpRtol = 5.0f * 0.0009765625f;
+        }
+
         std::vector<RequestArgument> inputs_info, outputs_info;
         uint32_t inputSize = 0, outputSize = 0;
-
         // This function only partially specifies the metadata (vector of RequestArguments).
         // The contents are copied over below.
         for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
@@ -228,7 +238,8 @@ void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> c
     ASSERT_NE(nullptr, preparedModel.get());
 
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
 }
 
 void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -272,13 +283,8 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    // TODO: Adjust the error limit based on testing.
-    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
-    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
-                                                           : 5.0f * 0.0009765625f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16);
 }
 
 // TODO: Reduce code duplication.
@@ -323,13 +329,8 @@ void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> c
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    // TODO: Adjust the error limit based on testing.
-    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
-    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
-                                                           : 5.0f * 0.0009765625f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16);
 }
 
 }  // namespace generated_tests
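
Note on the tolerance constants (commentary, not part of the patch): one ULP of FP16 for values in [1.0, 2.0) is 2^-10 = 0.0009765625, since FP16 carries a 10-bit fraction, and one ULP of FP32 is 2^-23 = 1.1920928955078125e-7. The patch therefore uses 5 ULP of FP16 for both tolerances whenever the model is relaxed or has FP16 inputs, and keeps 5 ULP of FP32 as the strict relative tolerance. The standalone sketch below shows how such a combined absolute/relative check plays out numerically; nearEnough() is a hypothetical stand-in, since the harness's real comparison lives in the test_helper library rather than in this file.

#include <cmath>
#include <cstdio>

int main() {
    const float kUlpFp16 = 0.0009765625f;           // 2^-10: FP16 has a 10-bit fraction
    const float kUlpFp32 = 1.1920928955078125e-7f;  // 2^-23: FP32 has a 23-bit fraction

    // Tolerances once the model is relaxed or has FP16 inputs:
    const float fpAtol = 5.0f * kUlpFp16;  // 0.0048828125
    const float fpRtol = 5.0f * kUlpFp16;

    // Hypothetical combined absolute/relative check, for illustration only.
    auto nearEnough = [&](float expected, float actual) {
        const float diff = std::fabs(expected - actual);
        return diff <= fpAtol || diff <= fpRtol * std::fabs(expected);
    };

    std::printf("strict fp32 rtol = %g\n", 5.0f * kUlpFp32);               // 5.96046e-07
    std::printf("relaxed atol/rtol = %g\n", fpAtol);                       // 0.00488281
    std::printf("1.0 vs 1.004 accepted? %d\n", nearEnough(1.0f, 1.004f));  // 1: within 5 ULP of FP16
    std::printf("1.0 vs 1.02 accepted?  %d\n", nearEnough(1.0f, 1.02f));   // 0: outside tolerance
    return 0;
}

Moving the tolerance selection into EvaluatePreparedModel lets the Execute overloads share one code path: the V1_1 and V1_2 overloads forward model.relaxComputationFloat32toFloat16, while the first overload, whose V1_0::Model predates that field, passes an explicit /*hasRelaxedFloat32Model=*/false.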