diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index 109839f0f3..4892100877 100644
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -53,6 +53,10 @@ class GradientCheckSettings:
     # Absolute and relative tolerances for finite difference gradient checks.
     atol_check: float = 1e-3
     rtol_check: float = 1e-2
+    # Absolute and relative tolerances for fiddy consistency check between
+    # forward/backward/central differences.
+    atol_consistency: float = 1e-5
+    rtol_consistency: float = 1e-1
     # Step sizes for finite difference gradient checks.
     step_sizes = [
         1e-1,
@@ -91,6 +95,7 @@ class GradientCheckSettings:
     atol_sim=1e-14,
     rtol_sim=1e-14,
     noise_level=0.01,
+    atol_consistency=1e-3,
 )
 settings["Okuonghae_ChaosSolitonsFractals2020"].step_sizes.extend([0.2, 0.005])
 settings["Oliveira_NatCommun2021"] = GradientCheckSettings(
@@ -234,7 +239,10 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         sizes=cur_settings.step_sizes,
         direction_ids=parameter_ids,
         method_ids=[MethodId.CENTRAL, MethodId.FORWARD, MethodId.BACKWARD],
-        success_checker=Consistency(atol=1e-5, rtol=1e-1),
+        success_checker=Consistency(
+            rtol=cur_settings.rtol_consistency,
+            atol=cur_settings.atol_consistency,
+        ),
         expected_result=expected_derivative,
         relative_sizes=not scale,
     )
@@ -287,6 +295,8 @@ def assert_gradient_check_success(
     df = check_result.df
     df["abs_diff"] = np.abs(df["expectation"] - df["test"])
     df["rel_diff"] = df["abs_diff"] / np.abs(df["expectation"])
+    df["atol_success"] = df["abs_diff"] <= atol
+    df["rtol_success"] = df["rel_diff"] <= rtol
     max_adiff = df["abs_diff"].max()
     max_rdiff = df["rel_diff"].max()
     with pd.option_context("display.max_columns", None, "display.width", None):
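
For context (not part of the patch): the two new dataframe columns in `assert_gradient_check_success` annotate each gradient entry with a per-tolerance pass/fail flag, so the printed check report shows at a glance whether the absolute or the relative deviation is within bounds. Below is a minimal, self-contained sketch of that bookkeeping; the column names `expectation` and `test` match the fiddy result dataframe used in the patch, but the numbers are made up for illustration.

```python
import numpy as np
import pandas as pd

# Stand-in for fiddy's check_result.df: "expectation" holds the
# finite-difference gradient, "test" the gradient under test.
# (Hypothetical values, chosen to exercise all flag combinations.)
df = pd.DataFrame(
    {
        "expectation": [1.0, -2.5, 0.0],
        "test": [1.001, -2.49, 1e-6],
    }
)

# Same defaults as GradientCheckSettings.atol_check / rtol_check.
atol, rtol = 1e-3, 1e-2

df["abs_diff"] = np.abs(df["expectation"] - df["test"])
df["rel_diff"] = df["abs_diff"] / np.abs(df["expectation"])
# The new diagnostic flags from the patch. Note that rel_diff is inf
# where the expectation is exactly zero, so rtol_success is False
# there and only the absolute check can pass.
df["atol_success"] = df["abs_diff"] <= atol
df["rtol_success"] = df["rel_diff"] <= rtol

with pd.option_context("display.max_columns", None, "display.width", None):
    print(df)
```

In this sketch the first entry passes both checks, the second only the relative one, and the third only the absolute one, which is exactly the kind of per-entry breakdown the added columns surface in the test output.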