diff --git a/ThirdParty/SuiteSparse/SuiteSparse_config/SuiteSparse_config.h b/ThirdParty/SuiteSparse/SuiteSparse_config/SuiteSparse_config.h
index 8e532262c0..d4ea27d806 100644
--- a/ThirdParty/SuiteSparse/SuiteSparse_config/SuiteSparse_config.h
+++ b/ThirdParty/SuiteSparse/SuiteSparse_config/SuiteSparse_config.h
@@ -374,7 +374,7 @@ int SuiteSparse_divcomplex
     // be done via the SUITESPARSE_TIME macro, defined below:
     #define SUITESPARSE_TIMER_ENABLED
     #define SUITESPARSE_HAVE_CLOCK_GETTIME
-    #define SUITESPARSE_CONFIG_TIMER clock_gettime
+    #define SUITESPARSE_CONFIG_TIMER omp_get_wtime
 #if defined ( SUITESPARSE_TIMER_ENABLED )
     #if defined ( _OPENMP )
         // Avoid indirection through the library if the compilation unit
diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index c28e1257b1..88df36d3d3 100644
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -9,11 +9,8 @@
 import pytest
 from amici.petab.petab_import import import_petab_problem
 import benchmark_models_petab
-
-
-# Absolute and relative tolerances for finite difference gradient checks.
-ATOL: float = 1e-3
-RTOL: float = 1e-2
+from collections import defaultdict
+from dataclasses import dataclass, field
 
 repo_root = Path(__file__).parent.parent.parent
 
@@ -42,9 +39,41 @@
 debug_path.mkdir(exist_ok=True, parents=True)
 
 
+@dataclass
+class GradientCheckSettings:
+    # Absolute and relative tolerances for simulation.
+    atol_sim: float = 1e-12
+    rtol_sim: float = 1e-12
+    # Absolute and relative tolerances for finite difference gradient checks.
+    atol_check: float = 1e-4
+    rtol_check: float = 1e-2
+    # Step sizes for finite difference gradient checks.
+    # (default_factory, so that each instance gets its own list)
+    step_sizes: list[float] = field(
+        default_factory=lambda: [1e-1, 5e-2, 1e-2, 1e-3, 1e-4, 1e-5]
+    )
+
+
+settings = defaultdict(GradientCheckSettings)
+settings["Smith_BMCSystBiol2013"] = GradientCheckSettings(
+    atol_sim=1e-10,
+    rtol_sim=1e-10,
+)
+settings["Oliveira_NatCommun2021"] = GradientCheckSettings(
+    atol_sim=1e-10,
+    rtol_sim=1e-10,
+)
+settings["Okuonghae_ChaosSolitonsFractals2020"] = GradientCheckSettings(
+    atol_sim=1e-14,
+    rtol_sim=1e-14,
+)
+settings["Okuonghae_ChaosSolitonsFractals2020"].step_sizes.insert(0, 0.2)
+
+
 # until fiddy is updated
 @pytest.mark.filterwarnings(
-    "ignore:Importing amici.petab_objective is deprecated.:DeprecationWarning"
+    "ignore:Importing amici.petab_objective is deprecated.:DeprecationWarning",
+    r"ignore:.*petab\.v1.*instead.*:DeprecationWarning",
 )
 @pytest.mark.filterwarnings("ignore:divide by zero encountered in log10")
 @pytest.mark.parametrize("scale", (True, False))
@@ -78,6 +107,7 @@ def test_benchmark_gradient(model, scale):
         petab_problem.x_free_ids
     ]
     parameter_ids = list(parameter_df_free.index)
+    cur_settings = settings[model]
 
     # Setup AMICI objects.
     amici_model = import_petab_problem(
@@ -85,17 +115,8 @@
         model_output_dir=benchmark_outdir / model,
     )
     amici_solver = amici_model.getSolver()
-    amici_solver.setAbsoluteTolerance(1e-12)
-    amici_solver.setRelativeTolerance(1e-12)
-    if model in (
-        "Smith_BMCSystBiol2013",
-        "Oliveira_NatCommun2021",
-    ):
-        amici_solver.setAbsoluteTolerance(1e-10)
-        amici_solver.setRelativeTolerance(1e-10)
-    elif model in ("Okuonghae_ChaosSolitonsFractals2020",):
-        amici_solver.setAbsoluteTolerance(1e-14)
-        amici_solver.setRelativeTolerance(1e-14)
+    amici_solver.setAbsoluteTolerance(cur_settings.atol_sim)
+    amici_solver.setRelativeTolerance(cur_settings.rtol_sim)
     amici_solver.setMaxSteps(int(1e5))
 
     if model in ("Brannmark_JBC2010",):
@@ -132,21 +153,10 @@
 
     expected_derivative = amici_derivative(point)
 
-    sizes = [
-        1e-1,
-        5e-2,
-        1e-2,
-        1e-3,
-        1e-4,
-        1e-5,
-    ]
-    if model in ("Okuonghae_ChaosSolitonsFractals2020",):
-        sizes.insert(0, 0.2)
-
     derivative = get_derivative(
         function=amici_function,
         point=point,
-        sizes=sizes,
+        sizes=cur_settings.step_sizes,
         direction_ids=parameter_ids,
         method_ids=[MethodId.CENTRAL, MethodId.FORWARD, MethodId.BACKWARD],
         success_checker=Consistency(atol=1e-5, rtol=1e-1),
@@ -160,7 +170,9 @@
         expectation=expected_derivative,
         point=point,
     )
-    success = check(rtol=RTOL, atol=ATOL)
+    success = check(
+        rtol=cur_settings.rtol_check, atol=cur_settings.atol_check
+    )
 
     if debug:
         df = pd.DataFrame(
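
The test refactor above replaces scattered per-model `if`/`elif` tolerance overrides with a single `defaultdict` of `GradientCheckSettings`. Below is a minimal, self-contained sketch of the pattern (the unlisted model ID is made up for illustration), including why `step_sizes` is declared via `field(default_factory=...)` rather than as a plain class-level list:

```python
from collections import defaultdict
from dataclasses import dataclass, field


@dataclass
class GradientCheckSettings:
    atol_sim: float = 1e-12
    rtol_sim: float = 1e-12
    # default_factory gives each instance its own list; a bare class-level
    # list would be shared (and mutated) across all instances.
    step_sizes: list[float] = field(
        default_factory=lambda: [1e-1, 5e-2, 1e-2, 1e-3, 1e-4, 1e-5]
    )


settings = defaultdict(GradientCheckSettings)
settings["Okuonghae_ChaosSolitonsFractals2020"] = GradientCheckSettings(
    atol_sim=1e-14, rtol_sim=1e-14
)
settings["Okuonghae_ChaosSolitonsFractals2020"].step_sizes.insert(0, 0.2)

# "SomeUnlisted_Model2024" is a made-up ID: the defaultdict creates and
# caches a default instance on first access.
assert settings["SomeUnlisted_Model2024"].atol_sim == 1e-12
# The per-model insert above did not leak into the default step sizes.
assert 0.2 not in settings["SomeUnlisted_Model2024"].step_sizes
```

With a bare class attribute, the Okuonghae-specific `insert(0, 0.2)` would have mutated one list shared by every instance, silently prepending the extra 0.2 step for all models.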
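
The widened warning filter relies on `@pytest.mark.filterwarnings` accepting several filter strings in one mark; pytest applies each positional argument as its own `action:message-regex:category` filter. A runnable sketch of that behavior, with warning messages invented here purely to match the filters used above:

```python
import warnings

import pytest


@pytest.mark.filterwarnings(
    "ignore:Importing amici.petab_objective is deprecated.:DeprecationWarning",
    r"ignore:.*petab\.v1.*instead.*:DeprecationWarning",
)
def test_both_warnings_are_silenced():
    # Each warning matches one of the two filters, so neither is reported.
    warnings.warn(
        "Importing amici.petab_objective is deprecated.", DeprecationWarning
    )
    warnings.warn("Use petab.v1.problem instead.", DeprecationWarning)
```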