diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index 4892100877..3709de187a 100644
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -12,8 +12,9 @@ from amici.petab.petab_import import import_petab_problem
 import benchmark_models_petab
 from collections import defaultdict
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from amici import SensitivityMethod
+from petab.v1.lint import measurement_table_has_timepoint_specific_mappings
 from fiddy import MethodId, get_derivative
 from fiddy.derivative_check import NumpyIsCloseDerivativeCheck
 from fiddy.extensions.amici import simulate_petab_to_cached_functions
@@ -58,14 +59,16 @@ class GradientCheckSettings:
     atol_consistency: float = 1e-5
     rtol_consistency: float = 1e-1
     # Step sizes for finite difference gradient checks.
-    step_sizes = [
-        1e-1,
-        5e-2,
-        1e-2,
-        1e-3,
-        1e-4,
-        1e-5,
-    ]
+    step_sizes: list[float] = field(
+        default_factory=lambda: [
+            1e-1,
+            5e-2,
+            1e-2,
+            1e-3,
+            1e-4,
+            1e-5,
+        ]
+    )
     rng_seed: int = 0
     ss_sensitivity_mode: amici.SteadyStateSensitivityMode = (
         amici.SteadyStateSensitivityMode.integrateIfNewtonFails
@@ -176,7 +179,10 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         pytest.skip()
 
     petab_problem = benchmark_models_petab.get_problem(model)
-    petab.flatten_timepoint_specific_output_overrides(petab_problem)
+    if measurement_table_has_timepoint_specific_mappings(
+        petab_problem.measurement_df,
+    ):
+        petab.flatten_timepoint_specific_output_overrides(petab_problem)
 
     # Only compute gradient for estimated parameters.
     parameter_ids = petab_problem.x_free_ids
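
Context for the first two hunks: without a type annotation, `step_sizes` was a plain class attribute shared by all `GradientCheckSettings` instances rather than a dataclass field, so it could not be overridden via the constructor; annotating it with a bare list default would instead raise a `ValueError`, since dataclasses forbid mutable defaults. A minimal sketch of the `field(default_factory=...)` pattern the diff adopts, using hypothetical `Settings`/`steps` names:

```python
from dataclasses import dataclass, field


@dataclass
class Settings:
    # A bare mutable default is rejected at class-creation time:
    #   steps: list[float] = [1e-1, 1e-2]  # ValueError: mutable default
    # default_factory builds a fresh list for each instance instead.
    steps: list[float] = field(default_factory=lambda: [1e-1, 1e-2])


a = Settings()
b = Settings(steps=[1e-3])  # overriding via the constructor now works
a.steps.append(5e-2)        # mutating one instance leaves others untouched
assert b.steps == [1e-3]
```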
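
The last hunk guards the flattening step so that problems whose measurement table has no timepoint-specific parameter overrides are left untouched; `petab.flatten_timepoint_specific_output_overrides` rewrites observable IDs when overrides differ across timepoints, so skipping it keeps IDs stable for already-flat problems. A minimal sketch of the check in isolation, with a hand-built PEtab v1 measurement table (the column values are illustrative, not from the benchmark problems):

```python
import pandas as pd
from petab.v1.lint import measurement_table_has_timepoint_specific_mappings

# Same observable and condition, but different observableParameters
# overrides at different timepoints -> flattening would be required.
measurement_df = pd.DataFrame(
    {
        "observableId": ["obs1", "obs1"],
        "simulationConditionId": ["c0", "c0"],
        "time": [1.0, 2.0],
        "measurement": [0.1, 0.2],
        "observableParameters": ["scaling_a", "scaling_b"],
        "noiseParameters": ["sigma1", "sigma1"],
    }
)
assert measurement_table_has_timepoint_specific_mappings(measurement_df)
```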