From a5e771edf5967b84a93d45f80acbf6e011b8f2b8 Mon Sep 17 00:00:00 2001 From: Paul Jonas Jost <70631928+PaulJonasJost@users.noreply.github.com> Date: Wed, 13 Mar 2024 01:19:51 +0100 Subject: [PATCH 01/14] get_parameter_mapping_for_condition default value update (#250) --- petab/parameter_mapping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/petab/parameter_mapping.py b/petab/parameter_mapping.py index 1483fc13..b4c6fcc4 100644 --- a/petab/parameter_mapping.py +++ b/petab/parameter_mapping.py @@ -307,7 +307,7 @@ def _map_condition(packed_args): def get_parameter_mapping_for_condition( condition_id: str, is_preeq: bool, - cur_measurement_df: Optional[pd.DataFrame], + cur_measurement_df: Optional[pd.DataFrame] = None, sbml_model: libsbml.Model = None, condition_df: pd.DataFrame = None, parameter_df: pd.DataFrame = None, From b75ece3b6009027ecff28511534a057d5ad38337 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 16 Apr 2024 15:13:03 +0200 Subject: [PATCH 02/14] maint: black -> ruff (#251) Instead of black and isort, run ruff as pre-commit hook. Enable pyupgrade and additional rules. Applied safe auto-fixes, but validation currently fails. Will be addressed in a follow-up PR (#252). Relevant changes: `.pre-commit-config.yaml`, `pyproject.toml` --- .pre-commit-config.yaml | 30 ++++----- petab/calculate.py | 3 +- petab/composite_problem.py | 1 - petab/conditions.py | 3 +- petab/core.py | 5 +- petab/lint.py | 6 -- petab/mapping.py | 2 +- petab/measurements.py | 3 +- petab/models/model.py | 5 +- petab/observables.py | 4 +- petab/parameter_mapping.py | 9 ++- petab/parameters.py | 15 +++-- petab/problem.py | 68 ++++++++++----------- petab/sbml.py | 2 - petab/simplify.py | 3 +- petab/visualize/__init__.py | 5 +- petab/visualize/cli.py | 1 - petab/visualize/data_overview.py | 2 - petab/visualize/helper_functions.py | 1 - petab/visualize/plot_data_and_simulation.py | 6 +- petab/visualize/plot_residuals.py | 1 - petab/visualize/plotter.py | 11 +--- petab/visualize/plotting.py | 9 --- petab/yaml.py | 3 - pyproject.toml | 20 +++++- setup.py | 1 - tests/test_combine.py | 1 - tests/test_lint.py | 6 +- tests/test_measurements.py | 1 - tests/test_observables.py | 1 - tests/test_parameter_mapping.py | 6 +- tests/test_petab.py | 4 +- tests/test_simulate.py | 1 - 33 files changed, 103 insertions(+), 136 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 89cf753b..d32e5b68 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,12 +1,6 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - name: isort (python) - args: ["--profile", "black", "--filter-files", "--line-length", "79"] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: @@ -16,13 +10,19 @@ repos: args: [--allow-multiple-documents] - id: end-of-file-fixer - id: trailing-whitespace -- repo: https://github.com/psf/black - rev: 23.7.0 +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.1.11 hooks: - - id: black-jupyter - # It is recommended to specify the latest version of Python - # supported by your project here, or alternatively use - # pre-commit's default_language_version, see - # https://pre-commit.com/#top_level-default_language_version - language_version: python3.11 - args: ["--line-length", "79"] + # Run the linter. 
+ - id: ruff + args: + - --fix + - --config + - pyproject.toml + + # Run the formatter. + - id: ruff-format + args: + - --config + - pyproject.toml diff --git a/petab/calculate.py b/petab/calculate.py index cb20bd38..4ca3240b 100644 --- a/petab/calculate.py +++ b/petab/calculate.py @@ -322,7 +322,8 @@ def calculate_llh_for_table( parameter_df: pd.DataFrame, ) -> float: """Calculate log-likelihood for one set of tables. For the arguments, see - `calculate_llh`.""" + `calculate_llh`. + """ llhs = [] # matching columns diff --git a/petab/composite_problem.py b/petab/composite_problem.py index d9803765..b8569cb5 100644 --- a/petab/composite_problem.py +++ b/petab/composite_problem.py @@ -46,7 +46,6 @@ def from_yaml(yaml_config: Union[Dict, str]) -> "CompositeProblem": Arguments: yaml_config: PEtab configuration as dictionary or YAML file name """ - if isinstance(yaml_config, str): path_prefix = os.path.dirname(yaml_config) yaml_config = yaml.load_yaml(yaml_config) diff --git a/petab/conditions.py b/petab/conditions.py index 55cf133d..4e88e925 100644 --- a/petab/conditions.py +++ b/petab/conditions.py @@ -18,7 +18,7 @@ def get_condition_df( - condition_file: Union[str, pd.DataFrame, Path, None] + condition_file: Union[str, pd.DataFrame, Path, None], ) -> pd.DataFrame: """Read the provided condition file into a ``pandas.Dataframe`` @@ -75,7 +75,6 @@ def create_condition_df( A :py:class:`pandas.DataFrame` with empty given rows and columns and all nan values """ - condition_ids = [] if condition_ids is None else list(condition_ids) data = {CONDITION_ID: condition_ids} diff --git a/petab/core.py b/petab/core.py index a04b88f7..57d548f2 100644 --- a/petab/core.py +++ b/petab/core.py @@ -72,7 +72,7 @@ def write_simulation_df(df: pd.DataFrame, filename: Union[str, Path]) -> None: def get_visualization_df( - visualization_file: Union[str, Path, pd.DataFrame, None] + visualization_file: Union[str, Path, pd.DataFrame, None], ) -> Union[pd.DataFrame, None]: """Read PEtab visualization table @@ -357,7 +357,6 @@ def concat_tables( Returns: The concatenated DataFrames """ - if isinstance(tables, pd.DataFrame): return tables @@ -389,7 +388,6 @@ def to_float_if_float(x: Any) -> Any: Returns: ``x`` as float if possible, otherwise ``x`` """ - try: return float(x) except (ValueError, TypeError): @@ -427,7 +425,6 @@ def create_combine_archive( email: E-mail address of archive creator organization: Organization of archive creator """ - path_prefix = os.path.dirname(str(yaml_file)) yaml_config = yaml.load_yaml(yaml_file) diff --git a/petab/lint.py b/petab/lint.py index b332fdbd..a58678c8 100644 --- a/petab/lint.py +++ b/petab/lint.py @@ -105,7 +105,6 @@ def check_condition_df( Raises: AssertionError: in case of problems """ - # Check required columns are present req_cols = [] _check_df(df, req_cols, "condition") @@ -167,7 +166,6 @@ def check_measurement_df( Raises: AssertionError, ValueError: in case of problems """ - _check_df(df, MEASUREMENT_DF_REQUIRED_COLS, "measurement") for column_name in MEASUREMENT_DF_REQUIRED_COLS: @@ -432,7 +430,6 @@ def assert_measured_observables_defined( Raises: AssertionError: in case of problems """ - used_observables = set(measurement_df[OBSERVABLE_ID].values) defined_observables = set(observable_df.index.values) if undefined_observables := (used_observables - defined_observables): @@ -453,7 +450,6 @@ def condition_table_is_parameter_free(condition_df: pd.DataFrame) -> bool: ``True`` if there are no parameter overrides in the condition table, ``False`` otherwise. 
""" - return len(petab.get_parametric_overrides(condition_df)) == 0 @@ -468,7 +464,6 @@ def assert_parameter_id_is_string(parameter_df: pd.DataFrame) -> None: Raises: AssertionError: in case of problems """ - for parameter_id in parameter_df: if isinstance(parameter_id, str): if parameter_id[0].isdigit(): @@ -1088,7 +1083,6 @@ def assert_measurement_conditions_present_in_condition_table( Raises: AssertionError: in case of problems """ - used_conditions = set(measurement_df[SIMULATION_CONDITION_ID].values) if PREEQUILIBRATION_CONDITION_ID in measurement_df: used_conditions |= set( diff --git a/petab/mapping.py b/petab/mapping.py index 357daf47..dd91102e 100644 --- a/petab/mapping.py +++ b/petab/mapping.py @@ -16,7 +16,7 @@ def get_mapping_df( - mapping_file: Union[None, str, Path, pd.DataFrame] + mapping_file: Union[None, str, Path, pd.DataFrame], ) -> pd.DataFrame: """ Read the provided mapping file into a ``pandas.Dataframe``. diff --git a/petab/measurements.py b/petab/measurements.py index f329f144..6ab35eb7 100644 --- a/petab/measurements.py +++ b/petab/measurements.py @@ -28,7 +28,7 @@ def get_measurement_df( - measurement_file: Union[None, str, Path, pd.DataFrame] + measurement_file: Union[None, str, Path, pd.DataFrame], ) -> pd.DataFrame: """ Read the provided measurement file into a ``pandas.Dataframe``. @@ -217,7 +217,6 @@ def create_measurement_df() -> pd.DataFrame: Returns: Created DataFrame """ - return pd.DataFrame( data={ OBSERVABLE_ID: [], diff --git a/petab/models/model.py b/petab/models/model.py index 29ebf495..b123efd9 100644 --- a/petab/models/model.py +++ b/petab/models/model.py @@ -3,7 +3,7 @@ import abc from pathlib import Path -from typing import Any, Iterable, Tuple +from typing import Any, Iterable class Model(abc.ABC): @@ -55,7 +55,7 @@ def get_parameter_value(self, id_: str) -> float: @abc.abstractmethod def get_free_parameter_ids_with_values( self, - ) -> Iterable[Tuple[str, float]]: + ) -> Iterable[tuple[str, float]]: """Get free model parameters along with their values Returns: @@ -106,7 +106,6 @@ def symbol_allowed_in_observable_formula(self, id_: str) -> bool: :returns: ``True``, if allowed, ``False`` otherwise """ - ... @abc.abstractmethod diff --git a/petab/observables.py b/petab/observables.py index 27cafab5..a0976cd7 100644 --- a/petab/observables.py +++ b/petab/observables.py @@ -24,7 +24,7 @@ def get_observable_df( - observable_file: Union[str, pd.DataFrame, Path, None] + observable_file: Union[str, pd.DataFrame, Path, None], ) -> Union[pd.DataFrame, None]: """ Read the provided observable file into a ``pandas.Dataframe``. @@ -191,7 +191,6 @@ def get_placeholders( List of placeholder parameters from observable table observableFormulas and noiseFormulas. """ - # collect placeholder parameters overwritten by # {observable,noise}Parameters placeholder_types = [] @@ -224,5 +223,4 @@ def create_observable_df() -> pd.DataFrame: Returns: Created DataFrame """ - return pd.DataFrame(data={col: [] for col in OBSERVABLE_DF_COLS}) diff --git a/petab/parameter_mapping.py b/petab/parameter_mapping.py index b4c6fcc4..4fa3115d 100644 --- a/petab/parameter_mapping.py +++ b/petab/parameter_mapping.py @@ -1,5 +1,6 @@ """Functions related to mapping parameter from model to parameter estimation -problem""" +problem +""" import logging import numbers @@ -234,7 +235,6 @@ def _map_condition(packed_args): For arguments see :py:func:`get_optimization_to_simulation_parameter_mapping`. 
""" - ( condition, measurement_df, @@ -564,7 +564,6 @@ def _apply_parameter_table( parameter_df: PEtab parameter table """ - if parameter_df is None: return @@ -626,8 +625,8 @@ def _perform_mapping_checks( allow_timepoint_specific_numeric_noise_parameters: bool = False, ) -> None: """Check for PEtab features which we can't account for during parameter - mapping.""" - + mapping. + """ if lint.measurement_table_has_timepoint_specific_mappings( measurement_df, allow_scalar_numeric_noise_parameters=allow_timepoint_specific_numeric_noise_parameters, # noqa: E251,E501 diff --git a/petab/parameters.py b/petab/parameters.py index 3e1e4fe4..62fd8980 100644 --- a/petab/parameters.py +++ b/petab/parameters.py @@ -45,7 +45,7 @@ def get_parameter_df( parameter_file: Union[ str, Path, pd.DataFrame, Iterable[Union[str, Path, pd.DataFrame]], None - ] + ], ) -> Union[pd.DataFrame, None]: """ Read the provided parameter file into a ``pandas.Dataframe``. @@ -301,19 +301,22 @@ def append_overrides(overrides): for formula_type, placeholder_sources in ( ( # Observable formulae - {'observables': True, 'noise': False}, + {"observables": True, "noise": False}, # can only contain observable placeholders - {'noise': False, 'observables': True} + {"noise": False, "observables": True}, ), ( # Noise formulae - {'observables': False, 'noise': True}, + {"observables": False, "noise": True}, # can contain noise and observable placeholders - {'noise': True, 'observables': True} + {"noise": True, "observables": True}, ), ): output_parameters = observables.get_output_parameters( - observable_df, model, mapping_df=mapping_df, **formula_type, + observable_df, + model, + mapping_df=mapping_df, + **formula_type, ) placeholders = observables.get_placeholders( observable_df, diff --git a/petab/problem.py b/petab/problem.py index 3acddbc4..c9154a4c 100644 --- a/petab/problem.py +++ b/petab/problem.py @@ -5,7 +5,7 @@ import tempfile from math import nan from pathlib import Path, PurePosixPath -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Union +from typing import TYPE_CHECKING, Iterable, Optional, Union from urllib.parse import unquote, urlparse, urlunparse from warnings import warn @@ -76,7 +76,7 @@ def __init__( visualization_df: pd.DataFrame = None, observable_df: pd.DataFrame = None, mapping_df: pd.DataFrame = None, - extensions_config: Dict = None, + extensions_config: dict = None, ): self.condition_df: Optional[pd.DataFrame] = condition_df self.measurement_df: Optional[pd.DataFrame] = measurement_df @@ -178,8 +178,8 @@ def from_files( ] = None, observable_files: Union[str, Path, Iterable[Union[str, Path]]] = None, model_id: str = None, - extensions_config: Dict = None, - ) -> "Problem": + extensions_config: dict = None, + ) -> Problem: """ Factory method to load model and tables from files. @@ -252,7 +252,7 @@ def from_files( ) @staticmethod - def from_yaml(yaml_config: Union[Dict, Path, str]) -> "Problem": + def from_yaml(yaml_config: Union[dict, Path, str]) -> Problem: """ Factory method to load model and tables as specified by YAML file. 
@@ -274,9 +274,7 @@ def from_yaml(yaml_config: Union[Dict, Path, str]) -> "Problem": ): # a regular file path string path_prefix = Path(yaml_path).parent - get_path = ( - lambda filename: path_prefix / filename - ) # noqa: E731 + get_path = lambda filename: path_prefix / filename # noqa: E731 else: # a URL # extract parent path from @@ -293,9 +291,7 @@ def from_yaml(yaml_config: Union[Dict, Path, str]) -> "Problem": ) ) # need "/" on windows, not "\" - get_path = ( - lambda filename: f"{path_prefix}/{filename}" - ) # noqa: E731 + get_path = lambda filename: f"{path_prefix}/{filename}" # noqa: E731 if yaml.is_composite_problem(yaml_config): raise ValueError( @@ -425,7 +421,7 @@ def from_yaml(yaml_config: Union[Dict, Path, str]) -> "Problem": ) @staticmethod - def from_combine(filename: Union[Path, str]) -> "Problem": + def from_combine(filename: Union[Path, str]) -> Problem: """Read PEtab COMBINE archive (http://co.mbine.org/documents/archive). See also :py:func:`petab.create_combine_archive`. @@ -687,7 +683,7 @@ def get_observable_ids(self): """ return list(self.observable_df.index) - def _apply_mask(self, v: List, free: bool = True, fixed: bool = True): + def _apply_mask(self, v: list, free: bool = True, fixed: bool = True): """Apply mask of only free or only fixed values. Parameters @@ -731,17 +727,17 @@ def get_x_ids(self, free: bool = True, fixed: bool = True): return self._apply_mask(v, free=free, fixed=fixed) @property - def x_ids(self) -> List[str]: + def x_ids(self) -> list[str]: """Parameter table parameter IDs""" return self.get_x_ids() @property - def x_free_ids(self) -> List[str]: + def x_free_ids(self) -> list[str]: """Parameter table parameter IDs, for free parameters.""" return self.get_x_ids(fixed=False) @property - def x_fixed_ids(self) -> List[str]: + def x_fixed_ids(self) -> list[str]: """Parameter table parameter IDs, for fixed parameters.""" return self.get_x_ids(free=False) @@ -777,35 +773,37 @@ def get_x_nominal( return self._apply_mask(v, free=free, fixed=fixed) @property - def x_nominal(self) -> List: + def x_nominal(self) -> list: """Parameter table nominal values""" return self.get_x_nominal() @property - def x_nominal_free(self) -> List: + def x_nominal_free(self) -> list: """Parameter table nominal values, for free parameters.""" return self.get_x_nominal(fixed=False) @property - def x_nominal_fixed(self) -> List: + def x_nominal_fixed(self) -> list: """Parameter table nominal values, for fixed parameters.""" return self.get_x_nominal(free=False) @property - def x_nominal_scaled(self) -> List: + def x_nominal_scaled(self) -> list: """Parameter table nominal values with applied parameter scaling""" return self.get_x_nominal(scaled=True) @property - def x_nominal_free_scaled(self) -> List: + def x_nominal_free_scaled(self) -> list: """Parameter table nominal values with applied parameter scaling, - for free parameters.""" + for free parameters. + """ return self.get_x_nominal(fixed=False, scaled=True) @property - def x_nominal_fixed_scaled(self) -> List: + def x_nominal_fixed_scaled(self) -> list: """Parameter table nominal values with applied parameter scaling, - for fixed parameters.""" + for fixed parameters. 
+ """ return self.get_x_nominal(free=False, scaled=True) def get_lb( @@ -836,12 +834,12 @@ def get_lb( return self._apply_mask(v, free=free, fixed=fixed) @property - def lb(self) -> List: + def lb(self) -> list: """Parameter table lower bounds.""" return self.get_lb() @property - def lb_scaled(self) -> List: + def lb_scaled(self) -> list: """Parameter table lower bounds with applied parameter scaling""" return self.get_lb(scaled=True) @@ -873,23 +871,23 @@ def get_ub( return self._apply_mask(v, free=free, fixed=fixed) @property - def ub(self) -> List: + def ub(self) -> list: """Parameter table upper bounds""" return self.get_ub() @property - def ub_scaled(self) -> List: + def ub_scaled(self) -> list: """Parameter table upper bounds with applied parameter scaling""" return self.get_ub(scaled=True) @property - def x_free_indices(self) -> List[int]: + def x_free_indices(self) -> list[int]: """Parameter table estimated parameter indices.""" estimated = list(self.parameter_df[ESTIMATE]) return [j for j, val in enumerate(estimated) if val != 0] @property - def x_fixed_indices(self) -> List[int]: + def x_fixed_indices(self) -> list[int]: """Parameter table non-estimated parameter indices.""" estimated = list(self.parameter_df[ESTIMATE]) return [j for j, val in enumerate(estimated) if val == 0] @@ -941,7 +939,7 @@ def sample_parameter_startpoints(self, n_starts: int = 100, **kwargs): def sample_parameter_startpoints_dict( self, n_starts: int = 100 - ) -> List[Dict[str, float]]: + ) -> list[dict[str, float]]: """Create dictionaries with starting points for optimization See also :py:func:`petab.sample_parameter_startpoints`. @@ -959,8 +957,8 @@ def sample_parameter_startpoints_dict( def unscale_parameters( self, - x_dict: Dict[str, float], - ) -> Dict[str, float]: + x_dict: dict[str, float], + ) -> dict[str, float]: """Unscale parameter values. Parameters @@ -983,8 +981,8 @@ def unscale_parameters( def scale_parameters( self, - x_dict: Dict[str, float], - ) -> Dict[str, float]: + x_dict: dict[str, float], + ) -> dict[str, float]: """Scale parameter values. Parameters diff --git a/petab/sbml.py b/petab/sbml.py index 7b5fec16..3f3e0769 100644 --- a/petab/sbml.py +++ b/petab/sbml.py @@ -39,7 +39,6 @@ def is_sbml_consistent( Returns: ``False`` if problems were detected, otherwise ``True`` """ - if not check_units: sbml_document.setConsistencyChecks( libsbml.LIBSBML_CAT_UNITS_CONSISTENCY, False @@ -202,7 +201,6 @@ def load_sbml_from_string( :param sbml_string: Model as XML string :return: The SBML document, model and reader """ - sbml_reader = libsbml.SBMLReader() sbml_document = sbml_reader.readSBMLFromString(sbml_string) sbml_model = sbml_document.getModel() diff --git a/petab/simplify.py b/petab/simplify.py index 39a58ab0..5946be05 100644 --- a/petab/simplify.py +++ b/petab/simplify.py @@ -63,7 +63,8 @@ def simplify_problem(problem: Problem): def condition_parameters_to_parameter_table(problem: Problem): """Move parameters from the condition table to the parameters table, if - the same parameter value is used for all conditions.""" + the same parameter value is used for all conditions. + """ if ( problem.condition_df is None or problem.condition_df.empty diff --git a/petab/visualize/__init__.py b/petab/visualize/__init__.py index dea23bb1..924be86a 100644 --- a/petab/visualize/__init__.py +++ b/petab/visualize/__init__.py @@ -6,15 +6,14 @@ ``import petab.visualize``. 
""" +# ruff: noqa: F401 import importlib.util -mpl_spec = importlib.util.find_spec("matplotlib") - from .plotting import DataProvider, Figure __all__ = ["DataProvider", "Figure"] -if mpl_spec is not None: +if importlib.util.find_spec("matplotlib") is not None: from .plot_data_and_simulation import ( plot_problem, plot_with_vis_spec, diff --git a/petab/visualize/cli.py b/petab/visualize/cli.py index 269461ad..d25a6785 100644 --- a/petab/visualize/cli.py +++ b/petab/visualize/cli.py @@ -10,7 +10,6 @@ def _parse_cli_args(): """Parse command-line arguments.""" - parser = argparse.ArgumentParser( description="Create PEtab visualizations." ) diff --git a/petab/visualize/data_overview.py b/petab/visualize/data_overview.py index c7d9fd41..2fd2404e 100644 --- a/petab/visualize/data_overview.py +++ b/petab/visualize/data_overview.py @@ -24,7 +24,6 @@ def create_report( model_name: Name of the model, used for file name for report output_path: Output directory """ - template_dir = Path(__file__).absolute().parent / "templates" output_path = Path(output_path) template_file = "report.html" @@ -59,7 +58,6 @@ def get_data_per_observable(measurement_df: pd.DataFrame) -> pd.DataFrame: Returns: Pivot table with number of data points per observable and condition """ - my_measurements = measurement_df.copy() index = [SIMULATION_CONDITION_ID] diff --git a/petab/visualize/helper_functions.py b/petab/visualize/helper_functions.py index 7d6fb02f..127afdde 100644 --- a/petab/visualize/helper_functions.py +++ b/petab/visualize/helper_functions.py @@ -33,7 +33,6 @@ def generate_dataset_id_col(exp_data: pd.DataFrame) -> List[str]: A list with generated datasetIds for each entry in the measurement (simulation) DataFrame """ - # create a column of dummy datasetIDs and legend entries: preallocate dataset_id_column = [] diff --git a/petab/visualize/plot_data_and_simulation.py b/petab/visualize/plot_data_and_simulation.py index 6d254c50..0353e71a 100644 --- a/petab/visualize/plot_data_and_simulation.py +++ b/petab/visualize/plot_data_and_simulation.py @@ -1,5 +1,6 @@ """Functions for plotting PEtab measurement files and simulation results in -the same format.""" +the same format. +""" from typing import Dict, List, Optional, Union @@ -58,7 +59,6 @@ def plot_with_vis_spec( ax: Axis object of the created plot. None: In case subplots are saved to a file. """ - if measurements_df is None and simulations_df is None: raise TypeError( "Not enough arguments. Either measurements_data " @@ -133,7 +133,6 @@ def plot_without_vis_spec( ax: Axis object of the created plot. None: In case subplots are saved to a file. """ - if measurements_df is None and simulations_df is None: raise TypeError( "Not enough arguments. Either measurements_data " @@ -203,7 +202,6 @@ def plot_problem( ax: Axis object of the created plot. None: In case subplots are saved to a file. """ - if petab_problem.visualization_df is not None: return plot_with_vis_spec( petab_problem.visualization_df, diff --git a/petab/visualize/plot_residuals.py b/petab/visualize/plot_residuals.py index 44418c9d..45a1e5a1 100644 --- a/petab/visualize/plot_residuals.py +++ b/petab/visualize/plot_residuals.py @@ -155,7 +155,6 @@ def plot_goodness_of_fit( ------- ax: Axis object of the created plot. 
""" - if isinstance(simulations_df, (str, Path)): simulations_df = get_simulation_df(simulations_df) diff --git a/petab/visualize/plotter.py b/petab/visualize/plotter.py index b6ea702a..68142cd2 100644 --- a/petab/visualize/plotter.py +++ b/petab/visualize/plotter.py @@ -22,7 +22,6 @@ class Plotter(ABC): Attributes ---------- - figure: Figure instance that serves as a markup for the figure that should be generated @@ -125,12 +124,7 @@ def generate_lineplot( ) # sorts according to ascending order of conditions cond, replicates = zip( - *sorted( - zip( - measurements_to_plot.conditions, - replicates - ) - ) + *sorted(zip(measurements_to_plot.conditions, replicates)) ) replicates = np.stack(replicates) @@ -448,7 +442,7 @@ def generate_subplot( # show 'e' as basis not 2.7... in natural log scale cases def ticks(y, _): - return r"$e^{{{:.0f}}}$".format(np.log(y)) + return rf"$e^{{{np.log(y):.0f}}}$" if subplot.xScale == LOG: ax.xaxis.set_major_formatter(mtick.FuncFormatter(ticks)) @@ -547,7 +541,6 @@ def _square_plot_equal_ranges( ------- Updated axis object. """ - ax.axis("square") if lim is None: diff --git a/petab/visualize/plotting.py b/petab/visualize/plotting.py index c257387f..4d60ad61 100644 --- a/petab/visualize/plotting.py +++ b/petab/visualize/plotting.py @@ -122,7 +122,6 @@ def __init__(self, plot_settings: dict): plot_settings: A plot spec for one dataplot (only VISUALIZATION_DF_SINGLE_PLOT_LEVEL_COLS) """ - for key, val in plot_settings.items(): setattr(self, key, val) @@ -307,7 +306,6 @@ def __init__( size: Figure size title: Figure title """ - # TODO: Isensee measurements table in doc/examples doesn't correspond # to documentation: observableTransformation and # noiseDistribution columns replicateId problem @@ -351,7 +349,6 @@ def set_axes_limits( ylim: Y axis limits. """ - for subplot in self.subplots: subplot.set_axes_limits(xlim, ylim) @@ -496,7 +493,6 @@ def _get_independent_var_values( uni_condition_id """ - indep_var = getattr(dataplot, X_VALUES) dataset_id = getattr(dataplot, DATASET_ID) @@ -726,7 +722,6 @@ def create_subplot( Returns ------- - Subplot """ subplot_columns = [ @@ -766,7 +761,6 @@ def parse_from_vis_spec( Returns ------- - A figure template with visualization settings and a data provider """ # import visualization specification, if file was specified @@ -858,7 +852,6 @@ def parse_from_id_list( A figure template with visualization settings and a data provider """ - if ids_per_plot is None: # this is the default case. If no grouping is specified, # all observables are plotted. One observable per plot. @@ -893,7 +886,6 @@ def _add_dataset_id_col(self) -> None: Add dataset_id column to the measurement table and simulations table (possibly overwrite). """ - if self.measurements_data is not None: if DATASET_ID in self.measurements_data.columns: self.measurements_data = self.measurements_data.drop( @@ -938,7 +930,6 @@ def _get_vis_spec_dependent_columns_dict( A dictionary with values for columns PLOT_ID, DATASET_ID, \ LEGEND_ENTRY, Y_VALUES for visualization specification. """ - if group_by != "dataset": dataset_id_list = create_dataset_id_list_new( self._data_df, group_by, id_list diff --git a/petab/yaml.py b/petab/yaml.py index 1a0ee079..364413f5 100644 --- a/petab/yaml.py +++ b/petab/yaml.py @@ -46,7 +46,6 @@ def validate( file if a filename was provided for ``yaml_config`` or the current working directory. 
""" - validate_yaml_syntax(yaml_config) validate_yaml_semantics(yaml_config=yaml_config, path_prefix=path_prefix) @@ -157,7 +156,6 @@ def load_yaml(yaml_config: Union[Dict, Path, str]) -> Dict: The unmodified dictionary if ``yaml_config`` was dictionary. Otherwise the parsed the YAML file. """ - # already parsed? all PEtab problem yaml files are dictionaries if isinstance(yaml_config, dict): return yaml_config @@ -173,7 +171,6 @@ def is_composite_problem(yaml_config: Union[Dict, str, Path]) -> bool: Arguments: yaml_config: PEtab configuration as dictionary or YAML file name """ - yaml_config = load_yaml(yaml_config) return len(yaml_config[PROBLEMS]) > 1 diff --git a/pyproject.toml b/pyproject.toml index 104f49e5..70fcb29f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,5 +5,21 @@ requires = [ ] build-backend = "setuptools.build_meta" -[tool.black] -line-length = 80 +[tool.ruff] +line-length = 79 +lint.extend-select = [ + "F", # Pyflakes + "I", # isort + "S", # flake8-bandit + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "T20", # flake8-print + "W", # pycodestyle Warnings + "E", # pycodestyle Errors + "UP", # pyupgrade + # TODO: "ANN001", "D", # pydocstyle (PEP 257) +] +lint.extend-ignore = ["F403", "F405", "S101"] + +[tool.ruff.lint.pydocstyle] +convention = "pep257" diff --git a/setup.py b/setup.py index 8f7f2cd0..aeb541bb 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,6 @@ def read(fname): def absolute_links(txt): """Replace relative petab github links by absolute links.""" - raw_base = ( "(https://raw.githubusercontent.com/petab-dev/libpetab-python/master/" ) diff --git a/tests/test_combine.py b/tests/test_combine.py index dbe54c90..398b2737 100644 --- a/tests/test_combine.py +++ b/tests/test_combine.py @@ -15,7 +15,6 @@ def test_combine_archive(): """Test `create_combine_archive` and `Problem.from_combine`""" - # Create test files import simplesbml diff --git a/tests/test_lint.py b/tests/test_lint.py index e6919f14..4d250118 100644 --- a/tests/test_lint.py +++ b/tests/test_lint.py @@ -425,7 +425,8 @@ def test_assert_parameter_prior_parameters_are_valid(): def test_petablint_succeeds(): """Run petablint and ensure we exit successfully for a file that should - contain no errors""" + contain no errors + """ dir_isensee = "../doc/example/example_Isensee/" dir_fujita = "../doc/example/example_Fujita/" @@ -550,7 +551,6 @@ def test_check_condition_df(): def test_check_ids(): """Test check_ids""" - lint.check_ids(["a1", "_1"]) with pytest.raises(ValueError): @@ -559,7 +559,6 @@ def test_check_ids(): def test_check_parameter_df(): """Check lint.check_parameter_df.""" - parameter_df = pd.DataFrame( { PARAMETER_ID: ["par0", "par1", "par2"], @@ -586,7 +585,6 @@ def test_check_parameter_df(): def test_check_observable_df(): """Check that we correctly detect errors in observable table""" - observable_df = pd.DataFrame( data={ OBSERVABLE_ID: ["obs1", "obs2"], diff --git a/tests/test_measurements.py b/tests/test_measurements.py index ba6a8495..ac3e59a3 100644 --- a/tests/test_measurements.py +++ b/tests/test_measurements.py @@ -77,7 +77,6 @@ def test_measurements_have_replicates(): def test_get_simulation_conditions(): """Test get_simulation_conditions""" - # only simulation condition measurement_df = pd.DataFrame( data={ diff --git a/tests/test_observables.py b/tests/test_observables.py index 06da3950..2897f86f 100644 --- a/tests/test_observables.py +++ b/tests/test_observables.py @@ -114,7 +114,6 @@ def test_get_output_parameters(): def test_get_formula_placeholders(): """Test 
get_formula_placeholders""" - # no placeholder assert petab.get_formula_placeholders("1.0", "any", "observable") == [] diff --git a/tests/test_parameter_mapping.py b/tests/test_parameter_mapping.py index b8f91288..f1db8c02 100644 --- a/tests/test_parameter_mapping.py +++ b/tests/test_parameter_mapping.py @@ -15,7 +15,7 @@ ] -class TestGetSimulationToOptimizationParameterMapping(object): +class TestGetSimulationToOptimizationParameterMapping: @staticmethod def test_no_condition_specific(condition_df_2_conditions): # Trivial case - no condition-specific parameters @@ -544,8 +544,8 @@ def test_parameterized_condition_table_changed_scale(): - a log10 parameter to be estimated (condition 1) - lin parameter not estimated (condition2) - log10 parameter not estimated (condition 3) - - constant override (condition 4)""" - + - constant override (condition 4) + """ # overridden parameter overridee_id = "overridee" diff --git a/tests/test_petab.py b/tests/test_petab.py index 8c44bd53..1003761a 100644 --- a/tests/test_petab.py +++ b/tests/test_petab.py @@ -766,7 +766,6 @@ def test_to_files(petab_problem): # pylint: disable=W0621 def test_load_remote(): """Test loading remote files""" - yaml_url = ( "https://raw.githubusercontent.com/PEtab-dev/petab_test_suite" "/main/petabtests/cases/v1.0.0/sbml/0001/_0001.yaml" @@ -797,7 +796,8 @@ def test_problem_from_yaml_v1_empty(): def test_problem_from_yaml_v1_multiple_files(): """Test loading PEtab version 1 yaml with multiple condition / measurement - / observable files""" + / observable files + """ yaml_config = """ format_version: 1 parameter_file: diff --git a/tests/test_simulate.py b/tests/test_simulate.py index 068a3a71..3c30b15f 100644 --- a/tests/test_simulate.py +++ b/tests/test_simulate.py @@ -138,7 +138,6 @@ def test_zero_bounded(petab_problem): def test_add_noise(petab_problem): """Test the noise generating method.""" - tested_noise_distributions = {"normal", "laplace"} assert set(petab.C.NOISE_MODELS) == tested_noise_distributions, ( "The noise generation methods have only been tested for " From b7d613004216090121f2016db28f4645251f5933 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 16 Apr 2024 15:22:32 +0200 Subject: [PATCH 03/14] Doc: Exclude private members (#253) --- doc/conf.py | 1 - petab/visualize/plotter.py | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index a309a3df..bf5cc489 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -74,7 +74,6 @@ "members": None, "imported-members": ["petab"], "inherited-members": None, - "private-members": None, "show-inheritance": None, } diff --git a/petab/visualize/plotter.py b/petab/visualize/plotter.py index 68142cd2..2114c364 100644 --- a/petab/visualize/plotter.py +++ b/petab/visualize/plotter.py @@ -55,11 +55,12 @@ def _error_column_for_plot_type_data(plot_type_data: str) -> Optional[str]: Parameters ---------- - plot_type_data: PEtab plotTypeData value (the way replicates - should be handled) + plot_type_data: PEtab plotTypeData value (the way replicates should be + handled) + Returns ------- - Name of corresponding column + Name of corresponding column """ if plot_type_data == MEAN_AND_SD: return "sd" From aa8cd78966d74a6cf25063f7523e555099020139 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 16 Apr 2024 16:12:59 +0200 Subject: [PATCH 04/14] maint: fix code style, check on gha (#252) Follow-up to #251 * Manual changes for compatibility with selected ruff rules * run pre-commit hooks on GHA instead of flake8 * run tests on python3.12 --- 
.github/workflows/ci_tests.yml | 2 +- doc/conf.py | 4 +- petab/calculate.py | 2 +- petab/conditions.py | 2 +- petab/core.py | 13 ++--- petab/measurements.py | 4 +- petab/models/model.py | 1 + petab/observables.py | 2 +- petab/parameters.py | 16 ++++-- petab/problem.py | 66 +++++++++++------------ petab/sampling.py | 3 +- petab/sbml.py | 11 ++-- petab/simulate.py | 11 ++-- petab/visualize/data_overview.py | 2 +- petab/visualize/plotting.py | 9 ++-- pyproject.toml | 3 ++ setup.py | 4 +- tests/test_lint.py | 6 +-- tests/test_parameters.py | 6 +-- tests/test_petab.py | 26 +++++---- tests/test_simulate.py | 5 +- tests/test_visualization.py | 2 +- tests/test_visualization_data_overview.py | 3 +- tox.ini | 2 +- 24 files changed, 117 insertions(+), 88 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index dc96c438..30cb098f 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -11,7 +11,7 @@ jobs: strategy: matrix: platform: [windows-latest, macos-latest, ubuntu-latest] - python-version: ["3.9", "3.11"] + python-version: ["3.9", "3.12"] runs-on: ${{ matrix.platform }} steps: diff --git a/doc/conf.py b/doc/conf.py index bf5cc489..4dbd3009 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -20,7 +20,7 @@ # -- Project information ----------------------------------------------------- project = "libpetab-python" -copyright = "2018-2023, the PEtab developers" +copyright = "2018-2024, the PEtab developers" author = "PEtab developers" # The full version, including alpha/beta/rc tags @@ -29,7 +29,7 @@ # -- Custom pre-build -------------------------------------------------------- -subprocess.run(["python", "md2rst.py"]) +subprocess.run([sys.executable, "md2rst.py"]) # noqa: S603 # -- General configuration --------------------------------------------------- diff --git a/petab/calculate.py b/petab/calculate.py index 4ca3240b..ce402241 100644 --- a/petab/calculate.py +++ b/petab/calculate.py @@ -336,7 +336,7 @@ def calculate_llh_for_table( noise_formulas = get_symbolic_noise_formulas(observable_df) # iterate over measurements, find corresponding simulations - for irow, row in measurement_df.iterrows(): + for _, row in measurement_df.iterrows(): measurement = row[MEASUREMENT] # look up in simulation df diff --git a/petab/conditions.py b/petab/conditions.py index 4e88e925..86c5f7d3 100644 --- a/petab/conditions.py +++ b/petab/conditions.py @@ -47,7 +47,7 @@ def get_condition_df( except KeyError: raise KeyError( f"Condition table missing mandatory field {CONDITION_ID}." - ) + ) from None return condition_file diff --git a/petab/core.py b/petab/core.py index 57d548f2..97b002d2 100644 --- a/petab/core.py +++ b/petab/core.py @@ -102,7 +102,8 @@ def get_visualization_df( except pd.errors.EmptyDataError: warn( "Visualization table is empty. Defaults will be used. " - "Refer to the documentation for details." 
+ "Refer to the documentation for details.", + stacklevel=2, ) vis_spec = pd.DataFrame() return vis_spec @@ -226,9 +227,9 @@ def get_flattened_id_mappings( mappings[OBSERVABLE_ID][observable_replacement_id] = observable_id - for field, hyperparameter_type, target in [ - (NOISE_PARAMETERS, "noiseParameter", NOISE_FORMULA), - (OBSERVABLE_PARAMETERS, "observableParameter", OBSERVABLE_FORMULA), + for field, hyperparameter_type in [ + (NOISE_PARAMETERS, "noiseParameter"), + (OBSERVABLE_PARAMETERS, "observableParameter"), ]: if field in measurements: mappings[field][ @@ -432,11 +433,11 @@ def create_combine_archive( # other SWIG interfaces try: import libcombine - except ImportError: + except ImportError as err: raise ImportError( "To use PEtab's COMBINE functionality, libcombine " "(python-libcombine) must be installed." - ) + ) from err def _add_file_metadata(location: str, description: str = ""): """Add metadata to the added file""" diff --git a/petab/measurements.py b/petab/measurements.py index 6ab35eb7..caa32047 100644 --- a/petab/measurements.py +++ b/petab/measurements.py @@ -321,7 +321,7 @@ def assert_overrides_match_parameter_count( f"Mismatch of noise parameter overrides in:\n{row}\n" f"Expected {expected} but got {len(replacements)}" ) - except KeyError: + except KeyError as err: # no overrides defined, but a numerical sigma can be provided # anyways if len(replacements) != 1 or not isinstance( @@ -332,7 +332,7 @@ def assert_overrides_match_parameter_count( f"for observable {row[OBSERVABLE_ID]}, but parameter ID " "or multiple overrides were specified in the " "noiseParameters column." - ) + ) from err def measurement_is_at_steady_state(time: float) -> bool: diff --git a/petab/models/model.py b/petab/models/model.py index b123efd9..5d2f63ad 100644 --- a/petab/models/model.py +++ b/petab/models/model.py @@ -9,6 +9,7 @@ class Model(abc.ABC): """Base class for wrappers for any PEtab-supported model type""" + @abc.abstractmethod def __init__(self): ... diff --git a/petab/observables.py b/petab/observables.py index a0976cd7..496e7bd8 100644 --- a/petab/observables.py +++ b/petab/observables.py @@ -55,7 +55,7 @@ def get_observable_df( except KeyError: raise KeyError( f"Observable table missing mandatory field {OBSERVABLE_ID}." - ) + ) from None return observable_file diff --git a/petab/parameters.py b/petab/parameters.py index 62fd8980..ff4aeda8 100644 --- a/petab/parameters.py +++ b/petab/parameters.py @@ -447,7 +447,8 @@ def get_priors_from_df( Arguments: parameter_df: PEtab parameter table mode: ``'initialization'`` or ``'objective'`` - parameter_ids: A sequence of parameter IDs for which to sample starting points. + parameter_ids: A sequence of parameter IDs for which to sample starting + points. For subsetting or reordering the parameters. Defaults to all estimated parameters. @@ -463,7 +464,8 @@ def get_priors_from_df( except KeyError as e: missing_ids = set(parameter_ids) - set(par_to_estimate.index) raise KeyError( - f"Parameter table does not contain estimated parameter(s) {missing_ids}." + "Parameter table does not contain estimated parameter(s) " + f"{missing_ids}." 
) from e prior_list = [] @@ -567,7 +569,10 @@ def map_scale( """ if isinstance(scale_strs, str): scale_strs = [scale_strs] * len(parameters) - return map(lambda x: scale(x[0], x[1]), zip(parameters, scale_strs)) + return ( + scale(par_val, scale_str) + for par_val, scale_str in zip(parameters, scale_strs) + ) def map_unscale( @@ -588,7 +593,10 @@ def map_unscale( """ if isinstance(scale_strs, str): scale_strs = [scale_strs] * len(parameters) - return map(lambda x: unscale(x[0], x[1]), zip(parameters, scale_strs)) + return ( + unscale(par_val, scale_str) + for par_val, scale_str in zip(parameters, scale_strs) + ) def normalize_parameter_df(parameter_df: pd.DataFrame) -> pd.DataFrame: diff --git a/petab/problem.py b/petab/problem.py index c9154a4c..6c5307b2 100644 --- a/petab/problem.py +++ b/petab/problem.py @@ -5,7 +5,7 @@ import tempfile from math import nan from pathlib import Path, PurePosixPath -from typing import TYPE_CHECKING, Iterable, Optional, Union +from typing import TYPE_CHECKING, Iterable from urllib.parse import unquote, urlparse, urlunparse from warnings import warn @@ -78,12 +78,12 @@ def __init__( mapping_df: pd.DataFrame = None, extensions_config: dict = None, ): - self.condition_df: Optional[pd.DataFrame] = condition_df - self.measurement_df: Optional[pd.DataFrame] = measurement_df - self.parameter_df: Optional[pd.DataFrame] = parameter_df - self.visualization_df: Optional[pd.DataFrame] = visualization_df - self.observable_df: Optional[pd.DataFrame] = observable_df - self.mapping_df: Optional[pd.DataFrame] = mapping_df + self.condition_df: pd.DataFrame | None = condition_df + self.measurement_df: pd.DataFrame | None = measurement_df + self.parameter_df: pd.DataFrame | None = parameter_df + self.visualization_df: pd.DataFrame | None = visualization_df + self.observable_df: pd.DataFrame | None = observable_df + self.mapping_df: pd.DataFrame | None = mapping_df if any( (sbml_model, sbml_document, sbml_reader), @@ -109,7 +109,7 @@ def __init__( model_id=model_id, ) - self.model: Optional[Model] = model + self.model: Model | None = model self.extensions_config = extensions_config or {} def __getattr__(self, name): @@ -169,14 +169,12 @@ def __str__(self): @staticmethod def from_files( - sbml_file: Union[str, Path] = None, - condition_file: Union[str, Path, Iterable[Union[str, Path]]] = None, - measurement_file: Union[str, Path, Iterable[Union[str, Path]]] = None, - parameter_file: Union[str, Path, Iterable[Union[str, Path]]] = None, - visualization_files: Union[ - str, Path, Iterable[Union[str, Path]] - ] = None, - observable_files: Union[str, Path, Iterable[Union[str, Path]]] = None, + sbml_file: str | Path = None, + condition_file: str | Path | Iterable[str | Path] = None, + measurement_file: str | Path | Iterable[str | Path] = None, + parameter_file: str | Path | Iterable[str | Path] = None, + visualization_files: str | Path | Iterable[str | Path] = None, + observable_files: str | Path | Iterable[str | Path] = None, model_id: str = None, extensions_config: dict = None, ) -> Problem: @@ -252,7 +250,7 @@ def from_files( ) @staticmethod - def from_yaml(yaml_config: Union[dict, Path, str]) -> Problem: + def from_yaml(yaml_config: dict | Path | str) -> Problem: """ Factory method to load model and tables as specified by YAML file. @@ -308,7 +306,7 @@ def from_yaml(yaml_config: Union[dict, Path, str]) -> Problem: f"{format_version.__format_version__}." 
) if yaml_config[FORMAT_VERSION] == "2.0.0": - warn("Support for PEtab2.0 is experimental!") + warn("Support for PEtab2.0 is experimental!", stacklevel=2) problem0 = yaml_config["problems"][0] @@ -421,7 +419,7 @@ def from_yaml(yaml_config: Union[dict, Path, str]) -> Problem: ) @staticmethod - def from_combine(filename: Union[Path, str]) -> Problem: + def from_combine(filename: Path | str) -> Problem: """Read PEtab COMBINE archive (http://co.mbine.org/documents/archive). See also :py:func:`petab.create_combine_archive`. @@ -444,8 +442,7 @@ def from_combine(filename: Union[Path, str]) -> Problem: archive = libcombine.CombineArchive() if archive.initializeFromArchive(str(filename)) is None: - print(f"Invalid Combine Archive: {filename}") - return None + raise ValueError(f"Invalid Combine Archive: {filename}") with tempfile.TemporaryDirectory() as tmpdirname: archive.extractTo(tmpdirname) @@ -458,7 +455,7 @@ def from_combine(filename: Union[Path, str]) -> Problem: def to_files_generic( self, - prefix_path: Union[str, Path], + prefix_path: str | Path, ) -> str: """Save a PEtab problem to generic file names. @@ -510,17 +507,17 @@ def to_files_generic( def to_files( self, - sbml_file: Union[None, str, Path] = None, - condition_file: Union[None, str, Path] = None, - measurement_file: Union[None, str, Path] = None, - parameter_file: Union[None, str, Path] = None, - visualization_file: Union[None, str, Path] = None, - observable_file: Union[None, str, Path] = None, - yaml_file: Union[None, str, Path] = None, - prefix_path: Union[None, str, Path] = None, + sbml_file: None | str | Path = None, + condition_file: None | str | Path = None, + measurement_file: None | str | Path = None, + parameter_file: None | str | Path = None, + visualization_file: None | str | Path = None, + observable_file: None | str | Path = None, + yaml_file: None | str | Path = None, + prefix_path: None | str | Path = None, relative_paths: bool = True, - model_file: Union[None, str, Path] = None, - mapping_file: Union[None, str, Path] = None, + model_file: None | str | Path = None, + mapping_file: None | str | Path = None, ) -> None: """ Write PEtab tables to files for this problem @@ -573,7 +570,7 @@ def to_files( if prefix_path is not None: prefix_path = Path(prefix_path) - def add_prefix(path0: Union[None, str, Path]) -> str: + def add_prefix(path0: None | str | Path) -> str: return path0 if path0 is None else str(prefix_path / path0) model_file = add_prefix(model_file) @@ -913,7 +910,7 @@ def get_optimization_to_simulation_parameter_mapping(self, **kwargs): ) ) - def create_parameter_df(self, *args, **kwargs): + def create_parameter_df(self, **kwargs): """Create a new PEtab parameter table See :py:func:`create_parameter_df`. @@ -924,7 +921,6 @@ def create_parameter_df(self, *args, **kwargs): observable_df=self.observable_df, measurement_df=self.measurement_df, mapping_df=self.mapping_df, - *args, **kwargs, ) diff --git a/petab/sampling.py b/petab/sampling.py index ca4cfbd1..466c5284 100644 --- a/petab/sampling.py +++ b/petab/sampling.py @@ -118,7 +118,8 @@ def sample_parameter_startpoints( parameter_df: PEtab parameter DataFrame n_starts: Number of points to be sampled seed: Random number generator seed (see :func:`numpy.random.seed`) - parameter_ids: A sequence of parameter IDs for which to sample starting points. + parameter_ids: A sequence of parameter IDs for which to sample starting + points. For subsetting or reordering the parameters. Defaults to all estimated parameters. 
diff --git a/petab/sbml.py b/petab/sbml.py index 3f3e0769..b177478e 100644 --- a/petab/sbml.py +++ b/petab/sbml.py @@ -103,15 +103,15 @@ def globalize_parameters( creating global parameters """ warn( - "This function will be removed in future releases.", DeprecationWarning + "This function will be removed in future releases.", + DeprecationWarning, + stacklevel=2, ) for reaction in sbml_model.getListOfReactions(): law = reaction.getKineticLaw() # copy first so we can delete in the following loop - local_parameters = list( - local_parameter for local_parameter in law.getListOfParameters() - ) + local_parameters = list(law.getListOfParameters()) for lp in local_parameters: if prepend_reaction_id: parameter_id = f"{reaction.getId()}_{lp.getId()}" @@ -300,7 +300,8 @@ def remove_rules(target_id: str): if sbml_model.removeRuleByVariable(target_id): warn( "An SBML rule was removed to set the component " - f"{target_id} to a constant value." + f"{target_id} to a constant value.", + stacklevel=2, ) sbml_model.removeInitialAssignment(target_id) diff --git a/petab/simulate.py b/petab/simulate.py index 560aa073..59aa46cf 100644 --- a/petab/simulate.py +++ b/petab/simulate.py @@ -5,6 +5,7 @@ import shutil import tempfile from typing import Dict, Optional, Union +from warnings import warn import numpy as np import pandas as pd @@ -90,15 +91,17 @@ def remove_working_dir(self, force: bool = False, **kwargs) -> None: if force or self.temporary_working_dir: shutil.rmtree(self.working_dir, **kwargs) if self.working_dir.is_dir(): - print( + warn( "Failed to remove the working directory: " - + str(self.working_dir) + + str(self.working_dir), + stacklevel=2, ) else: - print( + warn( "By default, specified working directories are not removed. " "Please call this method with `force=True`, or manually " - f"delete the working directory: {self.working_dir}" + f"delete the working directory: {self.working_dir}", + stacklevel=2, ) @abc.abstractmethod diff --git a/petab/visualize/data_overview.py b/petab/visualize/data_overview.py index 2fd2404e..a327d655 100644 --- a/petab/visualize/data_overview.py +++ b/petab/visualize/data_overview.py @@ -35,7 +35,7 @@ def create_report( import jinja2 template_loader = jinja2.FileSystemLoader(searchpath=template_dir) - template_env = jinja2.Environment(loader=template_loader) + template_env = jinja2.Environment(loader=template_loader, autoescape=True) template = template_env.get_template(template_file) # Render and save diff --git a/petab/visualize/plotting.py b/petab/visualize/plotting.py index 4d60ad61..e1f874ce 100644 --- a/petab/visualize/plotting.py +++ b/petab/visualize/plotting.py @@ -213,7 +213,8 @@ def from_df( f"For {PLOT_ID} {plot_id} in column " f"{col} contradictory settings ({entry})" f". Proceeding with first entry " - f"({entry[0]})." + f"({entry[0]}).", + stacklevel=2, ) entry = entry[0] @@ -248,7 +249,8 @@ def from_df( f"Column {col} cannot be used to specify subplot" f", only settings from the following columns can" f" be used:" - + ", ".join(VISUALIZATION_DF_SUBPLOT_LEVEL_COLS) + + ", ".join(VISUALIZATION_DF_SUBPLOT_LEVEL_COLS), + stacklevel=2, ) return cls(plot_id, vis_spec_dict, dataplots) @@ -371,7 +373,8 @@ def save_to_tsv(self, output_file_path: str = "visuSpec.tsv") -> None: warnings.warn( f"Note: please check that {DATASET_ID} column " f"corresponds to {DATASET_ID} column in Measurement " - f"(Simulation) table." 
+ f"(Simulation) table.", + stacklevel=2, ) visu_dict = {} diff --git a/pyproject.toml b/pyproject.toml index 70fcb29f..5fcdd9e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,3 +23,6 @@ lint.extend-ignore = ["F403", "F405", "S101"] [tool.ruff.lint.pydocstyle] convention = "pep257" + +[tool.ruff.lint.per-file-ignores] +"tests/*" = ["T201"] diff --git a/setup.py b/setup.py index aeb541bb..7799f18b 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ def absolute_links(txt): __version__ = "" version_file = os.path.join("petab", "version.py") # sets __version__ -exec(read(version_file)) # pylint: disable=W0122 # nosec +exec(read(version_file)) # pylint: disable=W0122 # nosec # noqa: S102 ENTRY_POINTS = { "console_scripts": [ @@ -83,7 +83,7 @@ def absolute_links(txt): "pysb", ], "quality": [ - "flake8>=3.8.3", + "pre-commit", ], "reports": [ # https://github.com/spatialaudio/nbsphinx/issues/641 diff --git a/tests/test_lint.py b/tests/test_lint.py index 4d250118..cc99f71d 100644 --- a/tests/test_lint.py +++ b/tests/test_lint.py @@ -435,18 +435,18 @@ def test_petablint_succeeds(): measurement_file = os.path.join( script_path, dir_isensee, "Isensee_measurementData.tsv" ) - result = subprocess.run(["petablint", "-m", measurement_file]) + result = subprocess.run(["petablint", "-m", measurement_file]) # noqa: S603,S607 assert result.returncode == 0 # run with yaml yaml_file = os.path.join(script_path, dir_fujita, "Fujita.yaml") - result = subprocess.run(["petablint", "-v", "-y", yaml_file]) + result = subprocess.run(["petablint", "-v", "-y", yaml_file]) # noqa: S603,S607 assert result.returncode == 0 parameter_file = os.path.join( script_path, dir_fujita, "Fujita_parameters.tsv" ) - result = subprocess.run(["petablint", "-v", "-p", parameter_file]) + result = subprocess.run(["petablint", "-v", "-p", parameter_file]) # noqa: S603,S607 assert result.returncode == 0 diff --git a/tests/test_parameters.py b/tests/test_parameters.py index e9073d31..a2fa5e66 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -22,7 +22,7 @@ def test_get_optimization_parameter_scaling(): df.set_index(PARAMETER_ID, inplace=True) # parameter and scale - expected = dict(p1=LIN, p3=LOG10) + expected = {"p1": LIN, "p3": LOG10} actual = petab.get_optimization_parameter_scaling(df) @@ -105,7 +105,7 @@ def test_get_parameter_df(): PARAMETER_NAME: ["different_name2", "name3"], } ) - for name, df in parameter_dfs.items(): + for name in parameter_dfs: with tempfile.NamedTemporaryFile( mode="w", delete=False, dir=directory ) as fh: @@ -151,7 +151,7 @@ def test_get_parameter_df(): parameter_dfs["subset2"] = pd.DataFrame( data={PARAMETER_ID: ["id3", "id4"], NOMINAL_VALUE: [1, 1]} ) - for name, df in parameter_dfs.items(): + for name in parameter_dfs: with tempfile.NamedTemporaryFile( mode="w", delete=False, dir=directory ) as fh: diff --git a/tests/test_petab.py b/tests/test_petab.py index 1003761a..ed4ac63a 100644 --- a/tests/test_petab.py +++ b/tests/test_petab.py @@ -165,7 +165,7 @@ def test_get_measurement_parameter_ids(): def test_serialization(petab_problem): # serialize and back - problem_recreated = pickle.loads(pickle.dumps(petab_problem)) + problem_recreated = pickle.loads(pickle.dumps(petab_problem)) # noqa: S301 assert problem_recreated.measurement_df.equals( petab_problem.measurement_df @@ -362,14 +362,16 @@ def test_flatten_timepoint_specific_output_overrides(): "x", ], NOISE_FORMULA: [ - "(observableParameter1_obs1 + observableParameter2_obs1) * noiseParameter1_obs1", + 
"(observableParameter1_obs1 + observableParameter2_obs1)" + " * noiseParameter1_obs1", 1, ], } ) observable_df.set_index(OBSERVABLE_ID, inplace=True) - # new observable IDs (obs${i_obs}_${i_obsParOverride}_${i_noiseParOverride}_${i_condition}) + # new observable IDs + # (obs${i_obs}_${i_obsParOverride}_${i_noiseParOverride}_${i_condition}) obs1_1_1_1 = "obs1__obsParOverride1_1_0__noiseParOverride1__condition1" obs1_2_1_1 = "obs1__obsParOverride2_1_0__noiseParOverride1__condition1" obs1_2_2_1 = "obs1__obsParOverride2_1_0__noiseParOverride2__condition1" @@ -382,17 +384,23 @@ def test_flatten_timepoint_specific_output_overrides(): "obs2__condition1", ], OBSERVABLE_FORMULA: [ - f"observableParameter1_{obs1_1_1_1} + observableParameter2_{obs1_1_1_1}", - f"observableParameter1_{obs1_2_1_1} + observableParameter2_{obs1_2_1_1}", - f"observableParameter1_{obs1_2_2_1} + observableParameter2_{obs1_2_2_1}", + f"observableParameter1_{obs1_1_1_1}" + f" + observableParameter2_{obs1_1_1_1}", + f"observableParameter1_{obs1_2_1_1}" + f" + observableParameter2_{obs1_2_1_1}", + f"observableParameter1_{obs1_2_2_1}" + f" + observableParameter2_{obs1_2_2_1}", "x", ], NOISE_FORMULA: [ - f"(observableParameter1_{obs1_1_1_1} + observableParameter2_{obs1_1_1_1})" + f"(observableParameter1_{obs1_1_1_1}" + f" + observableParameter2_{obs1_1_1_1})" f" * noiseParameter1_{obs1_1_1_1}", - f"(observableParameter1_{obs1_2_1_1} + observableParameter2_{obs1_2_1_1})" + f"(observableParameter1_{obs1_2_1_1}" + f" + observableParameter2_{obs1_2_1_1})" f" * noiseParameter1_{obs1_2_1_1}", - f"(observableParameter1_{obs1_2_2_1} + observableParameter2_{obs1_2_2_1})" + f"(observableParameter1_{obs1_2_2_1}" + f" + observableParameter2_{obs1_2_2_1})" f" * noiseParameter1_{obs1_2_2_1}", 1, ], diff --git a/tests/test_simulate.py b/tests/test_simulate.py index 3c30b15f..ee85ea08 100644 --- a/tests/test_simulate.py +++ b/tests/test_simulate.py @@ -55,7 +55,10 @@ def test_remove_working_dir(petab_problem): assert Path(simulator.working_dir).is_dir() # A user-specified working directory should not be removed unless # `force=True`. 
- simulator.remove_working_dir() + with pytest.warns( + UserWarning, match="working directories are not removed" + ): + simulator.remove_working_dir() # The user-specified working directory is not removed without `force=True` assert Path(simulator.working_dir).is_dir() simulator.remove_working_dir(force=True) diff --git a/tests/test_visualization.py b/tests/test_visualization.py index cf427c80..763aac48 100644 --- a/tests/test_visualization.py +++ b/tests/test_visualization.py @@ -553,7 +553,7 @@ def test_cli(): "-o", temp_dir, ] - subprocess.run(args, check=True) + subprocess.run(args, check=True) # noqa: S603 @pytest.mark.filterwarnings("ignore:Visualization table is empty") diff --git a/tests/test_visualization_data_overview.py b/tests/test_visualization_data_overview.py index c1d89500..76669f43 100644 --- a/tests/test_visualization_data_overview.py +++ b/tests/test_visualization_data_overview.py @@ -6,7 +6,8 @@ def test_data_overview(): - """Data overview generation with Fujita example data from this repository""" + """Data overview generation with Fujita example data from this + repository""" with TemporaryDirectory() as temp_dir: outfile = Path(temp_dir) / "Fujita.html" repo_root = Path(__file__).parent.parent diff --git a/tox.ini b/tox.ini index 6044e8d4..0ce0914d 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,7 @@ isolated_build = True [testenv:quality] extras = quality commands = - python -m flake8 petab setup.py tests + pre-commit run --all-files description = Quality tests From f32feed1acfd882c59ee97adc68b4e46469532d9 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 16 Apr 2024 16:47:52 +0200 Subject: [PATCH 05/14] Vis: Make line plot markers configurable (#254) Allow changing the default style for line plots. --- petab/visualize/plotter.py | 54 ++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/petab/visualize/plotter.py b/petab/visualize/plotter.py index 2114c364..c2ebe6e4 100644 --- a/petab/visualize/plotter.py +++ b/petab/visualize/plotter.py @@ -16,6 +16,22 @@ __all__ = ["Plotter", "MPLPlotter", "SeabornPlotter"] +#: Line style (:class:`matplotlib.lines.Line2D` options) for the measurement +# data in line plots +measurement_line_kwargs = { + "linestyle": "-.", + "marker": "x", + "markersize": 10, +} +#: Line style (:class:`matplotlib.lines.Line2D` options) for the simulation +# data in line plots +simulation_line_kwargs = { + "linestyle": "-", + "marker": "o", + "markersize": 10, +} + + class Plotter(ABC): """ Plotter abstract base class. @@ -78,7 +94,7 @@ def generate_lineplot( splitaxes_params: dict, ) -> Tuple[matplotlib.axes.Axes, matplotlib.axes.Axes]: """ - Generate lineplot. + Generate line plot. It is possible to plot only data or only simulation or both. 
@@ -136,19 +152,15 @@ def generate_lineplot( p = ax.plot( cond, replicates[:, 0], - linestyle="-.", - marker="x", - markersize=10, label=label_base, + **measurement_line_kwargs, ) # plot other replicates with the same color ax.plot( cond, replicates[:, 1:], - linestyle="-.", - marker="x", - markersize=10, + **measurement_line_kwargs, color=p[0].get_color(), ) @@ -177,9 +189,8 @@ def generate_lineplot( scond, smean, snoise, - linestyle="-.", - marker=".", label=label_base, + **measurement_line_kwargs, ) # simulations should have the same colors if both measurements @@ -229,11 +240,10 @@ def generate_lineplot( p = ax.plot( xs, ys, - linestyle="-", - marker="o", markevery=every, label=label_base + " simulation", color=simu_color, + **simulation_line_kwargs, ) # lines at t=inf should have the same colors also in case # only simulations are plotted @@ -628,23 +638,19 @@ def _line_plot_at_t_inf( p = ax_inf.plot( timepoints_inf, [replicates[0]] * 3, - linestyle="-.", - marker="x", - markersize=10, markevery=[1], label=label_base + " simulation", color=color, + **measurement_line_kwargs, ) # plot other replicates with the same color ax_inf.plot( timepoints_inf, [replicates[1:]] * 3, - linestyle="-.", - marker="x", - markersize=10, markevery=[1], color=p[0].get_color(), + **measurement_line_kwargs, ) else: p = ax_inf.plot( @@ -653,17 +659,16 @@ def _line_plot_at_t_inf( measurements_data_to_plot_inf["mean"], measurements_data_to_plot_inf["mean"], ], - linestyle="-.", color=color, + **measurement_line_kwargs, ) ax_inf.errorbar( t_inf, measurements_data_to_plot_inf["mean"], measurements_data_to_plot_inf[noise_col], - linestyle="-.", - marker=".", label=label_base + " simulation", color=p[0].get_color(), + **measurement_line_kwargs, ) if color is None: @@ -687,30 +692,27 @@ def _line_plot_at_t_inf( p = ax_inf.plot( timepoints_inf, [replicates[0]] * 3, - linestyle="-", - marker="o", markevery=[1], label=label_base, color=color, + **simulation_line_kwargs, ) # plot other replicates with the same color ax_inf.plot( timepoints_inf, [replicates[1:]] * 3, - linestyle="-", - marker="o", markevery=[1], color=p[0].get_color(), + **simulation_line_kwargs, ) else: ax_inf.plot( timepoints_inf, [simulations_data_to_plot_inf["mean"]] * 3, - linestyle="-", - marker="o", markevery=[1], color=color, + **simulation_line_kwargs, ) ax.set_xlim(right=ax_finite_right_limit) From 4cd02ef91bd97050ed285f7dbcfd7b4b9dd5e75b Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Fri, 19 Apr 2024 11:06:48 +0200 Subject: [PATCH 06/14] Validator: check datasetId in visualizations table exists in measurements table (#255) Closes #229 --- petab/visualize/lint.py | 14 ++++++++++++++ tests/test_visualization.py | 31 +++++++++++++++++++++++++++---- 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/petab/visualize/lint.py b/petab/visualize/lint.py index daa4c443..0e973928 100644 --- a/petab/visualize/lint.py +++ b/petab/visualize/lint.py @@ -127,6 +127,20 @@ def validate_visualization_df(problem: Problem) -> bool: ) errors = True + if problem.measurement_df is not None: + referenced_datasets = set(filter(bool, vis_df[C.DATASET_ID].unique())) + if referenced_datasets: + existing_datasets = set( + filter(bool, problem.measurement_df[C.DATASET_ID].unique()) + ) + if not referenced_datasets.issubset(existing_datasets): + logger.error( + f"Visualization table references {C.DATASET_ID}(s) " + f"{referenced_datasets - existing_datasets}, but no such " + "dataset(s) exist in the measurement table." 
+ ) + errors = True + return errors diff --git a/tests/test_visualization.py b/tests/test_visualization.py index 763aac48..1e67afff 100644 --- a/tests/test_visualization.py +++ b/tests/test_visualization.py @@ -178,11 +178,16 @@ def test_visualization_with_vis_and_sim( simulation_file_Isensee, close_fig, ): - validate_visualization_df( - petab.Problem( - condition_df=petab.get_condition_df(condition_file_Isensee), - visualization_df=petab.get_visualization_df(vis_spec_file_Isensee), + assert ( + validate_visualization_df( + petab.Problem( + condition_df=petab.get_condition_df(condition_file_Isensee), + visualization_df=petab.get_visualization_df( + vis_spec_file_Isensee + ), + ) ) + is False ) plot_with_vis_spec( vis_spec_file_Isensee, @@ -577,3 +582,21 @@ def test_validate(vis_file, request): assert False is validate_visualization_df( petab.Problem(visualization_df=petab.get_visualization_df(vis_file)) ) + + +def test_validate_visualization_missing_dataset( + condition_file_Isensee, + data_file_Isensee, + vis_spec_file_Isensee, + simulation_file_Isensee, +): + petab_problem = petab.Problem( + condition_df=petab.get_condition_df(condition_file_Isensee), + measurement_df=petab.get_measurement_df(data_file_Isensee), + visualization_df=petab.get_visualization_df(vis_spec_file_Isensee), + ) + + assert validate_visualization_df(petab_problem) is False + + petab_problem.visualization_df.loc[0, petab.DATASET_ID] = "missing_dataset" + assert validate_visualization_df(petab_problem) is True From 0788f9b4923a87cde5fafc48b6e7b308e62eebc9 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 29 Apr 2024 12:59:00 +0200 Subject: [PATCH 07/14] Require python>=3.10 (#256) Require python>=3.10 according to [NEP-0029](https://numpy.org/neps/nep-0029-deprecation_policy.html#drop-schedule). --- .github/workflows/ci_tests.yml | 2 +- .readthedocs.yaml | 2 +- README.md | 4 ++-- setup.py | 7 +------ 4 files changed, 5 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 30cb098f..4332819f 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -11,7 +11,7 @@ jobs: strategy: matrix: platform: [windows-latest, macos-latest, ubuntu-latest] - python-version: ["3.9", "3.12"] + python-version: ["3.10", "3.12"] runs-on: ${{ matrix.platform }} steps: diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 5d8f7845..255099f6 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -10,7 +10,7 @@ sphinx: build: os: "ubuntu-22.04" tools: - python: "3.9" + python: "3.10" python: install: diff --git a/README.md b/README.md index 043f21ad..167b336e 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ and the easiest way to install it is running pip3 install petab -It will require Python>=3.9 to run. (We are following the +It will require Python>=3.10 to run. (We are following the [numpy Python support policy](https://numpy.org/neps/nep-0029-deprecation_policy.html)). Development versions of the PEtab library can be installed using @@ -57,7 +57,7 @@ Examples for PEtab Python library usage: ## Getting help -If you have any question or problems with this package, feel free to post them +If you have any questions or problems with this package, feel free to post them at our GitHub [issue tracker](https://github.com/PEtab-dev/libpetab-python/issues/). 
## Contributing diff --git a/setup.py b/setup.py index 7799f18b..2ff424eb 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,5 @@ import os import re -import sys from setuptools import find_namespace_packages, setup @@ -30,10 +29,6 @@ def absolute_links(txt): return txt -# Python version check -if sys.version_info < (3, 9, 0): - sys.exit("PEtab requires at least Python version 3.9") - # read version from file __version__ = "" version_file = os.path.join("petab", "version.py") @@ -72,7 +67,7 @@ def absolute_links(txt): "jsonschema", ], include_package_data=True, - python_requires=">=3.9.0", + python_requires=">=3.10.0", entry_points=ENTRY_POINTS, extras_require={ "tests": [ From 5be4f2c167fb11ffb795ad7a9cbede2b1d63a803 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 30 Apr 2024 14:05:33 +0200 Subject: [PATCH 08/14] GHA: trigger workflows on pull requests from forks --- .github/workflows/ci_tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 4332819f..f029fe11 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -2,6 +2,7 @@ name: CI on: push: + pull_request: workflow_dispatch: schedule: - cron: '48 4 * * *' From fd87618d562652d7d5a77eaeb5703f00b7b5ddf1 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Tue, 30 Apr 2024 14:35:38 +0200 Subject: [PATCH 09/14] Let dependabot update actions (#258) --- .github/dependabot.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..d3e0189b --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch", "version-update:semver-minor"] From 22581126d2aae172213d827c2382ebe2906b9280 Mon Sep 17 00:00:00 2001 From: Maren Philipps <55318391+m-philipps@users.noreply.github.com> Date: Thu, 2 May 2024 08:48:32 +0200 Subject: [PATCH 10/14] Fix typo (#257) --- petab/visualize/helper_functions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/petab/visualize/helper_functions.py b/petab/visualize/helper_functions.py index 127afdde..b48e1ad6 100644 --- a/petab/visualize/helper_functions.py +++ b/petab/visualize/helper_functions.py @@ -57,7 +57,7 @@ def create_dataset_id_list_new( Parameters: df: Measurements or simulations DataFrame. - group_by: Defines grouping of data to plot. + group_by: Defines grouping of data to plot. id_list: Grouping list. Each sublist corresponds to a subplot in a figure, and contains the IDs of observables or simulation conditions for @@ -73,9 +73,9 @@ def create_dataset_id_list_new( dataset_id_list = [] if group_by == "simulation": - groupping_col = SIMULATION_CONDITION_ID + grouping_col = SIMULATION_CONDITION_ID elif group_by == "observable": - groupping_col = OBSERVABLE_ID + grouping_col = OBSERVABLE_ID if id_list is None: # this is the default case. If no grouping is specified, # all observables are plotted. One observable per plot. 
@@ -88,7 +88,7 @@ def create_dataset_id_list_new( plot_id_list = [] for cond_id in sublist: plot_id_list.extend( - list(df[df[groupping_col] == cond_id][DATASET_ID].unique()) + list(df[df[grouping_col] == cond_id][DATASET_ID].unique()) ) dataset_id_list.append(plot_id_list) return dataset_id_list From 128da31f6d19a60777833a317c2960619035f3ed Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Wed, 22 May 2024 09:49:19 +0200 Subject: [PATCH 11/14] Refactor sympification (#261) Centralize sympification of PEtab and SBML math expressions. Simplifies incorporating https://github.com/PEtab-dev/libpetab-python/pull/260 Also fixes likely collisions of SBML model entities with sympy entities. --- petab/calculate.py | 4 +-- petab/lint.py | 6 ++-- petab/math/__init__.py | 2 ++ petab/math/sympify.py | 20 ++++++++++++ petab/models/sbml_model.py | 64 +++++++++++++++++++++++++------------- petab/observables.py | 7 ++--- 6 files changed, 72 insertions(+), 31 deletions(-) create mode 100644 petab/math/__init__.py create mode 100644 petab/math/sympify.py diff --git a/petab/calculate.py b/petab/calculate.py index ce402241..f5258fc6 100644 --- a/petab/calculate.py +++ b/petab/calculate.py @@ -7,11 +7,11 @@ import numpy as np import pandas as pd import sympy -from sympy.abc import _clash import petab from .C import * +from .math import sympify_petab __all__ = [ "calculate_residuals", @@ -157,7 +157,7 @@ def get_symbolic_noise_formulas(observable_df) -> Dict[str, sympy.Expr]: if NOISE_FORMULA not in observable_df.columns: noise_formula = None else: - noise_formula = sympy.sympify(row.noiseFormula, locals=_clash) + noise_formula = sympify_petab(row.noiseFormula) noise_formulas[observable_id] = noise_formula return noise_formulas diff --git a/petab/lint.py b/petab/lint.py index a58678c8..07c1990b 100644 --- a/petab/lint.py +++ b/petab/lint.py @@ -10,12 +10,12 @@ import numpy as np import pandas as pd import sympy as sp -from sympy.abc import _clash import petab from . import core, measurements, parameters from .C import * # noqa: F403 +from .math import sympify_petab from .models import Model logger = logging.getLogger(__name__) @@ -317,7 +317,7 @@ def check_observable_df(observable_df: pd.DataFrame) -> None: for row in observable_df.itertuples(): obs = getattr(row, OBSERVABLE_FORMULA) try: - sp.sympify(obs, locals=_clash) + sympify_petab(obs) except sp.SympifyError as e: raise AssertionError( f"Cannot parse expression '{obs}' " @@ -326,7 +326,7 @@ def check_observable_df(observable_df: pd.DataFrame) -> None: noise = getattr(row, NOISE_FORMULA) try: - sympified_noise = sp.sympify(noise, locals=_clash) + sympified_noise = sympify_petab(noise) if sympified_noise is None or ( sympified_noise.is_Number and not sympified_noise.is_finite ): diff --git a/petab/math/__init__.py b/petab/math/__init__.py new file mode 100644 index 00000000..27ebacd2 --- /dev/null +++ b/petab/math/__init__.py @@ -0,0 +1,2 @@ +"""Functions for parsing and evaluating mathematical expressions.""" +from .sympify import sympify_petab # noqa: F401 diff --git a/petab/math/sympify.py b/petab/math/sympify.py new file mode 100644 index 00000000..9227c51d --- /dev/null +++ b/petab/math/sympify.py @@ -0,0 +1,20 @@ +"""PEtab math to sympy conversion.""" + +import sympy as sp +from sympy.abc import _clash + + +def sympify_petab(expr: str) -> sp.Expr: + """ + Convert a PEtab math expression to a sympy expression. + + Parameters + ---------- + expr: + The PEtab math expression. + + Returns + ------- + The sympy expression corresponding to ``expr``. 
+ """ + return sp.sympify(expr, locals=_clash) diff --git a/petab/models/sbml_model.py b/petab/models/sbml_model.py index 26643abf..d68884fd 100644 --- a/petab/models/sbml_model.py +++ b/petab/models/sbml_model.py @@ -6,6 +6,7 @@ import libsbml import sympy as sp +from sympy.abc import _clash from ..sbml import ( get_sbml_model, @@ -104,34 +105,17 @@ def get_free_parameter_ids_with_values( ar.getVariable() for ar in self.sbml_model.getListOfRules() } - parser_settings = libsbml.L3ParserSettings( - self.sbml_model, - libsbml.L3P_PARSE_LOG_AS_LOG10, - libsbml.L3P_EXPAND_UNARY_MINUS, - libsbml.L3P_NO_UNITS, - libsbml.L3P_AVOGADRO_IS_CSYMBOL, - libsbml.L3P_COMPARE_BUILTINS_CASE_INSENSITIVE, - None, - libsbml.L3P_MODULO_IS_PIECEWISE, - ) - def get_initial(p): # return the initial assignment value if there is one, and it is a # number; `None`, if there is a non-numeric initial assignment; # otherwise, the parameter value if ia := self.sbml_model.getInitialAssignmentBySymbol(p.getId()): - formula_str = libsbml.formulaToL3StringWithSettings( - ia.getMath(), parser_settings + sym_expr = sympify_sbml(ia.getMath()) + return ( + float(sym_expr.evalf()) + if sym_expr.evalf().is_Number + else None ) - try: - return float(formula_str) - except ValueError: - sym_expr = sp.sympify(formula_str) - return ( - float(sym_expr.evalf()) - if sym_expr.evalf().is_Number - else None - ) return p.getValue() return ( @@ -200,3 +184,39 @@ def is_state_variable(self, id_: str) -> bool: or self.sbml_model.getCompartment(id_) is not None or self.sbml_model.getRuleByVariable(id_) is not None ) + + +def sympify_sbml(sbml_obj: libsbml.ASTNode | libsbml.SBase) -> sp.Expr: + """Convert SBML math expression to sympy expression. + + Parameters + ---------- + sbml_obj: + SBML math element or an SBML object with a math element. + + Returns + ------- + The sympy expression corresponding to ``sbml_obj``. + """ + ast_node = ( + sbml_obj + if isinstance(sbml_obj, libsbml.ASTNode) + else sbml_obj.getMath() + ) + + parser_settings = libsbml.L3ParserSettings( + ast_node.getParentSBMLObject().getModel(), + libsbml.L3P_PARSE_LOG_AS_LOG10, + libsbml.L3P_EXPAND_UNARY_MINUS, + libsbml.L3P_NO_UNITS, + libsbml.L3P_AVOGADRO_IS_CSYMBOL, + libsbml.L3P_COMPARE_BUILTINS_CASE_INSENSITIVE, + None, + libsbml.L3P_MODULO_IS_PIECEWISE, + ) + + formula_str = libsbml.formulaToL3StringWithSettings( + ast_node, parser_settings + ) + + return sp.sympify(formula_str, locals=_clash) diff --git a/petab/observables.py b/petab/observables.py index 496e7bd8..d10058fc 100644 --- a/petab/observables.py +++ b/petab/observables.py @@ -6,11 +6,10 @@ from typing import List, Literal, Union import pandas as pd -import sympy as sp -from sympy.abc import _clash from . import core, lint from .C import * # noqa: F403 +from .math import sympify_petab from .models import Model __all__ = [ @@ -102,7 +101,7 @@ def get_output_parameters( for formula in formulas: free_syms = sorted( - sp.sympify(formula, locals=_clash).free_symbols, + sympify_petab(formula).free_symbols, key=lambda symbol: symbol.name, ) for free_sym in free_syms: @@ -110,7 +109,7 @@ def get_output_parameters( if model.symbol_allowed_in_observable_formula(sym): continue - # does it mapping to a model entity? + # does it map to a model entity? 
if ( mapping_df is not None and sym in mapping_df.index From c30b9df17d04d3beceb2a5f50edb4418eb56151c Mon Sep 17 00:00:00 2001 From: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> Date: Wed, 29 May 2024 13:30:29 +0200 Subject: [PATCH 12/14] Table getters: drop pre-existing table index unless it matches the PEtab format (#262) --- petab/conditions.py | 5 ++++- petab/mapping.py | 5 ++++- petab/observables.py | 5 ++++- petab/parameters.py | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/petab/conditions.py b/petab/conditions.py index 86c5f7d3..deef08f7 100644 --- a/petab/conditions.py +++ b/petab/conditions.py @@ -40,7 +40,10 @@ def get_condition_df( ) if not isinstance(condition_file.index, pd.RangeIndex): - condition_file.reset_index(inplace=True) + condition_file.reset_index( + drop=condition_file.index.name != CONDITION_ID, + inplace=True, + ) try: condition_file.set_index([CONDITION_ID], inplace=True) diff --git a/petab/mapping.py b/petab/mapping.py index dd91102e..a345ca88 100644 --- a/petab/mapping.py +++ b/petab/mapping.py @@ -36,7 +36,10 @@ def get_mapping_df( ) if not isinstance(mapping_file.index, pd.RangeIndex): - mapping_file.reset_index(inplace=True) + mapping_file.reset_index( + drop=mapping_file.index.name != PETAB_ENTITY_ID, + inplace=True, + ) for col in MAPPING_DF_REQUIRED_COLS: if col not in mapping_file.columns: diff --git a/petab/observables.py b/petab/observables.py index d10058fc..35cbd705 100644 --- a/petab/observables.py +++ b/petab/observables.py @@ -47,7 +47,10 @@ def get_observable_df( ) if not isinstance(observable_file.index, pd.RangeIndex): - observable_file.reset_index(inplace=True) + observable_file.reset_index( + drop=observable_file.index.name != OBSERVABLE_ID, + inplace=True, + ) try: observable_file.set_index([OBSERVABLE_ID], inplace=True) diff --git a/petab/parameters.py b/petab/parameters.py index ff4aeda8..3339ef63 100644 --- a/petab/parameters.py +++ b/petab/parameters.py @@ -82,7 +82,10 @@ def get_parameter_df( ) if not isinstance(parameter_df.index, pd.RangeIndex): - parameter_df.reset_index(inplace=True) + parameter_df.reset_index( + drop=parameter_file.index.name != PARAMETER_ID, + inplace=True, + ) try: parameter_df.set_index([PARAMETER_ID], inplace=True) From 8adf38c20bccc773050aeb42839766298bfb0adb Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 24 Jun 2024 19:17:47 +0200 Subject: [PATCH 13/14] Require sympy>=1.12.1 in tests So far, sympy 1.11 was used due to pysb, but this is incompatible with numpy>=2.0 --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 0ce0914d..b7c94b39 100644 --- a/tox.ini +++ b/tox.ini @@ -14,6 +14,7 @@ description = [testenv:unit] extras = tests,reports,combine,vis commands = + python -m pip install sympy>=1.12.1 python -m pytest --cov=petab --cov-report=xml --cov-append \ tests description = From d8ad9b8afe4fb74f384b7f6818ef0e05c794a030 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 24 Jun 2024 19:35:02 +0200 Subject: [PATCH 14/14] Bump version number, update changelog --- CHANGELOG.md | 24 ++++++++++++++++++++++++ petab/version.py | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ae718b1..b944ab53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # PEtab changelog +## 0.3 series + +### 0.3.0 + +**This release requires `python>=3.10`** + +**Features** +* Visualization: Make line plot lines configurable via + `petab.visualize.plotter.measurement_line_kwargs and + 
`petab.visualize.plotter.simulation_line_kwargs`
+  by @dweindl in https://github.com/PEtab-dev/libpetab-python/pull/254
+* Validator: check that `datasetId`s referenced in visualization tables exist
+  in measurement tables
+  by @dweindl in https://github.com/PEtab-dev/libpetab-python/pull/255
+
+**Fixes**
+* Table getters: drop a pre-existing table index unless it matches the PEtab format
+  by @dilpath in https://github.com/PEtab-dev/libpetab-python/pull/262
+
+**New Contributors**
+* @PaulJonasJost made their first contribution in https://github.com/PEtab-dev/libpetab-python/pull/250
+
+**Full Changelog**: https://github.com/PEtab-dev/libpetab-python/compare/v0.2.9...v0.3.0
+
 ## 0.2 series
 
 ### 0.2.9
diff --git a/petab/version.py b/petab/version.py
index d5547869..54e96123 100644
--- a/petab/version.py
+++ b/petab/version.py
@@ -1,2 +1,2 @@
 """PEtab library version"""
-__version__ = "0.2.9"
+__version__ = "0.3.0"
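
Patch 05/14 above exposes the line-plot styling as the module-level dictionaries `measurement_line_kwargs` and `simulation_line_kwargs` in `petab.visualize.plotter`, which `MPLPlotter` spreads into its `ax.plot()`/`ax.errorbar()` calls. Below is a minimal usage sketch, not part of the patch series: `problem.yaml` is a placeholder path, and `plot_problem` is just one of the `petab.visualize` entry points that eventually reach `generate_lineplot`.

```python
import petab
from petab.visualize import plot_problem, plotter

# Defaults added in patch 05/14:
#   measurement_line_kwargs = {"linestyle": "-.", "marker": "x", "markersize": 10}
#   simulation_line_kwargs = {"linestyle": "-", "marker": "o", "markersize": 10}
# Mutate them before plotting; MPLPlotter passes them to matplotlib unchanged.
plotter.measurement_line_kwargs.update({"marker": "o", "markersize": 6})
plotter.simulation_line_kwargs.update({"linestyle": "--"})

problem = petab.Problem.from_yaml("problem.yaml")  # placeholder problem file
plot_problem(problem)
```

Keeping the defaults in module-level dictionaries lets callers restyle line plots without touching any plotting function signature.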
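
Patch 06/14 extends `validate_visualization_df` so that every `datasetId` referenced in the visualization table must also occur in the measurement table. A sketch of invoking the check directly, with placeholder file names; as in the tests above, the function returns an error flag, i.e. `False` when no problems were found and `True` otherwise.

```python
import petab
from petab.visualize.lint import validate_visualization_df

# Placeholder file names standing in for a real PEtab problem.
problem = petab.Problem(
    condition_df=petab.get_condition_df("conditions.tsv"),
    measurement_df=petab.get_measurement_df("measurements.tsv"),
    visualization_df=petab.get_visualization_df("visualization.tsv"),
)

# True if any problem was found, e.g. a datasetId in the visualization
# table that does not occur in the measurement table; False otherwise.
has_errors = validate_visualization_df(problem)
```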
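
Patch 11/14 centralizes math parsing in `petab.math.sympify_petab`, which sympifies with `sympy.abc._clash` as locals so that identifiers such as `beta` or `E` are parsed as plain symbols rather than SymPy built-ins. A minimal sketch, reusing a formula from the flattening test above:

```python
import sympy as sp

from petab.math import sympify_petab

# Noise formula borrowed from the timepoint-specific-override test above.
expr = sympify_petab(
    "(observableParameter1_obs1 + observableParameter2_obs1)"
    " * noiseParameter1_obs1"
)
assert isinstance(expr, sp.Expr)
# Inspect the symbols PEtab would treat as (output) parameters.
print(sorted(expr.free_symbols, key=lambda s: s.name))
```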
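
Patch 12/14 changes the table getters so that a pre-existing DataFrame index is dropped unless its name matches the respective PEtab ID column. The sketch below uses a hand-built condition table; the expected values in the comments follow from the diff above and are not separately verified output.

```python
import pandas as pd

import petab
from petab.C import CONDITION_ID

# A condition table passed as a DataFrame that already carries an index
# unrelated to the PEtab format.
condition_table = pd.DataFrame(
    {CONDITION_ID: ["c0", "c1"], "k1": [1.0, 2.0]},
    index=pd.Index(["foo", "bar"], name="someOtherId"),
)

condition_df = petab.get_condition_df(condition_table)
# The "someOtherId" index is discarded (its name differs from CONDITION_ID)
# instead of being kept as an extra column; conditionId becomes the index.
print(condition_df.index.name)     # conditionId
print(list(condition_df.columns))  # ['k1']
```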