diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 99b2223..c2cff8d 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -23,16 +23,13 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         pip install --upgrade setuptools wheel
-        pip install -r scripts/requirements.txt
+        cd src/python && pip install -e .
 
     - name: Check with petablint
-      run: ./check_petablint.py
-      working-directory: scripts/
+      run: bmp-petablint
 
     - name: Show overview
-      run: ./overview.py
-      working-directory: scripts/
+      run: bmp-create-overview
 
     - name: Check SBML metadata
-      run: ./check_sbml_metadata.py
-      working-directory: scripts/
+      run: bmp-check-sbml-metadata
diff --git a/scripts/_helpers.py b/scripts/_helpers.py
deleted file mode 100644
index 6a472fc..0000000
--- a/scripts/_helpers.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from pathlib import Path
-
-
-benchmark_path = Path(__file__).resolve().parent.parent / 'Benchmark-Models'
-petab_yamls = {
-    petab_path.name: benchmark_path / petab_path.name / (petab_path.name + '.yaml')
-    for petab_path in benchmark_path.glob('*')
-    if petab_path.name != '.DS_Store'
-}
-petab_yamls = {k: petab_yamls[k] for k in sorted(petab_yamls)}
-
-for problem_id, petab_yaml in petab_yamls.items():
-    if not petab_yaml.exists():
-        petab_yamls[problem_id] = petab_yaml.parent / "problem.yaml"
-    if not petab_yamls[problem_id].exists():
-        raise FileNotFoundError(
-            f"Could not find the YAML for problem `{problem_id}`."
-        )
-
-readme_md = Path(__file__).resolve().parent.parent / "README.md"
diff --git a/scripts/check_petablint.py b/scripts/check_petablint.py
deleted file mode 100755
index eeda3bc..0000000
--- a/scripts/check_petablint.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-import os
-import sys
-
-from _helpers import petab_yamls
-
-
-num_failures = 0
-
-for petab_problem_id, petab_yaml in petab_yamls.items():
-    print(petab_problem_id)
-    ret = os.system(f"petablint -v {petab_yaml}")
-    print('='*100) # just for output readability
-
-    if ret:
-        num_failures += 1
-
-num_passed = len(petab_yamls) - num_failures
-print(f'Result: {Path(__file__).stem}')
-print(f"{num_passed} out of {len(petab_yamls)} passed.")
-print(f"{num_failures} out of {len(petab_yamls)} failed.")
-# Fail unless all models passed
-sys.exit(num_failures)
diff --git a/scripts/check_sbml_metadata.py b/scripts/check_sbml_metadata.py
deleted file mode 100755
index 72d4391..0000000
--- a/scripts/check_sbml_metadata.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-import sys
-
-import petab.v1 as petab
-
-from overview import get_reference_uris
-from _helpers import petab_yamls
-
-
-num_failures = 0
-
-for petab_problem_id, petab_yaml in petab_yamls.items():
-    petab_problem = petab.Problem.from_yaml(petab_yaml)
-
-    model_id = petab_problem.sbml_model.getId()
-    model_name = petab_problem.sbml_model.getName()
-    reference_uris = get_reference_uris(sbml_model=petab_problem.sbml_model)
-
-    errors = []
-
-    if model_id != petab_problem_id:
-        errors.append(
-            'Please change the model ID to match the problem ID.'
-        )
-
-    if model_name != petab_problem_id:
-        errors.append(
-            'Please change the model name to match the problem ID.'
-        )
-
-    if not reference_uris:
-        errors.append(
-            'Please add relevant references (e.g. the paper) to the model.'
-        )
-
-    errors = ['\t' + error for error in errors]
-
-    print(petab_problem_id)
-    if errors:
-        print('\n'.join(errors))
-        num_failures += 1
-    print('='*100) # just for output readability
-
-num_passed = len(petab_yamls) - num_failures
-print(f'Result: {Path(__file__).stem}')
-print(f"{num_passed} out of {len(petab_yamls)} passed.")
-print(f"{num_failures} out of {len(petab_yamls)} failed.")
-# Fail unless all models passed
-sys.exit(num_failures)
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
deleted file mode 100644
index 2e904ea..0000000
--- a/scripts/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-numpy
-pandas
-petab>=0.2.3
-python-libsbml
-tabulate
diff --git a/src/python/benchmark_models_petab/check_petablint.py b/src/python/benchmark_models_petab/check_petablint.py
new file mode 100755
index 0000000..3550ddd
--- /dev/null
+++ b/src/python/benchmark_models_petab/check_petablint.py
@@ -0,0 +1,23 @@
+from pathlib import Path
+import os
+import sys
+from . import MODELS, get_problem_yaml_path
+
+def main():
+    num_failures = 0
+
+    for petab_problem_id in MODELS:
+        print(petab_problem_id)
+        petab_yaml = get_problem_yaml_path(petab_problem_id)
+        ret = os.system(f"petablint -v {petab_yaml}")
+        print('='*100) # just for output readability
+
+        if ret:
+            num_failures += 1
+
+    num_passed = len(MODELS) - num_failures
+    print(f'Result: {Path(__file__).stem}')
+    print(f"{num_passed} out of {len(MODELS)} passed.")
+    print(f"{num_failures} out of {len(MODELS)} failed.")
+    # Fail unless all models passed
+    sys.exit(num_failures)
diff --git a/src/python/benchmark_models_petab/check_sbml_metadata.py b/src/python/benchmark_models_petab/check_sbml_metadata.py
new file mode 100755
index 0000000..11f44bf
--- /dev/null
+++ b/src/python/benchmark_models_petab/check_sbml_metadata.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+
+from pathlib import Path
+import sys
+from .overview import get_reference_uris
+from . import MODELS, get_problem
+
+def main():
+    num_failures = 0
+
+    for petab_problem_id in MODELS:
+        petab_problem = get_problem(petab_problem_id)
+
+        model_id = petab_problem.sbml_model.getId()
+        model_name = petab_problem.sbml_model.getName()
+        reference_uris = get_reference_uris(sbml_model=petab_problem.sbml_model)
+
+        errors = []
+
+        if model_id != petab_problem_id:
+            errors.append(
+                'Please change the model ID to match the problem ID.'
+            )
+
+        if model_name != petab_problem_id:
+            errors.append(
+                'Please change the model name to match the problem ID.'
+            )
+
+        if not reference_uris:
+            errors.append(
+                'Please add relevant references (e.g. the paper) to the model.'
+            )
+
+        errors = ['\t' + error for error in errors]
+
+        print(petab_problem_id)
+        if errors:
+            print('\n'.join(errors))
+            num_failures += 1
+        print('='*100) # just for output readability
+
+    num_passed = len(MODELS) - num_failures
+    print(f'Result: {Path(__file__).stem}')
+    print(f"{num_passed} out of {len(MODELS)} passed.")
+    print(f"{num_failures} out of {len(MODELS)} failed.")
+    # Fail unless all models passed
+    sys.exit(num_failures)
diff --git a/scripts/overview.py b/src/python/benchmark_models_petab/overview.py
similarity index 93%
rename from scripts/overview.py
rename to src/python/benchmark_models_petab/overview.py
index 3b06b1c..e580f05 100755
--- a/scripts/overview.py
+++ b/src/python/benchmark_models_petab/overview.py
@@ -1,16 +1,16 @@
-#!/usr/bin/env python3
 """Print some stats for each benchmark PEtab problem"""
-
-import os
 from typing import Dict, List
+import sys
 
 import libsbml
 import numpy as np
 import pandas as pd
 import petab.v1 as petab
+from pathlib import Path
+from . import get_problem_yaml_path, MODELS, get_problem
+from petab.yaml import load_yaml
 
-from _helpers import petab_yamls, readme_md
-
+readme_md = Path(__file__).resolve().parent.parent / "README.md"
 
 markdown_columns = {
     'conditions': 'Conditions',
@@ -91,8 +91,7 @@ def get_reference_uris(sbml_model: libsbml.Model) -> List[str]:
 
 def get_sbml4humans_urls(petab_problem_id: str) -> List[str]:
     """Get URL to SBML4humans model"""
-    from petab.yaml import load_yaml
-    yaml_file = petab_yamls[petab_problem_id]
+    yaml_file = get_problem_yaml_path(petab_problem_id)
     yaml_dict = load_yaml(yaml_file)
     repo_root = "https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master"
     urls = []
@@ -136,8 +135,8 @@ def get_noise_distributions(observable_df):
 def get_overview_table() -> pd.DataFrame:
     """Get overview table with stats for all benchmark PEtab problems"""
     data = []
-    for petab_problem_id, petab_yaml in petab_yamls.items():
-        petab_problem = petab.Problem.from_yaml(petab_yaml)
+    for petab_problem_id in MODELS:
+        petab_problem = get_problem(petab_problem_id)
         summary = get_summary(petab_problem, petab_problem_id)
         data.append(summary)
     df = pd.DataFrame(data)
@@ -148,7 +147,7 @@ def get_overview_table() -> pd.DataFrame:
 start_overview_table = '\n\n'
 end_overview_table = '\n\n'
 
-def main(
+def create_overview_table(
     markdown: bool = False,
     update_readme: bool = False,
 ):
@@ -188,9 +187,7 @@ def main(
     print(df)
 
 
-if __name__ == '__main__':
-    import sys
-
+def main():
    markdown = False
    update_readme = False
    if '--markdown' in sys.argv:
@@ -198,4 +195,4 @@ def main(
         markdown = True
     if '--update' in sys.argv:
         markdown = True
         update_readme = True
-    main(markdown=markdown, update_readme=update_readme)
+    create_overview_table(markdown=markdown, update_readme=update_readme)
diff --git a/src/python/build.sh b/src/python/build.sh
index 90cf7ce..ef6e27c 100755
--- a/src/python/build.sh
+++ b/src/python/build.sh
@@ -24,7 +24,7 @@ mkdir $BUILD_DIR
 # Copy code
 cp -r \
     $CODE_DIR \
-    "setup.cfg" "setup.py" "pyproject.toml" "MANIFEST.in" \
+    "pyproject.toml" "MANIFEST.in" \
     "../../LICENSE" "../../README.md" \
     $BUILD_DIR
 # Remove link
diff --git a/src/python/pyproject.toml b/src/python/pyproject.toml
index cfc502a..1243e68 100644
--- a/src/python/pyproject.toml
+++ b/src/python/pyproject.toml
@@ -2,9 +2,44 @@
 requires = [
     "setuptools",
     "wheel",
+    "petab",
 ]
 build-backend = "setuptools.build_meta"
 
+
+[project]
+name = "benchmark_models_petab"
+description = "A collection of models with experimental data in the PEtab format"
+dynamic = ["version"]
["version"] +readme = { file = "README.md", content-type = "text/markdown" } +requires-python = ">=3.10" +license = { file = "LICENSE" } +authors = [ + { name = "The Benchmark-Models-PEtab maintainers", email = "yannik.schaelte@gmail.com" } +] +urls = { "Homepage" = "https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab" } +dependencies = [ + "petab>=0.4.0", + "numpy", + "pandas", + "python-libsbml", + "tabulate", +] + +[project.optional-dependencies] +dev = ["flake8"] + +[tool.setuptools.dynamic] +version = {attr = "benchmark_models_petab.__version__"} + +[tool.flake8] +per-file-ignores = { "__init__.py" = ["F401"] } + +[project.scripts] +bmp-petablint = "benchmark_models_petab.check_petablint:main" +bmp-check-sbml-metadata = "benchmark_models_petab.check_sbml_metadata:main" +bmp-create-overview = "benchmark_models_petab.overview:main" + [tool.black] line-length = 79 @@ -12,3 +47,6 @@ line-length = 79 profile = "black" line_length = 79 multi_line_output = 3 + + + diff --git a/src/python/setup.cfg b/src/python/setup.cfg deleted file mode 100644 index 091bf78..0000000 --- a/src/python/setup.cfg +++ /dev/null @@ -1,29 +0,0 @@ -# Benchmark-Models-PEtab python package.""" - -[metadata] -name = benchmark_models_petab -version = attr: benchmark_models_petab.version.__version__ -description = "A collection of models with experimental data in the PEtab format" -long_description = file: README.md -long_description_content_type = text/markdown -url = "https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab" - -author = "The Benchmark-Models-PEtab maintainers" -author_email = "yannik.schaelte@gmail.com" - -license = BSD-3-Clause -license_file = LICENSE - -[options] -install_requires = - petab >= 0.4.0 - -python_requires = >= 3.10 -zip_safe = False -include_package_data = True - -packages = find: - -[flake8] -per-file-ignores = - */__init__.py:F401 diff --git a/src/python/setup.py b/src/python/setup.py deleted file mode 100644 index 0e53246..0000000 --- a/src/python/setup.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Setup.py required for editable install.""" - -import setuptools - -setuptools.setup()