Skip to content

Commit

Permalink
Consolidate scripts and python package
Browse files Browse the repository at this point in the history
* Some functionality from scripts/ would be useful inside the library.
* Some functionality from the library was replicated in scripts/.
-> Consolidate: move the script functionality into the package and expose the separate scripts as console entry points.

* Reduce number of package configuration files
  • Loading branch information
dweindl committed Sep 26, 2024
1 parent 8c2290b commit b0d1720
Show file tree
Hide file tree
Showing 12 changed files with 125 additions and 157 deletions.
11 changes: 4 additions & 7 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,16 +23,13 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install --upgrade setuptools wheel
pip install -r scripts/requirements.txt
cd src/python && pip install -e .
- name: Check with petablint
run: ./check_petablint.py
working-directory: scripts/
run: bmp-petablint

- name: Show overview
run: ./overview.py
working-directory: scripts/
run: bmp-create-overview

- name: Check SBML metadata
run: ./check_sbml_metadata.py
working-directory: scripts/
run: bmp-check-sbml-metadata
20 changes: 0 additions & 20 deletions scripts/_helpers.py

This file was deleted.

25 changes: 0 additions & 25 deletions scripts/check_petablint.py

This file was deleted.

51 changes: 0 additions & 51 deletions scripts/check_sbml_metadata.py

This file was deleted.

5 changes: 0 additions & 5 deletions scripts/requirements.txt

This file was deleted.

23 changes: 23 additions & 0 deletions src/python/benchmark_models_petab/check_petablint.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
from pathlib import Path
import os
import sys
from . import MODELS, get_problem_yaml_path

def main():
    """Run ``petablint`` on every benchmark problem.

    Iterates over all problem IDs in ``MODELS``, validates each problem's
    PEtab YAML file with the ``petablint`` command-line tool, prints a
    pass/fail summary, and exits with a non-zero status code iff at least
    one problem failed validation.
    """
    num_failures = 0

    for petab_problem_id in MODELS:
        print(petab_problem_id)
        petab_yaml = get_problem_yaml_path(petab_problem_id)
        # Non-zero return status means petablint reported problems.
        ret = os.system(f"petablint -v {petab_yaml}")
        print('=' * 100)  # just for output readability

        if ret:
            num_failures += 1

    num_passed = len(MODELS) - num_failures
    print(f'Result: {Path(__file__).stem}')
    print(f"{num_passed} out of {len(MODELS)} passed.")
    print(f"{num_failures} out of {len(MODELS)} failed.")
    # Fail unless all models passed. Clamp to 0/1: POSIX exit codes are
    # taken modulo 256, so a raw failure count that is a multiple of 256
    # would otherwise masquerade as success.
    sys.exit(1 if num_failures else 0)
48 changes: 48 additions & 0 deletions src/python/benchmark_models_petab/check_sbml_metadata.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#!/usr/bin/env python3

from pathlib import Path
import sys
from .overview import get_reference_uris
from . import MODELS, get_problem

def main():
    """Check SBML model metadata for every benchmark problem.

    For each problem ID in ``MODELS``, verifies that the SBML model ID and
    model name match the problem ID and that the model carries at least one
    reference URI annotation. Prints a pass/fail summary and exits with a
    non-zero status code iff at least one problem failed a check.
    """
    num_failures = 0

    for petab_problem_id in MODELS:
        petab_problem = get_problem(petab_problem_id)

        model_id = petab_problem.sbml_model.getId()
        model_name = petab_problem.sbml_model.getName()
        reference_uris = get_reference_uris(sbml_model=petab_problem.sbml_model)

        # Collect all metadata problems for this model before reporting.
        errors = []

        if model_id != petab_problem_id:
            errors.append(
                'Please change the model ID to match the problem ID.'
            )

        if model_name != petab_problem_id:
            errors.append(
                'Please change the model name to match the problem ID.'
            )

        if not reference_uris:
            errors.append(
                'Please add relevant references (e.g. the paper) to the model.'
            )

        print(petab_problem_id)
        if errors:
            # Indent each error under the problem ID for readability.
            print('\n'.join('\t' + error for error in errors))
            num_failures += 1
        print('=' * 100)  # just for output readability

    num_passed = len(MODELS) - num_failures
    print(f'Result: {Path(__file__).stem}')
    print(f"{num_passed} out of {len(MODELS)} passed.")
    print(f"{num_failures} out of {len(MODELS)} failed.")
    # Fail unless all models passed. Clamp to 0/1: POSIX exit codes are
    # taken modulo 256, so a raw failure count that is a multiple of 256
    # would otherwise masquerade as success.
    sys.exit(1 if num_failures else 0)
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
#!/usr/bin/env python3
"""Print some stats for each benchmark PEtab problem"""

import os
from typing import Dict, List
import sys

import libsbml
import numpy as np
import pandas as pd
import petab.v1 as petab
from pathlib import Path
from . import get_problem_yaml_path, MODELS, get_problem
from petab.yaml import load_yaml

from _helpers import petab_yamls, readme_md

readme_md = Path(__file__).resolve().parent.parent / "README.md"

markdown_columns = {
'conditions': 'Conditions',
Expand Down Expand Up @@ -91,8 +91,7 @@ def get_reference_uris(sbml_model: libsbml.Model) -> List[str]:

def get_sbml4humans_urls(petab_problem_id: str) -> List[str]:
"""Get URL to SBML4humans model"""
from petab.yaml import load_yaml
yaml_file = petab_yamls[petab_problem_id]
yaml_file = get_problem_yaml_path(petab_problem_id)
yaml_dict = load_yaml(yaml_file)
repo_root = "https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master"
urls = []
Expand Down Expand Up @@ -136,8 +135,8 @@ def get_noise_distributions(observable_df):
def get_overview_table() -> pd.DataFrame:
"""Get overview table with stats for all benchmark PEtab problems"""
data = []
for petab_problem_id, petab_yaml in petab_yamls.items():
petab_problem = petab.Problem.from_yaml(petab_yaml)
for petab_problem_id in MODELS:
petab_problem = get_problem(petab_problem_id)
summary = get_summary(petab_problem, petab_problem_id)
data.append(summary)
df = pd.DataFrame(data)
Expand All @@ -148,7 +147,7 @@ def get_overview_table() -> pd.DataFrame:
start_overview_table = '\n<!-- START OVERVIEW TABLE -->\n'
end_overview_table = '\n<!-- END OVERVIEW TABLE -->\n'

def main(
def create_overview_table(
markdown: bool = False,
update_readme: bool = False,
):
Expand Down Expand Up @@ -188,14 +187,12 @@ def main(
print(df)


if __name__ == '__main__':
import sys

def main():
    """CLI entry point: print the overview table for all benchmark problems.

    Recognized command-line flags (read from ``sys.argv``):
      * ``--markdown``: print the table in Markdown format.
      * ``--update``: update the overview table in the README
        (implies ``--markdown``, since the README table is Markdown).
    """
    markdown = False
    update_readme = False
    if '--markdown' in sys.argv:
        markdown = True
    if '--update' in sys.argv:
        # Updating the README requires the Markdown rendering of the table.
        markdown = True
        update_readme = True
    create_overview_table(markdown=markdown, update_readme=update_readme)
2 changes: 1 addition & 1 deletion src/python/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ mkdir $BUILD_DIR
# Copy code
cp -r \
$CODE_DIR \
"setup.cfg" "setup.py" "pyproject.toml" "MANIFEST.in" \
"pyproject.toml" "MANIFEST.in" \
"../../LICENSE" "../../README.md" \
$BUILD_DIR
# Remove link
Expand Down
38 changes: 38 additions & 0 deletions src/python/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,51 @@
requires = [
"setuptools",
"wheel",
"petab",
]
build-backend = "setuptools.build_meta"


[project]
name = "benchmark_models_petab"
description = "A collection of models with experimental data in the PEtab format"
dynamic = ["version"]
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
license = { file = "LICENSE" }
authors = [
{ name = "The Benchmark-Models-PEtab maintainers", email = "yannik.schaelte@gmail.com" }
]
urls = { "Homepage" = "https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab" }
dependencies = [
"petab>=0.4.0",
"numpy",
"pandas",
"python-libsbml",
"tabulate",
]

[project.optional-dependencies]
dev = ["flake8"]

[tool.setuptools.dynamic]
version = {attr = "benchmark_models_petab.__version__"}

[tool.flake8]
# NOTE(review): flake8 does not read configuration from pyproject.toml
# without a third-party plugin (e.g. Flake8-pyproject) — verify this
# section is actually picked up, or keep the config in setup.cfg/.flake8.
per-file-ignores = { "__init__.py" = ["F401"] }

[project.scripts]
bmp-petablint = "benchmark_models_petab.check_petablint:main"
bmp-check-sbml-metadata = "benchmark_models_petab.check_sbml_metadata:main"
bmp-create-overview = "benchmark_models_petab.overview:main"

[tool.black]
line-length = 79

[tool.isort]
profile = "black"
line_length = 79
multi_line_output = 3



29 changes: 0 additions & 29 deletions src/python/setup.cfg

This file was deleted.

5 changes: 0 additions & 5 deletions src/python/setup.py

This file was deleted.

0 comments on commit b0d1720

Please sign in to comment.