feat: add snapshot tests with syrupy #285

Merged: 8 commits, Oct 8, 2023
9 changes: 6 additions & 3 deletions .github/workflows/gh-ci-tests.yaml
@@ -43,7 +43,10 @@ jobs:
micromamba info
micromamba list

- name: "test snapshots"
working-directory: doc/source/scripts/
run: python -m pytest
Comment on lines +46 to +48

Contributor (author):

We don't have to do this – it will be very brittle: any time an output changes, we'll have to re-generate the snapshots. It is helpful to document how to run the snapshot tests, however, so maybe we can leave it with "allow to fail".

Member:
I would say leave it for now -- we have scheduled twice-weekly tests that should pick up changes in the core repo in suitable time so we can update snapshots as needed.


- name: "test notebooks"
run: |
cd ${GITHUB_WORKSPACE}/tests
pytest
working-directory: tests/
run: python -m pytest
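As noted in the thread above, it helps to document how these snapshot tests work. They follow syrupy's standard pattern: a test requests the `snapshot` fixture and compares generated output against a stored snapshot (serialized to an `.ambr` file, which is presumably why `.ambr` is added to the pre-commit exclude below). The sketch here is illustrative only; the test name and the path of the generated table are assumptions, not the actual contents of the tests added in this PR.

```python
# Illustrative syrupy snapshot test (not the actual test file in this PR);
# the test name and file path are assumptions.
from pathlib import Path


def test_format_overview_table(snapshot):
    # syrupy injects the `snapshot` fixture; a normal run compares against
    # the stored .ambr value, while `pytest --snapshot-update` rewrites it.
    table = Path("formats/format_overview.txt").read_text()
    assert table == snapshot
```

Re-running with `--snapshot-update` regenerates the stored values, which is exactly why the tests become brittle whenever the script output changes.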
5 changes: 3 additions & 2 deletions .pre-commit-config.yaml
@@ -5,14 +5,15 @@ repos:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
exclude: ^.*\.(pdb)$
exclude: ^.*\.(pdb|ambr)$
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
name: isort (python)
args: ["--profile", "black"]
args: ["--profile", "black", "--line-length", "79"]
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
args: [--line-length=79]
4 changes: 3 additions & 1 deletion doc/source/conf.py
@@ -175,7 +175,9 @@ def sort_authors(filename):
html_favicon = "_static/logos/mdanalysis-logo.ico"
html_logo = "_static/logos/user_guide.png"

html_context = {"versions_json_url": "https://userguide.mdanalysis.org/versions.json"}
html_context = {
"versions_json_url": "https://userguide.mdanalysis.org/versions.json"
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
10 changes: 9 additions & 1 deletion doc/source/preparing_releases_and_hotfixes.rst
@@ -174,7 +174,15 @@ To do this you will need to:
Create a release of the UserGuide
---------------------------------

For now, the UserGuide is released at the same time as the core library. To make a release of the UserGuide you should:
For now, the UserGuide is released at the same time as the core library. If the build is failing, please fix it *before* you tag the release. Here is how to update the snapshots:

#. Update the version of MDA used by the UserGuide to the release version.

#. Re-generate the Syrupy test snapshots, commit them to git, and confirm that the build passes.

.. code-block:: bash

   cd doc/source/scripts
   python -m pytest tests/snapshot/ --snapshot-update

#. Create a new release tag and upload it for the UserGuide repository.
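For step 2 above, here is a minimal sketch of driving the same update from Python instead of the shell; `pytest.main` is standard pytest, but this wrapper script is hypothetical and not part of the UserGuide repository.

```python
# Hypothetical helper for regenerating and verifying snapshots before a
# release; run it from doc/source/scripts/. Not part of the UserGuide repo.
import sys

import pytest

if __name__ == "__main__":
    # Rewrite the stored .ambr snapshots to match the current output ...
    update = pytest.main(["tests/snapshot/", "--snapshot-update"])
    # ... then run the suite again without the flag to confirm they pass.
    verify = pytest.main(["tests/snapshot/"])
    sys.exit(update or verify)
```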

8 changes: 6 additions & 2 deletions doc/source/scripts/base.py
@@ -88,12 +88,16 @@ def write_table(self):
@staticmethod
def sphinx_class(klass, tilde=True):
prefix = "~" if tilde else ""
return ":class:`{}{}.{}`".format(prefix, klass.__module__, klass.__name__)
return ":class:`{}{}.{}`".format(
prefix, klass.__module__, klass.__name__
)

@staticmethod
def sphinx_meth(meth, tilde=True):
prefix = "~" if tilde else ""
return ":meth:`{}{}.{}`".format(prefix, meth.__module__, meth.__qualname__)
return ":meth:`{}{}.{}`".format(
prefix, meth.__module__, meth.__qualname__
)

@staticmethod
def sphinx_ref(txt: str, label: str = None, suffix: str = "") -> str:
18 changes: 12 additions & 6 deletions doc/source/scripts/clean_example_notebooks.py
@@ -116,9 +116,9 @@ class JupyterCell:
time_fmt = "%b %d, %Y"
tag = "a"
close_tag = "</{}>".format(tag)
tagline = ('<{} data-cite="{{key}}" ' 'href="{{url}}">{{authors}}</{}>').format(
tag, tag
)
tagline = (
'<{} data-cite="{{key}}" ' 'href="{{url}}">{{authors}}</{}>'
).format(tag, tag)

@classmethod
def as_references(cls, refs, keys=[]):
@@ -171,7 +171,9 @@ def find_reference_keys(self, refs, keys=[]):

Track the order of the keys for a final bibliography cell.
"""
matches = [x for x in re.split(refs.regex, self.source) if x is not None]
matches = [
x for x in re.split(refs.regex, self.source) if x is not None
]
new_source = ""

while len(matches) > 1:
@@ -191,7 +193,9 @@ def find_reference_keys(self, refs, keys=[]):
elif prev_char in ('"', "'"):
new_source += before.rsplit("<", maxsplit=1)[0]
if len(matches) > 2:
matches[2] = matches[2].split(self.close_tag, maxsplit=1)[-1]
matches[2] = matches[2].split(self.close_tag, maxsplit=1)[
-1
]
tag = self.tagline.format(key=key, authors=authors, url=url)
new_source += tag
matches.pop(0)
@@ -322,7 +326,9 @@ def clean_all_notebooks(notebooks):
if len(errs):
errmsgs = ["{}: {}".format(nb, err) for nb, err in errs]
delim = "\n" + "===" * 10 + "\n"
raise ValueError("Notebooks have errors: {}".format(delim.join(errmsgs)))
raise ValueError(
"Notebooks have errors: {}".format(delim.join(errmsgs))
)


if __name__ == "__main__":
16 changes: 13 additions & 3 deletions doc/source/scripts/core.py
@@ -75,17 +75,27 @@
for c in _TOPOLOGY_ATTRS.values()
}

base_attrnames = set(["atomattrs", "residueattrs", "segmentattrs", "topologyattrs"])
base_attrnames = set(
["atomattrs", "residueattrs", "segmentattrs", "topologyattrs"]
)

core_attrnames = set(["indices", "resindices", "segindices"])

BASE_ATTRS = {k: v for k, v in ATTRS.items() if k in base_attrnames}

NON_BASE_ATTRS = {k: v for k, v in ATTRS.items() if k not in base_attrnames}

NON_CORE_ATTRS = {k: v for k, v in NON_BASE_ATTRS.items() if k not in core_attrnames}
NON_CORE_ATTRS = {
k: v for k, v in NON_BASE_ATTRS.items() if k not in core_attrnames
}

TOPOLOGY_CLS = sorted(
set([x for x in _TOPOLOGY_ATTRS.values() if x.attrname in NON_CORE_ATTRS.keys()]),
set(
[
x
for x in _TOPOLOGY_ATTRS.values()
if x.attrname in NON_CORE_ATTRS.keys()
]
),
key=lambda x: x.attrname,
)
15 changes: 12 additions & 3 deletions doc/source/scripts/gen_format_overview_classes.py
@@ -27,15 +27,22 @@

sorted_types = sorted(FILE_TYPES.items())

SUCCESS = "\u2713"
SUCCESS = "\u2713" # checkmark
FAIL = ""


class FormatOverview(TableWriter):
filename = "formats/format_overview.txt"
include_table = "Table of all supported formats in MDAnalysis"
preprocess = ["keys"]
headings = ["File type", "Description", "Topology", "Coordinates", "Read", "Write"]
headings = [
"File type",
"Description",
"Topology",
"Coordinates",
"Read",
"Write",
]

def _set_up_input(self):
return sorted_types
@@ -80,7 +87,9 @@ def _write(self, fmt, handlers):

class CoordinateReaders(FormatOverview):
filename = "formats/coordinate_readers.txt"
include_table = "Table of supported coordinate readers and the information read"
include_table = (
"Table of supported coordinate readers and the information read"
)
headings = ["File type", "Description", "Velocities", "Forces"]

def _set_up_input(self):
12 changes: 9 additions & 3 deletions doc/source/scripts/gen_standard_selections.py
@@ -36,8 +36,14 @@ def get_lines(self, klass, attr, sort=False, n=8):

if __name__ == "__main__":
StandardSelectionTable("protein", sel.ProteinSelection, "prot_res", True)
StandardSelectionTable("protein_backbone", sel.BackboneSelection, "bb_atoms")
StandardSelectionTable(
"protein_backbone", sel.BackboneSelection, "bb_atoms"
)
StandardSelectionTable("nucleic", sel.NucleicSelection, "nucl_res")
StandardSelectionTable("nucleic_backbone", sel.NucleicBackboneSelection, "bb_atoms")
StandardSelectionTable(
"nucleic_backbone", sel.NucleicBackboneSelection, "bb_atoms"
)
StandardSelectionTable("base", sel.BaseSelection, "base_atoms")
StandardSelectionTable("nucleic_sugar", sel.NucleicSugarSelection, "sug_atoms")
StandardSelectionTable(
"nucleic_sugar", sel.NucleicSugarSelection, "sug_atoms"
)
15 changes: 12 additions & 3 deletions doc/source/scripts/gen_topologyparser_attrs.py
@@ -79,10 +79,17 @@


class TopologyParsers(TableWriter):
headings = ["Format", "Description", "Attributes read", "Attributes guessed"]
headings = [
"Format",
"Description",
"Attributes read",
"Attributes guessed",
]
preprocess = ["keys"]
filename = "formats/topology_parsers.txt"
include_table = "Table of supported topology parsers and the attributes read"
include_table = (
"Table of supported topology parsers and the attributes read"
)
sort = True

def __init__(self):
@@ -133,7 +140,9 @@ def __init__(self, attrs):

def _set_up_input(self):
return sorted(
[x, *y] for x, y in NON_CORE_ATTRS.items() if x not in MANDATORY_ATTRS
[x, *y]
for x, y in NON_CORE_ATTRS.items()
if x not in MANDATORY_ATTRS
)

def _atom(self, name, singular, *args):
4 changes: 3 additions & 1 deletion doc/source/scripts/gen_unit_tables.py
@@ -32,11 +32,13 @@ def write_unit_table(filename):
f.write("\n\n")
f.write(
textwrap.indent(
tabulate.tabulate(lines, headers=headings, tablefmt="rst"), " "
tabulate.tabulate(lines, headers=headings, tablefmt="rst"),
" ",
)
)
f.write("\n")
print("Wrote ", filename)
return tables


if __name__ == "__main__":