diff --git a/MANIFEST.in b/MANIFEST.in index ac6643f5d..ad41c0d37 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,3 +10,4 @@ include openfecli/tests/data/*.json include openfecli/tests/data/*.tar.gz recursive-include openfecli/tests/ *.sdf recursive-include openfecli/tests/ *.pdb +include openfe/tests/data/openmm_rfe/vacuum_nocoord.nc diff --git a/docs/_sass/custom-api.scss b/docs/_sass/custom-api.scss deleted file mode 100644 index 461e36e84..000000000 --- a/docs/_sass/custom-api.scss +++ /dev/null @@ -1,384 +0,0 @@ -// The Numpydoc API layout - -html { - --ofe-api-name-color: var(--ofe-color-FeelingSpicy); - --ofe-api-property-color: var(--pst-color-text-muted); - --ofe-api-path-color: var(--pst-color-text-base); - --ofe-api-bg-color: var(--pst-color-on-background); - --ofe-api-param-symbol-color: var(--pst-color-text-base); - --ofe-api-param-name-color: var(--pst-color-text-base); - --ofe-api-param-value-color: var(--pst-color-text-base); - --ofe-api-type-color: var(--pst-color-text-base); - --ofe-api-type-link-color: var(--pst-color-link); - --ofe-api-source-link-color: var(--pst-color-link); - - --ofe-api-header-font-size: 1.1rem; - --ofe-api-header-padding: var(--ofe-api-header-font-size); - --ofe-api-arguments-indent: calc(2 * var(--ofe-api-header-padding)); -} - -// Definition of an object. Hopefully. -// Could also use dl.py, but this would fail -// if we ever wanted to document something that -// isn't Python -// RTD does it like this, but with fewer exceptions, so -// we should be OK -dl:not(.docutils):not(.field-list):not(.simple):not(.citation):not(.option-list):not(.footnote)[class] { - padding-bottom: 0.5em; - border: 1px solid var(--pst-color-border); - border-radius: .25rem; - - // Text specifying class, function, method, pydantic model, etc. 
- // Usually present in the .property class, but if it's missing - // we can insert it - > dt { - > *:not(.property):first-child::before, > .property { - color: var(--ofe-api-property-color); - font-size: inherit; - font-weight: normal; - font-style: italic; - } - } - &.attribute > dt > *:not(.property):first-child::before { - content: "attribute "; - } - &.method > dt > *:not(.property):first-child::before { - content: "method "; - } - &.function > dt > *:not(.property):first-child::before { - content: "function "; - } - - - // Header and signature - > dt { - font-family: var(--pst-font-family-monospace); - font-size: var(--ofe-api-header-font-size); - padding: var(--ofe-api-header-padding); - background-color: var(--ofe-api-bg-color); - border-radius: .25rem 0; - // Allow words to break anywhere, if necessary - overflow-wrap: break-word; - // Position relative so we can absolutely position source link - position: relative; - &:target { - &::before { - background-color: var(--pst-color-background); - } - } - - // Indent the argument list - padding-left: calc(var(--ofe-api-header-padding) + var(--ofe-api-arguments-indent)); - > :first-child { - margin-left: calc(-1 * var(--ofe-api-arguments-indent)); - } - - // Text providing path to the object - > .sig-prename { - padding: 0; - background-color: transparent; - font-weight: 200; - font-size: inherit; - color: var(--ofe-api-path-color); - display: inline-block; - } - // Text providing the name of the object - > .sig-name { - padding: 0; - background-color: transparent; - color: var(--ofe-api-name-color); - font-weight: 600; - font-size: inherit; - // autodoc_pydantic produces types as properties _after_ the sig name - ~ .property .pre { - color: var(--ofe-api-type-color); - font-style: normal; - } - ~ .property a .pre { - color: var(--ofe-api-type-link-color); - } - - // If the source link immediately follows the name, don't position it absolutely - + a.reference.internal .viewcode-link { - position: static 
!important; - float: right; - margin-left: 0.5em; - } - } - // Opening and closing parenthesis - > .sig-paren { - font-size: inherit; - - } - // Each parameter - > .sig-param { - font-size: inherit; - font-style: normal; - // Entire parameter if parsing the parameter has failed. Splits on commas - > .pre { - color: var(--ofe-api-param-name-color); - } - // Name of a parameter - > .n > .pre { - color: var(--ofe-api-param-name-color); - } - // Symbols; equals sign, asterisk, etc - > .o > .pre { - color: var(--ofe-api-param-symbol-color); - padding-left: 0.2em; - padding-right: 0.2em; - } - // Type anotation - .p, .p + .n, .p + .w + .n { - font-weight: normal; - .pre { - color: var(--ofe-api-type-color); - } - a .pre { - color: var(--ofe-api-type-link-color); - } - } - // Default values of arguments - > .default_value > .pre { - color: var(--ofe-api-param-value-color); - } - // After each parameter, newline - &::before { - content: "\a"; - white-space: pre; - } - // Links - a { - &:hover { - .pre, pre, code { - color: var(--pst-color-link-hover); - } - } - } - } - // Brackets [] denoting optional arguments - // This is redundant information and I am displeased to have to support it - > .optional { - // Put optional [] brackets on their own lines - &::before { - content: "\a"; - white-space: pre; - } - // Optional parameters need extra indentation - ~ .sig-param::before { - content: "\a "; - } - } - // Closing parenthesis - .sig-param, .optional { - + .sig-paren { - &::before { - content: "\a"; - white-space: pre; - } - - // Unindent closing paren, and everything following (except source link) - position: relative; - left: calc(-1 * var(--ofe-api-arguments-indent)); - ~*:not(a.reference.internal, .headerlink) { - position: relative; - left: calc(-1 * var(--ofe-api-arguments-indent)); - max-width: calc(100% + #{var(--ofe-api-arguments-indent)} - 4em); - display: inline-block; - vertical-align: top; - } - ~ .headerlink { - position: absolute; - bottom: 
var(--ofe-api-header-padding); - right: var(--ofe-api-header-padding); - } - } - } - // Pydantic validator arrow - .autodoc_pydantic_validator_arrow { - &::before { - content: "\a"; - white-space: pre; - } - // Comma separating validated fields - ~ .property { - &::after { - content: "\a "; - white-space: pre; - } - } - ~ .headerlink { - position: absolute; - bottom: var(--ofe-api-header-padding); - right: var(--ofe-api-header-padding); - } - } - // Link to the source code for the object (not present on inherited objects) - .viewcode-link { - position: absolute; - top: var(--ofe-api-header-padding); - right: var(--ofe-api-header-padding); - color: var(--ofe-api-source-link-color); - &:hover { - color: var(--pst-color-link-hover); - } - } - // Permalink to the object (to here) - > a.headerlink { - font-size: 1em; - opacity: 1; - transform: translate(0); - } - } - // Content - >dd { - margin: 1em; - &:empty { - padding-bottom: 0 !important; - } - // Description/docstring - >p { - - } - // JSON schema for pydantic stuff - >.autodoc_pydantic_collapsable_json { - } - // Parameters, Returns, Other Parameters, Raises sections - >.field-list { - // Headings - >dt { - @extend .rubric; - } - // Content - >dd { - >ul.simple { - margin-left: 0; - } - >ul.simple:first-child>li { - list-style: none; - margin-left: 0; - } - >dl>dt, >ul.simple:first-child>li>p:first-child, >p:first-child { - // Name of the parameter or return value - >strong { - font-family: var(--pst-font-family-monospace); - color: var(--ofe-api-param-name-color); - } - // Type of the parameter, or type of a named return value - >.classifier { - font-family: var(--pst-font-family-monospace); - color: var(--ofe-api-type-color); - overflow-wrap: break-word; - } - >a.reference>em { - font-family: var(--pst-font-family-monospace); - } - } - >dl>dt>strong::after { - content: ": "; - } - // Description of the parameter, return value, or exception - >dl>dd, >ul.simple:first-child>li>p:not(:first-child) { - margin-top: 
0; - margin-bottom: 0.5em; - } - } - } - // Notes, References, Methods, Attributes, and Examples headings - >.rubric { - - } - // Reference list (bibliography) - >dl.citation { - - } - // Doctests - already nicely formatted! - >.doctest { - - } - // Tables of methods, attributes, classes, etc. - // Should probably style this globally so it captures the same tables in autosummary directives - >.longtable.docutils { - // A row of the table - tr { - //An odd row - *.row-odd { - - } - //An even row - *.row-even { - - } - // An entry on the LHS of the table - link to another object + possibly a signature - // Signature is a direct child of this element - td:first-child { - // May need to style everything in td:first-child, then re-overwrite things here - a.reference code { - - } - } - // An entry on the RHS of the table - description - td:last-child { - - } - - } - - } - // Child object - recapitulates structure above - >dl:not(.docutils):not(.field-list):not(.simple):not(.citation):not(.option-list):not(.footnote)[class] { - padding-bottom: 0; - box-shadow: 0 4px 5px 0 rgba(black, .14), - 0 1px 10px 0 rgba(black, .12), - 0 2px 4px -1px rgba(black, .40); - border-radius: .25rem; - border: none; - // Inner object body - > dd { - margin-right: 1.5em; - margin-left: 1.5em; - padding-bottom: 0.75em; - } - } - } - - // Don't justify/hyphenate in API - // Undoes styling found by searching "@if hyphenate" in sphinx-api.scss - p { - hyphens: none; - text-align: unset; - } -} - -// Pydantic fields have their own stuff going on -// We still want types to be the right colour -// This works as long as the type annotation is defined, -// but can cause the field's name to be recoloured if eg no -// type is defined and the field has an alias -.pydantic_field .sig { - > .sig-name + .property:not(:last-of-type) { - a { - font-weight: bold; - } - .pre { - color: var(--ofe-api-type-color); - } - } -} - -details.autodoc_pydantic_collapsable_json > summary { - max-width: 42.5rem; - 
margin-left: auto; - margin-right: auto; -} - -// Break headings wherever, if necessary -h1, h2, h3, h4, h5, h6 { - overflow-wrap: break-word; -} - -// Docs button in source on right -.viewcode-back { - float: right; - color: var(--ofe-api-source-link-color); -} diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css deleted file mode 100644 index f374d40a5..000000000 --- a/docs/_static/css/custom.css +++ /dev/null @@ -1,267 +0,0 @@ -html{ - --ofe-color-BadassBlue: #31394d; - --ofe-color-OtherBlue: #002f4a; - --ofe-color-SandySergio: #d9c4b1; - --ofe-color-SergiosCousin: #ede3da; - --ofe-color-FeelingSpicy: #b85741; - --ofe-color-FeelingSick: #009384; - --ofe-color-BeastlyGrey: #666666; - - --ofe-color-GoldenYellow: #EEC044FF; - --ofe-color-DarkGoldenYellow: #c9a239; - - --ofe-color-SergiosBrighterCousin: #fffaf5; - --ofe-color-BeastlyLightGrey: #464545; -} - -/* NavBar */ -.bd-page-width { - max-width: calc(1500px + var(--pst-sidebar-secondary)); -} -.navbar img{ - border: 0px; -} -.nav-item.current.active .nav-link:hover { - text-decoration: none; - color: var(--pst-color-primary); - font-weight: 600; -} - -nav.bd-header.navbar { - /* Copied from dark theme below, with --pst-color-on-background changed */ - --pst-color-on-background: var(--ofe-color-BadassBlue); - - --pst-color-primary: var(--ofe-color-SergiosCousin); - --pst-color-primary-text: var(--ofe-color-SandySergio); - --pst-color-primary-highlight: #306e81; - --pst-color-secondary: #ee9040; - --pst-color-secondary-text: #fff; - --pst-color-secondary-highlight: #cf6912; - --pst-color-success: #488757; - --pst-color-success-text: #fff; - --pst-color-success-highlight: #2d5537; - --pst-color-info: #459db9; - --pst-color-info-text: #fff; - --pst-color-info-highlight: #306e81; - --pst-color-warning: #ee9040; - --pst-color-warning-text: #fff; - --pst-color-warning-highlight: #cf6912; - --pst-color-danger: #cb4653; - --pst-color-danger-text: #fff; - --pst-color-danger-highlight: #992b36; - 
--pst-color-light: #c9c9c9; - --pst-color-light-text: #000; - --pst-color-light-highlight: #a3a3a3; - --pst-color-muted: #a6a6a6; - --pst-color-muted-text: #fff; - --pst-color-muted-highlight: gray; - --pst-color-dark: #cecece; - --pst-color-dark-text: #000; - --pst-color-dark-highlight: #a8a8a8; - --pst-color-black: #000; - --pst-color-black-text: #fff; - --pst-color-black-highlight: #000; - --pst-color-white: #fff; - --pst-color-white-text: #000; - --pst-color-white-highlight: #d9d9d9; - --pst-color-attention: var(--ofe-color-FeelingSpicy); - --pst-color-text-base: var(--ofe-color-SergiosBrighterCousin); - --pst-color-text-muted: var(--ofe-color-SandySergio); - --pst-color-shadow: #212121; - --pst-color-border: silver; - --pst-color-inline-code: var(--ofe-color-SergiosCousin); - --pst-color-target: var(--ofe-color-DarkGoldenYellow); - --pst-color-background: var(--ofe-color-BeastlyLightGrey); - --pst-color-surface: #666666; - --pst-color-on-surface: #373737; - --pst-color-link: var(--ofe-color-DarkGoldenYellow); - --pst-color-link-hover: var(--pst-color-warning); - --pst-color-inline-code-links: var(--pst-color-link); -} - -img { - border: 2px solid var(--ofe-color-BeastlyGrey); - background: var(--ofe-color-SandySergio); -} - -/* Remove underlines from links */ -a:not(:hover) { - text-decoration: none; -} - - -/* -Links bar at top of notebooks -*/ -.ofe-top-of-notebook { - display: flex; - flex-flow: row wrap; - justify-content: center; - gap: 3em; - margin: 0; - padding: 0; -} - -/* -Move break points for secondary sidebar and search button -*/ -@media (max-width: 1500px) { - .bd-sidebar-secondary { - flex-grow: .75; - height: 100vh; - margin-right: -75%; - max-height: 100vh; - max-width: 350px; - position: fixed; - right: 0; - top: 0; - transition: visibility .2s ease-out,margin .2s ease-out; - visibility: hidden; - width: 75%; - z-index: 1055; - font-size: var(--pst-sidebar-font-size-mobile); - } - - .search-button-field .search-button__kbd-shortcut - { - 
display: none; - } -} - -/* -Themes - Deviating Colors are ofe-colorVars rest is original scipyData theme. -*/ - -html { - --sd-color-secondary: var(--pst-color-secondary); - --sd-color-secondary-text: var(--pst-color-secondary-text); - --sd-color-secondary-highlight: var(--pst-color-secondary-highlight); - --sd-color-success: var(--pst-color-success); - --sd-color-success-text: var(--pst-color-success-text); - --sd-color-success-highlight: var(--pst-color-success-highlight); - --sd-color-info: var(--pst-color-info); - --sd-color-info-text: var(--pst-color-info-text); - --sd-color-info-highlight: var(--pst-color-info-highlight); - --sd-color-warning: var(--pst-color-warning); - --sd-color-warning-text: var(--pst-color-warning-text); - --sd-color-warning-highlight: var(--pst-color-warning-highlight); - --sd-color-danger: var(--pst-color-danger); - --sd-color-danger-text: var(--pst-color-danger-text); - --sd-color-danger-highlight: var(--pst-color-danger-highlight); - --sd-color-light: var(--pst-color-light); - --sd-color-light-text: var(--pst-color-light-text); - --sd-color-light-highlight: var(--pst-color-light-highlight); - --sd-color-muted: var(--pst-color-muted); - --sd-color-muted-text: var(--pst-color-muted-text); - --sd-color-muted-highlight: var(--pst-color-muted-highlight); - --sd-color-dark: var(--pst-color-dark); - --sd-color-dark-text: var(--pst-color-dark-text); - --sd-color-dark-highlight: var(--pst-color-dark-highlight); - --sd-color-black: var(--pst-color-black); - --sd-color-black-text: var(--pst-color-black-text); - --sd-color-black-highlight: var(--pst-color-black-highlight); - --sd-color-white: var(--pst-color-white); - --sd-color-white-text: var(--pst-color-white-text); - --sd-color-white-highlight: var(--pst-color-white-highlight); -} - -html[data-theme=light]{ - --pst-color-primary: var(--ofe-color-OtherBlue); - --pst-color-primary-text: var(--ofe-color-SandySergio); - --pst-color-primary-highlight: var(--ofe-color-SergiosCousin); - 
--pst-color-secondary: var(--ofe-color-SandySergio); - --pst-color-secondary-text: #fff; - --pst-color-secondary-highlight: #cf6912; - --pst-color-success: #28a745; - --pst-color-success-text: #fff; - --pst-color-success-highlight: #19692c; - --pst-color-info: #459db9; - --pst-color-info-text: #fff; - --pst-color-info-highlight: #306e81; - --pst-color-warning: #ee9040; - --pst-color-warning-text: #fff; - --pst-color-warning-highlight: #cf6912; - --pst-color-danger: #dc3545; - --pst-color-danger-text: #fff; - --pst-color-danger-highlight: #a71d2a; - --pst-color-light: #c9c9c9; - --pst-color-light-text: #000; - --pst-color-light-highlight: #a3a3a3; - --pst-color-muted: #646464; - --pst-color-muted-text: #fff; - --pst-color-muted-highlight: #3e3e3e; - --pst-color-dark: #323232; - --pst-color-dark-text: #fff; - --pst-color-dark-highlight: #0c0c0c; - --pst-color-black: #000; - --pst-color-black-text: #fff; - --pst-color-black-highlight: #000; - --pst-color-white: #fff; - --pst-color-white-text: #000; - --pst-color-white-highlight: #d9d9d9; - --pst-color-attention: var(--ofe-color-GoldenYellow); - --pst-color-text-base: var(--ofe-color-OtherBlue); - --pst-color-text-muted: var(--ofe-color-BadassBlue); - --pst-color-shadow: #d8d8d8; - --pst-color-border: #c9c9c9; - --pst-color-inline-code: var(--ofe-color-FeelingSick); - --pst-color-target: var(--ofe-color-GoldenYellow); - --pst-color-background: var(--ofe-color-SergiosBrighterCousin); - --pst-color-on-background: #fff; - --pst-color-surface: #f5f5f5; - --pst-color-on-surface: #e1e1e1; - --pst-color-link: var(--ofe-color-DarkGoldenYellow); - --pst-color-link-hover: var(--pst-color-link); - --pst-color-inline-code-links: var(--pst-color-link); -} - -/*DarkTheme*/ -html[data-theme=dark] { - --pst-color-primary: var(--ofe-color-SergiosCousin); - --pst-color-primary-text: var(--ofe-color-SandySergio); - --pst-color-primary-highlight: #306e81; - --pst-color-secondary: #ee9040; - --pst-color-secondary-text: #fff; - 
--pst-color-secondary-highlight: #cf6912; - --pst-color-success: #488757; - --pst-color-success-text: #fff; - --pst-color-success-highlight: #2d5537; - --pst-color-info: #459db9; - --pst-color-info-text: #fff; - --pst-color-info-highlight: #306e81; - --pst-color-warning: #ee9040; - --pst-color-warning-text: #fff; - --pst-color-warning-highlight: #cf6912; - --pst-color-danger: #cb4653; - --pst-color-danger-text: #fff; - --pst-color-danger-highlight: #992b36; - --pst-color-light: #c9c9c9; - --pst-color-light-text: #000; - --pst-color-light-highlight: #a3a3a3; - --pst-color-muted: #a6a6a6; - --pst-color-muted-text: #fff; - --pst-color-muted-highlight: gray; - --pst-color-dark: #cecece; - --pst-color-dark-text: #000; - --pst-color-dark-highlight: #a8a8a8; - --pst-color-black: #000; - --pst-color-black-text: #fff; - --pst-color-black-highlight: #000; - --pst-color-white: #fff; - --pst-color-white-text: #000; - --pst-color-white-highlight: #d9d9d9; - --pst-color-attention: var(--ofe-color-FeelingSpicy); - --pst-color-text-base: var(--ofe-color-SergiosBrighterCousin); - --pst-color-text-muted: var(--ofe-color-SandySergio); - --pst-color-shadow: #212121; - --pst-color-border: silver; - --pst-color-inline-code: var(--ofe-color-SergiosCousin); - --pst-color-target: var(--ofe-color-DarkGoldenYellow); - --pst-color-background: var(--ofe-color-BeastlyLightGrey); - --pst-color-on-background: #1e1e1e; - --pst-color-surface: #666666; - --pst-color-on-surface: #373737; - --pst-color-link: var(--ofe-color-DarkGoldenYellow); - --pst-color-link-hover: var(--pst-color-link); - --pst-color-inline-code-links: var(--pst-color-link); -} diff --git a/docs/conf.py b/docs/conf.py index 5612db921..9deb7f180 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -125,17 +125,18 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# -html_theme = "pydata_sphinx_theme" +html_theme = "ofe_sphinx_theme" html_theme_options = { "logo": {"text": "OpenFE Documentation"}, "icon_links": [ { - "name": "Github", + "name": "GitHub", "url": "https://github.com/OpenFreeEnergy/openfe", "icon": "fa-brands fa-square-github", "type": "fontawesome", } ], + "accent_color": "DarkGoldenYellow", } html_logo = "_static/Squaredcircle.svg" diff --git a/docs/environment.yaml b/docs/environment.yaml index 049c93e27..ef93acb7b 100644 --- a/docs/environment.yaml +++ b/docs/environment.yaml @@ -10,8 +10,7 @@ dependencies: - openmm - packaging - plugcli -- pydata-sphinx-theme -- python=3.9 +- python=3.10 - sphinx<7 - sphinx-click - gitpython @@ -25,6 +24,7 @@ dependencies: - sphinx-toolbox - sphinx<7 - git+https://github.com/OpenFreeEnergy/gufe@main + - git+https://github.com/OpenFreeEnergy/ofe-sphinx-theme@main # These are added automatically by RTD, so we include them here # for a consistent environment. diff --git a/docs/guide/hpc.rst b/docs/guide/hpc.rst deleted file mode 100644 index 254fe0777..000000000 --- a/docs/guide/hpc.rst +++ /dev/null @@ -1,115 +0,0 @@ -HPC -=== - -We recommend using `apptainer (formally singularity) `_ when running ``openfe`` workflows in HPC environments. -This images provide a software environment that is isolated from the host which can make workflow execution easier to setup and more reproducible. -See our guide on :ref:`containers ` for how to get started using apptainer/singularity. - -``micromamba`` Installation Considerations in HPC Environments --------------------------------------------------------------- - -``conda``, ``mamba`` and ``micromamba`` all use `virtual packages `_ to detect which version of CUDA should be installed. 
-For example, on a login node where there likely is not a GPU or a CUDA environment, ``micromamba info`` may produce output that looks like this :: - - $ micromamba info - - __ - __ ______ ___ ____ _____ ___ / /_ ____ _ - / / / / __ `__ \/ __ `/ __ `__ \/ __ \/ __ `/ - / /_/ / / / / / / /_/ / / / / / / /_/ / /_/ / - / .___/_/ /_/ /_/\__,_/_/ /_/ /_/_.___/\__,_/ - /_/ - - - environment : openfe_env (active) - env location : /lila/home/henrym3/micromamba/envs/openfe_env - user config files : /home/henrym3/.mambarc - populated config files : /home/henrym3/.condarc - libmamba version : 1.2.0 - micromamba version : 1.2.0 - curl version : libcurl/7.87.0 OpenSSL/1.1.1s zlib/1.2.13 libssh2/1.10.0 nghttp2/1.47.0 - libarchive version : libarchive 3.6.2 zlib/1.2.13 bz2lib/1.0.8 libzstd/1.5.2 - virtual packages : __unix=0=0 - __linux=3.10.0=0 - __glibc=2.17=0 - __archspec=1=x86_64 - channels : https://conda.anaconda.org/conda-forge/linux-64 - https://conda.anaconda.org/conda-forge/noarch - base environment : /lila/home/henrym3/micromamba - platform : linux-64 - - -Now if we run the same command on a HPC node that has a GPU :: - - $ micromamba info - - __ - __ ______ ___ ____ _____ ___ / /_ ____ _ - / / / / __ `__ \/ __ `/ __ `__ \/ __ \/ __ `/ - / /_/ / / / / / / /_/ / / / / / / /_/ / /_/ / - / .___/_/ /_/ /_/\__,_/_/ /_/ /_/_.___/\__,_/ - /_/ - - - environment : openfe_env (active) - env location : /lila/home/henrym3/micromamba/envs/openfe_env - user config files : /home/henrym3/.mambarc - populated config files : /home/henrym3/.condarc - libmamba version : 1.2.0 - micromamba version : 1.2.0 - curl version : libcurl/7.87.0 OpenSSL/1.1.1s zlib/1.2.13 libssh2/1.10.0 nghttp2/1.47.0 - libarchive version : libarchive 3.6.2 zlib/1.2.13 bz2lib/1.0.8 libzstd/1.5.2 - virtual packages : __unix=0=0 - __linux=3.10.0=0 - __glibc=2.17=0 - __archspec=1=x86_64 - __cuda=11.7=0 - channels : https://conda.anaconda.org/conda-forge/linux-64 - https://conda.anaconda.org/conda-forge/noarch - base 
environment : /lila/home/henrym3/micromamba - platform : linux-64 - -We can see that there is a virtual package ``__cuda=11.7=0``. -This means that if we run a ``micromamba install`` command on a node with a GPU, the solver will install the correct version of the ``cudatoolkit``. -However, if we ran the same command on the login node, the solver may install the wrong version of the ``cudatoolkit``, or depending on how the conda packages are setup, a CPU only version of the package. -We can control the virtual package with the environmental variable ``CONDA_OVERRIDE_CUDA``. - -In order to determine the correct ``cudatoolkit`` version, we recommend connecting to the node where the simulation will be executed and run ``nvidia-smi``. -For example :: - - $ nvidia-smi - Tue Jun 13 17:47:11 2023 - +-----------------------------------------------------------------------------+ - | NVIDIA-SMI 515.43.04 Driver Version: 515.43.04 CUDA Version: 11.7 | - |-------------------------------+----------------------+----------------------+ - | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | - | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | - | | | MIG M. 
| - |===============================+======================+======================| - | 0 NVIDIA A40 On | 00000000:65:00.0 Off | 0 | - | 0% 30C P8 32W / 300W | 0MiB / 46068MiB | 0% Default | - | | | N/A | - +-------------------------------+----------------------+----------------------+ - - +-----------------------------------------------------------------------------+ - | Processes: | - | GPU GI CI PID Type Process name GPU Memory | - | ID ID Usage | - |=============================================================================| - | No running processes found | - +-----------------------------------------------------------------------------+ - -in this output of ``nvidia-smi`` we can see in the upper right of the output ``CUDA Version: 11.7`` which means the installed driver will support a ``cudatoolkit`` version up to ``11.7`` - -So on the login node, we can run ``CONDA_OVERRIDE_CUDA=11.7 micromamba info`` and see that the "correct" virtual CUDA is listed. -For example, to install a version of ``openfe`` which is compatible with ``cudatoolkit 11.7`` run ``CONDA_OVERRIDE_CUDA=11.7 micromamba install openfe``. - -Common Errors -------------- - -openmm.OpenMMException: Error loading CUDA module: CUDA_ERROR_UNSUPPORTED_PTX_VERSION (222) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This error likely means that the CUDA version that ``openmm`` was built with is incompatible with the CUDA driver. -Try re-making the environment while specifying the CUDA toolkit version that works with the CUDA driver on the node. -For example ``micromamba create -n openfe_env openfe cudatoolkit==11.3``. 
diff --git a/docs/guide/index.rst b/docs/guide/index.rst index ed31e252c..f66bf06f3 100644 --- a/docs/guide/index.rst +++ b/docs/guide/index.rst @@ -9,4 +9,3 @@ User Guide execution/index results/index cli - hpc diff --git a/docs/installation.rst b/docs/installation.rst index 970424c68..5297cafd4 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -6,22 +6,29 @@ packages that integrate with ``openfe``, and testing that your ``openfe`` installation is working. ``openfe`` currently only works on POSIX system (macOS and UNIX/Linux). It -is tested against Python 3.9 and 3.10. - -Installing ``openfe`` ---------------------- +is tested against Python 3.9, 3.10, and 3.11. When you install ``openfe`` through any of the methods described below, you will install both the core library and the command line interface (CLI). +If you already have a Mamba installation, you can install ``openfe`` with: + +.. parsed-literal:: + + mamba create -c conda-forge -n openfe_env openfe=\ |version| + mamba activate openfe_env + +Note that you must run the latter line in each shell session where you want to use ``openfe``. OpenFE recommends the Mamba package manager for most users as it is orders of magnitude faster than the default Conda package manager. Mamba is a drop in replacement for Conda. + Installation with ``mambaforge`` (recommended) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------------------------- + +.. _MambaForge: https://github.com/conda-forge/miniforge#mambaforge -We recommend installing ``openfe`` with `mambaforge `_, because it provides easy -installation of other tools, including molecular dynamics tools such as -OpenMM and ambertools, which are needed by ``openfe``. -We recommend ``mambaforge`` because it is faster than ``conda`` and comes -preconfigured to use ``conda-forge``. 
+We recommend installing ``openfe`` with `MambaForge`_ because it provides easy +installation of other software that ``openfe`` needs, such as OpenMM and +AmberTools. We recommend ``mambaforge`` because it is faster than ``conda`` and +comes preconfigured to use ``conda-forge``. To install and configure ``mambaforge``, you need to know your operating system, your machine architecture (output of ``uname -m``), and your shell @@ -96,21 +103,21 @@ commands it suggests. You should then close your current session and open a fresh login to ensure that everything is properly registered. -Next we will create an environment called ``openfe_env`` with the ``openfe`` package and all required dependencies +Next we will create an environment called ``openfe_env`` with the ``openfe`` package and all required dependencies: .. parsed-literal:: - $ mamba create -n openfe_env openfe=\ |version| + mamba create -n openfe_env openfe=\ |version| Now we need to activate our new environment :: - $ mamba activate openfe_env + mamba activate openfe_env .. warning:: - Installing on newer Macs with Apple Silicon requires a creating an x86_64 - environmment, as one of our requirements is not yet available for Apple + Installing on Macs with Apple Silicon requires a creating an x86_64 + environment, as one of our requirements is not yet available for Apple Silicon. Run the following modified commands .. parsed-literal:: @@ -121,9 +128,10 @@ Now we need to activate our new environment :: To make sure everything is working, run the tests :: - $ openfe test --long + openfe test --long -The test suite contains several hundred individual tests. This may take up to an hour, and all tests should complete with status either passed, +The test suite contains several hundred individual tests. This may take up to +an hour, and all tests should complete with status either passed, skipped, or xfailed (expected fail). 
The very first time you run this, the initial check that you can import ``openfe`` will take a while, because some code is compiled the first time it is encountered. That compilation only @@ -132,11 +140,13 @@ happens once per installation. With that, you should be ready to use ``openfe``! Single file installer -^^^^^^^^^^^^^^^^^^^^^ +--------------------- + +.. _releases on GitHub: https://github.com/OpenFreeEnergy/openfe/releases Single file installers are available for x86_64 Linux and MacOS. -They are attached to our `releases on GitHub `_ and can be downloaded with a browser or ``curl`` (or similar tool). -For example, the linux installer can be downloaded with :: +They are attached to our `releases on GitHub`_ and can be downloaded with a browser or ``curl`` (or similar tool). +For example, the Linux installer can be downloaded with :: $ curl -LOJ https://github.com/OpenFreeEnergy/openfe/releases/latest/download/OpenFEforge-Linux-x86_64.sh @@ -145,6 +155,7 @@ And the MacOS installer :: $ curl -LOJ https://github.com/OpenFreeEnergy/openfe/releases/latest/download/OpenFEforge-MacOSX-x86_64.sh The single file installer contains all of the dependencies required for ``openfe`` and does not require internet access to use. + Both ``conda`` and ``mamba`` are also available in the environment created by the single file installer and can be used to install additional packages. The installer can be installed in batch mode or interactively :: @@ -190,8 +201,7 @@ Example installer output is shown below (click to expand "Installer Output") [no] >>> yes .. note:: - Your path will be different - + The install location will be different when you run the installer. .. code-block:: @@ -286,7 +296,7 @@ Example installer output is shown below (click to expand "Installer Output") conda config --set auto_activate_base false Thank you for installing OpenFEforge! - + After the installer completes, close and reopen your shell. 
To check if your path is setup correctly, run ``which python`` your output should look something like this :: @@ -332,14 +342,14 @@ skipped, or xfailed (expected fail). With that, you should be ready to use ``openfe``! Containers -^^^^^^^^^^ +---------- -We provide an official docker and apptainer (formally singularity) image. +We provide an official docker and Apptainer (formerly Singularity) image. The docker image is tagged with the version of ``openfe`` on the image and can be pulled with :: $ docker pull ghcr.io/openfreeenergy/openfe:latest -The apptainer image is pre-built and can be pulled with :: +The Apptainer image is pre-built and can be pulled with :: $ singularity pull oras://ghcr.io/openfreeenergy/openfe:latest-apptainer @@ -374,10 +384,10 @@ This can be done with the following command :: All differences are within tolerance. -The ``--nv`` flag is required for the apptainer image to access the GPU on the host. +The ``--nv`` flag is required for the Apptainer image to access the GPU on the host. Your output may produce different values for the forces, but should list the CUDA platform if everything is working properly. -You can access the ``openfe`` CLI from the singularity image with :: +You can access the ``openfe`` CLI from the Singularity image with :: $ singularity run --nv openfe_latest-apptainer.sif openfe --help @@ -392,7 +402,7 @@ skipped, or xfailed (expected fail). With that, you should be ready to use ``openfe``! Developer install -^^^^^^^^^^^^^^^^^ +----------------- If you're going to be developing for ``openfe``, you will want an installation where your changes to the code are immediately reflected in the @@ -424,7 +434,7 @@ Note the ``.`` at the end of that command, which indicates the current directory. Optional dependencies -^^^^^^^^^^^^^^^^^^^^^ +--------------------- Certain functionalities are only available if you also install other, optional packages. @@ -433,3 +443,137 @@ optional packages. and you need a valid OpenEye license. 
To install both packages, use:: $ mamba install -c openeye perses openeye-toolkits
+
+HPC Environments
+----------------
+
+When using High Performance Computing resources, jobs are typically submitted to a queue from a "login node" and then run at a later time, often on different hardware and in a different software environment.
+This can complicate installation as getting something working on the login node does not guarantee it will work in the job.
+We recommend using `Apptainer (formerly Singularity) `_ when running ``openfe`` workflows in HPC environments.
+These images provide a software environment that is isolated from the host, which can make workflow execution easier to set up and more reproducible.
+See our guide on :ref:`containers ` for how to get started using Apptainer/Singularity.
+
+.. _installation:mamba_hpc:
+
+``mamba`` in HPC Environments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _virtual packages: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-virtual.html#managing-virtual-packages
+
+We recommend using a :ref:`container ` to install ``openfe`` in HPC environments.
+Nonetheless, ``openfe`` can be installed via Conda Forge on these environments also.
+Conda Forge distributes its own CUDA binaries for interfacing with the GPU, rather than use the host drivers.
+``conda``, ``mamba`` and ``micromamba`` all use `virtual packages`_ to detect and specify which version of CUDA should be installed.
+This is a common point of difference in hardware between the login and job nodes in an HPC environment.
+For example, on a login node where there likely is not a GPU or a CUDA environment, ``mamba info`` may produce output that looks like this :: + + $ mamba info + + mamba version : 1.5.1 + active environment : base + active env location : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0 + shell level : 1 + user config file : /home/henrym3/.condarc + populated config files : /lila/home/henrym3/.condarc + conda version : 23.7.4 + conda-build version : not installed + python version : 3.11.5.final.0 + virtual packages : __archspec=1=x86_64 + __glibc=2.17=0 + __linux=3.10.0=0 + __unix=0=0 + base environment : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0 (writable) + conda av data dir : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0/etc/conda + conda av metadata url : None + channel URLs : https://conda.anaconda.org/conda-forge/linux-64 + https://conda.anaconda.org/conda-forge/noarch + package cache : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0/pkgs + /home/henrym3/.conda/pkgs + envs directories : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0/envs + /home/henrym3/.conda/envs + platform : linux-64 + user-agent : conda/23.7.4 requests/2.31.0 CPython/3.11.5 Linux/3.10.0-957.12.2.el7.x86_64 centos/7.6.1810 glibc/2.17 + UID:GID : 1987:3008 + netrc file : None + offline mode : False + +Now if we run the same command on a HPC node that has a GPU :: + + $ mamba info + + mamba version : 1.5.1 + active environment : base + active env location : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0 + shell level : 1 + user config file : /home/henrym3/.condarc + populated config files : /lila/home/henrym3/.condarc + conda version : 23.7.4 + conda-build version : not installed + python version : 3.11.5.final.0 + virtual packages : __archspec=1=x86_64 + __cuda=11.7=0 + __glibc=2.17=0 + __linux=3.10.0=0 + __unix=0=0 + base environment : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0 (writable) + conda av data dir : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0/etc/conda + conda av 
metadata url : None + channel URLs : https://conda.anaconda.org/conda-forge/linux-64 + https://conda.anaconda.org/conda-forge/noarch + package cache : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0/pkgs + /home/henrym3/.conda/pkgs + envs directories : /lila/home/henrym3/mamba/envs/QA-openfe-0.14.0/envs + /home/henrym3/.conda/envs + platform : linux-64 + user-agent : conda/23.7.4 requests/2.31.0 CPython/3.11.5 Linux/3.10.0-1160.45.1.el7.x86_64 centos/7.9.2009 glibc/2.17 + UID:GID : 1987:3008 + netrc file : None + offline mode : False + + +We can see that there is a virtual package ``__cuda=11.7=0``. +This means that if we run a ``mamba install`` command on a node with a GPU, the solver will install the correct version of the ``cudatoolkit``. +However, if we ran the same command on the login node, the solver may install the wrong version of the ``cudatoolkit``, or depending on how the Conda packages are setup, a CPU only version of the package. +We can control the virtual package with the environmental variable ``CONDA_OVERRIDE_CUDA``. + +In order to determine the correct ``cudatoolkit`` version, we recommend connecting to the node where the simulation will be executed and run ``nvidia-smi``. +For example :: + + $ nvidia-smi + Tue Jun 13 17:47:11 2023 + +-----------------------------------------------------------------------------+ + | NVIDIA-SMI 515.43.04 Driver Version: 515.43.04 CUDA Version: 11.7 | + |-------------------------------+----------------------+----------------------+ + | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | + | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | + | | | MIG M. 
| + |===============================+======================+======================| + | 0 NVIDIA A40 On | 00000000:65:00.0 Off | 0 | + | 0% 30C P8 32W / 300W | 0MiB / 46068MiB | 0% Default | + | | | N/A | + +-------------------------------+----------------------+----------------------+ + + +-----------------------------------------------------------------------------+ + | Processes: | + | GPU GI CI PID Type Process name GPU Memory | + | ID ID Usage | + |=============================================================================| + | No running processes found | + +-----------------------------------------------------------------------------+ + +in this output of ``nvidia-smi`` we can see in the upper right of the output ``CUDA Version: 11.7`` which means the installed driver will support a ``cudatoolkit`` version up to ``11.7`` + +So on the login node, we can run ``CONDA_OVERRIDE_CUDA=11.7 mamba info`` and see that the "correct" virtual CUDA is listed. +For example, to install a version of ``openfe`` which is compatible with ``cudatoolkit 11.7``, run: + +.. parsed-literal:: + + $ CONDA_OVERRIDE_CUDA=11.7 mamba create -n openfe_env openfe=\ |version| + +Common Errors +------------- + +openmm.OpenMMException: Error loading CUDA module: CUDA_ERROR_UNSUPPORTED_PTX_VERSION (222) + This error likely means that the CUDA version that ``openmm`` was built with is incompatible with the CUDA driver. + Try re-making the environment while specifying the correct CUDA toolkit version for your hardware and driver. + See :ref:`installation:mamba_hpc` for more details. diff --git a/openfe/analysis/plotting.py b/openfe/analysis/plotting.py index eba7b7e7f..282ef5439 100644 --- a/openfe/analysis/plotting.py +++ b/openfe/analysis/plotting.py @@ -1,12 +1,13 @@ # This code is part of OpenFE and is licensed under the MIT license. 
# For details, see https://github.com/OpenFreeEnergy/openfe import matplotlib.pyplot as plt +from matplotlib.axes import Axes import numpy.typing as npt from openff.units import unit from typing import Optional, Union -def plot_lambda_transition_matrix(matrix: npt.NDArray) -> plt.Axes: +def plot_lambda_transition_matrix(matrix: npt.NDArray) -> Axes: """ Plot out a transition matrix. @@ -17,7 +18,7 @@ def plot_lambda_transition_matrix(matrix: npt.NDArray) -> plt.Axes: Returns ------- - ax : matplotlib.pyplot.Axes + ax : matplotlib.axes.Axes An Axes object to plot. """ num_states = len(matrix) @@ -79,7 +80,7 @@ def plot_lambda_transition_matrix(matrix: npt.NDArray) -> plt.Axes: def plot_convergence( forward_and_reverse: dict[str, Union[npt.NDArray, unit.Quantity]], units: unit.Quantity -) -> plt.Axes: +) -> Axes: """ Plot a Reverse and Forward convergence analysis of the free energies. @@ -95,7 +96,7 @@ def plot_convergence( Returns ------- - ax : matplotlib.pyplot.Axes + ax : matplotlib.axes.Axes An Axes object to plot. """ known_units = { @@ -165,7 +166,7 @@ def plot_convergence( def plot_replica_timeseries( state_timeseries: npt.NDArray, equilibration_iterations: Optional[int] = None, -) -> plt.Axes: +) -> Axes: """ Plot a the state timeseries of a set of replicas. @@ -178,7 +179,7 @@ def plot_replica_timeseries( Returns ------- - ax : matplotlib.pyplot.Axes + ax : matplotlib.axes.Axes An Axes object to plot. 
""" num_states = len(state_timeseries.T) diff --git a/openfe/protocols/openmm_rfe/equil_rfe_methods.py b/openfe/protocols/openmm_rfe/equil_rfe_methods.py index f2430aa27..0f6aa98c6 100644 --- a/openfe/protocols/openmm_rfe/equil_rfe_methods.py +++ b/openfe/protocols/openmm_rfe/equil_rfe_methods.py @@ -198,7 +198,15 @@ def get_individual_estimates(self) -> list[tuple[unit.Quantity, unit.Quantity]]: def get_forward_and_reverse_energy_analysis(self) -> list[dict[str, Union[npt.NDArray, unit.Quantity]]]: """ Get a list of forward and reverse analysis of the free energies - for each repeat using uncorrolated production samples. + for each repeat using uncorrelated production samples. + + The returned dicts have keys: + 'fractions' - the fraction of data used for this estimate + 'forward_DGs', 'reverse_DGs' - for each fraction of data, the estimate + 'forward_dDGs', 'reverse_dDGs' - for each estimate, the uncertainty + + The 'fractions' values are a numpy array, while the other arrays are + Quantity arrays, with units attached. Returns ------- @@ -231,9 +239,7 @@ def get_overlap_matrices(self) -> list[dict[str, npt.NDArray]]: return overlap_stats def get_replica_transition_statistics(self) -> list[dict[str, npt.NDArray]]: - """ - Returns the replica lambda state transition statistics for each - repeat. + """The replica lambda state transition statistics for each repeat. Note ---- @@ -246,7 +252,7 @@ def get_replica_transition_statistics(self) -> list[dict[str, npt.NDArray]]: A list of dictionaries containing the following: * ``eigenvalues``: The sorted (descending) eigenvalues of the lambda state transition matrix - * ``matrix``: The transition matrix estimate of a replica switchin + * ``matrix``: The transition matrix estimate of a replica switching from state i to state j. 
""" try: diff --git a/openfe/protocols/openmm_utils/multistate_analysis.py b/openfe/protocols/openmm_utils/multistate_analysis.py index 4c22f4f13..6d523f7f4 100644 --- a/openfe/protocols/openmm_utils/multistate_analysis.py +++ b/openfe/protocols/openmm_utils/multistate_analysis.py @@ -74,7 +74,7 @@ def plot(self, filepath: Path, filename_prefix: str): # MBAR overlap matrix ax = plotting.plot_lambda_transition_matrix(self.free_energy_overlaps['matrix']) ax.set_title('MBAR overlap matrix') - ax.figure.savefig( + ax.figure.savefig( # type: ignore filepath / (filename_prefix + 'mbar_overlap_matrix.png') ) @@ -83,7 +83,7 @@ def plot(self, filepath: Path, filename_prefix: str): self.forward_and_reverse_free_energies, self.units ) ax.set_title('Forward and Reverse free energy convergence') - ax.figure.savefig( + ax.figure.savefig( # type: ignore filepath / (filename_prefix + 'forward_reverse_convergence.png') ) @@ -92,7 +92,7 @@ def plot(self, filepath: Path, filename_prefix: str): self.replica_states, self.equilibration_iterations ) ax.set_title('Change in replica state over time') - ax.figure.savefig( + ax.figure.savefig( # type: ignore filepath / (filename_prefix + 'replica_state_timeseries.png') ) @@ -102,7 +102,7 @@ def plot(self, filepath: Path, filename_prefix: str): self.replica_exchange_statistics['matrix'] ) ax.set_title('Replica exchange transition matrix') - ax.figure.savefig( + ax.figure.savefig( # type: ignore filepath / (filename_prefix + 'replica_exchange_matrix.png') ) diff --git a/openfe/setup/ligand_network_planning.py b/openfe/setup/ligand_network_planning.py index 1c6265868..723c9edcf 100644 --- a/openfe/setup/ligand_network_planning.py +++ b/openfe/setup/ligand_network_planning.py @@ -160,7 +160,7 @@ def generate_maximal_network( total = len(nodes) * (len(nodes) - 1) // 2 progress = functools.partial(tqdm, total=total, delay=1.5) elif progress is False: - progress = lambda x: x + def progress(x): return x # otherwise, it should be a user-defined 
callable mapping_generator = itertools.chain.from_iterable( @@ -229,6 +229,74 @@ def generate_minimal_spanning_network( return min_network +def generate_minimal_redundant_network( + ligands: Iterable[SmallMoleculeComponent], + mappers: Union[AtomMapper, Iterable[AtomMapper]], + scorer: Callable[[LigandAtomMapping], float], + progress: Union[bool, Callable[[Iterable], Iterable]] = True, + mst_num: int = 2, +) -> LigandNetwork: + """ + Plan a network with a specified amount of redundancy for each node + + Creates a network with as few edges as possible with maximum total score, + ensuring that every node is connected to two edges to introduce + statistical redundancy. + + Parameters + ---------- + ligands : Iterable[SmallMoleculeComponent] + the ligands to include in the LigandNetwork + mappers : AtomMapper or Iterable[AtomMapper] + the AtomMapper(s) to use to propose mappings. At least 1 required, + but many can be given, in which case all will be tried to find the + highest score edges + scorer : Scoring function + any callable which takes a LigandAtomMapping and returns a float + progress : Union[bool, Callable[Iterable], Iterable] + progress bar: if False, no progress bar will be shown. If True, use a + tqdm progress bar that only appears after 1.5 seconds. You can also + provide a custom progress bar wrapper as a callable. + mst_num: int + Minimum Spanning Tree number: the number of minimum spanning trees to + generate. If two, the second-best edges are included in the returned + network. If three, the third-best edges are also included, etc. 
+ """ + if isinstance(mappers, AtomMapper): + mappers = [mappers] + mappers = [_hasten_lomap(m, ligands) if isinstance(m, LomapAtomMapper) + else m for m in mappers] + + # First create a network with all the proposed mappings (scored) + network = generate_maximal_network(ligands, mappers, scorer, progress) + + # Flip network scores so we can use minimal algorithm + g2 = nx.MultiGraph() + for e1, e2, d in network.graph.edges(data=True): + g2.add_edge(e1, e2, weight=-d['score'], object=d['object']) + + # As in .generate_minimal_spanning_network(), use nx to get the minimal + # network. But now also remove those edges from the fully-connected + # network, then get the minimal network again. Add mappings from all + # minimal networks together. + mappings = [] + for _ in range(mst_num): # can increase range here for more redundancy + # get list from generator so that we don't adjust network by calling it: + current_best_edges = list(nx.minimum_spanning_edges(g2)) + + g2.remove_edges_from(current_best_edges) + for _, _, _, edge_data in current_best_edges: + mappings.append(edge_data['object']) + + redund_network = LigandNetwork(mappings) + missing_nodes = set(network.nodes) - set(redund_network.nodes) + if missing_nodes: + raise RuntimeError("Unable to create edges to some nodes: " + f"{list(missing_nodes)}") + + return redund_network + + def generate_network_from_names( ligands: list[SmallMoleculeComponent], mapper: AtomMapper, @@ -353,7 +421,7 @@ def load_orion_network( KeyError If an unexpected line format is encountered. 
""" - + with open(network_file, 'r') as f: network_lines = [l.strip().split(' ') for l in f if not l.startswith('#')] diff --git a/openfe/tests/data/openmm_rfe/Transformation-e1702a3efc0fa735d5c14fc7572b5278_results.json.gz b/openfe/tests/data/openmm_rfe/Transformation-e1702a3efc0fa735d5c14fc7572b5278_results.json.gz new file mode 100644 index 000000000..9a598d21e Binary files /dev/null and b/openfe/tests/data/openmm_rfe/Transformation-e1702a3efc0fa735d5c14fc7572b5278_results.json.gz differ diff --git a/openfe/tests/data/openmm_rfe/vac_results.json.gz b/openfe/tests/data/openmm_rfe/vac_results.json.gz deleted file mode 100644 index e0178d09d..000000000 Binary files a/openfe/tests/data/openmm_rfe/vac_results.json.gz and /dev/null differ diff --git a/openfe/tests/protocols/conftest.py b/openfe/tests/protocols/conftest.py index b2f17d438..c18204ce0 100644 --- a/openfe/tests/protocols/conftest.py +++ b/openfe/tests/protocols/conftest.py @@ -147,5 +147,5 @@ def transformation_json() -> str: """string of a result of quickrun""" d = resources.files('openfe.tests.data.openmm_rfe') - with gzip.open((d / 'vac_results.json.gz').as_posix(), 'r') as f: # type: ignore + with gzip.open((d / 'Transformation-e1702a3efc0fa735d5c14fc7572b5278_results.json.gz').as_posix(), 'r') as f: # type: ignore return f.read().decode() # type: ignore diff --git a/openfe/tests/protocols/test_openmm_equil_rfe_protocols.py b/openfe/tests/protocols/test_openmm_equil_rfe_protocols.py index a9a502686..3445d7b2f 100644 --- a/openfe/tests/protocols/test_openmm_equil_rfe_protocols.py +++ b/openfe/tests/protocols/test_openmm_equil_rfe_protocols.py @@ -1241,10 +1241,103 @@ def test_constraints(tyk2_xml, tyk2_reference_xml): assert float(a.get('d')) == pytest.approx(float(b.get('d'))) -def test_reload_protocol_result(transformation_json): - d = json.loads(transformation_json, - cls=gufe.tokenization.JSON_HANDLER.decoder) +class TestProtocolResult: + @pytest.fixture() + def protocolresult(self, 
transformation_json): + d = json.loads(transformation_json, + cls=gufe.tokenization.JSON_HANDLER.decoder) - pr = openmm_rfe.RelativeHybridTopologyProtocolResult.from_dict(d['protocol_result']) + pr = openfe.ProtocolResult.from_dict(d['protocol_result']) - assert pr + return pr + + def test_reload_protocol_result(self, transformation_json): + d = json.loads(transformation_json, + cls=gufe.tokenization.JSON_HANDLER.decoder) + + pr = openmm_rfe.RelativeHybridTopologyProtocolResult.from_dict(d['protocol_result']) + + assert pr + + def test_get_estimate(self, protocolresult): + est = protocolresult.get_estimate() + + assert est + assert est.m == pytest.approx(-15.768768285032115) + assert isinstance(est, unit.Quantity) + assert est.is_compatible_with(unit.kilojoule_per_mole) + + def test_get_uncertainty(self, protocolresult): + est = protocolresult.get_uncertainty() + + assert est + assert est.m == pytest.approx(0.03662634237353985) + assert isinstance(est, unit.Quantity) + assert est.is_compatible_with(unit.kilojoule_per_mole) + + def test_get_individual(self, protocolresult): + inds = protocolresult.get_individual_estimates() + + assert isinstance(inds, list) + assert len(inds) == 3 + for e, u in inds: + assert e.is_compatible_with(unit.kilojoule_per_mole) + assert u.is_compatible_with(unit.kilojoule_per_mole) + + def test_get_forwards_etc(self, protocolresult): + far = protocolresult.get_forward_and_reverse_energy_analysis() + + assert isinstance(far, list) + far1 = far[0] + assert isinstance(far1, dict) + for k in ['fractions', 'forward_DGs', 'forward_dDGs', + 'reverse_DGs', 'reverse_dDGs']: + assert k in far1 + + if k == 'fractions': + assert isinstance(far1[k], np.ndarray) + else: + assert isinstance(far1[k], unit.Quantity) + assert far1[k].is_compatible_with(unit.kilojoule_per_mole) + + def test_get_overlap_matrices(self, protocolresult): + ovp = protocolresult.get_overlap_matrices() + + assert isinstance(ovp, list) + assert len(ovp) == 3 + + ovp1 = ovp[0] + 
assert isinstance(ovp1['matrix'], np.ndarray) + assert ovp1['matrix'].shape == (11,11) + + def test_get_replica_transition_statistics(self, protocolresult): + rpx = protocolresult.get_replica_transition_statistics() + + assert isinstance(rpx, list) + assert len(rpx) == 3 + rpx1 = rpx[0] + assert 'eigenvalues' in rpx1 + assert 'matrix' in rpx1 + assert rpx1['eigenvalues'].shape == (11,) + assert rpx1['matrix'].shape == (11, 11) + + def test_get_replica_states(self, protocolresult): + rep = protocolresult.get_replica_states() + + assert isinstance(rep, list) + assert len(rep) == 3 + assert rep[0].shape == (6, 11) + + def test_equilibration_iterations(self, protocolresult): + eq = protocolresult.equilibration_iterations() + + assert isinstance(eq, list) + assert len(eq) == 3 + assert all(isinstance(v, float) for v in eq) + + def test_production_iterations(self, protocolresult): + prod = protocolresult.production_iterations() + + assert isinstance(prod, list) + assert len(prod) == 3 + assert all(isinstance(v, float) for v in prod) diff --git a/openfe/tests/setup/test_network_planning.py b/openfe/tests/setup/test_network_planning.py index 7e1e38e0d..30dc78c01 100644 --- a/openfe/tests/setup/test_network_planning.py +++ b/openfe/tests/setup/test_network_planning.py @@ -76,7 +76,8 @@ def scorer(mapping): assert len(network.edges) == len(others) for edge in network.edges: - assert len(edge.componentA_to_componentB) > 1 # we didn't take the bad mapper + # we didn't take the bad mapper + assert len(edge.componentA_to_componentB) > 1 assert 'score' in edge.annotations assert edge.annotations['score'] == len(edge.componentA_to_componentB) @@ -196,7 +197,8 @@ def test_minimal_spanning_network(minimal_spanning_network, toluene_vs_others): tol, others = toluene_vs_others assert len(minimal_spanning_network.nodes) == len(others) + 1 for edge in minimal_spanning_network.edges: - assert edge.componentA_to_componentB != {0: 0} # lomap should find something + assert 
edge.componentA_to_componentB != { + 0: 0} # lomap should find something def test_minimal_spanning_network_connectedness(minimal_spanning_network): @@ -245,6 +247,106 @@ def scorer(mapping): ) +@pytest.fixture(scope='session') +def minimal_redundant_network(toluene_vs_others): + toluene, others = toluene_vs_others + mappers = [BadMapper(), openfe.setup.atom_mapping.LomapAtomMapper()] + + def scorer(mapping): + return len(mapping.componentA_to_componentB) + + network = openfe.setup.ligand_network_planning.generate_minimal_redundant_network( + ligands=others + [toluene], + mappers=mappers, + scorer=scorer, + mst_num=2 + ) + return network + + +def test_minimal_redundant_network(minimal_redundant_network, toluene_vs_others): + tol, others = toluene_vs_others + + # test for correct number of nodes + assert len(minimal_redundant_network.nodes) == len(others) + 1 + + # test for correct number of edges + assert len(minimal_redundant_network.edges) == 2 * \ + (len(minimal_redundant_network.nodes) - 1) + + for edge in minimal_redundant_network.edges: + assert edge.componentA_to_componentB != { + 0: 0} # lomap should find something + + +def test_minimal_redundant_network_connectedness(minimal_redundant_network): + found_pairs = set() + for edge in minimal_redundant_network.edges: + pair = frozenset([edge.componentA, edge.componentB]) + assert pair not in found_pairs + found_pairs.add(pair) + + assert nx.is_connected(nx.MultiGraph(minimal_redundant_network.graph)) + + +def test_redundant_vs_spanning_network(minimal_redundant_network, minimal_spanning_network): + # when setting minimal redundant network to only take one MST, it should have as many + # edges as the regular minimum spanning network + assert 2 * len(minimal_spanning_network.edges) == len( + minimal_redundant_network.edges) + + +def test_minimal_redundant_network_edges(minimal_redundant_network): + # issue #244, this was previously giving non-reproducible (yet valid) + # networks when scores were tied. 
+ edge_ids = sorted( + (edge.componentA.name, edge.componentB.name) + for edge in minimal_redundant_network.edges + ) + ref = sorted([ + ('1,3,7-trimethylnaphthalene', '2,6-dimethylnaphthalene'), + ('1,3,7-trimethylnaphthalene', '2-methyl-6-propylnaphthalene'), + ('1-butyl-4-methylbenzene', '2,6-dimethylnaphthalene'), + ('1-butyl-4-methylbenzene', '2-methyl-6-propylnaphthalene'), + ('1-butyl-4-methylbenzene', 'toluene'), + ('2,6-dimethylnaphthalene', '2-methyl-6-propylnaphthalene'), + ('2,6-dimethylnaphthalene', '2-methylnaphthalene'), + ('2,6-dimethylnaphthalene', '2-naftanol'), + ('2,6-dimethylnaphthalene', 'methylcyclohexane'), + ('2,6-dimethylnaphthalene', 'toluene'), + ('2-methyl-6-propylnaphthalene', '2-methylnaphthalene'), + ('2-methylnaphthalene', '2-naftanol'), + ('2-methylnaphthalene', 'methylcyclohexane'), + ('2-methylnaphthalene', 'toluene') + ]) + + assert len(edge_ids) == len(ref) + assert edge_ids == ref + + +def test_minimal_redundant_network_redundant(minimal_redundant_network): + # test that each node is connected to 2 edges. 
+ network = minimal_redundant_network + for node in network.nodes: + assert len(network.graph.in_edges(node)) + \ + len(network.graph.out_edges(node)) >= 2 + + +def test_minimal_redundant_network_unreachable(toluene_vs_others): + toluene, others = toluene_vs_others + nimrod = openfe.SmallMoleculeComponent(mol_from_smiles("N")) + + def scorer(mapping): + return len(mapping.componentA_to_componentB) + + with pytest.raises(RuntimeError, match="Unable to create edges"): + network = openfe.setup.ligand_network_planning.generate_minimal_redundant_network( + ligands=others + [toluene, nimrod], + mappers=[openfe.setup.atom_mapping.LomapAtomMapper()], + scorer=scorer + ) + + def test_network_from_names(atom_mapping_basic_test_files): ligs = list(atom_mapping_basic_test_files.values()) @@ -366,10 +468,12 @@ def test_network_from_external(file_fixture, loader, request, expected_edges = { (benzene_modifications['benzene'], benzene_modifications['toluene']), (benzene_modifications['benzene'], benzene_modifications['phenol']), - (benzene_modifications['benzene'], benzene_modifications['benzonitrile']), + (benzene_modifications['benzene'], + benzene_modifications['benzonitrile']), (benzene_modifications['benzene'], benzene_modifications['anisole']), (benzene_modifications['benzene'], benzene_modifications['styrene']), - (benzene_modifications['benzene'], benzene_modifications['benzaldehyde']), + (benzene_modifications['benzene'], + benzene_modifications['benzaldehyde']), } actual_edges = {(e.componentA, e.componentB) for e in list(network.edges)} @@ -423,7 +527,6 @@ def test_bad_orion_network(benzene_modifications, tmpdir): ) - BAD_EDGES = """\ 1c91235:9c91235 benzene -> toluene 1c91235:7876633 benzene -> phenol diff --git a/openfe/utils/network_plotting.py b/openfe/utils/network_plotting.py index 09bc657b1..ad767dc73 100644 --- a/openfe/utils/network_plotting.py +++ b/openfe/utils/network_plotting.py @@ -16,12 +16,12 @@ from matplotlib.patches import Rectangle from 
matplotlib.lines import Line2D -from typing import Dict, List, Tuple, Optional, Any, Union, cast +from typing import Optional, Any, Union, cast from openfe.utils.custom_typing import ( MPL_MouseEvent, MPL_FigureCanvasBase, MPL_Axes, TypeAlias ) -ClickLocation: TypeAlias = Tuple[Tuple[float, float], Tuple[Any, Any]] +ClickLocation: TypeAlias = tuple[tuple[float, float], tuple[Any, Any]] class Node: @@ -54,14 +54,14 @@ def register_artist(self, ax: MPL_Axes): ax.add_patch(self.artist) @property - def extent(self) -> Tuple[float, float, float, float]: + def extent(self) -> tuple[float, float, float, float]: """extent of this node in matplotlib data coordinates""" bounds = self.artist.get_bbox().bounds return (bounds[0], bounds[0] + bounds[2], bounds[1], bounds[1] + bounds[3]) @property - def xy(self) -> Tuple[float, float]: + def xy(self) -> tuple[float, float]: """lower left (matplotlib data coordinates) position of this node""" return self.artist.xy @@ -153,14 +153,14 @@ class Edge: """ pickable = True - def __init__(self, node_artist1: Node, node_artist2: Node, data: Dict): + def __init__(self, node_artist1: Node, node_artist2: Node, data: dict): self.data = data self.node_artists = [node_artist1, node_artist2] self.artist = self._make_artist(node_artist1, node_artist2, data) self.picked = False def _make_artist(self, node_artist1: Node, node_artist2: Node, - data: Dict) -> Any: + data: dict) -> Any: xs, ys = self._edge_xs_ys(node_artist1, node_artist2) return Line2D(xs, ys, color='black', picker=True, zorder=-1) @@ -238,7 +238,7 @@ class EventHandler: selected : Optional[Union[Node, Edge]] Object selected by a mouse click (after mouse is up), or None if no object has been selected in the graph. 
- click_location : Optional[Tuple[int, int]] + click_location : Optional[tuple[Optional[float], Optional[float]]] Cached location of the mousedown event, or None if mouse is up connections : List[int] list of IDs for connections to matplotlib canvas @@ -247,15 +247,15 @@ def __init__(self, graph: GraphDrawing): self.graph = graph self.active: Optional[Union[Node, Edge]] = None self.selected: Optional[Union[Node, Edge]] = None - self.click_location: Optional[Tuple[int, int]] = None - self.connections: List[int] = [] + self.click_location: Optional[tuple[Optional[float], Optional[float]]] = None + self.connections: list[int] = [] def connect(self, canvas: MPL_FigureCanvasBase): """Connect our methods to events in the matplotlib canvas""" self.connections.extend([ - canvas.mpl_connect('button_press_event', self.on_mousedown), - canvas.mpl_connect('motion_notify_event', self.on_drag), - canvas.mpl_connect('button_release_event', self.on_mouseup), + canvas.mpl_connect('button_press_event', self.on_mousedown), # type: ignore + canvas.mpl_connect('motion_notify_event', self.on_drag), # type: ignore + canvas.mpl_connect('button_release_event', self.on_mouseup), # type: ignore ]) def disconnect(self, canvas: MPL_FigureCanvasBase): @@ -346,8 +346,8 @@ def __init__(self, graph: nx.Graph, positions=None, ax=None): # TODO: use scale to scale up the positions? 
self.event_handler = EventHandler(self) self.graph = graph - self.nodes: Dict[Node, Any] = {} - self.edges: Dict[Tuple[Node, Node], Any] = {} + self.nodes: dict[Node, Any] = {} + self.edges: dict[tuple[Node, Node], Any] = {} if positions is None: positions = nx.nx_agraph.graphviz_layout(self.graph, prog='neato') @@ -378,7 +378,7 @@ def __init__(self, graph: nx.Graph, positions=None, ax=None): def _ipython_display_(self): # -no-cov- return self.fig - def edges_for_node(self, node: Node) -> List[Edge]: + def edges_for_node(self, node: Node) -> list[Edge]: """List of edges for the given node""" edges = (list(self.graph.in_edges(node)) + list(self.graph.out_edges(node))) @@ -410,7 +410,7 @@ def draw(self): self.fig.canvas.draw() self.fig.canvas.flush_events() - def _register_node(self, node: Any, position: Tuple[float, float]): + def _register_node(self, node: Any, position: tuple[float, float]): """Create and register ``Node`` from NetworkX node and position""" if node in self.nodes: raise RuntimeError("node provided multiple times") @@ -419,7 +419,7 @@ def _register_node(self, node: Any, position: Tuple[float, float]): self.nodes[node] = draw_node draw_node.register_artist(self.ax) - def _register_edge(self, edge: Tuple[Node, Node, Dict]): + def _register_edge(self, edge: tuple[Node, Node, dict]): """Create and register ``Edge`` from NetworkX edge information""" node1, node2, data = edge draw_edge = self.EdgeCls(self.nodes[node1], self.nodes[node2], data) diff --git a/openfecli/commands/gather.py b/openfecli/commands/gather.py index 7b9e466e0..704a14e29 100644 --- a/openfecli/commands/gather.py +++ b/openfecli/commands/gather.py @@ -107,8 +107,10 @@ def _get_ddgs(legs): if not ((DG1_mag is None) or (DG2_mag is None)): DDGhyd = (DG1_mag - DG2_mag).m hyd_unc = np.sqrt(np.sum(np.square([DG1_unc.m, DG2_unc.m]))) - else: # -no-cov- - raise RuntimeError(f"Unknown DDG type for {vals}") + else: + raise RuntimeError("Unable to determine type of RFE calculation " + f"for edges 
with labels {list(vals)} for " + f"ligands {ligpair}") DDGs.append((*ligpair, DDGbind, bind_unc, DDGhyd, hyd_unc)) diff --git a/openfecli/tests/commands/test_gather.py b/openfecli/tests/commands/test_gather.py index 416bcfec7..ca71e6acc 100644 --- a/openfecli/tests/commands/test_gather.py +++ b/openfecli/tests/commands/test_gather.py @@ -2,6 +2,7 @@ import glob from importlib import resources import tarfile +import pathlib import pytest from openfecli.commands.gather import ( @@ -106,3 +107,15 @@ def test_gather(results_dir, report): assert set(expected.split(b'\n')) == actual_lines + +def test_missing_leg_error(results_dir): + file_to_remove = "easy_rbfe_lig_ejm_31_complex_lig_ejm_42_complex.json" + (pathlib.Path("results") / file_to_remove).unlink() + + runner = CliRunner() + result = runner.invoke(gather, ['results'] + ['-o', '-']) + assert result.exit_code == 1 + assert isinstance(result.exception, RuntimeError) + assert "labels ['solvent']" in str(result.exception) + assert "'lig_ejm_31'" in str(result.exception) + assert "'lig_ejm_42'" in str(result.exception)