From 01934770c594bfc0bcab2c168176e5541844ee44 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Thu, 26 Sep 2024 12:23:58 +0000
Subject: [PATCH] Deployed e3be522 with MkDocs version: 1.6.1
We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
+We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community.
+Examples of behavior that contributes to a positive environment for our +community include:
+Examples of unacceptable behavior include:
+Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful.
+Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate.
+This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +s.ciarella@esciencecenter.nl. +All complaints will be reviewed and investigated promptly and fairly.
+All community leaders are obligated to respect the privacy and security of the +reporter of any incident.
+Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct:
+Community Impact: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community.
+Consequence: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested.
+Community Impact: A violation through a single incident or series +of actions.
+Consequence: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban.
+Community Impact: A serious violation of community standards, including +sustained inappropriate behavior.
+Consequence: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban.
+Community Impact: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals.
+Consequence: A permanent ban from any sort of public interaction within +the community.
+This Code of Conduct is adapted from the Contributor Covenant, +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+Community Impact Guidelines were inspired by Mozilla's code of conduct +enforcement ladder.
+For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations.
Welcome! SpeckCn2 is an open-source project for the analysis of speckle patterns. If you're trying SpeckCn2 with your data, your experience, questions, bugs you encountered, and suggestions for improvement are important to the success of the project.
+We have a Code of Conduct, please follow it in all your interactions with the project.
+Use the search function to see if someone else already ran across the same issue. Feel free to open a new issue here to ask a question, suggest improvements/new features, or report any bugs that you ran into.
+Even better than a good bug report is a fix for the bug or the implementation of a new feature. We welcome any contributions that help improve the code.
+When contributing to this repository, please first discuss the change you wish to make via an issue with the owners of this repository before making a change.
+Contributions can come in the form of:
+We use the usual GitHub pull request flow. For more info see GitHub's own documentation.
+Typically this means:
+One of the code owners will review your code and request changes if needed. Once your changes have been approved, your contributions will become part of SpeckCn2. 🎉
+SpeckCn2 targets Python 3.9 or newer.
Clone the repository into the speckcn2 directory:
git clone https://github.com/MALES-project/SpeckleCn2Profiler speckcn2
+
Install using virtualenv:
cd speckcn2
+python3 -m venv env
+source env/bin/activate
+python3 -m pip install -e .[develop]
+
Alternatively, install using Conda:
+cd speckcn2
+conda create -n speckcn2 python=3.10
+conda activate speckcn2
+pip install -e .[develop]
+
SpeckCn2 uses pytest to run the tests. You can run the tests for yourself using:
+pytest
+
To check coverage:
+coverage run -m pytest
+coverage report # to output to terminal
+coverage html # to generate html report
+
The documentation is written in markdown, and uses mkdocs to generate the pages.
+To build the documentation for yourself:
+pip install -e .[docs]
+mkdocs serve
+
You can find the documentation source in the docs directory.
If you are adding new pages, make sure to update the listing in the mkdocs.yml under the nav entry.
Make a new release.
Under 'Choose a tag', set the tag to the new version. The versioning scheme we use is SemVer, so bump the version (major/minor/patch) as needed. Bumping the version is handled transparently by bumpversion in this workflow.
The upload to PyPI is triggered when a release is published and handled by this workflow.
The upload to Zenodo is triggered when a release is published.
+This module provides utility functions for loading and saving model +configurations and states.
+It includes functions to load configuration files, save model states, +load model states, and load the latest model state from a directory.
+ + + +load(model, datadirectory, epoch)
+
+Load the model state and the model itself from a specified directory and +epoch.
+This function loads the model's state dictionary and other relevant information +such as epoch, loss, validation loss, and time from a file in the specified directory.
+ + +Parameters:
+model
+ (Module
)
+ –
+ The model to load
+datadirectory
+ (str
)
+ –
+ The directory where the data is stored
+epoch
+ (int
)
+ –
+ The epoch of the model
+src/speckcn2/io.py
65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 |
|
load_config(config_file_path)
+
+Load the configuration file from a given path.
+This function reads a YAML configuration file and returns its contents as a dictionary.
+ + +Parameters:
+config_file_path
+ (str
)
+ –
+ Path to the .yaml configuration file
+Returns:
+config
( dict
+) –
+ Dictionary containing the configuration
+src/speckcn2/io.py
18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 |
|
load_model_state(model, datadirectory)
+
+Loads the latest model state from the given directory.
+This function checks the specified directory for the latest model state file, +loads it, and updates the model with the loaded state. If no state is found, +it initializes the model state.
+ + +Parameters:
+model
+ (Module
)
+ –
+ The model to load the state into
+datadirectory
+ (str
)
+ –
+ The directory where the model states are stored
+Returns:
+model
( Module
+) –
+ The model with the loaded state
+last_model_state
( int
+) –
+ The number of the last model state
+src/speckcn2/io.py
94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 |
|
save(model, datadirectory)
+
+Save the model state and the model itself to a specified directory.
+This function saves the model's state dictionary and other relevant information +such as epoch, loss, validation loss, and time to a file in the specified directory.
+ + +Parameters:
+model
+ (Module
)
+ –
+ The model to save
+datadirectory
+ (str
)
+ –
+ The directory where the data is stored
+src/speckcn2/io.py
38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 |
|
This module implements various loss functions for training machine learning +models.
+It includes custom loss functions that extend PyTorch's nn.Module, +allowing for flexible and efficient computation of loss values during +training. The loss functions handle different scenarios such as +classification, regression, and segmentation tasks. They incorporate +techniques like weighted losses, focal losses, and smooth L1 losses to +address class imbalances and improve model performance. The module +ensures that the loss calculations are compatible with PyTorch's +autograd system, enabling seamless integration into training loops.
+ + + +ComposableLoss(config, nz, device)
+
+
+ Bases: Module
Compose the loss function using several terms. The importance of each +term has to be specified in the configuration file. Each term with a >0 +weight will be added to the loss function.
+The loss term available are: +- MSE: mean squared error between predicted and target normalized screen tags +- MAE: mean absolute error between predicted and target normalized screen tags +- JMSE: mean squared error between predicted and target J +- JMAE: mean absolute error between predicted and target J +- Pearson: Pearson correlation coefficient between predicted and target J +- Fried: Fried parameter r0 +- Isoplanatic: Isoplanatic angle theta0 +- Rytov: Rytov variance sigma_r^2 that will be computed on log averaged Cn2 +- Scintillation_w: scintillation index for weak turbulence +- Scintillation_m: scintillation index for moderate-strong turbulence
+ + +Parameters:
+config
+ (dict
)
+ –
+ Dictionary containing the configuration
+nz
+ (Normalizer
)
+ –
+ Normalizer object to be used to extract J in its original scale
+src/speckcn2/loss.py
49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 |
|
forward(pred, target)
+
+Forward pass of the loss function.
+ + +Parameters:
+pred
+ (Tensor
)
+ –
+ The predicted screen tags
+target
+ (Tensor
)
+ –
+ The target screen tags
+Returns:
+loss
( Tensor
+) –
+ The composed loss
+losses
( dict
+) –
+ Dictionary containing the individual losses
+src/speckcn2/loss.py
109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 |
|
get_FriedParameter(Jnorm)
+
+Compute the Fried parameter r0 from the screen tags.
+ +src/speckcn2/loss.py
301 +302 +303 +304 |
|
get_IsoplanaticAngle(Cn2)
+
+Compute the isoplanatic angle theta0 from the screen tags.
+ +src/speckcn2/loss.py
334 +335 +336 +337 +338 +339 +340 |
|
get_J(Jnorm)
+
+Recover J from the normalized tags. This needs to be done to compute +Cn2.
+ + +Parameters:
+Jnorm
+ (Tensor
)
+ –
+ The normalized screen tags between 0 and 1
+Returns:
+J
( Tensor
+) –
+ The recovered screen tags
+src/speckcn2/loss.py
153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 |
|
get_ScintillationModerateStrong(x)
+
+Compute the scintillation index for moderate-strong turbulence +sigma^2 from the screen tags.
+ +src/speckcn2/loss.py
419 +420 +421 +422 +423 +424 |
|
get_ScintillationWeak(Cn)
+
+Compute the scintillation index for weak turbulence sigma^2 from the +screen tags.
+ +src/speckcn2/loss.py
381 +382 +383 +384 +385 +386 +387 +388 |
|
reconstruct_cn2(Jnorm)
+
+Reconstruct Cn2 from screen tags +c_i = J_i / (h[i+1] - h[i])
+ + +Parameters:
+Jnorm
+ (Tensor
)
+ –
+ The screen tags normalized between 0 and 1
+Returns:
+Cn2
( Tensor
+) –
+ The Cn2 reconstructed from the screen tags, assuming a uniform profile
+src/speckcn2/loss.py
182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 |
|
score(model, test_set, device, criterion, normalizer, nimg_plot=100)
+
+Tests the model.
+ + +Parameters:
+model
+ (Module
)
+ –
+ The model to test
+test_set
+ (list
)
+ –
+ The testing set
+device
+ (device
)
+ –
+ The device to use
+criterion
+ (ComposableLoss
)
+ –
+ The composable loss function, where I can access useful parameters
+normalizer
+ (Normalizer
)
+ –
+ The normalizer used to recover the tags
+nimg_plot
+ (int
, default:
+ 100
+)
+ –
+ Number of images to plot
+Returns:
+test_tags
( list
+) –
+ List of all the predicted tags of the test set
+test_losses
( list
+) –
+ List of all the losses of the test set
+test_measures
( list
+) –
+ List of all the measures of the test set
+test_cn2_pred
( list
+) –
+ List of all the predicted Cn2 profiles of the test set
+test_cn2_true
( list
+) –
+ List of all the true Cn2 profiles of the test set
+test_recovered_tag_pred
( list
+) –
+ List of all the recovered tags from the model prediction
+test_recovered_tag_true
( list
+) –
+ List of all the recovered tags
+src/speckcn2/mlops.py
120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 |
|
train(model, last_model_state, conf, train_set, test_set, device, optimizer, criterion)
+
+Trains the model for the given number of epochs.
+ + +Parameters:
+model
+ (Module
)
+ –
+ The model to train
+last_model_state
+ (int
)
+ –
+ The number of the last model state
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+train_set
+ (list
)
+ –
+ The training set
+test_set
+ (list
)
+ –
+ The testing set
+device
+ (device
)
+ –
+ The device to use
+optimizer
+ (optim
)
+ –
+ The optimizer to use
+criterion
+ (ComposableLoss
)
+ –
+ The loss function to use
+Returns:
+model
( Module
+) –
+ The trained model
+average_loss
( float
+) –
+ The average loss of the last epoch
+src/speckcn2/mlops.py
17 + 18 + 19 + 20 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 |
|
This module contains the definition of the EnsembleModel class and a +setup_model function.
+The EnsembleModel class is a wrapper that allows any model to be used +for ensembled data. The setup_model function initializes and returns a +model based on the provided configuration.
+ + + +EnsembleModel(conf, device)
+
+
+ Bases: Module
Wrapper that allows any model to be used for ensembled data.
+ + + +Parameters:
+conf
+ (dict
)
+ –
+ The global configuration containing the model parameters.
+device
+ (device
)
+ –
+ The device to use
+src/speckcn2/mlmodels.py
25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 |
|
apply_noise(image_tensor)
+
+Processes a tensor of 2D images.
+ + +Parameters:
+image_tensor
+ (Tensor
)
+ –
+ Tensor of 2D images with shape (batch, channels, width, height).
+Returns:
+processed_tensor
( Tensor
+) –
+ Tensor of processed 2D images.
+src/speckcn2/mlmodels.py
108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 |
|
create_masks(resolution)
+
+Creates the masks for the circular aperture and the spider.
+ + +Parameters:
+resolution
+ (int
)
+ –
+ Resolution of the images.
+Returns:
+mask_D
( Tensor
+) –
+ Mask for the circular aperture.
+mask_d
( Tensor
+) –
+ Mask for the central obscuration.
+mask_X
( Tensor
+) –
+ Mask for the horizontal spider.
+mask_Y
( Tensor
+) –
+ Mask for the vertical spider.
+src/speckcn2/mlmodels.py
159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 |
|
forward(model, batch_ensemble)
+
+Forward pass through the model.
+ + +Parameters:
+model
+ (Module
)
+ –
+ The model to use
+batch_ensemble
+ (list
)
+ –
+ Each element is a batch of an ensemble of samples.
+src/speckcn2/mlmodels.py
55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 |
|
get_a_resnet(config)
+
+Returns a pretrained ResNet model, with the last layer corresponding to +the number of screens.
+ + +Parameters:
+config
+ (dict
)
+ –
+ Dictionary containing the configuration
+Returns:
+model
( Module
+) –
+ The model with the loaded state
+last_model_state
( int
+) –
+ The number of the last model state
+src/speckcn2/mlmodels.py
226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 |
|
get_scnn(config)
+
+Returns a pretrained Spherical-CNN model, with the last layer +corresponding to the number of screens.
+ +src/speckcn2/mlmodels.py
287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 |
|
setup_model(config)
+
+Returns the model specified in the configuration file, with the last +layer corresponding to the number of screens.
+ + +Parameters:
+config
+ (dict
)
+ –
+ Dictionary containing the configuration
+Returns:
+model
( Module
+) –
+ The model with the loaded state
+last_model_state
( int
+) –
+ The number of the last model state
+src/speckcn2/mlmodels.py
196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 |
|
This module defines the Normalizer class, which handles the normalization of +images and tags based on a given configuration.
+The class includes methods to precompile normalization functions, +normalize images and tags, and define specific normalization strategies +such as Z-score and uniform normalization. The normalization process +involves replacing NaN values, creating masks, and scaling values to a +specified range. The module also provides functions to recover the +original values from the normalized data. The Normalizer class ensures +that both images and tags are consistently normalized according to the +specified configuration, facilitating further processing and analysis.
+ + + +Normalizer(conf)
+
+Class to handle the normalization of images and tags.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+src/speckcn2/normalizer.py
30 +31 |
|
normalize_imgs_and_tags(all_images, all_tags, all_ensemble_ids)
+
+Normalize both the input images and the tags to be between 0 and 1.
+ + +Parameters:
+all_images
+ (list
)
+ –
+ List of all images
+all_tags
+ (list
)
+ –
+ List of all tags
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+Returns:
+dataset
( list
+) –
+ List of tuples (image, normalized_tag)
+src/speckcn2/normalizer.py
67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 |
|
plot_histo_losses(conf, test_losses, data_dir)
+
+Plots the histogram of the losses.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+test_losses
+ (list[dict]
)
+ –
+ List of all the losses of the test set
+data_dir
+ (str
)
+ –
+ The directory where the data is stored
+src/speckcn2/plots.py
186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 |
|
plot_loss(conf, model, data_dir)
+
+Plots the loss of the model.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+model
+ (Module
)
+ –
+ The model to plot the loss of
+data_dir
+ (str
)
+ –
+ The directory where the data is stored
+src/speckcn2/plots.py
126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 |
|
plot_param_histo(conf, test_losses, data_dir, measures)
+
+Plots the histograms of different parameters.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+test_losses
+ (list[dict]
)
+ –
+ List of all the losses of the test set
+data_dir
+ (str
)
+ –
+ The directory where the data is stored
+measures
+ (list
)
+ –
+ The measures of the model
+src/speckcn2/plots.py
327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 |
|
plot_param_vs_loss(conf, test_losses, data_dir, measures)
+
+Plots the parameter vs the loss. Optionally, it also plots the detailed +histo for all the bins for the desired metrics.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+test_losses
+ (list[dict]
)
+ –
+ List of all the losses of the test set
+data_dir
+ (str
)
+ –
+ The directory where the data is stored
+measures
+ (list
)
+ –
+ The measures of the model
+src/speckcn2/plots.py
221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 |
|
plot_time(conf, model, data_dir)
+
+Plots the time per epoch of the model.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+model
+ (Module
)
+ –
+ The model to plot the loss of
+data_dir
+ (str
)
+ –
+ The directory where the data is stored
+src/speckcn2/plots.py
157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 |
|
score_plot(conf, inputs, tags, loss, losses, i, counter, measures, Cn2_pred, Cn2_true, recovered_tag_pred, recovered_tag_true)
+
+Plots side by side: +- [0:Nensemble] the input images (single or ensemble) +- [-3] the predicted/exact tags J +- [-2] the Cn2 profile +- [-1] the different information of the loss +normalize value in model units.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+inputs
+ (Tensor
)
+ –
+ The input speckle patterns
+tags
+ (list
)
+ –
+ The exact tags of the data
+loss
+ (Tensor
)
+ –
+ The total loss of the model (for this prediction)
+losses
+ (dict
)
+ –
+ The individual losses of the model
+i
+ (int
)
+ –
+ The batch index of the image
+counter
+ (int
)
+ –
+ The global index of the image
+measures
+ (dict
)
+ –
+ The different measures of the model
+Cn2_pred
+ (Tensor
)
+ –
+ The predicted Cn2 profile
+Cn2_true
+ (Tensor
)
+ –
+ The true Cn2 profile
+recovered_tag_pred
+ (Tensor
)
+ –
+ The predicted tags
+recovered_tag_true
+ (Tensor
)
+ –
+ The true tags
+src/speckcn2/plots.py
11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 |
|
average_speckle_input(conf, test_set, device, model, criterion, n_ensembles_to_plot=100)
+
+Test to see if averaging the speckle patterns (before the prediction) +improves the results. This function is then going to plot the relative +error over the screen tags and the Fried parameter to make this evaluation.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+test_set
+ (list
)
+ –
+ The test set
+device
+ (device
)
+ –
+ The device to use
+model
+ (Torch
)
+ –
+ The trained model
+criterion
+ (ComposableLoss
)
+ –
+ The loss function
+n_ensembles_to_plot
+ (int
, default:
+ 100
+)
+ –
+ The number of ensembles to plot
+src/speckcn2/postprocess.py
268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 |
|
average_speckle_output(conf, test_set, device, model, criterion, n_ensembles_to_plot=100)
+
+Test to see if averaging the prediction of multiple speckle patterns +improves the results. This function is then going to plot the relative +error over the screen tags and the Fried parameter to make this evaluation.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+test_set
+ (list
)
+ –
+ The test set
+device
+ (device
)
+ –
+ The device to use
+model
+ (Torch
)
+ –
+ The trained model
+criterion
+ (ComposableLoss
)
+ –
+ The loss function
+n_ensembles_to_plot
+ (int
, default:
+ 100
+)
+ –
+ The number of ensembles to plot
+src/speckcn2/postprocess.py
139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 |
|
tags_distribution(conf, train_set, test_tags, device, rescale=False, recover_tag=None)
+
+Plots the distribution of the tags.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+train_set
+ (list
)
+ –
+ The training set
+test_tags
+ (Tensor
)
+ –
+ The predicted tags for the test dataset
+device
+ (device
)
+ –
+ The device to use
+data_directory
+ (str
)
+ –
+ The directory where the data is stored
+rescale
+ (bool
, default:
+ False
+)
+ –
+ Whether to rescale the tags using recover_tag() or leave them between 0 and 1
+recover_tag
+ (list
, default:
+ None
+)
+ –
+ List of functions to recover each tag
+src/speckcn2/postprocess.py
19 + 20 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 |
|
This module contains functions for training and evaluating a neural network +model using PyTorch. It includes the following key components:
+train
: Trains the model for a specified number of epochs, logs training
+ and validation losses, and saves the model state at specified intervals.score
: Evaluates the model on a test dataset, calculates various metrics,
+ and generates plots for a specified number of test samples.The module relies on several external utilities and models from the speckcn2
+package, including EnsembleModel
, ComposableLoss
, and Normalizer
.
assemble_transform(conf)
+
+Assembles the transformation to apply to each image.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+Returns:
+transform
( Compose
+) –
+ Transformation to apply to the images
+src/speckcn2/preprocess.py
31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 |
|
create_average_dataset(dataset, average_size)
+
+Creates a dataset of averages from a dataset of single images. The +averages are created by grouping together average_size images.
+ + +Parameters:
+dataset
+ (list
)
+ –
+ List of single images
+average_size
+ (int
)
+ –
+ The number of images that will be averaged together
+Returns:
+average_dataset
( list
+) –
+ List of averages
+src/speckcn2/preprocess.py
388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 +417 +418 +419 +420 +421 +422 +423 +424 +425 +426 +427 +428 +429 +430 +431 +432 +433 |
|
create_ensemble_dataset(dataset, ensemble_size)
+
+Creates a dataset of ensembles from a dataset of single images. The +ensembles are created by grouping together ensemble_size images. These +images will be used to train the model in parallel.
+ + +Parameters:
+dataset
+ (list
)
+ –
+ List of single images
+ensemble_size
+ (int
)
+ –
+ The number of images that will be processed together as an ensemble
+Returns:
+ensemble_dataset
( list
+) –
+ List of ensembles
+src/speckcn2/preprocess.py
458 +459 +460 +461 +462 +463 +464 +465 +466 +467 +468 +469 +470 +471 +472 +473 +474 +475 +476 +477 +478 +479 +480 +481 +482 +483 +484 +485 +486 +487 +488 +489 +490 +491 +492 +493 +494 +495 |
|
get_ensemble_dict(tag_files)
+
+Function to associate each Cn2 profile to an ensemble ID for parallel +processing.
+ + +Parameters:
+tag_files
+ (dict
)
+ –
+ Dictionary of image files and their corresponding tag files
+Returns:
+ensemble_dict
( dict
+) –
+ Dictionary of image files and their corresponding ensemble IDs
+src/speckcn2/preprocess.py
289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 |
|
get_tag_files(file_list, datadirectory)
+
+Function to check the existence of tag files for each image file.
+ + +Parameters:
+file_list
+ (list
)
+ –
+ List of image files
+datadirectory
+ (str
)
+ –
+ The directory containing the data
+Returns:
+tag_files
( dict
+) –
+ Dictionary of image files and their corresponding tag files
+src/speckcn2/preprocess.py
257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 |
|
imgs_as_single_datapoint(conf, nimg_print=5)
+
+Preprocesses the data by loading images and tags from the given +directory, applying a transformation to the images. Each image is treated +as a single data point.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+nimg_print
+ (int
, default:
+ 5
+)
+ –
+ Number of images to print
+Returns:
+all_images
( list
+) –
+ List of all images
+all_tags
( list
+) –
+ List of all tags
+all_ensemble_ids
( list
+) –
+ List of all ensemble ids, representing images from the same Cn2 profile
+src/speckcn2/preprocess.py
136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 |
|
prepare_data(conf, nimg_print=5)
+
+If not already available, preprocesses the data by loading images and +tags from the given directory, applying a transformation to the images.
+ + +Parameters:
+conf
+ (dict
)
+ –
+ Dictionary containing the configuration
+nimg_print
+ (int
, default:
+ 5
+)
+ –
+ Number of images to print
+Returns:
+all_images
( list
+) –
+ List of all images
+all_tags
( list
+) –
+ List of all tags
+all_ensemble_ids
( list
+) –
+ List of all ensemble ids, representing images from the same Cn2 profile
+src/speckcn2/preprocess.py
78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 |
|
print_average_info(dataset, average_size, ttsplit)
+
+Prints the information about the average dataset.
+ + +Parameters:
+dataset
+ (list
)
+ –
+ The average dataset
+average_size
+ (int
)
+ –
+ The number of images in each average
+ttsplit
+ (int
)
+ –
+ The train-test split
+src/speckcn2/preprocess.py
436 +437 +438 +439 +440 +441 +442 +443 +444 +445 +446 +447 +448 +449 +450 +451 +452 +453 +454 +455 |
|
print_dataset_info(dataset, ttsplit)
+
+Prints the information about the dataset.
+ + +Parameters:
+ + +src/speckcn2/preprocess.py
521 +522 +523 +524 +525 +526 +527 +528 +529 +530 +531 +532 +533 +534 |
|
print_ensemble_info(dataset, ensemble_size, ttsplit)
+
+Prints the information about the ensemble dataset.
+ + +Parameters:
+dataset
+ (list
)
+ –
+ The ensemble dataset
+ensemble_size
+ (int
)
+ –
+ The number of images in each ensemble
+ttsplit
+ (int
)
+ –
+ The train-test split
+src/speckcn2/preprocess.py
498 +499 +500 +501 +502 +503 +504 +505 +506 +507 +508 +509 +510 +511 +512 +513 +514 +515 +516 +517 +518 |
|
split_dataset(dataset, ttsplit)
+
+Splits the dataset into training and testing sets.
+ + +Parameters:
+ + + +Returns:
+ + +src/speckcn2/preprocess.py
537 +538 +539 +540 +541 +542 +543 +544 +545 +546 +547 +548 +549 +550 +551 +552 +553 +554 +555 +556 +557 |
|
train_test_split(all_images, all_tags, all_ensemble_ids, nz)
+
+Splits the data into training and testing sets.
+ + +Parameters:
+all_images
+ (list
)
+ –
+ List of images
+all_tags
+ (list
)
+ –
+ List of tags
+all_ensemble_ids
+ (list
)
+ –
+ List of ensemble ids
+nz
+ (Normalizer
)
+ –
+ The normalizer object to preprocess the data
+Returns:
+ + +src/speckcn2/preprocess.py
314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 |
|
This module defines a Steerable Convolutional Neural Network (SteerableCNN) +using the escnn library for equivariant neural networks. The primary class, +SteerableCNN, allows for the creation of a convolutional neural network that +handles various symmetries, making it useful for tasks requiring rotational +invariance.
+The module imports necessary libraries, including torch and escnn, and defines utility
+functions such as create_block
, compute_new_features
, create_pool
, and create_final_block
.
+These functions help construct convolutional blocks, calculate feature map sizes,
+create anti-aliased pooling layers, and build fully connected layers.
The SteerableCNN class initializes with a configuration dictionary and a symmetry parameter, +setting up parameters like kernel sizes, paddings, strides, and feature fields. +It determines the symmetry group and initializes the input type for the network.
+The network is built by iterating through specified kernel sizes, creating convolutional
+blocks and pooling layers, adding a group pooling layer for invariance, and creating final
+fully connected layers using create_final_block
.
The forward method processes the input tensor through the network, applying each +equivariant block, performing group pooling, and classifying the output using the fully connected layers.
+Overall, this module provides a flexible framework for building steerable +convolutional neural networks with configurable symmetries and architectures.
+ + + +create_final_block(config, n_initial, nscreens)
+
+Creates a fully connected neural network block based on a predefined +configuration.
+This function dynamically creates a sequence of PyTorch layers for a fully connected
+neural network. The configuration for the layers is read from a global config
dictionary
+which should contain a 'final_block' key with a list of layer configurations. Each layer
+configuration is a dictionary that must include a 'type' key with the name of the layer
+class (e.g., 'Linear', 'Dropout', etc.) and can include additional keys for the layer
+parameters.
The first 'Linear' layer in the configuration has its number of input features set to n_in
,
+and any 'Linear' layer with 'out_features' set to 'nscreens' has its number of output features
+set to nscreens
.
Args:
+ + +Parameters:
+config
+ (dict
)
+ –
+ The global configuration dictionary containing the layer configurations.
+n_initial
+ (int
)
+ –
+ The number of input features for the first 'Linear' layer.
+nscreens
+ (int
)
+ –
+ The number of output features for any 'Linear' layer with 'out_features' set to 'nscreens'.
+Returns:
+torch.nn.Sequential: A sequential container of the configured PyTorch layers.
+ –
+ src/speckcn2/scnn.py
57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 |
|
This module defines several image transformation classes using PyTorch and +NumPy.
+The PolarCoordinateTransform
class converts a Cartesian image to polar
+coordinates, which can be useful for certain types of image analysis.
+The ShiftRowsTransform
class shifts the rows of an image so that the row with the
+smallest sum is positioned at the bottom, which can help in aligning images for further processing.
+The ToUnboundTensor
class converts an image to a tensor without normalizing it,
+preserving the original pixel values.
+Lastly, the SpiderMask
class applies a circular mask to the image, simulating
+the effect of a spider by setting pixels outside the mask to a background value,
+which can be useful in certain experimental setups.
PolarCoordinateTransform()
+
+
+ Bases: Module
Transform a Cartesian image to polar coordinates.
+ +src/speckcn2/transformations.py
24 +25 |
|
forward(img)
+
+forward method of the transform +Args: + img (PIL Image or Tensor): Image to be scaled.
+Returns: + PIL Image or Tensor: Rescaled image.
+ +src/speckcn2/transformations.py
27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 |
|
ShiftRowsTransform()
+
+
+ Bases: Module
Shift the rows of an image such that the row with the smallest sum is at +the bottom.
+ +src/speckcn2/transformations.py
101 +102 |
|
SpiderMask()
+
+
+ Bases: Module
Apply a circular mask to the image, representing the effect of the +spider.
+The pixels outside the spider are set to -0.01, such that their +value is lower than no light in the detector (0).
+ +src/speckcn2/transformations.py
139 +140 |
|
ToUnboundTensor()
+
+
+ Bases: Module
Transform the image into a tensor, but do not normalize it like +torchvision.ToTensor.
+ +src/speckcn2/transformations.py
122 +123 |
|
This module provides utility functions for image processing and model +optimization.
+It includes functions to plot original and preprocessed images along +with their tags, ensure the existence of specified directories, set up +optimizers based on configuration files, and create circular masks with +an inner "spider" circle removed. These utilities facilitate various +tasks in image analysis and machine learning model training.
+ + + +create_circular_mask_with_spider(resolution, bkg_value=0)
+
+Creates a circular mask with an inner "spider" circle removed.
+ + +Parameters:
+resolution
+ (int
)
+ –
+ The resolution of the square mask.
+bkg_value
+ (int
, default:
+ 0
+)
+ –
+ The background value to set for the masked areas. Defaults to 0.
+Returns:
+torch.Tensor : np.ndarray
+ –
+ A 2D tensor representing the mask.
+src/speckcn2/utils.py
110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 |
|
ensure_directory(data_directory)
+
+Ensure that the directory exists.
+ + +Parameters:
+data_directory
+ (str
)
+ –
+ The directory to ensure
+src/speckcn2/utils.py
72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 |
|
plot_preprocessed_image(image_orig, image, tags, counter, datadirectory, mname, file_name, polar=False)
+
+Plots the original and preprocessed image, and the tags.
+ + +Parameters:
+image_orig
+ (tensor
)
+ –
+ The original image
+image
+ (tensor
)
+ –
+ The preprocessed image
+tags
+ (tensor
)
+ –
+ The screen tags
+counter
+ (int
)
+ –
+ The counter of the image
+datadirectory
+ (str
)
+ –
+ The directory containing the data
+mname
+ (str
)
+ –
+ The name of the model
+file_name
+ (str
)
+ –
+ The name of the original image
+polar
+ (bool
, default:
+ False
+)
+ –
+ If the image is in polar coordinates, by default False
+src/speckcn2/utils.py
20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 |
|
setup_optimizer(config, model)
+
+Returns the optimizer specified in the configuration file.
+ + +Parameters:
+config
+ (dict
)
+ –
+ Dictionary containing the configuration
+model
+ (Module
)
+ –
+ The model to optimize
+Returns:
+optimizer
( Module
+) –
+ The optimizer with the loaded state
+src/speckcn2/utils.py
85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 |
|